# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution Strategy-related dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
class _AutoShardDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that shards the `Dataset` automatically.
This dataset takes in an existing dataset and tries to automatically figure
out how to shard the dataset in a multi-worker scenario. Currently, it uses
Grappler to walk up the dataset graph until it finds a reader dataset (e.g.
CSVDataset, TFRecordDataset), then inserts a ShardDataset op before that node
so that each worker only sees some files.
Args:
num_workers: Total number of workers to shard this dataset across.
index: The current worker index (out of the total number of workers) this
dataset is for.
Raises:
NotFoundError: If we cannot find a suitable reader dataset to begin
automatically sharding the dataset.
"""
def __init__(self, input_dataset, num_workers, index):
self._input_dataset = input_dataset
self._structure = input_dataset._element_structure # pylint: disable=protected-access
variant_tensor = ged_ops.experimental_auto_shard_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
num_workers=num_workers,
index=index,
**dataset_ops.flat_structure(self))
super(_AutoShardDataset, self).__init__(input_dataset, variant_tensor)
@property
def _element_structure(self):
return self._structure
def _AutoShardDatasetV1(input_dataset, num_workers, index):
return dataset_ops.DatasetV1Adapter(
_AutoShardDataset(input_dataset, num_workers, index))
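# Illustrative sketch only: the rewrite that `_AutoShardDataset` asks Grappler
# to perform is roughly equivalent to sharding the file names by hand before
# the reader. The file pattern below is a hypothetical placeholder and this
# helper is never called by the module.
def _example_manual_file_sharding(num_workers, index):
  """Illustrative only: each worker keeps every `num_workers`-th file."""
  files = dataset_ops.Dataset.list_files("/tmp/data/*.tfrecord", shuffle=False)
  files = files.shard(num_workers, index)
  # A reader dataset (e.g. TFRecordDataset) would then be built from `files`,
  # which is what the auto-shard rewrite arranges automatically.
  return files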
class _RebatchDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that divides the batch size by `num_workers`."""
def __init__(self, input_dataset, num_workers):
self._input_dataset = input_dataset
def recalculate_output_shapes(output_shapes):
"""Recalculates the output_shapes after dividing it by num_workers."""
if len(output_shapes) < 1:
raise ValueError(
"Input shape should have at least one dimension. "
"Perhaps your input dataset is not batched?")
output_dims = [d for d in output_shapes.dims]
output_dims[0] = (output_dims[0] + num_workers - 1) // num_workers
return tensor_shape.TensorShape(output_dims)
input_types = dataset_ops.get_legacy_output_types(self._input_dataset)
input_shapes = dataset_ops.get_legacy_output_shapes(self._input_dataset)
input_classes = dataset_ops.get_legacy_output_classes(self._input_dataset)
output_shapes = nest.map_structure(recalculate_output_shapes, input_shapes)
self._structure = structure.convert_legacy_structure(
input_types, output_shapes, input_classes)
variant_tensor = ged_ops.experimental_rebatch_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
num_workers=num_workers,
**dataset_ops.flat_structure(self))
super(_RebatchDataset, self).__init__(input_dataset, variant_tensor)
@property
def _element_structure(self):
return self._structure
_AutoShardDatasetV1.__doc__ = _AutoShardDataset.__doc__
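# Worked example for `_RebatchDataset` (illustrative numbers): with a leading
# (batch) output dimension of 64 and num_workers=5, the recalculated leading
# dimension is (64 + 5 - 1) // 5 = 13, i.e. the per-worker batch size is the
# ceiling of the global batch size divided by the number of workers.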
# End of tensorflow/python/data/experimental/ops/distribute.py (repo: tensorflow-master)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for reader Datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import functools
import numpy as np
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import error_ops
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.experimental.ops import parsing_ops
from tensorflow.python.data.experimental.ops import shuffle_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.data.util import convert
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.platform import gfile
from tensorflow.python.util.tf_export import tf_export
_ACCEPTABLE_CSV_TYPES = (dtypes.float32, dtypes.float64, dtypes.int32,
dtypes.int64, dtypes.string)
def _is_valid_int32(str_val):
try:
# Checks equality to prevent int32 overflow
return dtypes.int32.as_numpy_dtype(str_val) == dtypes.int64.as_numpy_dtype(
str_val)
except (ValueError, OverflowError):
return False
def _is_valid_int64(str_val):
try:
dtypes.int64.as_numpy_dtype(str_val)
return True
except (ValueError, OverflowError):
return False
def _is_valid_float(str_val, float_dtype):
try:
return float_dtype.as_numpy_dtype(str_val) < np.inf
except ValueError:
return False
def _infer_type(str_val, na_value, prev_type):
"""Given a string, infers its tensor type.
Infers the type of a value by picking the least 'permissive' type possible,
while still allowing the previous type inference for this column to be valid.
Args:
str_val: String value to infer the type of.
na_value: Additional string to recognize as a NA/NaN CSV value.
prev_type: Type previously inferred based on values of this column that
we've seen up till now.
Returns:
Inferred dtype.
"""
if str_val in ("", na_value):
# If the field is null, it gives no extra information about its type
return prev_type
type_list = [
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64, dtypes.string
] # list of types to try, ordered from least permissive to most
type_functions = [
_is_valid_int32,
_is_valid_int64,
lambda str_val: _is_valid_float(str_val, dtypes.float32),
lambda str_val: _is_valid_float(str_val, dtypes.float64),
lambda str_val: True,
] # Corresponding list of validation functions
for i in range(len(type_list)):
validation_fn = type_functions[i]
if validation_fn(str_val) and (prev_type is None or
prev_type in type_list[:i + 1]):
return type_list[i]
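# Illustrative sketch only: how `_infer_type` promotes a column's dtype as
# successive string values are observed. The sample values below are made up
# and this helper is never called by the module.
def _example_infer_type_progression():
  """Illustrative only: dtype inferred after four sample column values."""
  inferred = None
  for value in ["3", "4000000000", "1.5", "abc"]:
    # int32 -> int64 (overflow) -> float32 (non-integer) -> string (non-numeric)
    inferred = _infer_type(value, na_value="", prev_type=inferred)
  return inferred  # dtypes.string, the most permissive type required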
def _next_csv_row(filenames, num_cols, field_delim, use_quote_delim, header):
"""Generator that yields rows of CSV file(s) in order."""
for fn in filenames:
with file_io.FileIO(fn, "r") as f:
rdr = csv.reader(
f,
delimiter=field_delim,
quoting=csv.QUOTE_MINIMAL if use_quote_delim else csv.QUOTE_NONE)
if header:
next(rdr) # Skip header lines
for csv_row in rdr:
if len(csv_row) != num_cols:
raise ValueError(
"Problem inferring types: CSV row has different number of fields "
"than expected.")
yield csv_row
def _infer_column_defaults(filenames, num_cols, field_delim, use_quote_delim,
na_value, header, num_rows_for_inference,
select_columns):
"""Infers column types from the first N valid CSV records of files."""
if select_columns is None:
select_columns = range(num_cols)
inferred_types = [None] * len(select_columns)
for i, csv_row in enumerate(
_next_csv_row(filenames, num_cols, field_delim, use_quote_delim, header)):
if num_rows_for_inference is not None and i >= num_rows_for_inference:
break
for j, col_index in enumerate(select_columns):
inferred_types[j] = _infer_type(csv_row[col_index], na_value,
inferred_types[j])
# Replace None's with a default type
inferred_types = [t or dtypes.string for t in inferred_types]
# Default to 0 or '' for null values
return [
constant_op.constant([0 if t is not dtypes.string else ""], dtype=t)
for t in inferred_types
]
def _infer_column_names(filenames, field_delim, use_quote_delim):
"""Infers column names from first rows of files."""
csv_kwargs = {
"delimiter": field_delim,
"quoting": csv.QUOTE_MINIMAL if use_quote_delim else csv.QUOTE_NONE
}
with file_io.FileIO(filenames[0], "r") as f:
try:
column_names = next(csv.reader(f, **csv_kwargs))
except StopIteration:
raise ValueError(("Received StopIteration when reading the header line "
"of %s. Empty file?") % filenames[0])
for name in filenames[1:]:
with file_io.FileIO(name, "r") as f:
try:
if next(csv.reader(f, **csv_kwargs)) != column_names:
raise ValueError(
"Files have different column names in the header row.")
except StopIteration:
raise ValueError(("Received StopIteration when reading the header line "
"of %s. Empty file?") % filenames[0])
return column_names
def _get_sorted_col_indices(select_columns, column_names):
"""Transforms select_columns argument into sorted column indices."""
names_to_indices = {n: i for i, n in enumerate(column_names)}
num_cols = len(column_names)
for i, v in enumerate(select_columns):
if isinstance(v, int):
if v < 0 or v >= num_cols:
raise ValueError(
"Column index %d specified in select_columns out of valid range." %
v)
continue
if v not in names_to_indices:
raise ValueError(
"Value '%s' specified in select_columns not a valid column index or "
"name." % v)
select_columns[i] = names_to_indices[v]
# Sort and ensure there are no duplicates
result = sorted(set(select_columns))
if len(result) != len(select_columns):
raise ValueError("select_columns contains duplicate columns")
return result
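# Worked example for `_get_sorted_col_indices` (illustrative values): with
# column_names=["id", "name", "score"] and select_columns=["score", 0], the
# name "score" maps to index 2, the integer 0 is kept as-is, and the
# de-duplicated, sorted result is [0, 2].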
def _maybe_shuffle_and_repeat(
dataset, num_epochs, shuffle, shuffle_buffer_size, shuffle_seed):
"""Optionally shuffle and repeat dataset, as requested."""
if num_epochs != 1 and shuffle:
# Use shuffle_and_repeat for perf
return dataset.apply(
shuffle_ops.shuffle_and_repeat(shuffle_buffer_size, num_epochs,
shuffle_seed))
elif shuffle:
return dataset.shuffle(shuffle_buffer_size, shuffle_seed)
elif num_epochs != 1:
return dataset.repeat(num_epochs)
return dataset
def make_tf_record_dataset(file_pattern,
batch_size,
parser_fn=None,
num_epochs=None,
shuffle=True,
shuffle_buffer_size=None,
shuffle_seed=None,
prefetch_buffer_size=dataset_ops.AUTOTUNE,
num_parallel_reads=None,
num_parallel_parser_calls=None,
drop_final_batch=False):
"""Reads and optionally parses TFRecord files into a dataset.
Provides common functionality such as batching, optional parsing, shuffling,
and performant defaults.
Args:
file_pattern: List of files or patterns of TFRecord file paths.
See `tf.io.gfile.glob` for pattern rules.
batch_size: An int representing the number of records to combine
in a single batch.
parser_fn: (Optional.) A function accepting string input to parse
and process the record contents. This function must map records
to components of a fixed shape, so they may be batched. By
default, uses the record contents unmodified.
num_epochs: (Optional.) An int specifying the number of times this
dataset is repeated. If None (the default), cycles through the
dataset forever.
shuffle: (Optional.) A bool that indicates whether the input
should be shuffled. Defaults to `True`.
shuffle_buffer_size: (Optional.) Buffer size to use for
shuffling. A large buffer size ensures better shuffling, but
increases memory usage and startup time.
shuffle_seed: (Optional.) Randomization seed to use for shuffling.
prefetch_buffer_size: (Optional.) An int specifying the number of
feature batches to prefetch for performance improvement.
Defaults to auto-tune. Set to 0 to disable prefetching.
num_parallel_reads: (Optional.) Number of threads used to read
records from files. By default or if set to a value >1, the
results will be interleaved.
num_parallel_parser_calls: (Optional.) Number of records to parse in
parallel. Defaults to an automatic selection.
drop_final_batch: (Optional.) Whether the last batch should be
dropped in case its size is smaller than `batch_size`; the
default behavior is not to drop the smaller batch.
Returns:
A dataset, where each element matches the output of `parser_fn`
except it will have an additional leading `batch_size` dimension,
or a `batch_size`-length 1-D tensor of strings if `parser_fn` is
unspecified.
"""
files = dataset_ops.Dataset.list_files(
file_pattern, shuffle=shuffle, seed=shuffle_seed)
if num_parallel_reads is None:
# Note: We considered auto-tuning this value, but there is a concern
# that this affects the mixing of records from different files, which
# could affect training convergence/accuracy, so we are defaulting to
# a constant for now.
num_parallel_reads = 24
dataset = core_readers.TFRecordDataset(
files, num_parallel_reads=num_parallel_reads)
if shuffle_buffer_size is None:
# TODO(josh11b): Auto-tune this value when not specified
shuffle_buffer_size = 10000
dataset = _maybe_shuffle_and_repeat(
dataset, num_epochs, shuffle, shuffle_buffer_size, shuffle_seed)
# NOTE(mrry): We set `drop_final_batch=True` when `num_epochs is None` to
# improve the shape inference, because it makes the batch dimension static.
# It is safe to do this because in that case we are repeating the input
# indefinitely, and all batches will be full-sized.
drop_final_batch = drop_final_batch or num_epochs is None
if parser_fn is None:
dataset = dataset.batch(batch_size, drop_remainder=drop_final_batch)
else:
# TODO(josh11b): if num_parallel_parser_calls is None, use some function
# of num cores instead of map_and_batch's default behavior of one batch.
dataset = dataset.apply(batching.map_and_batch(
parser_fn, batch_size, num_parallel_calls=num_parallel_parser_calls,
drop_remainder=drop_final_batch))
if prefetch_buffer_size == 0:
return dataset
else:
return dataset.prefetch(buffer_size=prefetch_buffer_size)
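# Hedged usage sketch for `make_tf_record_dataset`; the file pattern below is
# a hypothetical placeholder and this helper is never called by the module.
def _example_make_tf_record_dataset():
  """Illustrative only: batches of raw serialized records, one pass over data."""
  dataset = make_tf_record_dataset(
      file_pattern="/tmp/data/*.tfrecord",  # hypothetical path
      batch_size=32,
      num_epochs=1,
      shuffle=True)
  # Without `parser_fn`, each element is a 1-D string tensor of up to 32
  # serialized records.
  return dataset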
@tf_export("data.experimental.make_csv_dataset", v1=[])
def make_csv_dataset_v2(
file_pattern,
batch_size,
column_names=None,
column_defaults=None,
label_name=None,
select_columns=None,
field_delim=",",
use_quote_delim=True,
na_value="",
header=True,
num_epochs=None,
shuffle=True,
shuffle_buffer_size=10000,
shuffle_seed=None,
prefetch_buffer_size=dataset_ops.AUTOTUNE,
num_parallel_reads=1,
sloppy=False,
num_rows_for_inference=100,
compression_type=None,
ignore_errors=False,
):
"""Reads CSV files into a dataset.
Reads CSV files into a dataset, where each element is a (features, labels)
tuple that corresponds to a batch of CSV rows. The features dictionary
maps feature column names to `Tensor`s containing the corresponding
feature data, and labels is a `Tensor` containing the batch's label data.
Args:
file_pattern: List of files or patterns of file paths containing CSV
records. See `tf.io.gfile.glob` for pattern rules.
batch_size: An int representing the number of records to combine
in a single batch.
column_names: An optional list of strings that corresponds to the CSV
columns, in order. One per column of the input record. If this is not
provided, infers the column names from the first row of the records.
These names will be the keys of the features dict of each dataset element.
column_defaults: An optional list of default values for the CSV fields. One
item per selected column of the input record. Each item in the list is
either a valid CSV dtype (float32, float64, int32, int64, or string), or a
`Tensor` with one of the aforementioned types. The tensor can either be
a scalar default value (if the column is optional), or an empty tensor (if
the column is required). If a dtype is provided instead of a tensor, the
column is also treated as required. If this list is not provided, tries
to infer types based on reading the first num_rows_for_inference rows of
files specified, and assumes all columns are optional, defaulting to `0`
for numeric values and `""` for string values. If both this and
`select_columns` are specified, these must have the same lengths, and
`column_defaults` is assumed to be sorted in order of increasing column
index.
label_name: An optional string corresponding to the label column. If
provided, the data for this column is returned as a separate `Tensor` from
the features dictionary, so that the dataset complies with the format
expected by a `tf.Estimator.train` or `tf.Estimator.evaluate` input
function.
select_columns: An optional list of integer indices or string column
names, that specifies a subset of columns of CSV data to select. If
column names are provided, these must correspond to names provided in
`column_names` or inferred from the file header lines. When this argument
is specified, only a subset of CSV columns will be parsed and returned,
corresponding to the columns specified. Using this results in faster
parsing and lower memory usage. If both this and `column_defaults` are
specified, these must have the same lengths, and `column_defaults` is
assumed to be sorted in order of increasing column index.
field_delim: An optional `string`. Defaults to `","`. Char delimiter to
separate fields in a record.
use_quote_delim: An optional bool. Defaults to `True`. If false, treats
double quotation marks as regular characters inside of the string fields.
na_value: Additional string to recognize as NA/NaN.
header: A bool that indicates whether the first rows of provided CSV files
correspond to header lines with column names, and should not be included
in the data.
num_epochs: An int specifying the number of times this dataset is repeated.
If None, cycles through the dataset forever.
shuffle: A bool that indicates whether the input should be shuffled.
shuffle_buffer_size: Buffer size to use for shuffling. A large buffer size
ensures better shuffling, but increases memory usage and startup time.
shuffle_seed: Randomization seed to use for shuffling.
prefetch_buffer_size: An int specifying the number of feature
batches to prefetch for performance improvement. Recommended value is the
number of batches consumed per training step. Defaults to auto-tune.
num_parallel_reads: Number of threads used to read CSV records from files.
If >1, the results will be interleaved.
sloppy: If `True`, reading performance will be improved at
the cost of non-deterministic ordering. If `False`, the order of elements
produced is deterministic prior to shuffling (elements are still
randomized if `shuffle=True`; note that if the seed is set, the order
of elements after shuffling is deterministic). Defaults to `False`.
num_rows_for_inference: Number of rows of a file to use for type inference
if record_defaults is not provided. If None, reads all the rows of all
the files. Defaults to 100.
compression_type: (Optional.) A `tf.string` scalar evaluating to one of
`""` (no compression), `"ZLIB"`, or `"GZIP"`. Defaults to no compression.
ignore_errors: (Optional.) If `True`, ignores errors with CSV file parsing,
such as malformed data or empty lines, and moves on to the next valid
CSV record. Otherwise, the dataset raises an error and stops processing
when encountering any invalid records. Defaults to `False`.
Returns:
A dataset, where each element is a (features, labels) tuple that corresponds
to a batch of `batch_size` CSV rows. The features dictionary maps feature
column names to `Tensor`s containing the corresponding column data, and
labels is a `Tensor` containing the column data for the label column
specified by `label_name`.
Raises:
ValueError: If any of the arguments is malformed.
"""
# Create dataset of all matching filenames
filenames = _get_file_names(file_pattern, False)
dataset = dataset_ops.Dataset.from_tensor_slices(filenames)
if shuffle:
dataset = dataset.shuffle(len(filenames), shuffle_seed)
# Clean arguments; figure out column names and defaults
if column_names is None:
if not header:
raise ValueError("Cannot infer column names without a header line.")
# If column names are not provided, infer from the header lines
column_names = _infer_column_names(filenames, field_delim, use_quote_delim)
if len(column_names) != len(set(column_names)):
raise ValueError("Cannot have duplicate column names.")
if select_columns is not None:
select_columns = _get_sorted_col_indices(select_columns, column_names)
if column_defaults is not None:
column_defaults = [
constant_op.constant([], dtype=x) if x in _ACCEPTABLE_CSV_TYPES else x
for x in column_defaults
]
else:
# If column defaults are not provided, infer from records at graph
# construction time
column_defaults = _infer_column_defaults(
filenames, len(column_names), field_delim, use_quote_delim, na_value,
header, num_rows_for_inference, select_columns)
if select_columns is not None and len(column_defaults) != len(select_columns):
raise ValueError(
"If specified, column_defaults and select_columns must have same "
"length."
)
if select_columns is not None and len(column_names) > len(select_columns):
# Pick the relevant subset of column names
column_names = [column_names[i] for i in select_columns]
if label_name is not None and label_name not in column_names:
raise ValueError("`label_name` provided must be one of the columns.")
def filename_to_dataset(filename):
dataset = CsvDataset(
filename,
record_defaults=column_defaults,
field_delim=field_delim,
use_quote_delim=use_quote_delim,
na_value=na_value,
select_cols=select_columns,
header=header,
compression_type=compression_type
)
if ignore_errors:
dataset = dataset.apply(error_ops.ignore_errors())
return dataset
def map_fn(*columns):
"""Organizes columns into a features dictionary.
Args:
*columns: list of `Tensor`s corresponding to one csv record.
Returns:
An OrderedDict of feature names to values for that particular record. If
label_name is provided, extracts the label feature to be returned as the
second element of the tuple.
"""
features = collections.OrderedDict(zip(column_names, columns))
if label_name is not None:
label = features.pop(label_name)
return features, label
return features
# Read files sequentially (if num_parallel_reads=1) or in parallel
dataset = dataset.apply(
interleave_ops.parallel_interleave(
filename_to_dataset, cycle_length=num_parallel_reads, sloppy=sloppy))
dataset = _maybe_shuffle_and_repeat(
dataset, num_epochs, shuffle, shuffle_buffer_size, shuffle_seed)
# Apply batch before map for perf, because map has high overhead relative
# to the size of the computation in each map.
# NOTE(mrry): We set `drop_remainder=True` when `num_epochs is None` to
# improve the shape inference, because it makes the batch dimension static.
# It is safe to do this because in that case we are repeating the input
# indefinitely, and all batches will be full-sized.
dataset = dataset.batch(batch_size=batch_size,
drop_remainder=num_epochs is None)
dataset = dataset_ops.MapDataset(
dataset, map_fn, use_inter_op_parallelism=False)
dataset = dataset.prefetch(prefetch_buffer_size)
return dataset
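# Hedged usage sketch for `make_csv_dataset_v2`; the file pattern and the
# "label" column name are hypothetical placeholders, and this helper is never
# called by the module.
def _example_make_csv_dataset():
  """Illustrative only: (features, labels) batches read from CSV files."""
  dataset = make_csv_dataset_v2(
      file_pattern="/tmp/data/*.csv",  # hypothetical path, files have a header
      batch_size=8,
      label_name="label",  # hypothetical label column
      num_epochs=1)
  # Each element is a tuple: an OrderedDict mapping column names to batched
  # Tensors, and a Tensor of the corresponding labels.
  return dataset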
@tf_export(v1=["data.experimental.make_csv_dataset"])
def make_csv_dataset_v1(
file_pattern,
batch_size,
column_names=None,
column_defaults=None,
label_name=None,
select_columns=None,
field_delim=",",
use_quote_delim=True,
na_value="",
header=True,
num_epochs=None,
shuffle=True,
shuffle_buffer_size=10000,
shuffle_seed=None,
prefetch_buffer_size=dataset_ops.AUTOTUNE,
num_parallel_reads=1,
sloppy=False,
num_rows_for_inference=100,
compression_type=None,
ignore_errors=False,
): # pylint: disable=missing-docstring
return dataset_ops.DatasetV1Adapter(make_csv_dataset_v2(
file_pattern, batch_size, column_names, column_defaults, label_name,
select_columns, field_delim, use_quote_delim, na_value, header,
num_epochs, shuffle, shuffle_buffer_size, shuffle_seed,
prefetch_buffer_size, num_parallel_reads, sloppy, num_rows_for_inference,
compression_type, ignore_errors))
make_csv_dataset_v1.__doc__ = make_csv_dataset_v2.__doc__
_DEFAULT_READER_BUFFER_SIZE_BYTES = 4 * 1024 * 1024 # 4 MB
@tf_export("data.experimental.CsvDataset", v1=[])
class CsvDatasetV2(dataset_ops.DatasetSource):
"""A Dataset comprising lines from one or more CSV files."""
def __init__(self,
filenames,
record_defaults,
compression_type=None,
buffer_size=None,
header=False,
field_delim=",",
use_quote_delim=True,
na_value="",
select_cols=None):
"""Creates a `CsvDataset` by reading and decoding CSV files.
The elements of this dataset correspond to records from the file(s).
RFC 4180 format is expected for CSV files
(https://tools.ietf.org/html/rfc4180)
Note that we allow leading and trailing spaces for int or float fields.
For example, suppose we have a file 'my_file0.csv' with four CSV columns of
different data types:
```
abcdefg,4.28E10,5.55E6,12
hijklmn,-5.3E14,,2
```
We can construct a CsvDataset from it as follows:
```python
tf.compat.v1.enable_eager_execution()
dataset = tf.data.experimental.CsvDataset(
"my_file*.csv",
[tf.float32, # Required field, use dtype or empty tensor
tf.constant([0.0], dtype=tf.float32), # Optional field, default to 0.0
tf.int32, # Required field, use dtype or empty tensor
],
select_cols=[1,2,3] # Only parse last three columns
)
```
The expected output of iterating over this dataset is:
```python
for element in dataset:
print(element)
>> (4.28e10, 5.55e6, 12)
>> (-5.3e14, 0.0, 2)
```
Args:
filenames: A `tf.string` tensor containing one or more filenames.
record_defaults: A list of default values for the CSV fields. Each item in
the list is either a valid CSV `DType` (float32, float64, int32, int64,
string), or a `Tensor` object with one of the above types. One per
column of CSV data, with either a scalar `Tensor` default value for the
column if it is optional, or `DType` or empty `Tensor` if required. If
both this and `select_cols` are specified, these must have the same
lengths, and `record_defaults` is assumed to be sorted in order of
increasing column index.
compression_type: (Optional.) A `tf.string` scalar evaluating to one of
`""` (no compression), `"ZLIB"`, or `"GZIP"`. Defaults to no
compression.
buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes
to buffer while reading files. Defaults to 4MB.
header: (Optional.) A `tf.bool` scalar indicating whether the CSV file(s)
have header line(s) that should be skipped when parsing. Defaults to
`False`.
field_delim: (Optional.) A `tf.string` scalar containing the delimiter
character that separates fields in a record. Defaults to `","`.
use_quote_delim: (Optional.) A `tf.bool` scalar. If `False`, treats
double quotation marks as regular characters inside of string fields
(ignoring RFC 4180, Section 2, Bullet 5). Defaults to `True`.
na_value: (Optional.) A `tf.string` scalar indicating a value that will
be treated as NA/NaN.
select_cols: (Optional.) A sorted list of column indices to select from
the input data. If specified, only this subset of columns will be
parsed. Defaults to parsing all columns.
"""
self._filenames = ops.convert_to_tensor(
filenames, dtype=dtypes.string, name="filenames")
self._compression_type = convert.optional_param_to_tensor(
"compression_type",
compression_type,
argument_default="",
argument_dtype=dtypes.string)
record_defaults = [
constant_op.constant([], dtype=x) if x in _ACCEPTABLE_CSV_TYPES else x
for x in record_defaults
]
self._record_defaults = ops.convert_n_to_tensor(
record_defaults, name="record_defaults")
self._buffer_size = convert.optional_param_to_tensor(
"buffer_size", buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)
self._header = ops.convert_to_tensor(
header, dtype=dtypes.bool, name="header")
self._field_delim = ops.convert_to_tensor(
field_delim, dtype=dtypes.string, name="field_delim")
self._use_quote_delim = ops.convert_to_tensor(
use_quote_delim, dtype=dtypes.bool, name="use_quote_delim")
self._na_value = ops.convert_to_tensor(
na_value, dtype=dtypes.string, name="na_value")
self._select_cols = convert.optional_param_to_tensor(
"select_cols",
select_cols,
argument_default=[],
argument_dtype=dtypes.int64,
)
self._structure = structure.NestedStructure(
tuple(structure.TensorStructure(d.dtype, [])
for d in self._record_defaults))
variant_tensor = gen_experimental_dataset_ops.experimental_csv_dataset(
filenames=self._filenames,
record_defaults=self._record_defaults,
buffer_size=self._buffer_size,
header=self._header,
output_shapes=self._structure._flat_shapes, # pylint: disable=protected-access
field_delim=self._field_delim,
use_quote_delim=self._use_quote_delim,
na_value=self._na_value,
select_cols=self._select_cols,
compression_type=self._compression_type)
super(CsvDatasetV2, self).__init__(variant_tensor)
@property
def _element_structure(self):
return self._structure
@tf_export(v1=["data.experimental.CsvDataset"])
class CsvDatasetV1(dataset_ops.DatasetV1Adapter):
"""A Dataset comprising lines from one or more CSV files."""
@functools.wraps(CsvDatasetV2.__init__)
def __init__(self,
filenames,
record_defaults,
compression_type=None,
buffer_size=None,
header=False,
field_delim=",",
use_quote_delim=True,
na_value="",
select_cols=None):
wrapped = CsvDatasetV2(filenames, record_defaults, compression_type,
buffer_size, header, field_delim, use_quote_delim,
na_value, select_cols)
super(CsvDatasetV1, self).__init__(wrapped)
@tf_export("data.experimental.make_batched_features_dataset", v1=[])
def make_batched_features_dataset_v2(file_pattern,
batch_size,
features,
reader=core_readers.TFRecordDataset,
label_key=None,
reader_args=None,
num_epochs=None,
shuffle=True,
shuffle_buffer_size=10000,
shuffle_seed=None,
prefetch_buffer_size=dataset_ops.AUTOTUNE,
reader_num_threads=1,
parser_num_threads=2,
sloppy_ordering=False,
drop_final_batch=False):
"""Returns a `Dataset` of feature dictionaries from `Example` protos.
If the `label_key` argument is provided, returns a `Dataset` of tuples
comprising feature dictionaries and labels.
Example:
```
serialized_examples = [
features {
feature { key: "age" value { int64_list { value: [ 0 ] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
feature { key: "kws" value { bytes_list { value: [ "code", "art" ] } } }
},
features {
feature { key: "age" value { int64_list { value: [] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
feature { key: "kws" value { bytes_list { value: [ "sports" ] } } }
}
]
```
We can use arguments:
```
features: {
"age": FixedLenFeature([], dtype=tf.int64, default_value=-1),
"gender": FixedLenFeature([], dtype=tf.string),
"kws": VarLenFeature(dtype=tf.string),
}
```
And the expected output is:
```python
{
"age": [[0], [-1]],
"gender": [["f"], ["f"]],
"kws": SparseTensor(
indices=[[0, 0], [0, 1], [1, 0]],
values=["code", "art", "sports"]
dense_shape=[2, 2]),
}
```
Args:
file_pattern: List of files or patterns of file paths containing
`Example` records. See `tf.io.gfile.glob` for pattern rules.
batch_size: An int representing the number of records to combine
in a single batch.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values. See `tf.io.parse_example`.
reader: A function or class that can be
called with a `filenames` tensor and (optional) `reader_args` and returns
a `Dataset` of `Example` tensors. Defaults to `tf.data.TFRecordDataset`.
label_key: (Optional.) A string corresponding to the key under which labels
are stored in the `tf.Example` protos. If provided, it must be one of the
`features` keys; otherwise a `ValueError` is raised.
reader_args: Additional arguments to pass to the reader class.
num_epochs: Integer specifying the number of times to read through the
dataset. If None, cycles through the dataset forever. Defaults to `None`.
shuffle: A boolean, indicates whether the input should be shuffled. Defaults
to `True`.
shuffle_buffer_size: Buffer size of the ShuffleDataset. A large capacity
ensures better shuffling but would increase memory usage and startup time.
shuffle_seed: Randomization seed to use for shuffling.
prefetch_buffer_size: Number of feature batches to prefetch in order to
improve performance. Recommended value is the number of batches consumed
per training step. Defaults to auto-tune.
reader_num_threads: Number of threads used to read `Example` records. If >1,
the results will be interleaved.
parser_num_threads: Number of threads to use for parsing `Example` tensors
into a dictionary of `Feature` tensors.
sloppy_ordering: If `True`, reading performance will be improved at
the cost of non-deterministic ordering. If `False`, the order of elements
produced is deterministic prior to shuffling (elements are still
randomized if `shuffle=True`; note that if the seed is set, the order
of elements after shuffling is deterministic). Defaults to `False`.
drop_final_batch: If `True`, and the batch size does not evenly divide the
input dataset size, the final smaller batch will be dropped. Defaults to
`False`.
Returns:
A dataset of `dict` elements, (or a tuple of `dict` elements and label).
Each `dict` maps feature keys to `Tensor` or `SparseTensor` objects.
Raises:
TypeError: If `reader` is a `tf.compat.v1.ReaderBase` subclass.
ValueError: If `label_key` is not one of the `features` keys.
"""
# Create dataset of all matching filenames
filenames = _get_file_names(file_pattern, False)
dataset = dataset_ops.Dataset.from_tensor_slices(filenames)
if shuffle:
dataset = dataset.shuffle(len(filenames), shuffle_seed)
if isinstance(reader, type) and issubclass(reader, io_ops.ReaderBase):
raise TypeError("The `reader` argument must return a `Dataset` object. "
"`tf.ReaderBase` subclasses are not supported. For "
"example, pass `tf.data.TFRecordDataset` instead of "
"`tf.TFRecordReader`.")
# Read `Example` records from files as tensor objects.
if reader_args is None:
reader_args = []
# Read files sequentially (if reader_num_threads=1) or in parallel
dataset = dataset.apply(
interleave_ops.parallel_interleave(
lambda filename: reader(filename, *reader_args),
cycle_length=reader_num_threads,
sloppy=sloppy_ordering))
# Extract values if the `Example` tensors are stored as key-value tuples.
if dataset_ops.get_legacy_output_types(dataset) == (
dtypes.string, dtypes.string):
dataset = dataset_ops.MapDataset(
dataset, lambda _, v: v, use_inter_op_parallelism=False)
# Apply dataset repeat and shuffle transformations.
dataset = _maybe_shuffle_and_repeat(
dataset, num_epochs, shuffle, shuffle_buffer_size, shuffle_seed)
# NOTE(mrry): We set `drop_remainder=True` when `num_epochs is None` to
# improve the shape inference, because it makes the batch dimension static.
# It is safe to do this because in that case we are repeating the input
# indefinitely, and all batches will be full-sized.
dataset = dataset.batch(
batch_size, drop_remainder=drop_final_batch or num_epochs is None)
# Parse `Example` tensors to a dictionary of `Feature` tensors.
dataset = dataset.apply(
parsing_ops.parse_example_dataset(
features, num_parallel_calls=parser_num_threads))
if label_key:
if label_key not in features:
raise ValueError(
"The `label_key` provided (%r) must be one of the `features` keys." %
label_key)
dataset = dataset.map(lambda x: (x, x.pop(label_key)))
dataset = dataset.prefetch(prefetch_buffer_size)
return dataset
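# Hedged usage sketch for `make_batched_features_dataset_v2`, mirroring the
# feature spec shown in the docstring above. The file pattern is a
# hypothetical placeholder, `tf` is imported lazily to keep the sketch
# self-contained, and this helper is never called by the module.
def _example_make_batched_features_dataset():
  """Illustrative only: batched, parsed `Example` features."""
  import tensorflow as tf  # local import, illustration only
  features = {
      "age": tf.io.FixedLenFeature([], dtype=tf.int64, default_value=-1),
      "gender": tf.io.FixedLenFeature([], dtype=tf.string),
      "kws": tf.io.VarLenFeature(dtype=tf.string),
  }
  return make_batched_features_dataset_v2(
      file_pattern="/tmp/data/*.tfrecord",  # hypothetical path
      batch_size=2,
      features=features,
      num_epochs=1)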
@tf_export(v1=["data.experimental.make_batched_features_dataset"])
def make_batched_features_dataset_v1(file_pattern, # pylint: disable=missing-docstring
batch_size,
features,
reader=core_readers.TFRecordDataset,
label_key=None,
reader_args=None,
num_epochs=None,
shuffle=True,
shuffle_buffer_size=10000,
shuffle_seed=None,
prefetch_buffer_size=dataset_ops.AUTOTUNE,
reader_num_threads=1,
parser_num_threads=2,
sloppy_ordering=False,
drop_final_batch=False):
return dataset_ops.DatasetV1Adapter(make_batched_features_dataset_v2(
file_pattern, batch_size, features, reader, label_key, reader_args,
num_epochs, shuffle, shuffle_buffer_size, shuffle_seed,
prefetch_buffer_size, reader_num_threads, parser_num_threads,
sloppy_ordering, drop_final_batch))
make_batched_features_dataset_v2.__doc__ = (
make_batched_features_dataset_v1.__doc__)
def _get_file_names(file_pattern, shuffle):
"""Parse list of file names from pattern, optionally shuffled.
Args:
file_pattern: File glob pattern, or list of glob patterns.
shuffle: Whether to shuffle the order of file names.
Returns:
List of file names matching `file_pattern`.
Raises:
ValueError: If `file_pattern` is empty, or pattern matches no files.
"""
if isinstance(file_pattern, list):
if not file_pattern:
raise ValueError("File pattern is empty.")
file_names = []
for entry in file_pattern:
file_names.extend(gfile.Glob(entry))
else:
file_names = list(gfile.Glob(file_pattern))
if not file_names:
raise ValueError("No files match %s." % file_pattern)
# Sort files so it will be deterministic for unit tests.
if not shuffle:
file_names = sorted(file_names)
return file_names
@tf_export("data.experimental.SqlDataset", v1=[])
class SqlDatasetV2(dataset_ops.DatasetSource):
"""A `Dataset` consisting of the results from a SQL query."""
def __init__(self, driver_name, data_source_name, query, output_types):
"""Creates a `SqlDataset`.
`SqlDataset` allows a user to read data from the result set of a SQL query.
For example:
```python
tf.compat.v1.enable_eager_execution()
dataset = tf.data.experimental.SqlDataset("sqlite", "/foo/bar.sqlite3",
"SELECT name, age FROM people",
(tf.string, tf.int32))
# Prints the rows of the result set of the above query.
for element in dataset:
print(element)
```
Args:
driver_name: A 0-D `tf.string` tensor containing the database type.
Currently, the only supported value is 'sqlite'.
data_source_name: A 0-D `tf.string` tensor containing a connection string
to connect to the database.
query: A 0-D `tf.string` tensor containing the SQL query to execute.
output_types: A tuple of `tf.DType` objects representing the types of the
columns returned by `query`.
"""
self._driver_name = ops.convert_to_tensor(
driver_name, dtype=dtypes.string, name="driver_name")
self._data_source_name = ops.convert_to_tensor(
data_source_name, dtype=dtypes.string, name="data_source_name")
self._query = ops.convert_to_tensor(
query, dtype=dtypes.string, name="query")
self._structure = structure.NestedStructure(
nest.map_structure(
lambda dtype: structure.TensorStructure(dtype, []), output_types))
variant_tensor = gen_experimental_dataset_ops.experimental_sql_dataset(
self._driver_name, self._data_source_name, self._query,
**dataset_ops.flat_structure(self))
super(SqlDatasetV2, self).__init__(variant_tensor)
@property
def _element_structure(self):
return self._structure
@tf_export(v1=["data.experimental.SqlDataset"])
class SqlDatasetV1(dataset_ops.DatasetV1Adapter):
"""A `Dataset` consisting of the results from a SQL query."""
@functools.wraps(SqlDatasetV2.__init__)
def __init__(self, driver_name, data_source_name, query, output_types):
wrapped = SqlDatasetV2(driver_name, data_source_name, query, output_types)
super(SqlDatasetV1, self).__init__(wrapped)
# TODO(b/119044825): Until all `tf.data` unit tests are converted to V2, keep
# these aliases in place.
CsvDataset = CsvDatasetV1
SqlDataset = SqlDatasetV1
make_batched_features_dataset = make_batched_features_dataset_v1
make_csv_dataset = make_csv_dataset_v1
# End of tensorflow/python/data/experimental/ops/readers.py (repo: tensorflow-master)
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""StatsAggregator for aggregating statistics from `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.util.tf_export import tf_export
_DEFAULT_MAX_QUEUE = 10
@tf_export("data.experimental.StatsAggregator", v1=[])
class StatsAggregatorV2(object):
"""A stateful resource that aggregates statistics from one or more iterators.
To record statistics, use one of the custom transformation functions defined
in this module when defining your `tf.data.Dataset`. All statistics will be
aggregated by the `StatsAggregator` that is associated with a particular
iterator (see below). For example, to record the latency of producing each
element by iterating over a dataset:
```python
dataset = ...
dataset = dataset.apply(tf.data.experimental.latency_stats("total_bytes"))
```
To associate a `StatsAggregator` with a `tf.data.Dataset` object, use
the following pattern:
```python
aggregator = tf.data.experimental.StatsAggregator()
dataset = ...
# Apply `StatsOptions` to associate `dataset` with `aggregator`.
options = tf.data.Options()
options.experimental_stats.aggregator = aggregator
dataset = dataset.with_options(options)
```
Note: This interface is experimental and expected to change. In particular,
we expect to add other implementations of `StatsAggregator` that provide
different ways of exporting statistics, and add more types of statistics.
"""
def __init__(self):
self._resource = ged_ops.stats_aggregator_handle_v2()
# There could be a conflict with multiple file writers in the same logdir
# (b/37351340). Possible workarounds until that bug is resolved are (a) having
# multiple dataset-stats-specific files inside log_dir and (b) getting the
# default summary writer; the latter does not quite solve the problem, as
# there might be summary writers in the log dir that are not set as default,
# e.g. in a Keras callback.
# Creating a summary_writer here could potentially be replaced with getting
# the default summary_writer if any, creating it otherwise or a public
# method to associate summary writer.
self._logdir = tempfile.mkdtemp()
self._summary_writer = summary_ops_v2.create_file_writer_v2(
self._logdir, max_queue=_DEFAULT_MAX_QUEUE)
ged_ops.stats_aggregator_set_summary_writer(self._resource,
self._summary_writer._resource) # pylint: disable=protected-access
@tf_export(v1=["data.experimental.StatsAggregator"])
class StatsAggregatorV1(object):
"""A stateful resource that aggregates statistics from one or more iterators.
To record statistics, use one of the custom transformation functions defined
in this module when defining your `tf.data.Dataset`. All statistics will be
aggregated by the `StatsAggregator` that is associated with a particular
iterator (see below). For example, to record the latency of producing each
element by iterating over a dataset:
```python
dataset = ...
dataset = dataset.apply(tf.data.experimental.latency_stats("total_bytes"))
```
To associate a `StatsAggregator` with a `tf.data.Dataset` object, use
the following pattern:
```python
aggregator = tf.data.experimental.StatsAggregator()
dataset = ...
# Apply `StatsOptions` to associate `dataset` with `aggregator`.
options = tf.data.Options()
options.experimental_stats.aggregator = aggregator
dataset = dataset.with_options(options)
```
To get a protocol buffer summary of the currently aggregated statistics,
use the `StatsAggregator.get_summary()` tensor. The easiest way to do this
is to add the returned tensor to the `tf.GraphKeys.SUMMARIES` collection,
so that the summaries will be included with any existing summaries.
```python
aggregator = tf.data.experimental.StatsAggregator()
# ...
stats_summary = aggregator.get_summary()
tf.compat.v1.add_to_collection(tf.GraphKeys.SUMMARIES, stats_summary)
```
Note: This interface is experimental and expected to change. In particular,
we expect to add other implementations of `StatsAggregator` that provide
different ways of exporting statistics, and add more types of statistics.
"""
def __init__(self):
"""Creates a `StatsAggregator`."""
self._resource = ged_ops.experimental_stats_aggregator_handle()
def get_summary(self):
"""Returns a string `tf.Tensor` that summarizes the aggregated statistics.
The returned tensor will contain a serialized `tf.compat.v1.summary.Summary`
protocol buffer, which can be used with the standard TensorBoard logging
facilities.
Returns:
A scalar string `tf.Tensor` that summarizes the aggregated statistics.
"""
return ged_ops.experimental_stats_aggregator_summary(self._resource)
# TODO(b/116314787): Change this to StatsAggregatorV2 when we have stable
# SummaryWriterInterface, and do not break any users.
StatsAggregator = StatsAggregatorV1
# End of tensorflow/python/data/experimental/ops/stats_aggregator.py (repo: tensorflow-master)
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Cardinality analysis of `Dataset` objects."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.util.tf_export import tf_export
INFINITE = -1
UNKNOWN = -2
tf_export("data.experimental.INFINITE_CARDINALITY").export_constant(
__name__, "INFINITE")
tf_export("data.experimental.UNKNOWN_CARDINALITY").export_constant(
__name__, "UNKNOWN")
@tf_export("data.experimental.cardinality")
def cardinality(dataset):
"""Returns the cardinality of `dataset`, if known.
The operation returns the cardinality of `dataset`. The operation may return
`tf.data.experimental.INFINITE_CARDINALITY` if `dataset` contains an infinite
number of elements or `tf.data.experimental.UNKNOWN_CARDINALITY` if the
analysis fails to determine the number of elements in `dataset` (e.g. when the
dataset source is a file).
Args:
dataset: A `tf.data.Dataset` for which to determine cardinality.
Returns:
A scalar `tf.int64` `Tensor` representing the cardinality of `dataset`. If
the cardinality is infinite or unknown, the operation returns the named
constant `INFINITE_CARDINALITY` or `UNKNOWN_CARDINALITY`, respectively.
"""
return ged_ops.experimental_dataset_cardinality(dataset._variant_tensor) # pylint: disable=protected-access
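# Hedged usage sketch for `cardinality`; the datasets below are illustrative,
# the import is local because this module does not otherwise need dataset_ops,
# and this helper is never called by the module.
def _example_cardinality():
  """Illustrative only: known, infinite, and unknown cardinalities."""
  from tensorflow.python.data.ops import dataset_ops
  ds = dataset_ops.Dataset.range(10).batch(3)
  known = cardinality(ds)  # evaluates to 4 (batches of 3, 3, 3, 1)
  infinite = cardinality(ds.repeat())  # evaluates to INFINITE (-1)
  unknown = cardinality(
      dataset_ops.Dataset.range(10).filter(lambda x: x > -1))  # UNKNOWN (-2)
  return known, infinite, unknown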
# End of tensorflow/python/data/experimental/ops/cardinality.py (repo: tensorflow-master)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resampling dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.experimental.ops import scan_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.rejection_resample")
def rejection_resample(class_func, target_dist, initial_dist=None, seed=None):
"""A transformation that resamples a dataset to achieve a target distribution.
**NOTE** Resampling is performed via rejection sampling; some fraction
of the input values will be dropped.
Args:
class_func: A function mapping an element of the input dataset to a scalar
`tf.int32` tensor. Values should be in `[0, num_classes)`.
target_dist: A floating point type tensor, shaped `[num_classes]`.
initial_dist: (Optional.) A floating point type tensor, shaped
`[num_classes]`. If not provided, the true class distribution is
estimated live in a streaming fashion.
seed: (Optional.) Python integer seed for the resampler.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
target_dist_t = ops.convert_to_tensor(target_dist, name="target_dist")
class_values_ds = dataset.map(class_func)
# Get initial distribution.
if initial_dist is not None:
initial_dist_t = ops.convert_to_tensor(initial_dist, name="initial_dist")
acceptance_dist, prob_of_original = (
_calculate_acceptance_probs_with_mixing(initial_dist_t,
target_dist_t))
initial_dist_ds = dataset_ops.Dataset.from_tensors(
initial_dist_t).repeat()
acceptance_dist_ds = dataset_ops.Dataset.from_tensors(
acceptance_dist).repeat()
prob_of_original_ds = dataset_ops.Dataset.from_tensors(
prob_of_original).repeat()
else:
initial_dist_ds = _estimate_initial_dist_ds(
target_dist_t, class_values_ds)
acceptance_and_original_prob_ds = initial_dist_ds.map(
lambda initial: _calculate_acceptance_probs_with_mixing( # pylint: disable=g-long-lambda
initial, target_dist_t))
acceptance_dist_ds = acceptance_and_original_prob_ds.map(
lambda accept_prob, _: accept_prob)
prob_of_original_ds = acceptance_and_original_prob_ds.map(
lambda _, prob_original: prob_original)
filtered_ds = _filter_ds(dataset, acceptance_dist_ds, initial_dist_ds,
class_values_ds, seed)
# Prefetch filtered dataset for speed.
filtered_ds = filtered_ds.prefetch(3)
prob_original_static = _get_prob_original_static(
initial_dist_t, target_dist_t) if initial_dist is not None else None
if prob_original_static == 1:
return dataset_ops.Dataset.zip((class_values_ds, dataset))
elif prob_original_static == 0:
return filtered_ds
else:
return interleave_ops.sample_from_datasets(
[dataset_ops.Dataset.zip((class_values_ds, dataset)), filtered_ds],
weights=prob_of_original_ds.map(lambda prob: [(prob, 1.0 - prob)]),
seed=seed)
return _apply_fn
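# Hedged usage sketch for `rejection_resample`; the tiny class-imbalanced
# dataset below is illustrative only, and this helper is never called by the
# module.
def _example_rejection_resample():
  """Illustrative only: rebalance a binary label stream toward 50/50."""
  ds = dataset_ops.Dataset.from_tensor_slices([0, 0, 0, 0, 1]).repeat()
  resample_fn = rejection_resample(
      class_func=lambda x: x,  # elements already are class ids
      target_dist=[0.5, 0.5],
      seed=42)
  ds = ds.apply(resample_fn)
  # Elements are (class_value, original_element) tuples after resampling.
  return ds.map(lambda class_value, data: data)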
def _get_prob_original_static(initial_dist_t, target_dist_t):
"""Returns the static probability of sampling from the original.
`tensor_util.constant_value(prob_of_original)` returns `None` if it encounters
an Op that it isn't defined for. We have some custom logic to avoid this.
Args:
initial_dist_t: A tensor of the initial distribution.
target_dist_t: A tensor of the target distribution.
Returns:
The probability of sampling from the original distribution as a constant,
if it is a constant, or `None`.
"""
init_static = tensor_util.constant_value(initial_dist_t)
target_static = tensor_util.constant_value(target_dist_t)
if init_static is None or target_static is None:
return None
else:
return np.min(target_static / init_static)
def _filter_ds(dataset, acceptance_dist_ds, initial_dist_ds, class_values_ds,
seed):
"""Filters a dataset based on per-class acceptance probabilities.
Args:
dataset: The dataset to be filtered.
acceptance_dist_ds: A dataset of acceptance probabilities.
initial_dist_ds: A dataset of the initial probability distribution, given or
estimated.
class_values_ds: A dataset of the corresponding classes.
seed: (Optional.) Python integer seed for the resampler.
Returns:
A dataset of (class value, data) after filtering.
"""
def maybe_warn_on_large_rejection(accept_dist, initial_dist):
proportion_rejected = math_ops.reduce_sum((1 - accept_dist) * initial_dist)
return control_flow_ops.cond(
math_ops.less(proportion_rejected, .5),
lambda: accept_dist,
lambda: logging_ops.Print( # pylint: disable=g-long-lambda
accept_dist, [proportion_rejected, initial_dist, accept_dist],
message="Proportion of examples rejected by sampler is high: ",
summarize=100,
first_n=10))
acceptance_dist_ds = (dataset_ops.Dataset.zip((acceptance_dist_ds,
initial_dist_ds))
.map(maybe_warn_on_large_rejection))
def _gather_and_copy(class_val, acceptance_prob, data):
return class_val, array_ops.gather(acceptance_prob, class_val), data
current_probabilities_and_class_and_data_ds = dataset_ops.Dataset.zip(
(class_values_ds, acceptance_dist_ds, dataset)).map(_gather_and_copy)
filtered_ds = (
current_probabilities_and_class_and_data_ds
.filter(lambda _1, p, _2: random_ops.random_uniform([], seed=seed) < p))
return filtered_ds.map(lambda class_value, _, data: (class_value, data))
def _estimate_initial_dist_ds(
target_dist_t, class_values_ds, dist_estimation_batch_size=32,
smoothing_constant=10):
num_classes = (target_dist_t.shape[0] or array_ops.shape(target_dist_t)[0])
initial_examples_per_class_seen = array_ops.fill(
[num_classes], np.int64(smoothing_constant))
def update_estimate_and_tile(num_examples_per_class_seen, c):
updated_examples_per_class_seen, dist = _estimate_data_distribution(
c, num_examples_per_class_seen)
tiled_dist = array_ops.tile(
array_ops.expand_dims(dist, 0), [dist_estimation_batch_size, 1])
return updated_examples_per_class_seen, tiled_dist
initial_dist_ds = (class_values_ds.batch(dist_estimation_batch_size)
.apply(scan_ops.scan(initial_examples_per_class_seen,
update_estimate_and_tile))
.apply(batching.unbatch()))
return initial_dist_ds
def _get_target_to_initial_ratio(initial_probs, target_probs):
# Add tiny to initial_probs to avoid divide by zero.
denom = (initial_probs + np.finfo(initial_probs.dtype.as_numpy_dtype).tiny)
return target_probs / denom
def _estimate_data_distribution(c, num_examples_per_class_seen):
"""Estimate data distribution as labels are seen.
Args:
c: The class labels. Type `int32`, shape `[batch_size]`.
num_examples_per_class_seen: Type `int64`, shape `[num_classes]`,
containing counts.
Returns:
num_examples_per_class_seen: Updated counts. Type `int64`, shape
`[num_classes]`.
dist: The updated distribution. Type `float32`, shape `[num_classes]`.
"""
num_classes = num_examples_per_class_seen.get_shape()[0]
# Update the class-count based on what labels are seen in batch.
num_examples_per_class_seen = math_ops.add(
num_examples_per_class_seen, math_ops.reduce_sum(
array_ops.one_hot(c, num_classes, dtype=dtypes.int64), 0))
init_prob_estimate = math_ops.truediv(
num_examples_per_class_seen,
math_ops.reduce_sum(num_examples_per_class_seen))
dist = math_ops.cast(init_prob_estimate, dtypes.float32)
return num_examples_per_class_seen, dist
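# Worked example for `_estimate_data_distribution` (illustrative numbers): with
# two classes, smoothed initial counts [10, 10], and a batch of labels
# c = [0, 0, 1], the updated counts are [12, 11] and the estimated distribution
# is [12/23, 11/23] ~= [0.522, 0.478].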
def _calculate_acceptance_probs_with_mixing(initial_probs, target_probs):
"""Calculates the acceptance probabilities and mixing ratio.
In this case, we assume that we can *either* sample from the original data
distribution with probability `m`, or sample from a reshaped distribution
that comes from rejection sampling on the original distribution. This
rejection sampling is done on a per-class basis, with `a_i` representing the
probability of accepting data from class `i`.
This method is based on solving the following analysis for the reshaped
distribution:
Let F be the probability of a rejection (on any example).
Let p_i be the proportion of examples in the data in class i (init_probs)
Let a_i be the rate at which the rejection sampler should *accept* class i
Let t_i be the target proportion in the minibatches for class i (target_probs)
```
F = sum_i(p_i * (1-a_i))
= 1 - sum_i(p_i * a_i) using sum_i(p_i) = 1
```
An example with class `i` will be accepted if `k` rejections occur, then an
example with class `i` is seen by the rejector, and it is accepted. This can
be written as follows:
```
t_i = sum_k=0^inf(F^k * p_i * a_i)
      = p_i * a_i / (1 - F)    using geometric series identity, since 0 <= F < 1
= p_i * a_i / sum_j(p_j * a_j) using F from above
```
Note that the following constraints hold:
```
0 <= p_i <= 1, sum_i(p_i) = 1
0 <= a_i <= 1
0 <= t_i <= 1, sum_i(t_i) = 1
```
A solution for a_i in terms of the other variables is the following:
```a_i = (t_i / p_i) / max_i[t_i / p_i]```
  If we try to minimize the amount of data rejected, we get the following:
  ```
  M_max = max_i [ t_i / p_i ]
  M_min = min_i [ t_i / p_i ]
  ```
  The desired probability of pulling a data element from the original dataset,
  rather than the filtered one:
  ```m = M_min```
  The desired probability of accepting data if it comes from class `i`:
  ```a_i = (t_i / p_i - m) / (M_max - m)```
Args:
initial_probs: A Tensor of the initial probability distribution, given or
estimated.
    target_probs: A Tensor of the target probability distribution.
  Returns:
    A tuple of (a 1D Tensor with the per-class acceptance probabilities, the
    desired probability of pulling from the original distribution).
"""
ratio_l = _get_target_to_initial_ratio(initial_probs, target_probs)
max_ratio = math_ops.reduce_max(ratio_l)
min_ratio = math_ops.reduce_min(ratio_l)
# Target prob to sample from original distribution.
m = min_ratio
# TODO(joelshor): Simplify fraction, if possible.
a_i = (ratio_l - m) / (max_ratio - m)
return a_i, m
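# A minimal numeric sketch of the acceptance/mixing math above, mirroring
# `_calculate_acceptance_probs_with_mixing` with plain NumPy so the docstring
# formulas can be checked by hand. The `_example_...` helper name and the
# class proportions are illustrative only.
def _example_acceptance_probs_with_mixing_numpy():
  init_probs = np.array([0.5, 0.3, 0.2])      # p_i: observed class proportions
  target_probs = np.array([1. / 3, 1. / 3, 1. / 3])  # t_i: desired proportions
  ratio = target_probs / (init_probs + np.finfo(np.float64).tiny)
  m = ratio.min()                      # probability of sampling unfiltered data
  a = (ratio - m) / (ratio.max() - m)  # per-class acceptance probabilities
  # Here ratio ~= [0.67, 1.11, 1.67], m ~= 0.67 and a ~= [0.0, 0.44, 1.0]: the
  # over-represented class 0 is never accepted by the rejection sampler and is
  # only seen through the unfiltered branch, which is sampled with probability m.
  return a, m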
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/resampling.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for prefetching_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import function
from tensorflow.python.framework import device as framework_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.prefetch_to_device")
def prefetch_to_device(device, buffer_size=None):
"""A transformation that prefetches dataset values to the given `device`.
NOTE: Although the transformation creates a `tf.data.Dataset`, the
transformation must be the final `Dataset` in the input pipeline.
Args:
device: A string. The name of a device to which elements will be prefetched.
buffer_size: (Optional.) The number of elements to buffer on `device`.
Defaults to an automatically chosen value.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return dataset.apply(
copy_to_device(target_device=device)).prefetch(buffer_size)
return _apply_fn
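# A minimal usage sketch for `prefetch_to_device`. The `_example_...` helper
# name, the device string, and the buffer size are illustrative, and a GPU is
# assumed to be available; the transformation must remain the last one applied.
def _example_prefetch_to_device():
  dataset = dataset_ops.Dataset.range(10).batch(2)
  return dataset.apply(prefetch_to_device("/gpu:0", buffer_size=2))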
@tf_export("data.experimental.copy_to_device")
def copy_to_device(target_device, source_device="/cpu:0"):
"""A transformation that copies dataset elements to the given `target_device`.
Args:
target_device: The name of a device to which elements will be copied.
source_device: The original device on which `input_dataset` will be placed.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.autotune = False
return _CopyToDeviceDataset(
dataset, target_device=target_device,
source_device=source_device).with_options(options)
return _apply_fn
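# A minimal usage sketch for `copy_to_device`: copy elements to GPU memory and
# keep a one-element prefetch buffer there. The `_example_...` helper name and
# the device strings are illustrative, and a GPU is assumed to be available.
def _example_copy_to_device():
  dataset = dataset_ops.Dataset.range(10).batch(2)
  dataset = dataset.apply(copy_to_device("/gpu:0"))
  with ops.device("/gpu:0"):
    return dataset.prefetch(1)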
# TODO(rohanj): Use the _input_hostmem attr on the RemoteCall ops to indicate
# all inputs to the Op are in host memory, thereby avoiding some unnecessary
# Sends and Recvs.
class _CopyToDeviceDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` that copies elements to another device."""
def __init__(self, input_dataset, target_device, source_device="/cpu:0"):
"""Constructs a _CopyToDeviceDataset.
Args:
input_dataset: `Dataset` to be copied
target_device: The name of the device to which elements would be copied.
source_device: Device where input_dataset would be placed.
"""
self._input_dataset = input_dataset
self._target_device = target_device
spec = framework_device.DeviceSpec().from_string(self._target_device)
self._is_gpu_target = (spec.device_type == "GPU")
self._source_device_string = source_device
self._source_device = ops.convert_to_tensor(source_device)
wrap_ds_variant = gen_dataset_ops.wrap_dataset_variant(
self._input_dataset._variant_tensor) # pylint: disable=protected-access
@function.defun()
def _init_func():
"""Creates an iterator for the input dataset.
Returns:
A `string` tensor that encapsulates the iterator created.
"""
ds_variant = gen_dataset_ops.unwrap_dataset_variant(wrap_ds_variant)
resource = gen_dataset_ops.anonymous_iterator(
**dataset_ops.flat_structure(self._input_dataset))
with ops.control_dependencies(
[gen_dataset_ops.make_iterator(ds_variant, resource)]):
return gen_dataset_ops.iterator_to_string_handle(resource)
init_func_concrete = _init_func._get_concrete_function_internal() # pylint: disable=protected-access
@function.defun()
def _remote_init_func():
return functional_ops.remote_call(
target=self._source_device,
args=init_func_concrete.captured_inputs,
Tout=[dtypes.string],
f=init_func_concrete)
self._init_func = _remote_init_func._get_concrete_function_internal() # pylint: disable=protected-access
self._init_captured_args = self._init_func.captured_inputs
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
def _next_func(string_handle):
"""Calls get_next for created iterator.
Args:
string_handle: An iterator string handle created by _init_func
Returns:
The elements generated from `input_dataset`
"""
with ops.device(self._source_device_string):
iterator = iterator_ops.Iterator.from_string_handle(
string_handle,
dataset_ops.get_legacy_output_types(self),
dataset_ops.get_legacy_output_shapes(self),
dataset_ops.get_legacy_output_classes(self))
return self._element_structure._to_tensor_list(iterator.get_next()) # pylint: disable=protected-access
next_func_concrete = _next_func._get_concrete_function_internal() # pylint: disable=protected-access
@function.defun_with_attributes(
input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
attributes={"experimental_ints_on_device": True})
def _remote_next_func(string_handle):
return functional_ops.remote_call(
target=self._source_device,
args=[string_handle] +
next_func_concrete.captured_inputs,
Tout=self._input_dataset._element_structure._flat_types, # pylint: disable=protected-access
f=next_func_concrete)
self._next_func = _remote_next_func._get_concrete_function_internal() # pylint: disable=protected-access
self._next_captured_args = self._next_func.captured_inputs
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
def _finalize_func(string_handle):
"""Destroys the iterator resource created.
Args:
string_handle: An iterator string handle created by _init_func
Returns:
Tensor constant 0
"""
iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2(
string_handle,
**dataset_ops.flat_structure(self._input_dataset))
with ops.control_dependencies([
resource_variable_ops.destroy_resource_op(
iterator_resource, ignore_lookup_error=True)]):
return array_ops.constant(0, dtypes.int64)
finalize_func_concrete = _finalize_func._get_concrete_function_internal() # pylint: disable=protected-access
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
def _remote_finalize_func(string_handle):
return functional_ops.remote_call(
target=self._source_device,
args=[string_handle] + finalize_func_concrete.captured_inputs,
Tout=[dtypes.int64],
f=finalize_func_concrete)
self._finalize_func = _remote_finalize_func._get_concrete_function_internal( # pylint: disable=protected-access
)
self._finalize_captured_args = self._finalize_func.captured_inputs
g = ops.get_default_graph()
self._init_func.add_to_graph(g)
self._next_func.add_to_graph(g)
self._finalize_func.add_to_graph(g)
    # pylint: enable=protected-access
with ops.device(self._target_device):
variant_tensor = gen_dataset_ops.generator_dataset(
self._init_captured_args,
self._next_captured_args,
self._finalize_captured_args,
init_func=self._init_func,
next_func=self._next_func,
finalize_func=self._finalize_func,
**dataset_ops.flat_structure(self._input_dataset))
super(_CopyToDeviceDataset, self).__init__(input_dataset, variant_tensor)
# The one_shot_iterator implementation needs a 0 arg _make_dataset function
# that thereby captures all the inputs required to create the dataset. Since
# there are strings that are inputs to the GeneratorDataset which can't be
# placed on a GPU, this fails for the GPU case. Therefore, disabling it for
# GPU
def make_one_shot_iterator(self):
if self._is_gpu_target:
raise ValueError("Cannot create a one shot iterator when using "
"`tf.data.experimental.copy_to_device()` on GPU. Please "
"use `Dataset.make_initializable_iterator()` instead.")
else:
return super(_CopyToDeviceDataset, self).make_one_shot_iterator()
class _MapOnGpuDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that maps a function over elements in its using a GPU."""
def __init__(self, input_dataset, map_func, use_inter_op_parallelism=True):
"""See `Dataset.map()` for details."""
self._input_dataset = input_dataset
self._use_inter_op_parallelism = use_inter_op_parallelism
self._map_func = dataset_ops.StructuredFunctionWrapper(
map_func,
self._transformation_name(),
dataset=input_dataset,
defun_kwargs={"experimental_ints_on_device": True})
variant_tensor = ged_ops.experimental_map_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs,
f=self._map_func.function,
use_inter_op_parallelism=self._use_inter_op_parallelism,
**dataset_ops.flat_structure(self))
super(_MapOnGpuDataset, self).__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._map_func]
@property
def _element_structure(self):
return self._map_func.output_structure
def _transformation_name(self):
return "map_on_gpu()"
def map_on_gpu(map_func):
"""Maps `map_func` across the elements of this dataset.
NOTE: This is a highly experimental version of `tf.data.Dataset.map` that runs
`map_func` on GPU. It must be used after applying the
`tf.data.experimental.copy_to_device` transformation with a GPU device
argument.
Args:
map_func: A function mapping a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to
another nested structure of tensors.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _MapOnGpuDataset(dataset, map_func)
return _apply_fn
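# A minimal usage sketch for `map_on_gpu`, applied after `copy_to_device` with
# a GPU target as the docstring requires. The `_example_...` helper name, the
# device string, and the map function are illustrative, and a GPU is assumed.
def _example_map_on_gpu():
  dataset = dataset_ops.Dataset.range(10)
  dataset = dataset.apply(copy_to_device("/gpu:0"))
  return dataset.apply(map_on_gpu(lambda x: x * 2))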
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/prefetching_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for controlling threading in `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.util import options
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.ThreadingOptions")
class ThreadingOptions(options.OptionsBase):
"""Represents options for dataset threading.
You can set the threading options of a dataset through the
`experimental_threading` property of `tf.data.Options`; the property is
an instance of `tf.data.experimental.ThreadingOptions`.
```python
options = tf.data.Options()
options.experimental_threading.private_threadpool_size = 10
dataset = dataset.with_options(options)
```
"""
max_intra_op_parallelism = options.create_option(
name="max_intra_op_parallelism",
ty=int,
docstring=
"If set, it overrides the maximum degree of intra-op parallelism.")
private_threadpool_size = options.create_option(
name="private_threadpool_size",
ty=int,
docstring=
"If set, the dataset will use a private threadpool of the given size.")
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/threading_options.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Grouping dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.group_by_reducer")
def group_by_reducer(key_func, reducer):
"""A transformation that groups elements and performs a reduction.
  This transformation maps each element of a dataset to a key using `key_func`
  and groups the elements by key. The `reducer` is used to process each group;
  its `init_func` is used to initialize state for each group when it is
  created, its `reduce_func` is used to update the state every time an element
  is mapped to the matching group, and its `finalize_func` is used to map the
  final state to an output value.
Args:
key_func: A function mapping a nested structure of tensors
(having shapes and types defined by `self.output_shapes` and
`self.output_types`) to a scalar `tf.int64` tensor.
reducer: An instance of `Reducer`, which captures the reduction logic using
the `init_func`, `reduce_func`, and `finalize_func` functions.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
return _GroupByReducerDataset(dataset, key_func, reducer)
return _apply_fn
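# A minimal usage sketch for `group_by_reducer`: count how many elements map to
# each key (even vs. odd). It relies on the `Reducer` class defined later in
# this module; the `_example_...` helper name, the dataset, and the reducer
# functions are illustrative only.
def _example_group_by_reducer():
  dataset = dataset_ops.Dataset.range(10)
  count_reducer = Reducer(
      init_func=lambda _: np.int64(0),         # initial count for a new key
      reduce_func=lambda count, _: count + 1,  # increment once per element
      finalize_func=lambda count: count)       # emit the final count
  return dataset.apply(group_by_reducer(lambda x: x % 2, count_reducer))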
@tf_export("data.experimental.group_by_window")
def group_by_window(key_func,
reduce_func,
window_size=None,
window_size_func=None):
"""A transformation that groups windows of elements by key and reduces them.
This transformation maps each consecutive element in a dataset to a key
using `key_func` and groups the elements by key. It then applies
`reduce_func` to at most `window_size_func(key)` elements matching the same
key. All except the final window for each key will contain
`window_size_func(key)` elements; the final window may be smaller.
You may provide either a constant `window_size` or a window size determined by
the key through `window_size_func`.
Args:
key_func: A function mapping a nested structure of tensors
(having shapes and types defined by `self.output_shapes` and
`self.output_types`) to a scalar `tf.int64` tensor.
reduce_func: A function mapping a key and a dataset of up to `window_size`
consecutive elements matching that key to another dataset.
window_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements matching the same key to combine in a single
batch, which will be passed to `reduce_func`. Mutually exclusive with
`window_size_func`.
window_size_func: A function mapping a key to a `tf.int64` scalar
`tf.Tensor`, representing the number of consecutive elements matching
the same key to combine in a single batch, which will be passed to
`reduce_func`. Mutually exclusive with `window_size`.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: if neither or both of {`window_size`, `window_size_func`} are
passed.
"""
if (window_size is not None and window_size_func or
not (window_size is not None or window_size_func)):
raise ValueError("Must pass either window_size or window_size_func.")
if window_size is not None:
def constant_window_func(unused_key):
return ops.convert_to_tensor(window_size, dtype=dtypes.int64)
window_size_func = constant_window_func
assert window_size_func is not None
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
return _GroupByWindowDataset(dataset, key_func, reduce_func,
window_size_func)
return _apply_fn
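# A minimal usage sketch for `group_by_window`: group even and odd numbers and
# batch each group in windows of (up to) three elements. The `_example_...`
# helper name and the key/reduce functions are illustrative only.
def _example_group_by_window():
  dataset = dataset_ops.Dataset.range(10)
  return dataset.apply(
      group_by_window(
          key_func=lambda x: x % 2,
          reduce_func=lambda key, window: window.batch(3),
          window_size=3))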
@tf_export("data.experimental.bucket_by_sequence_length")
def bucket_by_sequence_length(element_length_func,
bucket_boundaries,
bucket_batch_sizes,
padded_shapes=None,
padding_values=None,
pad_to_bucket_boundary=False,
no_padding=False,
drop_remainder=False):
"""A transformation that buckets elements in a `Dataset` by length.
Elements of the `Dataset` are grouped together by length and then are padded
and batched.
This is useful for sequence tasks in which the elements have variable length.
Grouping together elements that have similar lengths reduces the total
fraction of padding in a batch which increases training step efficiency.
Args:
element_length_func: function from element in `Dataset` to `tf.int32`,
determines the length of the element, which will determine the bucket it
goes into.
bucket_boundaries: `list<int>`, upper length boundaries of the buckets.
bucket_batch_sizes: `list<int>`, batch size per bucket. Length should be
`len(bucket_boundaries) + 1`.
padded_shapes: Nested structure of `tf.TensorShape` to pass to
`tf.data.Dataset.padded_batch`. If not provided, will use
`dataset.output_shapes`, which will result in variable length dimensions
being padded out to the maximum length in each batch.
padding_values: Values to pad with, passed to
`tf.data.Dataset.padded_batch`. Defaults to padding with 0.
pad_to_bucket_boundary: bool, if `False`, will pad dimensions with unknown
size to maximum length in batch. If `True`, will pad dimensions with
unknown size to bucket boundary minus 1 (i.e., the maximum length in each
bucket), and caller must ensure that the source `Dataset` does not contain
any elements with length longer than `max(bucket_boundaries)`.
no_padding: `bool`, indicates whether to pad the batch features (features
need to be either of type `tf.SparseTensor` or of same shape).
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in the case it has fewer than
`batch_size` elements; the default behavior is not to drop the smaller
batch.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: if `len(bucket_batch_sizes) != len(bucket_boundaries) + 1`.
"""
with ops.name_scope("bucket_by_seq_length"):
if len(bucket_batch_sizes) != (len(bucket_boundaries) + 1):
raise ValueError(
"len(bucket_batch_sizes) must equal len(bucket_boundaries) + 1")
batch_sizes = constant_op.constant(bucket_batch_sizes, dtype=dtypes.int64)
def element_to_bucket_id(*args):
"""Return int64 id of the length bucket for this element."""
seq_length = element_length_func(*args)
boundaries = list(bucket_boundaries)
buckets_min = [np.iinfo(np.int32).min] + boundaries
buckets_max = boundaries + [np.iinfo(np.int32).max]
conditions_c = math_ops.logical_and(
math_ops.less_equal(buckets_min, seq_length),
math_ops.less(seq_length, buckets_max))
bucket_id = math_ops.reduce_min(array_ops.where(conditions_c))
return bucket_id
def window_size_fn(bucket_id):
# The window size is set to the batch size for this bucket
window_size = batch_sizes[bucket_id]
return window_size
def make_padded_shapes(shapes, none_filler=None):
padded = []
for shape in nest.flatten(shapes):
shape = tensor_shape.TensorShape(shape)
shape = [
none_filler if tensor_shape.dimension_value(d) is None else d
for d in shape
]
padded.append(shape)
return nest.pack_sequence_as(shapes, padded)
def batching_fn(bucket_id, grouped_dataset):
"""Batch elements in dataset."""
batch_size = window_size_fn(bucket_id)
if no_padding:
return grouped_dataset.batch(batch_size, drop_remainder=drop_remainder)
none_filler = None
if pad_to_bucket_boundary:
err_msg = ("When pad_to_bucket_boundary=True, elements must have "
"length < max(bucket_boundaries).")
check = check_ops.assert_less(
bucket_id,
constant_op.constant(len(bucket_batch_sizes) - 1,
dtype=dtypes.int64),
message=err_msg)
with ops.control_dependencies([check]):
boundaries = constant_op.constant(bucket_boundaries,
dtype=dtypes.int64)
bucket_boundary = boundaries[bucket_id]
none_filler = bucket_boundary - 1
input_shapes = dataset_ops.get_legacy_output_shapes(grouped_dataset)
shapes = make_padded_shapes(padded_shapes or input_shapes,
none_filler=none_filler)
return grouped_dataset.padded_batch(
batch_size, shapes, padding_values, drop_remainder=drop_remainder)
def _apply_fn(dataset):
return dataset.apply(
group_by_window(element_to_bucket_id, batching_fn,
window_size_func=window_size_fn))
return _apply_fn
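# A minimal usage sketch for `bucket_by_sequence_length`: rows of lengths 1..9
# are bucketed by length and padded within each bucket. The `_example_...`
# helper name, the boundaries, and the batch sizes are illustrative only.
def _example_bucket_by_sequence_length():
  dataset = dataset_ops.Dataset.range(1, 10).map(
      lambda x: array_ops.fill([x], x))        # element of shape [x]
  return dataset.apply(
      bucket_by_sequence_length(
          element_length_func=lambda row: array_ops.shape(row)[0],
          bucket_boundaries=[3, 6],            # buckets: [1,2], [3,5], [6,9]
          bucket_batch_sizes=[4, 4, 4]))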
class _GroupByReducerDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that groups its input and performs a reduction."""
def __init__(self, input_dataset, key_func, reducer):
"""See `group_by_reducer()` for details."""
self._input_dataset = input_dataset
self._make_key_func(key_func, input_dataset)
self._make_init_func(reducer.init_func)
self._make_reduce_func(reducer.reduce_func, input_dataset)
self._make_finalize_func(reducer.finalize_func)
variant_tensor = ged_ops.experimental_group_by_reducer_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._key_func.function.captured_inputs,
self._init_func.function.captured_inputs,
self._reduce_func.function.captured_inputs,
self._finalize_func.function.captured_inputs,
key_func=self._key_func.function,
init_func=self._init_func.function,
reduce_func=self._reduce_func.function,
finalize_func=self._finalize_func.function,
**dataset_ops.flat_structure(self))
super(_GroupByReducerDataset, self).__init__(input_dataset, variant_tensor)
def _make_key_func(self, key_func, input_dataset):
"""Make wrapping defun for key_func."""
self._key_func = dataset_ops.StructuredFunctionWrapper(
key_func, self._transformation_name(), dataset=input_dataset)
if not self._key_func.output_structure.is_compatible_with(
structure.TensorStructure(dtypes.int64, [])):
raise ValueError(
"`key_func` must return a single tf.int64 tensor. "
"Got type=%s and shape=%s"
% (self._key_func.output_types, self._key_func.output_shapes))
def _make_init_func(self, init_func):
"""Make wrapping defun for init_func."""
self._init_func = dataset_ops.StructuredFunctionWrapper(
init_func,
self._transformation_name(),
input_structure=structure.TensorStructure(dtypes.int64, []))
def _make_reduce_func(self, reduce_func, input_dataset):
"""Make wrapping defun for reduce_func."""
# Iteratively rerun the reduce function until reaching a fixed point on
# `self._state_structure`.
self._state_structure = self._init_func.output_structure
state_types = self._init_func.output_types
state_shapes = self._init_func.output_shapes
state_classes = self._init_func.output_classes
need_to_rerun = True
while need_to_rerun:
wrapped_func = dataset_ops.StructuredFunctionWrapper(
reduce_func,
self._transformation_name(),
input_structure=structure.NestedStructure(
(self._state_structure, input_dataset._element_structure)), # pylint: disable=protected-access
add_to_graph=False)
# Extract and validate class information from the returned values.
for new_state_class, state_class in zip(
nest.flatten(wrapped_func.output_classes),
nest.flatten(state_classes)):
if not issubclass(new_state_class, state_class):
raise TypeError(
"The element classes for the new state must match the initial "
"state. Expected %s; got %s." %
              (state_classes, wrapped_func.output_classes))
# Extract and validate type information from the returned values.
for new_state_type, state_type in zip(
nest.flatten(wrapped_func.output_types), nest.flatten(state_types)):
if new_state_type != state_type:
raise TypeError(
"The element types for the new state must match the initial "
"state. Expected %s; got %s." %
(self._init_func.output_types, wrapped_func.output_types))
# Extract shape information from the returned values.
flat_state_shapes = nest.flatten(state_shapes)
flat_new_state_shapes = nest.flatten(wrapped_func.output_shapes)
weakened_state_shapes = [
original.most_specific_compatible_shape(new)
for original, new in zip(flat_state_shapes, flat_new_state_shapes)
]
need_to_rerun = False
for original_shape, weakened_shape in zip(flat_state_shapes,
weakened_state_shapes):
if original_shape.ndims is not None and (
weakened_shape.ndims is None or
original_shape.as_list() != weakened_shape.as_list()):
need_to_rerun = True
break
if need_to_rerun:
state_shapes = nest.pack_sequence_as(
self._init_func.output_shapes, weakened_state_shapes)
self._state_structure = structure.convert_legacy_structure(
state_types, state_shapes, state_classes)
self._reduce_func = wrapped_func
self._reduce_func.function.add_to_graph(ops.get_default_graph())
def _make_finalize_func(self, finalize_func):
"""Make wrapping defun for finalize_func."""
self._finalize_func = dataset_ops.StructuredFunctionWrapper(
finalize_func, self._transformation_name(),
input_structure=self._state_structure)
@property
def _element_structure(self):
return self._finalize_func.output_structure
def _functions(self):
return [
self._key_func, self._init_func, self._reduce_func, self._finalize_func
]
def _transformation_name(self):
return "tf.data.experimental.group_by_reducer()"
class _GroupByWindowDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that groups its input and performs a windowed reduction."""
def __init__(self, input_dataset, key_func, reduce_func, window_size_func):
"""See `group_by_window()` for details."""
self._input_dataset = input_dataset
self._make_key_func(key_func, input_dataset)
self._make_reduce_func(reduce_func, input_dataset)
self._make_window_size_func(window_size_func)
variant_tensor = ged_ops.experimental_group_by_window_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._key_func.function.captured_inputs,
self._reduce_func.function.captured_inputs,
self._window_size_func.function.captured_inputs,
key_func=self._key_func.function,
reduce_func=self._reduce_func.function,
window_size_func=self._window_size_func.function,
**dataset_ops.flat_structure(self))
super(_GroupByWindowDataset, self).__init__(input_dataset, variant_tensor)
def _make_window_size_func(self, window_size_func):
"""Make wrapping defun for window_size_func."""
def window_size_func_wrapper(key):
return ops.convert_to_tensor(window_size_func(key), dtype=dtypes.int64)
self._window_size_func = dataset_ops.StructuredFunctionWrapper(
window_size_func_wrapper,
self._transformation_name(),
input_structure=structure.TensorStructure(dtypes.int64, []))
if not self._window_size_func.output_structure.is_compatible_with(
structure.TensorStructure(dtypes.int64, [])):
raise ValueError(
"`window_size_func` must return a single tf.int64 scalar tensor.")
def _make_key_func(self, key_func, input_dataset):
"""Make wrapping defun for key_func."""
def key_func_wrapper(*args):
return ops.convert_to_tensor(key_func(*args), dtype=dtypes.int64)
self._key_func = dataset_ops.StructuredFunctionWrapper(
key_func_wrapper, self._transformation_name(), dataset=input_dataset)
if not self._key_func.output_structure.is_compatible_with(
structure.TensorStructure(dtypes.int64, [])):
raise ValueError(
"`key_func` must return a single tf.int64 scalar tensor.")
def _make_reduce_func(self, reduce_func, input_dataset):
"""Make wrapping defun for reduce_func."""
nested_dataset = dataset_ops.DatasetStructure(
input_dataset._element_structure) # pylint: disable=protected-access
input_structure = structure.NestedStructure(
(structure.TensorStructure(dtypes.int64, []), nested_dataset))
self._reduce_func = dataset_ops.StructuredFunctionWrapper(
reduce_func, self._transformation_name(),
input_structure=input_structure)
if not isinstance(
self._reduce_func.output_structure, dataset_ops.DatasetStructure):
raise TypeError("`reduce_func` must return a `Dataset` object.")
# pylint: disable=protected-access
self._structure = (
self._reduce_func.output_structure._element_structure)
@property
def _element_structure(self):
return self._structure
def _functions(self):
return [self._key_func, self._reduce_func, self._window_size_func]
def _transformation_name(self):
return "tf.data.experimental.group_by_window()"
@tf_export("data.experimental.Reducer")
class Reducer(object):
"""A reducer is used for reducing a set of elements.
  A reducer is represented as a tuple of three functions:
1) initialization function: key => initial state
2) reduce function: (old state, input) => new state
3) finalization function: state => result
"""
def __init__(self, init_func, reduce_func, finalize_func):
self._init_func = init_func
self._reduce_func = reduce_func
self._finalize_func = finalize_func
@property
def init_func(self):
return self._init_func
@property
def reduce_func(self):
return self._reduce_func
@property
def finalize_func(self):
return self._finalize_func
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/grouping.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for controlling threading in `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.ops import resource_variable_ops
_uid_counter = 0
_uid_lock = threading.Lock()
def _generate_shared_name(prefix):
with _uid_lock:
global _uid_counter
uid = _uid_counter
_uid_counter += 1
return "{}{}".format(prefix, uid)
# TODO(b/73383364): Properly export in the `tf.data.experimental` API when
# stable or make private / remove.
class PrivateThreadPool(object):
"""A stateful resource that represents a private thread pool."""
def __init__(self, num_threads, display_name=None,
max_intra_op_parallelism=1):
"""Creates a `PrivateThreadPool` with the given number of threads."""
if context.executing_eagerly():
shared_name = _generate_shared_name("privatethreadpool")
self._resource = ged_ops.experimental_thread_pool_handle(
num_threads=num_threads,
max_intra_op_parallelism=max_intra_op_parallelism,
display_name=display_name,
shared_name=shared_name)
self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
handle=self._resource, handle_device=context.context().device_name)
else:
self._resource = ged_ops.experimental_thread_pool_handle(
num_threads=num_threads,
max_intra_op_parallelism=max_intra_op_parallelism,
display_name=display_name)
class _ThreadPoolDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` that acts as an identity, and sets a custom threadpool."""
def __init__(self, input_dataset, thread_pool):
self._input_dataset = input_dataset
self._thread_pool = thread_pool
variant_tensor = ged_ops.experimental_thread_pool_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._thread_pool._resource, # pylint: disable=protected-access
**dataset_ops.flat_structure(self))
super(_ThreadPoolDataset, self).__init__(input_dataset, variant_tensor)
# TODO(b/73383364): Properly export in the `tf.data.experimental` API when
# stable or make private / remove.
def override_threadpool(dataset, thread_pool):
"""Returns a new dataset that uses the given thread pool for its operations.
Args:
dataset: A `tf.data.Dataset` object.
thread_pool: A `PrivateThreadPool` object.
Returns:
A dataset containing the same values as `dataset`, but which uses
`thread_pool` to compute any of its parallel operations (such as
`tf.data.Dataset.map`).
"""
return _ThreadPoolDataset(dataset, thread_pool)
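# A minimal usage sketch for `override_threadpool`: run a dataset's parallel
# map on a private pool of four threads. The `_example_...` helper name, the
# pool size, the display name, and the map function are illustrative only.
def _example_override_threadpool():
  pool = PrivateThreadPool(4, display_name="example_private_pool")
  dataset = dataset_ops.Dataset.range(100).map(
      lambda x: x + 1, num_parallel_calls=4)
  return override_threadpool(dataset, pool)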
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/threadpool.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental `dataset` API for parsing example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.util.tf_export import tf_export
class _ParseExampleDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that parses `example` dataset into a `dict` dataset."""
def __init__(self, input_dataset, features, num_parallel_calls):
self._input_dataset = input_dataset
if not input_dataset._element_structure.is_compatible_with( # pylint: disable=protected-access
structure.TensorStructure(dtypes.string, [None])):
raise TypeError("Input dataset should be a dataset of vectors of strings")
self._num_parallel_calls = num_parallel_calls
# pylint: disable=protected-access
self._features = parsing_ops._prepend_none_dimension(features)
# sparse_keys and dense_keys come back sorted here.
(sparse_keys, sparse_types, dense_keys, dense_types, dense_defaults,
dense_shapes) = parsing_ops._features_to_raw_params(
self._features, [
parsing_ops.VarLenFeature, parsing_ops.SparseFeature,
parsing_ops.FixedLenFeature, parsing_ops.FixedLenSequenceFeature
])
# TODO(b/112859642): Pass sparse_index and sparse_values for SparseFeature.
(_, dense_defaults_vec, sparse_keys, sparse_types, dense_keys, dense_shapes,
dense_shape_as_shape) = parsing_ops._process_raw_parameters(
None, dense_defaults, sparse_keys, sparse_types, dense_keys,
dense_types, dense_shapes)
# pylint: enable=protected-access
self._sparse_keys = sparse_keys
self._sparse_types = sparse_types
self._dense_keys = dense_keys
self._dense_defaults = dense_defaults_vec
self._dense_shapes = dense_shapes
self._dense_types = dense_types
input_dataset_shape = dataset_ops.get_legacy_output_shapes(
self._input_dataset)
dense_output_shapes = [input_dataset_shape.concatenate(shape)
for shape in dense_shape_as_shape]
sparse_output_shapes = [input_dataset_shape.concatenate([None])
for _ in range(len(sparse_keys))]
output_shapes = dict(
zip(self._dense_keys + self._sparse_keys,
dense_output_shapes + sparse_output_shapes))
output_types = dict(
zip(self._dense_keys + self._sparse_keys,
self._dense_types + self._sparse_types))
output_classes = dict(
zip(self._dense_keys + self._sparse_keys,
[ops.Tensor for _ in range(len(self._dense_defaults))] +
[sparse_tensor.SparseTensor for _ in range(len(self._sparse_keys))
]))
self._structure = structure.convert_legacy_structure(
output_types, output_shapes, output_classes)
variant_tensor = (
gen_experimental_dataset_ops.experimental_parse_example_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._num_parallel_calls,
self._dense_defaults,
self._sparse_keys,
self._dense_keys,
self._sparse_types,
self._dense_shapes,
**dataset_ops.flat_structure(self)))
super(_ParseExampleDataset, self).__init__(input_dataset, variant_tensor)
@property
def _element_structure(self):
return self._structure
# TODO(b/111553342): add arguments names and example names as well.
@tf_export("data.experimental.parse_example_dataset")
def parse_example_dataset(features, num_parallel_calls=1):
"""A transformation that parses `Example` protos into a `dict` of tensors.
  Parses the serialized `Example` protos contained in each element of the input
  dataset, treating each element as a batch of `batch_size` individual
  serialized `Example` protos.
This op parses serialized examples into a dictionary mapping keys to `Tensor`
and `SparseTensor` objects. `features` is a dict from keys to `VarLenFeature`,
`SparseFeature`, and `FixedLenFeature` objects. Each `VarLenFeature`
and `SparseFeature` is mapped to a `SparseTensor`, and each
`FixedLenFeature` is mapped to a `Tensor`. See `tf.io.parse_example` for more
details about feature dictionaries.
Args:
features: A `dict` mapping feature keys to `FixedLenFeature`,
`VarLenFeature`, and `SparseFeature` values.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number of parsing processes to call in parallel.
Returns:
A dataset transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: if features argument is None.
"""
if features is None:
raise ValueError("Missing: features was %s." % features)
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
out_dataset = _ParseExampleDataset(dataset, features, num_parallel_calls)
if any(
isinstance(feature, parsing_ops.SparseFeature)
for _, feature in features.items()
):
# pylint: disable=protected-access
# pylint: disable=g-long-lambda
out_dataset = out_dataset.map(
lambda x: parsing_ops._construct_sparse_tensors_for_sparse_features(
features, x), num_parallel_calls=num_parallel_calls)
return out_dataset
return _apply_fn
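# A minimal usage sketch for `parse_example_dataset`. The `_example_...` helper
# name, the feature spec, and the `serialized_dataset` argument (a dataset of
# *batched* serialized tf.Example protos, e.g. `TFRecordDataset(...).batch(32)`)
# are illustrative only.
def _example_parse_example_dataset(serialized_dataset):
  features = {
      "label": parsing_ops.FixedLenFeature([], dtypes.int64, default_value=0),
      "tokens": parsing_ops.VarLenFeature(dtypes.string),
  }
  return serialized_dataset.apply(
      parse_example_dataset(features, num_parallel_calls=4))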
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/parsing_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unique element dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.unique")
def unique():
"""Creates a `Dataset` from another `Dataset`, discarding duplicates.
Use this transformation to produce a dataset that contains one instance of
each unique element in the input. For example:
```python
dataset = tf.data.Dataset.from_tensor_slices([1, 37, 2, 37, 2, 1])
# Using `unique()` will drop the duplicate elements.
dataset = dataset.apply(tf.data.experimental.unique()) # ==> { 1, 37, 2 }
```
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _UniqueDataset(dataset)
return _apply_fn
class _UniqueDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` contains the unique elements from its input."""
def __init__(self, input_dataset):
"""See `unique()` for details."""
self._input_dataset = input_dataset
if dataset_ops.get_legacy_output_types(input_dataset) not in (
dtypes.int32, dtypes.int64, dtypes.string):
raise TypeError(
"`tf.data.experimental.unique()` only supports inputs with a single "
"`tf.int32`, `tf.int64`, or `tf.string` component.")
variant_tensor = gen_experimental_dataset_ops.experimental_unique_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
**dataset_ops.flat_structure(self))
super(_UniqueDataset, self).__init__(input_dataset, variant_tensor)
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/unique.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for matching input filenames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
class MatchingFilesDataset(dataset_ops.DatasetSource):
"""A `Dataset` that list the files according to the input patterns."""
def __init__(self, patterns):
self._patterns = ops.convert_to_tensor(
patterns, dtype=dtypes.string, name="patterns")
variant_tensor = ged_ops.experimental_matching_files_dataset(self._patterns)
super(MatchingFilesDataset, self).__init__(variant_tensor)
@property
def _element_structure(self):
return structure.TensorStructure(dtypes.string, [])
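# A minimal usage sketch for `MatchingFilesDataset`. The `_example_...` helper
# name and the glob patterns are placeholders; the resulting dataset yields one
# scalar string per matched filename.
def _example_matching_files():
  return MatchingFilesDataset(["/tmp/data/*.tfrecord", "/tmp/data/*.txt"])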
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/matching_files.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for Datasets and Iterators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.get_single_element")
def get_single_element(dataset):
"""Returns the single element in `dataset` as a nested structure of tensors.
This function enables you to use a `tf.data.Dataset` in a stateless
"tensor-in tensor-out" expression, without creating a
`tf.compat.v1.data.Iterator`.
This can be useful when your preprocessing transformations are expressed
as a `Dataset`, and you want to use the transformation at serving time.
For example:
```python
input_batch = tf.compat.v1.placeholder(tf.string, shape=[BATCH_SIZE])
def preprocessing_fn(input_str):
# ...
return image, label
dataset = (tf.data.Dataset.from_tensor_slices(input_batch)
.map(preprocessing_fn, num_parallel_calls=BATCH_SIZE)
.batch(BATCH_SIZE))
image_batch, label_batch = tf.data.experimental.get_single_element(dataset)
```
Args:
dataset: A `tf.data.Dataset` object containing a single element.
Returns:
A nested structure of `tf.Tensor` objects, corresponding to the single
element of `dataset`.
Raises:
TypeError: if `dataset` is not a `tf.data.Dataset` object.
InvalidArgumentError (at runtime): if `dataset` does not contain exactly
one element.
"""
if not isinstance(dataset, dataset_ops.DatasetV2):
raise TypeError("`dataset` must be a `tf.data.Dataset` object.")
# pylint: disable=protected-access
return dataset._element_structure._from_compatible_tensor_list(
gen_dataset_ops.dataset_to_single_element(
dataset._variant_tensor, **dataset_ops.flat_structure(dataset)))
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/get_single_element.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for optimizing `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import structure as structure_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_experimental_dataset_ops
# TODO(jsimsa): Support RE matching for both individual transformation (e.g. to
# account for indexing) and transformation sequence.
def assert_next(transformations):
"""A transformation that asserts which transformations happen next.
Args:
transformations: A `tf.string` vector `tf.Tensor` identifying the
transformations that are expected to happen next.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
return _AssertNextDataset(dataset, transformations)
return _apply_fn
def model():
"""A transformation that models performance.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
return dataset_ops._ModelDataset(dataset) # pylint: disable=protected-access
return _apply_fn
def non_serializable():
"""A non-serializable identity transformation.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
return _NonSerializableDataset(dataset)
return _apply_fn
def optimize(optimizations=None):
"""A transformation that applies optimizations.
Args:
optimizations: (Optional.) A `tf.string` vector `tf.Tensor` identifying
optimizations to use. If not specified, the default set of optimizations
is applied.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
return dataset_ops._OptimizeDataset(dataset, optimizations) # pylint: disable=protected-access
return _apply_fn
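# A minimal sketch of how `assert_next` and `optimize` are typically combined
# in tf.data optimization tests. The `_example_...` helper name, the op name
# "MapAndBatch", and the rewrite name "map_and_batch_fusion" are assumptions
# for illustration only.
def _example_assert_next_and_optimize():
  dataset = dataset_ops.Dataset.range(10).apply(
      assert_next(["MapAndBatch"])).map(lambda x: x * 2).batch(5)
  return dataset.apply(optimize(["map_and_batch_fusion"]))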
class _AssertNextDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` that asserts which transformations happen next."""
def __init__(self, input_dataset, transformations):
"""See `assert_next()` for details."""
self._input_dataset = input_dataset
if transformations is None:
raise ValueError("At least one transformation should be specified")
self._transformations = ops.convert_to_tensor(
transformations, dtype=dtypes.string, name="transformations")
variant_tensor = (
gen_experimental_dataset_ops.experimental_assert_next_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._transformations,
**dataset_ops.flat_structure(self)))
super(_AssertNextDataset, self).__init__(input_dataset, variant_tensor)
class _NonSerializableDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` that performs non-serializable identity transformation."""
def __init__(self, input_dataset):
"""See `non_serializable()` for details."""
self._input_dataset = input_dataset
variant_tensor = (
gen_experimental_dataset_ops.experimental_non_serializable_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
**dataset_ops.flat_structure(self)))
super(_NonSerializableDataset, self).__init__(input_dataset, variant_tensor)
class _ChooseFastestDataset(dataset_ops.DatasetV2):
"""A `Dataset` that merges two input datasets."""
def __init__(self, datasets, num_experiments=10):
"""Chooses the fastest of some input datasets.
Given input datasets, produces elements as quickly as the fastest of the
inputs. Note that this dataset assumes that input datasets have the same
elements in the same order, though this is not enforced besides checking
that the input datasets have compatible output types, output shapes, and
cardinality at runtime. The resulting dataset produces elements that are
identical to the input elements, and in the same order.
Note that the time to first iteration is longer when this dataset is used
due to the overhead of dynamically picking the faster dataset. Namely,
for the first num_experiments iterations, this dataset will pull from all
of its inputs simultaneously in order to determine which input is the
fastest. For all subsequent iterations, that input will be used.
Args:
datasets: A list of `Datasets` that all have the same elements in the same
order.
      num_experiments: The number of experiments to run before deciding which
        dataset is fastest. In each "experiment" iteration, the dataset will
        pull from all of its inputs simultaneously, and update its knowledge
        of which input is the fastest.
Returns:
      A `Dataset` that has the same elements as the inputs.
"""
self._datasets = list(datasets)
self._structure = self._datasets[0]._element_structure # pylint: disable=protected-access
variant_tensor = (
gen_experimental_dataset_ops.experimental_choose_fastest_dataset(
[dataset._variant_tensor for dataset in self._datasets], # pylint: disable=protected-access
num_experiments=num_experiments,
**dataset_ops.flat_structure(self)))
super(_ChooseFastestDataset, self).__init__(variant_tensor)
def _inputs(self):
return self._datasets
@property
def _element_structure(self):
return self._datasets[0]._element_structure # pylint: disable=protected-access
class _ChooseFastestBranchDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that merges two input datasets."""
def __init__(self,
input_dataset,
functions,
ratio_numerator=1,
ratio_denominator=1,
num_elements_per_branch=None):
"""Chooses the fastest of some dataset functions.
Given dataset functions that take input_dataset as input and output
another dataset, produces elements as quickly as the fastest of these
output datasets. Note that datasets in the dataset functions are assumed
to be stateless, and the iterators created by the functions' output datasets
will, given the same input elements, all produce the same output elements.
Datasets in the functions are also expected to iterate over the input
dataset at most once. The violation of these conditions may lead to
undefined behavior.
For example:
```python
dataset = tf.data.Dataset.range(100)
dataset = _ChooseFastestDataset(
dataset,
[
lambda ds: ds.map(lambda x: tf.reshape(x, [1])).batch(10),
lambda ds: ds.batch(10).map(lambda x: tf.reshape(x, [10, 1]))
],
ratio=10,
num_elements_per_branch=10
)
```
The resulting dataset will produce elements equivalent to
`tf.data.Dataset.range(100).map(lambda x: tf.reshape(x, [1])).batch(10)`, or
`tf.data.Dataset.range(100).batch(10).map(lambda x: tf.reshape(x, [10, 1]))`
    Note that the first `num_elements_per_branch` iterations may be slower due
    to the overhead of dynamically picking the fastest dataset. Namely, for
    these iterations, the dataset will produce elements from any of the
    branches to determine which input is the fastest. For all subsequent
    iterations, that input will be used.
Args:
input_dataset: A `Dataset` that can be used as input to `functions`.
functions: A list of callables, each of which takes a `Dataset` as input
and returns a `Dataset`.
ratio_numerator: The numerator in the ratio of input elements consumed to
output elements produced for each function. This should be the same for
all functions. For example, if the function is
`lambda ds: ds.batch(10)`, the ratio is 10:1, i.e. the input dataset
must produce 10 elements for every element of the output dataset. In
this case, ratio_numerator should be 10.
ratio_denominator: The denominator in the ratio of input elements consumed
to output elements produced for each function. This should be the same
for all functions. For example, if the function is
`lambda ds: ds.batch(10)`, the ratio is 10:1, i.e. the input dataset
must produce 10 elements for every element of the output dataset. In
this case, ratio_denominator should be 1.
      num_elements_per_branch: The number of elements to get from each branch
        before deciding which dataset is fastest. In the first
        `len(functions) * num_elements_per_branch` iterations, the dataset
        will pull from one of the branches, and update its knowledge of which
        input is the fastest. Note that `num_elements_per_branch * ratio` is
        expected to be an integer.
Returns:
      A `Dataset` that has the same elements as the inputs.
"""
nested_structure = structure_lib.NestedStructure(
dataset_ops.DatasetStructure(dataset_ops.get_structure(input_dataset)))
self._funcs = [
dataset_ops.StructuredFunctionWrapper(
f, "ChooseFastestV2", input_structure=nested_structure)
for f in functions
]
self._structure = self._funcs[0].output_structure._element_structure # pylint: disable=protected-access
self._captured_arguments = []
for f in self._funcs:
self._captured_arguments.extend(f.function.captured_inputs)
self._capture_lengths = [
len(f.function.captured_inputs) for f in self._funcs
]
if ratio_numerator <= 0 or ratio_denominator <= 0:
raise ValueError("ratio must be positive.")
if num_elements_per_branch is None:
# Pick a sensible default based on `ratio_denominator`
num_elements_per_branch = 10 * ratio_denominator
variant_tensor = (
gen_experimental_dataset_ops.choose_fastest_branch_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
ratio_numerator=ratio_numerator,
ratio_denominator=ratio_denominator,
other_arguments=self._captured_arguments,
num_elements_per_branch=num_elements_per_branch,
branches=[f.function for f in self._funcs],
other_arguments_lengths=self._capture_lengths,
**dataset_ops.flat_structure(self)))
super(_ChooseFastestBranchDataset, self).__init__(input_dataset,
variant_tensor)
@property
def _element_structure(self):
return self._structure
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/optimization.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Scan dataset transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.framework import ops
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.util.tf_export import tf_export
class _ScanDataset(dataset_ops.UnaryDataset):
"""A dataset that scans a function across its input."""
def __init__(self, input_dataset, initial_state, scan_func):
"""See `scan()` for details."""
self._input_dataset = input_dataset
self._initial_state = structure.normalize_tensors(initial_state)
# Compute initial values for the state classes, shapes and types based on
    # the initial state. The shapes may be refined by running the wrapped
    # `scan_func` one or more times below.
self._state_structure = type_spec.type_spec_from_value(self._initial_state)
# Iteratively rerun the scan function until reaching a fixed point on
# `self._state_shapes`.
need_to_rerun = True
while need_to_rerun:
wrapped_func = dataset_ops.StructuredFunctionWrapper(
scan_func,
self._transformation_name(),
input_structure=structure.NestedStructure(
(self._state_structure, input_dataset._element_structure)), # pylint: disable=protected-access
add_to_graph=False)
if not (
isinstance(wrapped_func.output_types, collections.Sequence) and
len(wrapped_func.output_types) == 2):
raise TypeError("The scan function must return a pair comprising the "
"new state and the output value.")
      # Extract and validate class information from the returned values.
      new_state_classes, output_classes = wrapped_func.output_classes
old_state_classes = self._state_structure._to_legacy_output_classes() # pylint: disable=protected-access
for new_state_class, old_state_class in zip(
nest.flatten(new_state_classes),
nest.flatten(old_state_classes)):
if not issubclass(new_state_class, old_state_class):
raise TypeError(
"The element classes for the new state must match the initial "
"state. Expected %s; got %s." %
(old_state_classes, new_state_classes))
# Extract and validate type information from the returned values.
new_state_types, output_types = wrapped_func.output_types
old_state_types = self._state_structure._to_legacy_output_types() # pylint: disable=protected-access
for new_state_type, old_state_type in zip(
nest.flatten(new_state_types), nest.flatten(old_state_types)):
if new_state_type != old_state_type:
raise TypeError(
"The element types for the new state must match the initial "
"state. Expected %s; got %s." %
(old_state_types, new_state_types))
# Extract shape information from the returned values.
new_state_shapes, output_shapes = wrapped_func.output_shapes
old_state_shapes = self._state_structure._to_legacy_output_shapes() # pylint: disable=protected-access
self._structure = structure.convert_legacy_structure(
output_types, output_shapes, output_classes)
flat_state_shapes = nest.flatten(old_state_shapes)
flat_new_state_shapes = nest.flatten(new_state_shapes)
weakened_state_shapes = [
original.most_specific_compatible_shape(new)
for original, new in zip(flat_state_shapes, flat_new_state_shapes)
]
need_to_rerun = False
for original_shape, weakened_shape in zip(flat_state_shapes,
weakened_state_shapes):
if original_shape.ndims is not None and (
weakened_shape.ndims is None or
original_shape.as_list() != weakened_shape.as_list()):
need_to_rerun = True
break
if need_to_rerun:
# TODO(b/110122868): Support a "most specific compatible structure"
# method for combining structures, to avoid using legacy structures
# in this method.
self._state_structure = structure.convert_legacy_structure(
old_state_types,
nest.pack_sequence_as(old_state_shapes, weakened_state_shapes),
old_state_classes)
self._scan_func = wrapped_func
self._scan_func.function.add_to_graph(ops.get_default_graph())
# pylint: disable=protected-access
variant_tensor = gen_experimental_dataset_ops.experimental_scan_dataset(
self._input_dataset._variant_tensor,
self._state_structure._to_tensor_list(self._initial_state),
self._scan_func.function.captured_inputs,
f=self._scan_func.function,
preserve_cardinality=True,
**dataset_ops.flat_structure(self))
super(_ScanDataset, self).__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._scan_func]
@property
def _element_structure(self):
return self._structure
def _transformation_name(self):
return "tf.data.experimental.scan()"
@tf_export("data.experimental.scan")
def scan(initial_state, scan_func):
"""A transformation that scans a function across an input dataset.
This transformation is a stateful relative of `tf.data.Dataset.map`.
In addition to mapping `scan_func` across the elements of the input dataset,
`scan()` accumulates one or more state tensors, whose initial values are
`initial_state`.
Args:
initial_state: A nested structure of tensors, representing the initial state
of the accumulator.
scan_func: A function that maps `(old_state, input_element)` to
      `(new_state, output_element)`. It must take two arguments and return a
pair of nested structures of tensors. The `new_state` must match the
structure of `initial_state`.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _ScanDataset(dataset, initial_state, scan_func)
return _apply_fn
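# --- Editor's addition: a standalone usage sketch for `scan()` (not part of
# the original module). It computes a running sum over a small range dataset;
# the helper name is illustrative and assumes the public
# `tf.data.experimental.scan` API defined above.
import tensorflow as tf
def _running_sum_example():
  dataset = tf.data.Dataset.range(5)
  # `scan_func` maps (old_state, element) -> (new_state, output_element); here
  # both are the updated running total, so the result yields 0, 1, 3, 6, 10.
  return dataset.apply(
      tf.data.experimental.scan(
          initial_state=tf.constant(0, dtype=tf.int64),
          scan_func=lambda state, x: (state + x, state + x)))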
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/scan_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental shuffle ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import random_seed
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
class _ShuffleAndRepeatDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` that fuses `shuffle` and `repeat`."""
def __init__(self, input_dataset, buffer_size, count=None, seed=None):
self._input_dataset = input_dataset
self._buffer_size = ops.convert_to_tensor(
buffer_size, dtype=dtypes.int64, name="buffer_size")
if count is None:
self._count = constant_op.constant(-1, dtype=dtypes.int64, name="count")
else:
self._count = ops.convert_to_tensor(
count, dtype=dtypes.int64, name="count")
self._seed, self._seed2 = random_seed.get_seed(seed)
variant_tensor = gen_dataset_ops.shuffle_and_repeat_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
buffer_size=self._buffer_size,
count=self._count,
seed=self._seed,
seed2=self._seed2,
**dataset_ops.flat_structure(self))
super(_ShuffleAndRepeatDataset, self).__init__(input_dataset,
variant_tensor)
@deprecation.deprecated(
None,
"Use `tf.data.Dataset.shuffle(buffer_size, seed)` followed by "
"`tf.data.Dataset.repeat(count)`. Static tf.data optimizations will take "
"care of using the fused implementation.")
@tf_export("data.experimental.shuffle_and_repeat")
def shuffle_and_repeat(buffer_size, count=None, seed=None):
  """Shuffles and repeats a `Dataset`, returning a new permutation each epoch.
`dataset.apply(tf.data.experimental.shuffle_and_repeat(buffer_size, count))`
is equivalent to
`dataset.shuffle(buffer_size, reshuffle_each_iteration=True).repeat(count)`
  The difference is that the latter dataset is not serializable. So, if you
  need to checkpoint an input pipeline with reshuffling, you must use this
  implementation.
Args:
buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the
      maximum number of elements that will be buffered when prefetching.
count: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
number of times the dataset should be repeated. The default behavior
      (if `count` is `None` or `-1`) is for the dataset to be repeated
indefinitely.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
random seed that will be used to create the distribution. See
`tf.compat.v1.set_random_seed` for behavior.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset): # pylint: disable=missing-docstring
return _ShuffleAndRepeatDataset(dataset, buffer_size, count, seed)
return _apply_fn
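# --- Editor's addition: a standalone sketch (not part of the original module)
# contrasting the deprecated fused transformation with the recommended
# equivalent. The buffer size, count, and seed are illustrative.
import tensorflow as tf
def _shuffle_and_repeat_example():
  dataset = tf.data.Dataset.range(100)
  # Deprecated fused form:
  fused = dataset.apply(
      tf.data.experimental.shuffle_and_repeat(
          buffer_size=100, count=3, seed=1))
  # Recommended form; static tf.data optimizations can fuse it automatically:
  unfused = dataset.shuffle(100, seed=1).repeat(3)
  return fused, unfused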
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/shuffle_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for tf.data writers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import convert
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.TFRecordWriter")
class TFRecordWriter(object):
"""Writes data to a TFRecord file."""
def __init__(self, filename, compression_type=None):
self._filename = ops.convert_to_tensor(
filename, dtypes.string, name="filename")
self._compression_type = convert.optional_param_to_tensor(
"compression_type",
compression_type,
argument_default="",
argument_dtype=dtypes.string)
def write(self, dataset):
"""Returns a `tf.Operation` to write a dataset to a file.
Args:
      dataset: A `tf.data.Dataset` whose elements are to be written to a file.
Returns:
A `tf.Operation` that, when run, writes contents of `dataset` to a file.
"""
if not isinstance(dataset, dataset_ops.DatasetV2):
raise TypeError("`dataset` must be a `tf.data.Dataset` object.")
if not dataset_ops.get_structure(dataset).is_compatible_with(
structure.TensorStructure(dtypes.string, [])):
raise TypeError(
"`dataset` must produce scalar `DT_STRING` tensors whereas it "
"produces shape {0} and types {1}".format(
dataset_ops.get_legacy_output_shapes(dataset),
dataset_ops.get_legacy_output_types(dataset)))
return gen_experimental_dataset_ops.experimental_dataset_to_tf_record(
dataset._variant_tensor, self._filename, self._compression_type) # pylint: disable=protected-access
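# --- Editor's addition: a standalone usage sketch for `TFRecordWriter` (not
# part of the original module). The dataset must yield scalar strings; the
# output path is illustrative.
import tensorflow as tf
def _write_records_example(path="/tmp/example.tfrecord"):
  dataset = tf.data.Dataset.from_tensor_slices([b"a", b"b", b"c"])
  writer = tf.data.experimental.TFRecordWriter(path)
  # In graph mode this returns a `tf.Operation` to run in a session; under
  # eager execution the records are written immediately.
  return writer.write(dataset)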
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/writers.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for gathering statistics from `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@deprecation.deprecated(None, "Use `tf.data.experimental.StatsOptions`.")
def set_stats_aggregator(stats_aggregator, prefix="", counter_prefix=""):
"""Set the given `stats_aggregator` for aggregating the input dataset stats.
Args:
stats_aggregator: A `tf.data.experimental.StatsAggregator` object.
    prefix: (Optional) String, all statistics recorded for the input `dataset`
      will have the given `prefix` prepended to their names.
counter_prefix: (Optional) String, all statistics recorded as `counters`
will have the given `prefix` for the counter. Defaults to "/tensorflow".
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return dataset_ops._SetStatsAggregatorDataset( # pylint: disable=protected-access
dataset, stats_aggregator, prefix, counter_prefix)
return _apply_fn
@tf_export("data.experimental.bytes_produced_stats")
def bytes_produced_stats(tag):
"""Records the number of bytes produced by each element of the input dataset.
To consume the statistics, associate a `StatsAggregator` with the output
dataset.
Args:
tag: String. All statistics recorded by the returned transformation will
be associated with the given `tag`.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _StatsDataset(
dataset,
gen_experimental_dataset_ops.experimental_bytes_produced_stats_dataset,
tag)
return _apply_fn
@tf_export("data.experimental.latency_stats")
def latency_stats(tag):
"""Records the latency of producing each element of the input dataset.
To consume the statistics, associate a `StatsAggregator` with the output
dataset.
Args:
tag: String. All statistics recorded by the returned transformation will
be associated with the given `tag`.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _StatsDataset(
dataset,
gen_experimental_dataset_ops.experimental_latency_stats_dataset, tag)
return _apply_fn
class _StatsDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` that acts as an identity, and also records statistics."""
def __init__(self, input_dataset, op_function, tag):
self._input_dataset = input_dataset
self._op_function = op_function
self._tag = ops.convert_to_tensor(tag, dtype=dtypes.string)
variant_tensor = self._op_function(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._tag,
**dataset_ops.flat_structure(self))
super(_StatsDataset, self).__init__(input_dataset, variant_tensor)
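# --- Editor's addition: a standalone sketch (not part of the original module)
# showing how the transformations above are typically combined with a
# `StatsAggregator`. The tag names are illustrative.
import tensorflow as tf
def _stats_example():
  aggregator = tf.data.experimental.StatsAggregator()
  dataset = tf.data.Dataset.range(1000)
  dataset = dataset.apply(tf.data.experimental.latency_stats("range_latency"))
  dataset = dataset.apply(
      tf.data.experimental.bytes_produced_stats("range_bytes"))
  # Associate the aggregator through `tf.data.Options` so the recorded stats
  # can be read back from its summaries.
  options = tf.data.Options()
  options.experimental_stats.aggregator = aggregator
  return dataset.with_options(options)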
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/stats_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Iterator ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.make_saveable_from_iterator")
def make_saveable_from_iterator(iterator):
"""Returns a SaveableObject for saving/restoring iterator state using Saver.
Args:
iterator: Iterator.
Returns:
A SaveableObject for saving/restoring iterator state using Saver.
Raises:
ValueError: If iterator does not support checkpointing.
For example:
```python
with tf.Graph().as_default():
ds = tf.data.Dataset.range(10)
iterator = ds.make_initializable_iterator()
# Build the iterator SaveableObject.
saveable_obj = tf.data.experimental.make_saveable_from_iterator(iterator)
# Add the SaveableObject to the SAVEABLE_OBJECTS collection so
# it can be automatically saved using Saver.
tf.compat.v1.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, saveable_obj)
saver = tf.compat.v1.train.Saver()
while continue_training:
... Perform training ...
if should_save_checkpoint:
saver.save()
```
Note: When restoring the iterator, the existing iterator state is completely
discarded. This means that any changes you may have made to the Dataset
graph will be discarded as well! This includes the new Dataset graph
that you may have built during validation. So, while running validation,
make sure to run the initializer for the validation input pipeline after
restoring the checkpoint.
Note: Not all iterators support checkpointing yet. Attempting to save the
state of an unsupported iterator will throw an error.
"""
return _Saveable(iterator._iterator_resource) # pylint: disable=protected-access
class _Saveable(saver_lib.BaseSaverBuilder.SaveableObject):
"""SaveableObject for saving/restoring iterator state."""
def __init__(self, iterator_resource):
serialized_iterator = gen_dataset_ops.serialize_iterator(iterator_resource)
specs = [
saver_lib.BaseSaverBuilder.SaveSpec(serialized_iterator, "",
iterator_resource.name + "-state")
]
super(_Saveable, self).__init__(iterator_resource, specs,
iterator_resource.name)
def restore(self, restored_tensors, unused_restored_shapes):
with ops.colocate_with(self.op):
return gen_dataset_ops.deserialize_iterator(self.op, restored_tensors[0])
@tf_export("data.experimental.CheckpointInputPipelineHook")
class CheckpointInputPipelineHook(session_run_hook.SessionRunHook):
"""Checkpoints input pipeline state every N steps or seconds.
This hook saves the state of the iterators in the `Graph` so that when
training is resumed the input pipeline continues from where it left off.
  This could potentially avoid overfitting in certain pipelines where the
  number of training steps per eval is small compared to the dataset
  size, or if the training pipeline is preempted.
Differences from `CheckpointSaverHook`:
1. Saves only the input pipelines in the "iterators" collection and not the
global variables or other saveable objects.
2. Does not write the `GraphDef` and `MetaGraphDef` to the summary.
Example of checkpointing the training pipeline:
```python
est = tf.estimator.Estimator(model_fn)
while True:
est.train(
train_input_fn,
hooks=[tf.data.experimental.CheckpointInputPipelineHook(est)],
steps=train_steps_per_eval)
# Note: We do not pass the hook here.
metrics = est.evaluate(eval_input_fn)
if should_stop_the_training(metrics):
break
```
This hook should be used if the input pipeline state needs to be saved
separate from the model checkpoint. Doing so may be useful for a few reasons:
1. The input pipeline checkpoint may be large, if there are large shuffle
or prefetch buffers for instance, and may bloat the checkpoint size.
2. If the input pipeline is shared between training and validation, restoring
the checkpoint during validation may override the validation input
pipeline.
For saving the input pipeline checkpoint alongside the model weights use
`tf.data.experimental.make_saveable_from_iterator` directly to create a
`SaveableObject` and add to the `SAVEABLE_OBJECTS` collection. Note, however,
that you will need to be careful not to restore the training iterator during
eval. You can do that by not adding the iterator to the SAVEABLE_OBJECTS
  collection when building the eval graph.
"""
def __init__(self, estimator):
"""Initializes a `CheckpointInputPipelineHook`.
Args:
estimator: Estimator.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
ValueError: At most one of saver or scaffold should be set.
"""
# `checkpoint_basename` is "input.ckpt" for non-distributed pipelines or
# of the form "input_<task_type>_<task_id>.ckpt" for distributed pipelines.
# Note: The default `checkpoint_basename` used by `CheckpointSaverHook` is
# "model.ckpt". We intentionally choose the input pipeline checkpoint prefix
# to be different to avoid conflicts with the model checkpoint.
# pylint: disable=protected-access
checkpoint_prefix = "input"
if estimator._config.num_worker_replicas > 1:
# Distributed setting.
suffix = "_{}_{}".format(estimator._config.task_type,
estimator._config.task_id)
checkpoint_prefix += suffix
# pylint: enable=protected-access
# We use a composition paradigm instead of inheriting from
# `CheckpointSaverHook` because `Estimator` does an `isinstance` check
# to check whether a `CheckpointSaverHook` is already present in the list
# of hooks and if not, adds one. Inheriting from `CheckpointSaverHook`
# would thwart this behavior. This hook checkpoints *only the iterators*
# and not the graph variables.
self._checkpoint_saver_hook = basic_session_run_hooks.CheckpointSaverHook(
estimator.model_dir,
save_secs=estimator._config.save_checkpoints_secs, # pylint: disable=protected-access
save_steps=estimator._config.save_checkpoints_steps, # pylint: disable=protected-access
checkpoint_basename=checkpoint_prefix + ".ckpt")
# Name for the protocol buffer file that will contain the list of most
# recent checkpoints stored as a `CheckpointState` protocol buffer.
# This file, kept in the same directory as the checkpoint files, is
# automatically managed by the `Saver` to keep track of recent checkpoints.
# The default name used by the `Saver` for this file is "checkpoint". Here
# we use the name "checkpoint_<checkpoint_prefix>" so that in case the
# `checkpoint_dir` is the same as the model checkpoint directory, there are
# no conflicts during restore.
self._latest_filename = "checkpoint_" + checkpoint_prefix
self._first_run = True
def begin(self):
# Build a Saver that saves all iterators in the `GLOBAL_ITERATORS`
# collection if no `Saver` or `Scaffold` is provided.
# pylint: disable=protected-access
if (self._checkpoint_saver_hook._saver is None and
self._checkpoint_saver_hook._scaffold is None):
iterators = ops.get_collection(iterator_ops.GLOBAL_ITERATORS)
saveables = [_Saveable(i) for i in iterators]
self._checkpoint_saver_hook._saver = _CustomSaver(saveables,
self._latest_filename)
# pylint: enable=protected-access
self._checkpoint_saver_hook.begin()
def _restore_or_save_initial_ckpt(self, session):
# Ideally this should be run in after_create_session but is not for the
# following reason:
# Currently there is no way of enforcing an order of running the
# `SessionRunHooks`. Hence it is possible that the `_DatasetInitializerHook`
# is run *after* this hook. That is troublesome because
# 1. If a checkpoint exists and this hook restores it, the initializer hook
# will override it.
# 2. If no checkpoint exists, this hook will try to save an uninitialized
# iterator which will result in an exception.
#
# As a temporary fix we enter the following implicit contract between this
# hook and the _DatasetInitializerHook.
# 1. The _DatasetInitializerHook initializes the iterator in the call to
# after_create_session.
# 2. This hook saves the iterator on the first call to `before_run()`, which
# is guaranteed to happen after `after_create_session()` of all hooks
# have been run.
# Check if there is an existing checkpoint. If so, restore from it.
# pylint: disable=protected-access
latest_checkpoint_path = checkpoint_management.latest_checkpoint(
self._checkpoint_saver_hook._checkpoint_dir,
latest_filename=self._latest_filename)
if latest_checkpoint_path:
self._checkpoint_saver_hook._get_saver().restore(session,
latest_checkpoint_path)
else:
# The checkpoint saved here is the state at step "global_step".
# Note: We do not save the GraphDef or MetaGraphDef here.
global_step = session.run(self._checkpoint_saver_hook._global_step_tensor)
self._checkpoint_saver_hook._save(session, global_step)
self._checkpoint_saver_hook._timer.update_last_triggered_step(global_step)
# pylint: enable=protected-access
def before_run(self, run_context):
if self._first_run:
self._restore_or_save_initial_ckpt(run_context.session)
self._first_run = False
return self._checkpoint_saver_hook.before_run(run_context)
def after_run(self, run_context, run_values):
self._checkpoint_saver_hook.after_run(run_context, run_values)
def end(self, session):
self._checkpoint_saver_hook.end(session)
class _CustomSaver(saver_lib.Saver):
"""`Saver` with a different default `latest_filename`.
This is used in the `CheckpointInputPipelineHook` to avoid conflicts with
the model ckpt saved by the `CheckpointSaverHook`.
"""
def __init__(self, var_list, latest_filename):
super(_CustomSaver, self).__init__(var_list)
self._latest_filename = latest_filename
def save(self,
sess,
save_path,
global_step=None,
latest_filename=None,
meta_graph_suffix="meta",
write_meta_graph=True,
write_state=True,
strip_default_attrs=False):
return super(_CustomSaver, self).save(
sess, save_path, global_step, latest_filename or self._latest_filename,
meta_graph_suffix, write_meta_graph, write_state, strip_default_attrs)
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/iterator_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for optimizing `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_dataset_ops
def map_defun(fn,
elems,
output_dtypes,
output_shapes,
max_intra_op_parallelism=1):
"""Map a function on the list of tensors unpacked from `elems` on dimension 0.
Args:
fn: A function (`function.defun`) that takes a list of tensors and returns
      another list of tensors. The output list has the same types as
      `output_dtypes`. The elements of the output list have the same
      dimension 0 as `elems`, and the remaining dimensions correspond to those
      of `output_shapes`.
elems: A list of tensors.
output_dtypes: A list of dtypes corresponding to the output types of the
function.
output_shapes: A list of `TensorShape`s corresponding to the output shapes
from each invocation of the function on slices of inputs.
max_intra_op_parallelism: An integer. If positive, sets the max parallelism
limit of each function call to this.
Raises:
ValueError: if any of the inputs are malformed.
Returns:
A list of `Tensor` objects with the same types as `output_dtypes`.
"""
if not isinstance(elems, list):
raise ValueError("`elems` must be a list of tensors.")
if not isinstance(output_dtypes, list):
raise ValueError("`output_dtypes` must be a list of `tf.DType` objects.")
if not isinstance(output_shapes, list):
raise ValueError("`output_shapes` must be a list of `tf.TensorShape` "
"objects.")
concrete_fn = fn._get_concrete_function_internal() # pylint: disable=protected-access
# TODO(shivaniagrawal/rachelim): what about functions created without
# input_signature.
elems = [ops.convert_to_tensor(e) for e in elems]
output_shapes = [tensor_shape.TensorShape(s) for s in output_shapes]
return gen_dataset_ops.map_defun(elems, concrete_fn.captured_inputs,
output_dtypes, output_shapes, concrete_fn,
max_intra_op_parallelism)
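# --- Editor's addition: a hedged usage sketch (not part of the original
# module). `map_defun` expects a `function.defun`-style callable, so this
# sketch assumes the TF 1.x-era internal `defun` API with an input signature;
# the function and shapes below are illustrative only.
from tensorflow.python.eager import function as eager_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
def _map_defun_example():
  @eager_function.defun(
      input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
  def square(x):
    return x * x
  elems = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.int32)
  # Applies `square` to each row of `elems`; the single output tensor is
  # [[1, 4], [9, 16]].
  return map_defun(square, [elems],
                   output_dtypes=[dtypes.int32],
                   output_shapes=[[2]])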
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/map_defun.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for controlling optimizations in `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.util import options
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.MapVectorizationOptions")
class MapVectorizationOptions(options.OptionsBase):
"""Represents options for the MapVectorization optimization."""
# TODO(rachelim): Other configuration parameters can go here, for example,
# how many "experiments" to run with ChooseFastestBranchDataset.
enabled = options.create_option(
name="enabled",
ty=bool,
docstring=
"Whether to vectorize map transformations. If None, defaults to False."
)
use_choose_fastest = options.create_option(
name="use_choose_fastest",
ty=bool,
docstring="Whether to use ChooseFastestBranchDataset with this "
"transformation. If True, the pipeline picks between the vectorized and "
"original segment at runtime based on their iterations speed. If None, "
"defaults to False.")
def _static_optimizations(self):
if self.enabled:
return ["map_vectorization"]
return []
def _static_optimization_configs(self):
if self.use_choose_fastest:
return ["map_vectorization:use_choose_fastest:true"]
else:
return ["map_vectorization:use_choose_fastest:false"]
@tf_export("data.experimental.OptimizationOptions")
class OptimizationOptions(options.OptionsBase):
"""Represents options for dataset optimizations.
You can set the optimization options of a dataset through the
`experimental_optimization` property of `tf.data.Options`; the property is
an instance of `tf.data.experimental.OptimizationOptions`.
```python
options = tf.data.Options()
options.experimental_optimization.noop_elimination = True
options.experimental_optimization.map_vectorization.enabled = True
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
```
"""
apply_default_optimizations = options.create_option(
name="apply_default_optimizations",
ty=bool,
docstring=
"Whether to apply default static optimizations. If False, only static "
"optimizations that have been explicitly enabled will be applied.")
autotune = options.create_option(
name="autotune",
ty=bool,
docstring=
"Whether to automatically tune performance knobs. If None, defaults to "
"True.")
autotune_cpu_budget = options.create_option(
name="autotune_cpu_budget",
ty=int,
docstring=
"When autotuning is enabled (through `autotune`), determines the CPU "
"budget to use. Values greater than the number of schedulable CPU cores "
"are allowed but may result in CPU contention. If None, defaults to the "
"number of schedulable CPU cores.")
filter_fusion = options.create_option(
name="filter_fusion",
ty=bool,
docstring=
"Whether to fuse filter transformations. If None, defaults to False.")
filter_with_random_uniform_fusion = options.create_option(
name="filter_with_random_uniform_fusion",
ty=bool,
docstring=
"Whether to fuse filter dataset that predicts random_uniform < rate into "
"a sampling dataset. If None, defaults to False.")
hoist_random_uniform = options.create_option(
name="hoist_random_uniform",
ty=bool,
docstring=
"Whether to hoist `tf.random_uniform()` ops out of map transformations. "
"If None, defaults to False.")
map_and_batch_fusion = options.create_option(
name="map_and_batch_fusion",
ty=bool,
docstring=
"Whether to fuse map and batch transformations. If None, defaults to "
"True.")
map_and_filter_fusion = options.create_option(
name="map_and_filter_fusion",
ty=bool,
docstring=
"Whether to fuse map and filter transformations. If None, defaults to "
"False.")
map_fusion = options.create_option(
name="map_fusion",
ty=bool,
docstring="Whether to fuse map transformations. If None, defaults to "
"False.")
map_parallelization = options.create_option(
name="map_parallelization",
ty=bool,
docstring=
"Whether to parallelize stateless map transformations. If None, defaults "
"to False.")
map_vectorization = options.create_option(
name="map_vectorization",
ty=MapVectorizationOptions,
docstring=
"The map vectorization options associated with the dataset. See "
"`tf.data.experimental.MapVectorizationOptions` for more details.",
default_factory=MapVectorizationOptions)
noop_elimination = options.create_option(
name="noop_elimination",
ty=bool,
docstring=
"Whether to eliminate no-op transformations. If None, defaults to True.")
parallel_batch = options.create_option(
name="parallel_batch",
ty=bool,
docstring="Whether to parallelize copying of batch elements. If None, "
"defaults to False.")
shuffle_and_repeat_fusion = options.create_option(
name="shuffle_and_repeat_fusion",
ty=bool,
docstring="Whether to fuse shuffle and repeat transformations. If None, "
"defaults to True.")
def _static_optimizations(self):
"""Produces the list of enabled static optimizations."""
result = set()
all_optimizations = [
"filter_fusion",
"filter_with_random_uniform_fusion",
"hoist_random_uniform",
"map_and_batch_fusion",
"map_and_filter_fusion",
"map_parallelization",
"map_fusion",
"noop_elimination",
"parallel_batch",
"shuffle_and_repeat_fusion",
]
for optimization in all_optimizations:
if getattr(self, optimization):
result.add(optimization)
if self.apply_default_optimizations is not False:
# The following optimizations are turned on by default, unless the user
# explicitly disables them.
optimizations_to_disable = [
"map_and_batch_fusion",
"noop_elimination",
"shuffle_and_repeat_fusion",
]
for optimization in optimizations_to_disable:
if getattr(self, optimization) is not False:
result.add(optimization)
if self.map_vectorization is not None:
result.update(self.map_vectorization._static_optimizations()) # pylint: disable=protected-access
return sorted(list(result))
def _static_optimization_configs(self):
if self.map_vectorization is not None:
return self.map_vectorization._static_optimization_configs() # pylint: disable=protected-access
return []
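# --- Editor's addition: a small illustration (not part of the original
# module) of how the options above translate into graph-rewrite names. It
# calls the private `_static_optimizations()` helper purely for demonstration.
def _optimization_names_example():
  opts = OptimizationOptions()
  opts.apply_default_optimizations = False
  opts.noop_elimination = True
  # With the defaults suppressed, only the explicitly enabled rewrite remains,
  # so this returns ["noop_elimination"].
  return opts._static_optimizations()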
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/optimization_options.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-deterministic dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import random_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.ops import gen_stateless_random_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@deprecation.deprecated(
    None,
    "Use `tf.data.Dataset.interleave(map_func, cycle_length, block_length, "
    "num_parallel_calls=tf.data.experimental.AUTOTUNE)` instead. If sloppy "
    "execution is desired, use `tf.data.Options.experimental_deterministic`.")
@tf_export("data.experimental.parallel_interleave")
def parallel_interleave(map_func,
cycle_length,
block_length=1,
sloppy=False,
buffer_output_elements=None,
prefetch_input_elements=None):
"""A parallel version of the `Dataset.interleave()` transformation.
`parallel_interleave()` maps `map_func` across its input to produce nested
datasets, and outputs their elements interleaved. Unlike
`tf.data.Dataset.interleave`, it gets elements from `cycle_length` nested
datasets in parallel, which increases the throughput, especially in the
presence of stragglers. Furthermore, the `sloppy` argument can be used to
improve performance, by relaxing the requirement that the outputs are produced
in a deterministic order, and allowing the implementation to skip over nested
datasets whose elements are not readily available when requested.
Example usage:
```python
# Preprocess 4 files concurrently.
filenames = tf.data.Dataset.list_files("/path/to/data/train*.tfrecords")
dataset = filenames.apply(
tf.data.experimental.parallel_interleave(
lambda filename: tf.data.TFRecordDataset(filename),
cycle_length=4))
```
WARNING: If `sloppy` is `True`, the order of produced elements is not
deterministic.
Args:
map_func: A function mapping a nested structure of tensors to a `Dataset`.
cycle_length: The number of input `Dataset`s to interleave from in parallel.
block_length: The number of consecutive elements to pull from an input
`Dataset` before advancing to the next input `Dataset`.
sloppy: If false, elements are produced in deterministic order. Otherwise,
the implementation is allowed, for the sake of expediency, to produce
elements in a non-deterministic order.
buffer_output_elements: The number of elements each iterator being
interleaved should buffer (similar to the `.prefetch()` transformation for
each interleaved iterator).
prefetch_input_elements: The number of input elements to transform to
iterators before they are needed for interleaving.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return readers.ParallelInterleaveDataset(
dataset, map_func, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements)
return _apply_fn
class _DirectedInterleaveDataset(dataset_ops.Dataset):
"""A substitute for `Dataset.interleave()` on a fixed list of datasets."""
def __init__(self, selector_input, data_inputs):
self._selector_input = selector_input
self._data_inputs = list(data_inputs)
first_output_types = dataset_ops.get_legacy_output_types(data_inputs[0])
first_output_classes = dataset_ops.get_legacy_output_classes(data_inputs[0])
for data_input in data_inputs[1:]:
if (dataset_ops.get_legacy_output_types(data_input) != first_output_types
or dataset_ops.get_legacy_output_classes(data_input)
!= first_output_classes):
raise TypeError("All datasets must have the same type and class.")
output_shapes = dataset_ops.get_legacy_output_shapes(self._data_inputs[0])
for data_input in self._data_inputs[1:]:
output_shapes = nest.pack_sequence_as(output_shapes, [
ts1.most_specific_compatible_shape(ts2) for (ts1, ts2) in zip(
nest.flatten(output_shapes),
nest.flatten(dataset_ops.get_legacy_output_shapes(data_input)))
])
self._structure = structure.convert_legacy_structure(
first_output_types, output_shapes, first_output_classes)
super(_DirectedInterleaveDataset, self).__init__()
def _as_variant_tensor(self):
# pylint: disable=protected-access
return (
gen_experimental_dataset_ops.experimental_directed_interleave_dataset(
self._selector_input._variant_tensor,
[data_input._variant_tensor for data_input in self._data_inputs],
**dataset_ops.flat_structure(self)))
# pylint: enable=protected-access
def _inputs(self):
return [self._selector_input] + self._data_inputs
@property
def _element_structure(self):
return self._structure
@tf_export("data.experimental.sample_from_datasets", v1=[])
def sample_from_datasets_v2(datasets, weights=None, seed=None):
"""Samples elements at random from the datasets in `datasets`.
Args:
datasets: A list of `tf.data.Dataset` objects with compatible structure.
weights: (Optional.) A list of `len(datasets)` floating-point values where
`weights[i]` represents the probability with which an element should be
sampled from `datasets[i]`, or a `tf.data.Dataset` object where each
element is such a list. Defaults to a uniform distribution across
`datasets`.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
random seed that will be used to create the distribution. See
`tf.compat.v1.set_random_seed` for behavior.
Returns:
A dataset that interleaves elements from `datasets` at random, according to
`weights` if provided, otherwise with uniform probability.
Raises:
TypeError: If the `datasets` or `weights` arguments have the wrong type.
ValueError: If the `weights` argument is specified and does not match the
length of the `datasets` element.
"""
num_datasets = len(datasets)
if not isinstance(weights, dataset_ops.DatasetV2):
if weights is None:
# Select inputs with uniform probability.
logits = [[1.0] * num_datasets]
else:
# Use the given `weights` as the probability of choosing the respective
# input.
weights = ops.convert_to_tensor(weights, name="weights")
if weights.dtype not in (dtypes.float32, dtypes.float64):
raise TypeError("`weights` must be convertible to a tensor of "
"`tf.float32` or `tf.float64` elements.")
if not weights.shape.is_compatible_with([num_datasets]):
raise ValueError(
"`weights` must be a vector of length `len(datasets)`.")
# The `stateless_multinomial()` op expects log-probabilities, as opposed
# to weights.
logits = array_ops.expand_dims(math_ops.log(weights, name="logits"), 0)
# NOTE(mrry): We only specialize when `weights` is not a `Dataset`. When it
# is a `Dataset`, it is possible that evaluating it has a side effect the
# user depends on.
if len(datasets) == 1:
return datasets[0]
def select_dataset_constant_logits(seed):
return array_ops.squeeze(
gen_stateless_random_ops.stateless_multinomial(logits, 1, seed=seed),
axis=[0, 1])
selector_input = dataset_ops.MapDataset(
random_ops.RandomDataset(seed).batch(2),
select_dataset_constant_logits,
use_inter_op_parallelism=False)
else:
# Use each element of the given `weights` dataset as the probability of
# choosing the respective input.
# The `stateless_multinomial()` op expects log-probabilities, as opposed to
# weights.
logits_ds = weights.map(lambda *p: math_ops.log(p, name="logits"))
def select_dataset_varying_logits(logits, seed):
return array_ops.squeeze(
gen_stateless_random_ops.stateless_multinomial(logits, 1, seed=seed),
axis=[0, 1])
logits_and_seeds = dataset_ops.Dataset.zip(
(logits_ds, random_ops.RandomDataset(seed).batch(2)))
selector_input = dataset_ops.MapDataset(
logits_and_seeds,
select_dataset_varying_logits,
use_inter_op_parallelism=False)
return _DirectedInterleaveDataset(selector_input, datasets)
@tf_export(v1=["data.experimental.sample_from_datasets"])
def sample_from_datasets_v1(datasets, weights=None, seed=None):
return dataset_ops.DatasetV1Adapter(
sample_from_datasets_v2(datasets, weights, seed))
sample_from_datasets_v1.__doc__ = sample_from_datasets_v2.__doc__
@tf_export("data.experimental.choose_from_datasets", v1=[])
def choose_from_datasets_v2(datasets, choice_dataset):
"""Creates a dataset that deterministically chooses elements from `datasets`.
For example, given the following datasets:
```python
datasets = [tf.data.Dataset.from_tensors("foo").repeat(),
tf.data.Dataset.from_tensors("bar").repeat(),
tf.data.Dataset.from_tensors("baz").repeat()]
# Define a dataset containing `[0, 1, 2, 0, 1, 2, 0, 1, 2]`.
choice_dataset = tf.data.Dataset.range(3).repeat(3)
result = tf.data.experimental.choose_from_datasets(datasets, choice_dataset)
```
The elements of `result` will be:
```
"foo", "bar", "baz", "foo", "bar", "baz", "foo", "bar", "baz"
```
Args:
datasets: A list of `tf.data.Dataset` objects with compatible structure.
choice_dataset: A `tf.data.Dataset` of scalar `tf.int64` tensors between
`0` and `len(datasets) - 1`.
Returns:
A dataset that interleaves elements from `datasets` according to the values
of `choice_dataset`.
Raises:
TypeError: If the `datasets` or `choice_dataset` arguments have the wrong
type.
"""
if not dataset_ops.get_structure(choice_dataset).is_compatible_with(
structure.TensorStructure(dtypes.int64, [])):
raise TypeError("`choice_dataset` must be a dataset of scalar "
"`tf.int64` tensors.")
return _DirectedInterleaveDataset(choice_dataset, datasets)
@tf_export(v1=["data.experimental.choose_from_datasets"])
def choose_from_datasets_v1(datasets, choice_dataset):
return dataset_ops.DatasetV1Adapter(
choose_from_datasets_v2(datasets, choice_dataset))
choose_from_datasets_v1.__doc__ = choose_from_datasets_v2.__doc__
# TODO(b/119044825): Until all `tf.data` unit tests are converted to V2, keep
# these aliases in place.
choose_from_datasets = choose_from_datasets_v1
sample_from_datasets = sample_from_datasets_v1
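# --- Editor's addition: a standalone usage sketch for `sample_from_datasets`
# (not part of the original module); the weights and seed are illustrative.
import tensorflow as tf
def _sample_from_datasets_example():
  datasets = [tf.data.Dataset.from_tensors("foo").repeat(),
              tf.data.Dataset.from_tensors("bar").repeat()]
  # Draw roughly 75% of the elements from the first dataset and 25% from the
  # second, with a fixed seed for reproducibility.
  return tf.data.experimental.sample_from_datasets(
      datasets, weights=[0.75, 0.25], seed=42)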
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/interleave_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Enumerate dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@deprecation.deprecated(None, "Use `tf.data.Dataset.enumerate()`.")
@tf_export("data.experimental.enumerate_dataset")
def enumerate_dataset(start=0):
"""A transformation that enumerates the elements of a dataset.
It is similar to python's `enumerate`.
For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { 1, 2, 3 }
b = { (7, 8), (9, 10) }
  # Enumerating pairs each element of the dataset with an index that
  # increments from `start`.
a.apply(tf.data.experimental.enumerate_dataset(start=5))
=> { (5, 1), (6, 2), (7, 3) }
b.apply(tf.data.experimental.enumerate_dataset())
=> { (0, (7, 8)), (1, (9, 10)) }
```
Args:
start: A `tf.int64` scalar `tf.Tensor`, representing the start value for
enumeration.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return dataset.enumerate(start)
return _apply_fn
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/enumerate_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for manually injecting delays into `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import gen_experimental_dataset_ops
class _SleepDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` that sleeps before producing each upstream element."""
def __init__(self, input_dataset, sleep_microseconds):
self._input_dataset = input_dataset
self._sleep_microseconds = sleep_microseconds
variant_tensor = gen_experimental_dataset_ops.experimental_sleep_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._sleep_microseconds,
**dataset_ops.flat_structure(self))
super(_SleepDataset, self).__init__(input_dataset, variant_tensor)
def sleep(sleep_microseconds):
  """Sleeps `sleep_microseconds` microseconds before producing each element.
Args:
sleep_microseconds: The number of microseconds to sleep before producing an
input element.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _SleepDataset(dataset, sleep_microseconds)
return _apply_fn
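# --- Editor's addition: a short usage sketch (not part of the original
# module). `sleep()` is not exported under `tf.data.experimental`, so it is
# applied via this module directly; the delay value is illustrative.
import tensorflow as tf
def _sleep_example():
  dataset = tf.data.Dataset.range(10)
  # Pause ~10 ms before producing each element, e.g. to emulate a slow source.
  return dataset.apply(sleep(sleep_microseconds=10000))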
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/sleep.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ignore_errors dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.ignore_errors")
def ignore_errors():
"""Creates a `Dataset` from another `Dataset` and silently ignores any errors.
Use this transformation to produce a dataset that contains the same elements
as the input, but silently drops any elements that caused an error. For
example:
```python
dataset = tf.data.Dataset.from_tensor_slices([1., 2., 0., 4.])
  # Computing `tf.debugging.check_numerics(1. / 0.)` will raise an
  # InvalidArgumentError.
  dataset = dataset.map(lambda x: tf.debugging.check_numerics(1. / x, "error"))
  # Using `ignore_errors()` will drop the element that causes an error.
  dataset = dataset.apply(
      tf.data.experimental.ignore_errors())  # ==> {1., 0.5, 0.25}
```
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _IgnoreErrorsDataset(dataset)
return _apply_fn
class _IgnoreErrorsDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` that silently ignores errors when computing its input."""
def __init__(self, input_dataset):
"""See `Dataset.ignore_errors()` for details."""
self._input_dataset = input_dataset
variant_tensor = (
gen_experimental_dataset_ops.experimental_ignore_errors_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
**dataset_ops.flat_structure(self)))
super(_IgnoreErrorsDataset, self).__init__(input_dataset, variant_tensor)
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/error_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""StatsOptions to configure stats aggregation options for `tf.data` pipelines.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import stats_aggregator
from tensorflow.python.data.util import options
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.StatsOptions")
class StatsOptions(options.OptionsBase):
"""Represents options for collecting dataset stats using `StatsAggregator`.
You can set the stats options of a dataset through the `experimental_stats`
property of `tf.data.Options`; the property is an instance of
`tf.data.experimental.StatsOptions`. For example, to collect latency stats
on all dataset edges, use the following pattern:
```python
aggregator = tf.data.experimental.StatsAggregator()
options = tf.data.Options()
options.experimental_stats.aggregator = aggregator
options.experimental_stats.latency_all_edges = True
dataset = dataset.with_options(options)
```
"""
aggregator = options.create_option(
name="aggregator",
ty=(stats_aggregator.StatsAggregatorV2,
stats_aggregator.StatsAggregatorV1),
docstring=
"Associates the given statistics aggregator with the dataset pipeline.")
prefix = options.create_option(
name="prefix",
ty=str,
docstring=
"Prefix to prepend all statistics recorded for the input `dataset` with.",
default_factory=lambda: "")
counter_prefix = options.create_option(
name="counter_prefix",
ty=str,
docstring="Prefix for the statistics recorded as counter.",
default_factory=lambda: "")
latency_all_edges = options.create_option(
name="latency_all_edges",
ty=bool,
docstring=
"Whether to add latency measurements on all edges. Defaults to False.")
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/stats_options.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Datasets for random number generators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import random_seed
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.RandomDataset", v1=[])
class RandomDatasetV2(dataset_ops.DatasetSource):
"""A `Dataset` of pseudorandom values."""
def __init__(self, seed=None):
"""A `Dataset` of pseudorandom values."""
self._seed, self._seed2 = random_seed.get_seed(seed)
variant_tensor = gen_experimental_dataset_ops.experimental_random_dataset(
seed=self._seed, seed2=self._seed2, **dataset_ops.flat_structure(self))
super(RandomDatasetV2, self).__init__(variant_tensor)
@property
def _element_structure(self):
return structure.TensorStructure(dtypes.int64, [])
@tf_export(v1=["data.experimental.RandomDataset"])
class RandomDatasetV1(dataset_ops.DatasetV1Adapter):
"""A `Dataset` of pseudorandom values."""
@functools.wraps(RandomDatasetV2.__init__)
def __init__(self, seed=None):
wrapped = RandomDatasetV2(seed)
super(RandomDatasetV1, self).__init__(wrapped)
# TODO(b/119044825): Until all `tf.data` unit tests are converted to V2, keep
# this alias in place.
RandomDataset = RandomDatasetV1
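# Editor's sketch (not part of the original module): constructing the
# `tf.data.experimental.RandomDataset` exported above.
def _example_random_dataset_usage():
  """Returns a dataset of three pseudorandom scalar int64 values."""
  import tensorflow as tf  # Local import keeps the sketch self-contained.
  # Passing a fixed seed makes the generated sequence reproducible.
  return tf.data.experimental.RandomDataset(seed=42).take(3)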
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/random_ops.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""take-while dataset transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import structure as structure_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.util.tf_export import tf_export
class _TakeWhileDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A dataset that stops iteration when `predicate` returns false."""
def __init__(self, input_dataset, predicate):
"""See `take_while()` for details."""
self._input_dataset = input_dataset
wrapped_func = dataset_ops.StructuredFunctionWrapper(
predicate,
"tf.data.experimental.take_while()",
dataset=self._input_dataset)
if not wrapped_func.output_structure.is_compatible_with(
structure_lib.TensorStructure(dtypes.bool, [])):
raise ValueError("`predicate` must return a scalar boolean tensor.")
self._predicate = wrapped_func
var_tensor = gen_experimental_dataset_ops.experimental_take_while_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
other_arguments=self._predicate.function.captured_inputs,
predicate=self._predicate.function,
**dataset_ops.flat_structure(self))
super(_TakeWhileDataset, self).__init__(input_dataset, var_tensor)
def _functions(self):
return [self._predicate]
@tf_export("data.experimental.take_while")
def take_while(predicate):
"""A transformation that stops dataset iteration based on a `predicate`.
Args:
predicate: A function that maps a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to a
scalar `tf.bool` tensor.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _TakeWhileDataset(dataset, predicate)
return _apply_fn
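# Editor's sketch (not part of the original module): applying the
# `tf.data.experimental.take_while` transformation exported above.
def _example_take_while_usage():
  """Stops iteration once the predicate returns False, yielding 0 through 4."""
  import tensorflow as tf  # Local import keeps the sketch self-contained.
  ds = tf.data.Dataset.range(10)
  return ds.apply(tf.data.experimental.take_while(lambda x: x < 5))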
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/take_while_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Counter Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import scan_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.Counter", v1=[])
def CounterV2(start=0, step=1, dtype=dtypes.int64):
"""Creates a `Dataset` that counts from `start` in steps of size `step`.
For example:
```python
tf.data.experimental.Counter() == [0, 1, 2, ...)
tf.data.experimental.Counter(2) == [2, 3, ...)
tf.data.experimental.Counter(2, 5) == [2, 7, 12, ...)
tf.data.experimental.Counter(0, -1) == [0, -1, -2, ...)
tf.data.experimental.Counter(10, -1) == [10, 9, ...)
```
Args:
start: (Optional.) The starting value for the counter. Defaults to 0.
step: (Optional.) The step size for the counter. Defaults to 1.
dtype: (Optional.) The data type for counter elements. Defaults to
`tf.int64`.
Returns:
A `Dataset` of scalar `dtype` elements.
"""
with ops.name_scope("counter"):
start = ops.convert_to_tensor(start, dtype=dtype, name="start")
step = ops.convert_to_tensor(step, dtype=dtype, name="step")
return dataset_ops.Dataset.from_tensors(0).repeat(None).apply(
scan_ops.scan(start, lambda state, _: (state + step, state)))
@tf_export(v1=["data.experimental.Counter"])
def CounterV1(start=0, step=1, dtype=dtypes.int64):
return dataset_ops.DatasetV1Adapter(CounterV2(start, step, dtype))
CounterV1.__doc__ = CounterV2.__doc__
# TODO(b/119044825): Until all `tf.data` unit tests are converted to V2, keep
# this alias in place.
Counter = CounterV1 # pylint: disable=invalid-name
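# Editor's sketch (not part of the original module): pairing the infinite
# `tf.data.experimental.Counter` exported above with a finite dataset to
# index its elements.
def _example_counter_usage():
  """Returns a dataset yielding (0, "a"), (1, "b"), (2, "c")."""
  import tensorflow as tf  # Local import keeps the sketch self-contained.
  counter = tf.data.experimental.Counter(start=0, step=1)
  letters = tf.data.Dataset.from_tensor_slices(["a", "b", "c"])
  # zip() truncates to the shorter input, so the counter stops after three.
  return tf.data.Dataset.zip((counter, letters))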
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/counter.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dataset snapshot and related functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
COMPRESSION_GZIP = "GZIP"
COMPRESSION_NONE = None
class _SnapshotDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A Dataset that captures a snapshot or reads from a snapshot."""
def __init__(self,
input_dataset,
path,
compression=None,
reader_path_prefix=None,
writer_path_prefix=None,
shard_size_bytes=None,
pending_snapshot_expiry_seconds=None):
self._compression = compression if compression is not None else ""
self._reader_path_prefix = (
reader_path_prefix if reader_path_prefix is not None else "")
self._writer_path_prefix = (
writer_path_prefix if writer_path_prefix is not None else "")
self._shard_size_bytes = (
shard_size_bytes if shard_size_bytes is not None else -1)
self._pending_snapshot_expiry_seconds = (
pending_snapshot_expiry_seconds
if pending_snapshot_expiry_seconds is not None else -1)
self._input_dataset = input_dataset
self._path = ops.convert_to_tensor(path, dtype=dtypes.string, name="path")
variant_tensor = ged_ops.snapshot_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
path=self._path,
compression=self._compression,
reader_path_prefix=self._reader_path_prefix,
writer_path_prefix=self._writer_path_prefix,
shard_size_bytes=self._shard_size_bytes,
pending_snapshot_expiry_seconds=self._pending_snapshot_expiry_seconds,
**dataset_ops.flat_structure(self))
super(_SnapshotDataset, self).__init__(input_dataset, variant_tensor)
def snapshot(path,
compression=None,
reader_path_prefix=None,
writer_path_prefix=None,
shard_size_bytes=None,
pending_snapshot_expiry_seconds=None):
"""Writes to/reads from a snapshot of a dataset.
This function attempts to determine whether a valid snapshot exists at the
`path`, and reads from the snapshot if so. If not, it will run the
preprocessing pipeline as usual, and write out a snapshot of the data
processed for future use.
Args:
path: A directory where we want to save our snapshots and/or read from a
previously saved snapshot.
compression: The type of compression to apply to the Dataset. Currently
supports "GZIP" or None. Defaults to None (no compression).
reader_path_prefix: A prefix to add to the path when reading from snapshots.
Defaults to None.
writer_path_prefix: A prefix to add to the path when writing to snapshots.
Defaults to None.
shard_size_bytes: The size of each shard to be written by the snapshot
dataset op. Defaults to 10 GiB.
pending_snapshot_expiry_seconds: How long to wait (in seconds) before
the snapshot op considers a previously unfinished snapshot to be stale.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _SnapshotDataset(dataset, path, compression, reader_path_prefix,
writer_path_prefix, shard_size_bytes,
pending_snapshot_expiry_seconds)
return _apply_fn
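# Editor's sketch (not part of the original module): applying the
# module-level `snapshot()` helper defined above. The directory below is a
# placeholder chosen for illustration, not a path TensorFlow uses itself.
def _example_snapshot_usage(dataset):
  """Reads `dataset` from an existing snapshot, or writes one on first use."""
  return dataset.apply(
      snapshot("/tmp/my_dataset_snapshot", compression=COMPRESSION_GZIP))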
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/snapshot.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for tf.data options."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def _internal_attr_name(name):
return "_" + name
class OptionsBase(object):
"""Base class for representing a set of tf.data options.
Attributes:
_options: Stores the option values.
"""
def __init__(self):
# NOTE: Cannot use `self._options` here as we override `__setattr__`
object.__setattr__(self, "_options", {})
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
for name in set(self._options) | set(other._options): # pylint: disable=protected-access
if getattr(self, name) != getattr(other, name):
return False
return True
def __ne__(self, other):
if isinstance(other, self.__class__):
return not self.__eq__(other)
else:
return NotImplemented
def __setattr__(self, name, value):
if hasattr(self, name):
object.__setattr__(self, name, value)
else:
raise AttributeError(
"Cannot set the property %s on %s." % (name, type(self).__name__))
def create_option(name, ty, docstring, default_factory=lambda: None):
"""Creates a type-checked property.
Args:
name: The name to use.
ty: The type to use. The type of the property will be validated when it
is set.
docstring: The docstring to use.
default_factory: A callable that takes no arguments and returns a default
value to use if not set.
Returns:
A type-checked property.
"""
def get_fn(option):
# pylint: disable=protected-access
if name not in option._options:
option._options[name] = default_factory()
return option._options.get(name)
def set_fn(option, value):
if not isinstance(value, ty):
raise TypeError("Property \"%s\" must be of type %s, got: %r (type: %r)" %
(name, ty, value, type(value)))
option._options[name] = value # pylint: disable=protected-access
return property(get_fn, set_fn, None, docstring)
def merge_options(*options_list):
"""Merges the given options, returning the result as a new options object.
The input arguments are expected to have a matching type that derives from
`OptionsBase` (and thus each represent a set of options). The method outputs
an object of the same type created by merging the sets of options represented
by the input arguments.
The sets of options can be merged as long as no option is set to different
non-default values across the inputs.
If an option is an instance of `OptionsBase` itself, then this method is
applied recursively to the set of options represented by this option.
Args:
*options_list: options to merge
Raises:
TypeError: if the input arguments are incompatible or not derived from
`OptionsBase`
ValueError: if the given options cannot be merged
Returns:
A new options object which is the result of merging the given options.
"""
if len(options_list) < 1:
raise ValueError("At least one options should be provided")
result_type = type(options_list[0])
for options in options_list:
if not isinstance(options, result_type):
raise TypeError("Incompatible options type: %r vs %r" % (type(options),
result_type))
if not isinstance(options_list[0], OptionsBase):
raise TypeError("The inputs should inherit from `OptionsBase`")
default_options = result_type()
result = result_type()
for options in options_list:
# Iterate over all set options and merge them into the result.
for name in options._options: # pylint: disable=protected-access
this = getattr(result, name)
that = getattr(options, name)
default = getattr(default_options, name)
if that == default:
continue
elif this == default:
setattr(result, name, that)
elif isinstance(this, OptionsBase):
setattr(result, name, merge_options(this, that))
elif this != that:
raise ValueError(
"Cannot merge incompatible values (%r and %r) of option: %s" %
(this, that, name))
return result
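# Editor's sketch (not part of the original module): how `OptionsBase`,
# `create_option()` and `merge_options()` defined above compose.
# `_ExampleOptions` is a hypothetical subclass used only for illustration.
class _ExampleOptions(OptionsBase):
  """A toy options class with a single type-checked property."""
  num_parallel_calls = create_option(
      name="num_parallel_calls",
      ty=int,
      docstring="Number of parallel calls to use. Defaults to 1.",
      default_factory=lambda: 1)


def _example_merge_options_usage():
  """Merges two compatible options objects and returns the merged value."""
  first = _ExampleOptions()
  second = _ExampleOptions()
  second.num_parallel_calls = 4  # A non-default value survives the merge.
  merged = merge_options(first, second)
  return merged.num_parallel_calls  # == 4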
|
tensorflow-master
|
tensorflow/python/data/util/options.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Functions for working with arbitrarily nested sequences of elements.
NOTE(mrry): This fork of the `tensorflow.python.util.nest` module
makes two changes:
1. It removes support for lists as a level of nesting in nested structures.
2. It adds support for `SparseTensorValue` as an atomic element.
The motivation for these changes is twofold:
1. It seems more natural for lists to be treated (e.g. in Dataset constructors)
as tensors, rather than lists of (lists of...) tensors.
2. This is needed because `SparseTensorValue` is implemented as a `namedtuple`
that would normally be flattened, and we want to be able to create sparse
tensors from `SparseTensorValue`s similarly to creating tensors from numpy
arrays.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.framework import sparse_tensor as _sparse_tensor
def _sorted(dict_):
"""Returns a sorted list of the dict keys, with error if keys not sortable."""
try:
return sorted(list(dict_))
except TypeError:
raise TypeError("nest only supports dicts with sortable keys.")
def _sequence_like(instance, args):
"""Converts the sequence `args` to the same type as `instance`.
Args:
instance: an instance of `tuple`, `list`, or a `namedtuple` class.
args: elements to be converted to a sequence.
Returns:
`args` with the type of `instance`.
"""
if isinstance(instance, dict):
# Pack dictionaries in a deterministic order by sorting the keys.
# Notice this means that we ignore the original order of `OrderedDict`
# instances. This is intentional, to avoid potential bugs caused by mixing
# ordered and plain dicts (e.g., flattening a dict but using a
# corresponding `OrderedDict` to pack it back).
result = dict(zip(_sorted(instance), args))
return type(instance)((key, result[key]) for key in instance)
elif (isinstance(instance, tuple) and
hasattr(instance, "_fields") and
isinstance(instance._fields, _collections.Sequence) and
all(isinstance(f, _six.string_types) for f in instance._fields)):
# This is a namedtuple
return type(instance)(*args)
else:
# Not a namedtuple
return type(instance)(args)
def _yield_value(iterable):
if isinstance(iterable, dict):
# Iterate through dictionaries in a deterministic order by sorting the
# keys. Notice this means that we ignore the original order of `OrderedDict`
# instances. This is intentional, to avoid potential bugs caused by mixing
# ordered and plain dicts (e.g., flattening a dict but using a
# corresponding `OrderedDict` to pack it back).
for key in _sorted(iterable):
yield iterable[key]
elif isinstance(iterable, _sparse_tensor.SparseTensorValue):
yield iterable
else:
for value in iterable:
yield value
# See the swig file (../../util/util.i) for documentation.
is_sequence = _pywrap_tensorflow.IsSequenceForData
# See the swig file (../../util/util.i) for documentation.
flatten = _pywrap_tensorflow.FlattenForData
def assert_same_structure(nest1, nest2, check_types=True):
"""Asserts that two structures are nested in the same way.
Args:
nest1: an arbitrarily nested structure.
nest2: an arbitrarily nested structure.
check_types: if `True` (default), the types of sequences must match as
well. For dictionaries, the "type" is considered to include the keys; in
other words, two dictionaries with different keys are considered to have
a different "type". If set to `False`, two iterables are considered the
same as long as the elements they yield have the same structures.
Raises:
ValueError: If the two structures do not have the same number of elements or
if the two structures are not nested in the same way.
TypeError: If the two structures differ in the type of sequence in any of
their substructures. Only possible if `check_types` is `True`.
"""
_pywrap_tensorflow.AssertSameStructureForData(nest1, nest2, check_types)
def _packed_nest_with_indices(structure, flat, index):
"""Helper function for pack_nest_as.
Args:
structure: Substructure (tuple of elements and/or tuples) to mimic
flat: Flattened values to output substructure for.
index: Index at which to start reading from flat.
Returns:
The tuple (new_index, child), where:
* new_index - the updated index into `flat` having processed `structure`.
* packed - the subset of `flat` corresponding to `structure`,
having started at `index`, and packed into the same nested
format.
Raises:
ValueError: if `structure` contains more elements than `flat`
(assuming indexing starts from `index`).
"""
packed = []
for s in _yield_value(structure):
if is_sequence(s):
new_index, child = _packed_nest_with_indices(s, flat, index)
packed.append(_sequence_like(s, child))
index = new_index
else:
packed.append(flat[index])
index += 1
return index, packed
def pack_sequence_as(structure, flat_sequence):
"""Returns a given flattened sequence packed into a nest.
If `structure` is a scalar, `flat_sequence` must be a single-element list;
in this case the return value is `flat_sequence[0]`.
Args:
structure: tuple or list constructed of scalars and/or other tuples/lists,
or a scalar. Note: numpy arrays are considered scalars.
flat_sequence: flat sequence to pack.
Returns:
packed: `flat_sequence` converted to have the same recursive structure as
`structure`.
Raises:
ValueError: If nest and structure have different element counts.
"""
if not (is_sequence(flat_sequence) or isinstance(flat_sequence, list)):
raise TypeError("flat_sequence must be a sequence")
if not is_sequence(structure):
if len(flat_sequence) != 1:
raise ValueError("Structure is a scalar but len(flat_sequence) == %d > 1"
% len(flat_sequence))
return flat_sequence[0]
flat_structure = flatten(structure)
if len(flat_structure) != len(flat_sequence):
raise ValueError(
"Could not pack sequence. Structure had %d elements, but flat_sequence "
"had %d elements. Structure: %s, flat_sequence: %s."
% (len(flat_structure), len(flat_sequence), structure, flat_sequence))
_, packed = _packed_nest_with_indices(structure, flat_sequence, 0)
return _sequence_like(structure, packed)
def map_structure(func, *structure, **check_types_dict):
"""Applies `func` to each entry in `structure` and returns a new structure.
Applies `func(x[0], x[1], ...)` where x[i] is an entry in
`structure[i]`. All structures in `structure` must have the same arity,
and the return value will contain the results in the same structure.
Args:
func: A callable that accepts as many arguments as there are structures.
*structure: scalar, or tuple or list constructed of scalars and/or other
tuples/lists, or scalars. Note: numpy arrays are considered scalars.
**check_types_dict: only valid keyword argument is `check_types`. If set to
`True` (default) the types of iterables within the structures have to be
same (e.g. `map_structure(func, [1], (1,))` raises a `TypeError`
exception). To allow this, set this argument to `False`.
Returns:
A new structure with the same arity as `structure`, whose values correspond
to `func(x[0], x[1], ...)` where `x[i]` is a value in the corresponding
location in `structure[i]`. If there are different sequence types and
`check_types` is `False` the sequence types of the first structure will be
used.
Raises:
TypeError: If `func` is not callable or if the structures do not match
each other by depth tree.
ValueError: If no structure is provided or if the structures do not match
each other by type.
ValueError: If wrong keyword arguments are provided.
"""
if not callable(func):
raise TypeError("func must be callable, got: %s" % func)
if not structure:
raise ValueError("Must provide at least one structure")
if check_types_dict:
if "check_types" not in check_types_dict or len(check_types_dict) > 1:
raise ValueError("Only valid keyword argument is check_types")
check_types = check_types_dict["check_types"]
else:
check_types = True
for other in structure[1:]:
assert_same_structure(structure[0], other, check_types=check_types)
flat_structure = [flatten(s) for s in structure]
entries = zip(*flat_structure)
return pack_sequence_as(
structure[0], [func(*x) for x in entries])
def _yield_flat_up_to(shallow_tree, input_tree):
"""Yields elements `input_tree` partially flattened up to `shallow_tree`."""
if is_sequence(shallow_tree):
for shallow_branch, input_branch in zip(_yield_value(shallow_tree),
_yield_value(input_tree)):
for input_leaf in _yield_flat_up_to(shallow_branch, input_branch):
yield input_leaf
else:
yield input_tree
def assert_shallow_structure(shallow_tree, input_tree, check_types=True):
"""Asserts that `shallow_tree` is a shallow structure of `input_tree`.
That is, this function tests if the `input_tree` structure can be created from
the `shallow_tree` structure by replacing its leaf nodes with deeper
tree structures.
Examples:
The following code will raise an exception:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"], "f"]
assert_shallow_structure(shallow_tree, input_tree)
```
The following code will not raise an exception:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"]]
assert_shallow_structure(shallow_tree, input_tree)
```
Args:
shallow_tree: an arbitrarily nested structure.
input_tree: an arbitrarily nested structure.
check_types: if `True` (default) the sequence types of `shallow_tree` and
`input_tree` have to be the same.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`. Only raised if `check_types` is `True`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
if is_sequence(shallow_tree):
if not is_sequence(input_tree):
raise TypeError(
"If shallow structure is a sequence, input must also be a sequence. "
"Input has type: %s." % type(input_tree))
if check_types and not isinstance(input_tree, type(shallow_tree)):
raise TypeError(
"The two structures don't have the same sequence type. Input "
"structure has type %s, while shallow structure has type %s."
% (type(input_tree), type(shallow_tree)))
if len(input_tree) != len(shallow_tree):
raise ValueError(
"The two structures don't have the same sequence length. Input "
"structure has length %s, while shallow structure has length %s."
% (len(input_tree), len(shallow_tree)))
if check_types and isinstance(shallow_tree, dict):
if set(input_tree) != set(shallow_tree):
raise ValueError(
"The two structures don't have the same keys. Input "
"structure has keys %s, while shallow structure has keys %s." %
(list(input_tree), list(shallow_tree)))
input_tree = list(sorted(_six.iteritems(input_tree)))
shallow_tree = list(sorted(_six.iteritems(shallow_tree)))
for shallow_branch, input_branch in zip(shallow_tree, input_tree):
assert_shallow_structure(shallow_branch, input_branch,
check_types=check_types)
def flatten_up_to(shallow_tree, input_tree):
"""Flattens `input_tree` up to `shallow_tree`.
Any further depth in structure in `input_tree` is retained as elements in the
partially flattened output.
If `shallow_tree` and `input_tree` are not sequences, this returns a
single-element list: `[input_tree]`.
Use Case:
Sometimes we may wish to partially flatten a nested sequence, retaining some
of the nested structure. We achieve this by specifying a shallow structure,
`shallow_tree`, that we wish to flatten up to.
The input, `input_tree`, can be thought of as having the same structure as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
Examples:
```python
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = flatten_up_to(shallow_tree, shallow_tree)
# Output is:
# [[2, 2], [3, 3], [4, 9], [5, 5]]
# [True, True, False, True]
```
```python
input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]]
shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]]
input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree)
input_tree_flattened = flatten(input_tree)
# Output is:
# [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
# ['a', 1, 'b', 2, 'c', 3, 'd', 4]
```
Non-Sequence Edge Cases:
```python
flatten_up_to(0, 0) # Output: [0]
flatten_up_to(0, [0, 1, 2]) # Output: [[0, 1, 2]]
flatten_up_to([0, 1, 2], 0) # Output: TypeError
flatten_up_to([0, 1, 2], [0, 1, 2]) # Output: [0, 1, 2]
```
Args:
shallow_tree: a possibly pruned structure of input_tree.
input_tree: an arbitrarily nested structure or a scalar object.
Note, numpy arrays are considered scalars.
Returns:
A Python list, the partially flattened version of `input_tree` according to
the structure of `shallow_tree`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
assert_shallow_structure(shallow_tree, input_tree)
return list(_yield_flat_up_to(shallow_tree, input_tree))
def map_structure_up_to(shallow_tree, func, *inputs):
"""Applies a function or op to a number of partially flattened inputs.
The `inputs` are flattened up to `shallow_tree` before being mapped.
Use Case:
Sometimes we wish to apply a function to a partially flattened
sequence (for example when the function itself takes sequence inputs). We
achieve this by specifying a shallow structure, `shallow_tree`, that we wish
to flatten up to.
The `inputs` can be thought of as having the same structure as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
This function, therefore, will return something with the same base structure
as `shallow_tree`.
Examples:
```python
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = map_structure_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul,
inp_val, inp_ops)
# Output is: ab_tuple(a=6, b=15)
```
```python
data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
name_list = ['evens', ['odds', 'primes']]
out = map_structure_up_to(
name_list,
lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
# Output is: ['first_4_evens', ['first_5_odds', 'first_3_primes']]
```
Args:
shallow_tree: a shallow tree, common to all the inputs.
func: callable which will be applied to each input individually.
*inputs: arbitrarily nested combination of objects that are compatible with
shallow_tree. The function `func` is applied to corresponding
partially flattened elements of each input, so the function must support
arity of `len(inputs)`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
Returns:
result of repeatedly applying `func`, with same structure as
`shallow_tree`.
"""
if not inputs:
raise ValueError("Cannot map over no sequences")
for input_tree in inputs:
assert_shallow_structure(shallow_tree, input_tree)
# Flatten each input separately, apply the function to corresponding elements,
# then repack based on the structure of the first input.
all_flattened_up_to = [flatten_up_to(shallow_tree, input_tree)
for input_tree in inputs]
results = [func(*tensors) for tensors in zip(*all_flattened_up_to)]
return pack_sequence_as(structure=shallow_tree, flat_sequence=results)
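# Editor's sketch (not part of the original module): exercising the
# `flatten` / `pack_sequence_as` / `map_structure` helpers defined above on
# a small nested structure (dicts and tuples only, per this fork's rules).
def _example_nest_usage():
  """Doubles every leaf of a nested structure while preserving its shape."""
  value = {"a": 1, "b": (2, 3)}
  flat = flatten(value)                      # [1, 2, 3] (keys sorted)
  repacked = pack_sequence_as(value, flat)   # {"a": 1, "b": (2, 3)}
  return map_structure(lambda x: x * 2, repacked)  # {"a": 2, "b": (4, 6)}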
|
tensorflow-master
|
tensorflow/python/data/util/nest.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.util import random_seed as data_random_seed
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class RandomSeedTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testRandomSeed(self):
zero_t = constant_op.constant(0, dtype=dtypes.int64, name='zero')
one_t = constant_op.constant(1, dtype=dtypes.int64, name='one')
intmax_t = constant_op.constant(
2**31 - 1, dtype=dtypes.int64, name='intmax')
test_cases = [
# Each test case is a tuple with input to get_seed:
# (input_graph_seed, input_op_seed)
# and output from get_seed:
# (output_graph_seed, output_op_seed)
((None, None), (0, 0)),
((None, 1), (random_seed.DEFAULT_GRAPH_SEED, 1)),
((1, 1), (1, 1)),
((0, 0), (0, 2**31 - 1)), # Avoid nondeterministic (0, 0) output
((2**31 - 1, 0), (0, 2**31 - 1)), # Don't wrap to (0, 0) either
((0, 2**31 - 1), (0, 2**31 - 1)), # Wrapping for the other argument
# Once more, with tensor-valued arguments
((None, one_t), (random_seed.DEFAULT_GRAPH_SEED, 1)),
((1, one_t), (1, 1)),
((0, zero_t), (0, 2**31 - 1)), # Avoid nondeterministic (0, 0) output
((2**31 - 1, zero_t), (0, 2**31 - 1)), # Don't wrap to (0, 0) either
((0, intmax_t), (0, 2**31 - 1)), # Wrapping for the other argument
]
for tc in test_cases:
tinput, toutput = tc[0], tc[1]
random_seed.set_random_seed(tinput[0])
g_seed, op_seed = data_random_seed.get_seed(tinput[1])
g_seed = self.evaluate(g_seed)
op_seed = self.evaluate(op_seed)
msg = 'test_case = {0}, got {1}, want {2}'.format(
tinput, (g_seed, op_seed), toutput)
self.assertEqual((g_seed, op_seed), toutput, msg=msg)
random_seed.set_random_seed(None)
if not context.executing_eagerly():
random_seed.set_random_seed(1)
tinput = (1, None)
toutput = (1, ops.get_default_graph()._last_id) # pylint: disable=protected-access
random_seed.set_random_seed(tinput[0])
g_seed, op_seed = data_random_seed.get_seed(tinput[1])
g_seed = self.evaluate(g_seed)
op_seed = self.evaluate(op_seed)
msg = 'test_case = {0}, got {1}, want {2}'.format(1, (g_seed, op_seed),
toutput)
self.assertEqual((g_seed, op_seed), toutput, msg=msg)
random_seed.set_random_seed(None)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/data/util/random_seed_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities for traversing the dataset construction graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import traverse
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class _TestDataset(dataset_ops.UnaryUnchangedStructureDataset):
def __init__(self, input_dataset):
self._input_dataset = input_dataset
temp_variant_tensor = gen_dataset_ops.prefetch_dataset(
input_dataset._variant_tensor,
buffer_size=1,
**dataset_ops.flat_structure(self))
variant_tensor = gen_dataset_ops.model_dataset(
temp_variant_tensor, **dataset_ops.flat_structure(self))
super(_TestDataset, self).__init__(input_dataset, variant_tensor)
class TraverseTest(test.TestCase):
@test_util.run_deprecated_v1
def testOnlySource(self):
ds = dataset_ops.Dataset.range(10)
variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)
self.assertAllEqual(["RangeDataset"], [x.name for x in variant_tensor_ops])
@test_util.run_deprecated_v1
def testSimplePipeline(self):
ds = dataset_ops.Dataset.range(10).map(math_ops.square)
variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)
self.assertSetEqual(
set(["MapDataset", "RangeDataset"]),
set([x.name for x in variant_tensor_ops]))
@test_util.run_deprecated_v1
def testConcat(self):
ds1 = dataset_ops.Dataset.range(10)
ds2 = dataset_ops.Dataset.range(10)
ds = ds1.concatenate(ds2)
variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)
self.assertSetEqual(
set(["ConcatenateDataset", "RangeDataset", "RangeDataset_1"]),
set([x.name for x in variant_tensor_ops]))
@test_util.run_deprecated_v1
def testZip(self):
ds1 = dataset_ops.Dataset.range(10)
ds2 = dataset_ops.Dataset.range(10)
ds = dataset_ops.Dataset.zip((ds1, ds2))
variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)
self.assertSetEqual(
set(["ZipDataset", "RangeDataset", "RangeDataset_1"]),
set([x.name for x in variant_tensor_ops]))
@test_util.run_deprecated_v1
def testMultipleVariantTensors(self):
ds = dataset_ops.Dataset.range(10)
ds = _TestDataset(ds)
variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)
self.assertSetEqual(
set(["RangeDataset", "ModelDataset", "PrefetchDataset"]),
set([x.name for x in variant_tensor_ops]))
@test_util.run_deprecated_v1
def testFlatMap(self):
ds1 = dataset_ops.Dataset.range(10).repeat(10)
def map_fn(ds):
def _map(x):
return ds.batch(x)
return _map
ds2 = dataset_ops.Dataset.range(20).prefetch(1)
ds2 = ds2.flat_map(map_fn(ds1))
variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds2)
self.assertSetEqual(
set([
"FlatMapDataset", "PrefetchDataset", "RepeatDataset",
"RangeDataset", "RangeDataset_1"
]), set([x.name for x in variant_tensor_ops]))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/util/traverse_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers constructing Datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
def optional_param_to_tensor(argument_name,
argument_value,
argument_default=0,
argument_dtype=dtypes.int64):
if argument_value is not None:
return ops.convert_to_tensor(
argument_value, dtype=argument_dtype, name=argument_name)
else:
return constant_op.constant(
argument_default, dtype=argument_dtype, name=argument_name)
def partial_shape_to_tensor(shape_like):
"""Returns a `tf.Tensor` that represents the given shape.
Args:
shape_like: A value that can be converted to a `tf.TensorShape` or a
`tf.Tensor`.
Returns:
A 1-D `tf.Tensor` of `tf.int64` elements representing the given shape, where
`-1` is substituted for any unknown dimensions.
"""
try:
# First attempt to convert the input to a shape, and return the
# "canonical" tensor representation, which uses `-1` in place of
# `None`.
shape_like = tensor_shape.as_shape(shape_like)
return ops.convert_to_tensor(
[dim if dim is not None else -1 for dim in shape_like.as_list()],
dtype=dtypes.int64)
except (TypeError, ValueError):
# The argument was not trivially convertible to a
# `tf.TensorShape`, so fall back on the conversion to tensor
# machinery.
ret = ops.convert_to_tensor(shape_like, preferred_dtype=dtypes.int64)
if ret.shape.dims is not None and len(ret.shape.dims) != 1:
raise ValueError("The given shape %s must be a 1-D tensor of tf.int64 "
"values, but the shape was %s."
% (shape_like, ret.shape))
if ret.dtype != dtypes.int64:
raise TypeError("The given shape %s must be a 1-D tensor of tf.int64 "
"values, but the element type was %s."
% (shape_like, ret.dtype.name))
return ret
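# Editor's sketch (not part of the original module): the two helpers above
# applied to typical inputs.
def _example_convert_usage():
  """Converts an optional argument and a partially known shape to tensors."""
  # None falls back to the default (0 here), returned as a constant tensor.
  buffer_size = optional_param_to_tensor("buffer_size", None)
  # Unknown dimensions become -1 in the resulting 1-D tf.int64 tensor.
  shape = partial_shape_to_tensor(tensor_shape.TensorShape([None, 3]))
  return buffer_size, shape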
|
tensorflow-master
|
tensorflow/python/data/util/convert.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import test
# NOTE(mrry): Arguments of parameterized tests are lifted into lambdas to make
# sure they are not executed before the (eager- or graph-mode) test environment
# has been set up.
#
# TODO(jsimsa): Add tests for OptionalStructure and DatasetStructure.
class StructureTest(test_base.DatasetTestBase, parameterized.TestCase,
ragged_test_util.RaggedTensorTestCase):
# pylint: disable=g-long-lambda,protected-access
@parameterized.named_parameters(
("Tensor", lambda: constant_op.constant(37.0), tensor_spec.TensorSpec,
[dtypes.float32], [[]]),
("TensorArray", lambda: tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(3,), size=0),
tensor_array_ops.TensorArraySpec, [dtypes.variant], [[]]),
("SparseTensor", lambda: sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
sparse_tensor.SparseTensorSpec, [dtypes.variant], [None]),
("RaggedTensor", lambda: ragged_factory_ops.constant([[1, 2], [], [4]]),
ragged_tensor.RaggedTensorSpec, [dtypes.variant], [None]),
("Nested_0",
lambda: (constant_op.constant(37.0), constant_op.constant([1, 2, 3])),
structure.NestedStructure, [dtypes.float32, dtypes.int32], [[], [3]]),
("Nested_1", lambda: {
"a": constant_op.constant(37.0),
"b": constant_op.constant([1, 2, 3])
}, structure.NestedStructure, [dtypes.float32, dtypes.int32], [[], [3]]),
("Nested_2", lambda: {
"a":
constant_op.constant(37.0),
"b": (sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]))
}, structure.NestedStructure,
[dtypes.float32, dtypes.variant, dtypes.variant], [[], None, None]),
)
def testFlatStructure(self, value_fn, expected_structure, expected_types,
expected_shapes):
value = value_fn()
s = type_spec.type_spec_from_value(value)
self.assertIsInstance(s, expected_structure)
self.assertEqual(expected_types, s._flat_types)
self.assertLen(s._flat_shapes, len(expected_shapes))
for expected, actual in zip(expected_shapes, s._flat_shapes):
if expected is None:
self.assertEqual(actual.ndims, None)
else:
self.assertEqual(actual.as_list(), expected)
@parameterized.named_parameters(
("Tensor", lambda: constant_op.constant(37.0), lambda: [
constant_op.constant(38.0),
array_ops.placeholder(dtypes.float32),
variables.Variable(100.0), 42.0,
np.array(42.0, dtype=np.float32)
], lambda: [constant_op.constant([1.0, 2.0]),
constant_op.constant(37)]),
("TensorArray", lambda: tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(3,), size=0), lambda: [
tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(3,), size=0),
tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(3,), size=10)
], lambda: [
tensor_array_ops.TensorArray(
dtype=dtypes.int32, element_shape=(3,), size=0),
tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(), size=0)
]),
("SparseTensor", lambda: sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
lambda: [
sparse_tensor.SparseTensor(
indices=[[1, 1], [3, 4]], values=[10, -1], dense_shape=[4, 5]),
sparse_tensor.SparseTensorValue(
indices=[[1, 1], [3, 4]], values=[10, -1], dense_shape=[4, 5]),
array_ops.sparse_placeholder(dtype=dtypes.int32),
array_ops.sparse_placeholder(dtype=dtypes.int32, shape=[None, None])
], lambda: [
constant_op.constant(37, shape=[4, 5]),
sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[5, 6]),
array_ops.sparse_placeholder(
dtype=dtypes.int32, shape=[None, None, None]),
sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1.0], dense_shape=[4, 5])
]),
("RaggedTensor", lambda: ragged_factory_ops.constant([[1, 2], [], [3]]),
lambda: [
ragged_factory_ops.constant([[1, 2], [3, 4], []]),
ragged_factory_ops.constant([[1], [2, 3, 4], [5]]),
], lambda: [
ragged_factory_ops.constant(1),
ragged_factory_ops.constant([1, 2]),
ragged_factory_ops.constant([[1], [2]]),
ragged_factory_ops.constant([["a", "b"]]),
]),
("Nested", lambda: {
"a": constant_op.constant(37.0),
"b": constant_op.constant([1, 2, 3])
}, lambda: [{
"a": constant_op.constant(15.0),
"b": constant_op.constant([4, 5, 6])
}], lambda: [{
"a": constant_op.constant(15.0),
"b": constant_op.constant([4, 5, 6, 7])
}, {
"a": constant_op.constant(15),
"b": constant_op.constant([4, 5, 6])
}, {
"a":
constant_op.constant(15),
"b":
sparse_tensor.SparseTensor(
indices=[[0], [1], [2]], values=[4, 5, 6], dense_shape=[3])
}, (constant_op.constant(15.0), constant_op.constant([4, 5, 6]))]),
)
@test_util.run_deprecated_v1
def testIsCompatibleWithStructure(
self, original_value_fn, compatible_values_fn, incompatible_values_fn):
original_value = original_value_fn()
compatible_values = compatible_values_fn()
incompatible_values = incompatible_values_fn()
s = type_spec.type_spec_from_value(original_value)
for compatible_value in compatible_values:
self.assertTrue(
s.is_compatible_with(
type_spec.type_spec_from_value(compatible_value)))
for incompatible_value in incompatible_values:
self.assertFalse(
s.is_compatible_with(
type_spec.type_spec_from_value(incompatible_value)))
@parameterized.named_parameters(
("Tensor",
lambda: constant_op.constant(37.0),
lambda: constant_op.constant(42.0),
lambda: constant_op.constant([5])),
("TensorArray",
lambda: tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(3,), size=0),
lambda: tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(3,), size=0),
lambda: tensor_array_ops.TensorArray(
dtype=dtypes.int32, element_shape=(), size=0)),
("SparseTensor",
lambda: sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
lambda: sparse_tensor.SparseTensor(
indices=[[1, 2]], values=[42], dense_shape=[4, 5]),
lambda: sparse_tensor.SparseTensor(
indices=[[3]], values=[-1], dense_shape=[5]),
lambda: sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[1.0], dense_shape=[4, 5])),
("RaggedTensor",
lambda: ragged_factory_ops.constant([[[1, 2]], [[3]]]),
lambda: ragged_factory_ops.constant([[[5]], [[8], [3, 2]]]),
lambda: ragged_factory_ops.constant([[[1]], [[2], [3]]],
ragged_rank=1),
lambda: ragged_factory_ops.constant([[[1.0, 2.0]], [[3.0]]]),
lambda: ragged_factory_ops.constant([[[1]], [[2]], [[3]]])),
("Nested",
lambda: {
"a": constant_op.constant(37.0),
"b": constant_op.constant([1, 2, 3])},
lambda: {
"a": constant_op.constant(42.0),
"b": constant_op.constant([4, 5, 6])},
lambda: {
"a": constant_op.constant([1, 2, 3]),
"b": constant_op.constant(37.0)
}),
) # pyformat: disable
def testStructureFromValueEquality(self, value1_fn, value2_fn,
*not_equal_value_fns):
# pylint: disable=g-generic-assert
s1 = type_spec.type_spec_from_value(value1_fn())
s2 = type_spec.type_spec_from_value(value2_fn())
self.assertEqual(s1, s1) # check __eq__ operator.
self.assertEqual(s1, s2) # check __eq__ operator.
self.assertFalse(s1 != s1) # check __ne__ operator.
self.assertFalse(s1 != s2) # check __ne__ operator.
self.assertEqual(hash(s1), hash(s1))
self.assertEqual(hash(s1), hash(s2))
for value_fn in not_equal_value_fns:
s3 = type_spec.type_spec_from_value(value_fn())
self.assertNotEqual(s1, s3) # check __ne__ operator.
self.assertNotEqual(s2, s3) # check __ne__ operator.
self.assertFalse(s1 == s3) # check __eq_ operator.
self.assertFalse(s2 == s3) # check __eq_ operator.
@parameterized.named_parameters(
("RaggedTensor_RaggedRank",
structure.RaggedTensorStructure(dtypes.int32, None, 1),
structure.RaggedTensorStructure(dtypes.int32, None, 2)),
("RaggedTensor_Shape",
structure.RaggedTensorStructure(dtypes.int32, [3, None], 1),
structure.RaggedTensorStructure(dtypes.int32, [5, None], 1)),
("RaggedTensor_DType",
structure.RaggedTensorStructure(dtypes.int32, None, 1),
structure.RaggedTensorStructure(dtypes.float32, None, 1)),
)
def testInequality(self, s1, s2):
# pylint: disable=g-generic-assert
self.assertNotEqual(s1, s2) # check __ne__ operator.
self.assertFalse(s1 == s2) # check __eq__ operator.
@parameterized.named_parameters(
("Tensor", lambda: constant_op.constant(37.0),
lambda: constant_op.constant(42.0), lambda: constant_op.constant([5])),
("TensorArray", lambda: tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(3,), size=0),
lambda: tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(3,), size=0),
lambda: tensor_array_ops.TensorArray(
dtype=dtypes.int32, element_shape=(), size=0)),
("SparseTensor", lambda: sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
lambda: sparse_tensor.SparseTensor(
indices=[[1, 2]], values=[42], dense_shape=[4, 5]), lambda:
sparse_tensor.SparseTensor(indices=[[3]], values=[-1], dense_shape=[5])),
("Nested", lambda: {
"a": constant_op.constant(37.0),
"b": constant_op.constant([1, 2, 3])
}, lambda: {
"a": constant_op.constant(42.0),
"b": constant_op.constant([4, 5, 6])
}, lambda: {
"a": constant_op.constant([1, 2, 3]),
"b": constant_op.constant(37.0)
}),
)
def testHash(self, value1_fn, value2_fn, value3_fn):
s1 = type_spec.type_spec_from_value(value1_fn())
s2 = type_spec.type_spec_from_value(value2_fn())
s3 = type_spec.type_spec_from_value(value3_fn())
self.assertEqual(hash(s1), hash(s1))
self.assertEqual(hash(s1), hash(s2))
self.assertNotEqual(hash(s1), hash(s3))
self.assertNotEqual(hash(s2), hash(s3))
@parameterized.named_parameters(
(
"Tensor",
lambda: constant_op.constant(37.0),
),
(
"SparseTensor",
lambda: sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
),
("TensorArray", lambda: tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(), size=1).write(0, 7)),
("RaggedTensor", lambda: ragged_factory_ops.constant([[1, 2], [], [3]]),),
(
"Nested_0",
lambda: {
"a": constant_op.constant(37.0),
"b": constant_op.constant([1, 2, 3])
},
),
(
"Nested_1",
lambda: {
"a":
constant_op.constant(37.0),
"b": (sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]))
},
),
)
def testRoundTripConversion(self, value_fn):
value = value_fn()
s = type_spec.type_spec_from_value(value)
def maybe_stack_ta(v):
if isinstance(v, tensor_array_ops.TensorArray):
return v.stack()
else:
return v
before = self.evaluate(maybe_stack_ta(value))
after = self.evaluate(
maybe_stack_ta(s._from_tensor_list(s._to_tensor_list(value))))
flat_before = nest.flatten(before)
flat_after = nest.flatten(after)
for b, a in zip(flat_before, flat_after):
if isinstance(b, sparse_tensor.SparseTensorValue):
self.assertAllEqual(b.indices, a.indices)
self.assertAllEqual(b.values, a.values)
self.assertAllEqual(b.dense_shape, a.dense_shape)
elif isinstance(
b,
(ragged_tensor.RaggedTensor, ragged_tensor_value.RaggedTensorValue)):
self.assertRaggedEqual(b, a)
else:
self.assertAllEqual(b, a)
# pylint: enable=g-long-lambda
def preserveStaticShape(self):
rt = ragged_factory_ops.constant([[1, 2], [], [3]])
rt_s = type_spec.type_spec_from_value(rt)
rt_after = rt_s._from_tensor_list(rt_s._to_tensor_list(rt))
self.assertEqual(rt_after.row_splits.shape.as_list(),
rt.row_splits.shape.as_list())
self.assertEqual(rt_after.values.shape.as_list(), [None])
st = sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5])
st_s = type_spec.type_spec_from_value(st)
st_after = st_s._from_tensor_list(st_s._to_tensor_list(st))
self.assertEqual(st_after.indices.shape.as_list(),
[None, 2])
self.assertEqual(st_after.values.shape.as_list(), [None])
self.assertEqual(st_after.dense_shape.shape.as_list(),
st.dense_shape.shape.as_list())
def testIncompatibleStructure(self):
# Define three mutually incompatible values/structures, and assert that:
# 1. Using one structure to flatten a value with an incompatible structure
# fails.
# 2. Using one structure to restructure a flattened value with an
# incompatible structure fails.
value_tensor = constant_op.constant(42.0)
s_tensor = type_spec.type_spec_from_value(value_tensor)
flat_tensor = s_tensor._to_tensor_list(value_tensor)
value_sparse_tensor = sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1])
s_sparse_tensor = type_spec.type_spec_from_value(value_sparse_tensor)
flat_sparse_tensor = s_sparse_tensor._to_tensor_list(value_sparse_tensor)
value_nest = {
"a": constant_op.constant(37.0),
"b": constant_op.constant([1, 2, 3])
}
s_nest = type_spec.type_spec_from_value(value_nest)
flat_nest = s_nest._to_tensor_list(value_nest)
with self.assertRaisesRegexp(
ValueError, r"SparseTensor.* is not convertible to a tensor with "
r"dtype.*float32.* and shape \(\)"):
s_tensor._to_tensor_list(value_sparse_tensor)
with self.assertRaisesRegexp(
ValueError, r"Value \{.*\} is not convertible to a tensor with "
r"dtype.*float32.* and shape \(\)"):
s_tensor._to_tensor_list(value_nest)
with self.assertRaisesRegexp(
TypeError, "Neither a SparseTensor nor SparseTensorValue"):
s_sparse_tensor._to_tensor_list(value_tensor)
with self.assertRaisesRegexp(
TypeError, "Neither a SparseTensor nor SparseTensorValue"):
s_sparse_tensor._to_tensor_list(value_nest)
with self.assertRaisesRegexp(
ValueError, "Tensor.* not compatible with the nested structure "
".*TensorSpec.*TensorSpec"):
s_nest._to_tensor_list(value_tensor)
with self.assertRaisesRegexp(
ValueError, "SparseTensor.* not compatible with the nested structure "
".*TensorSpec.*TensorSpec"):
s_nest._to_tensor_list(value_sparse_tensor)
with self.assertRaisesRegexp(ValueError, r"Incompatible input:"):
s_tensor._from_tensor_list(flat_sparse_tensor)
with self.assertRaisesRegexp(ValueError, "Incompatible input: "):
s_tensor._from_tensor_list(flat_nest)
with self.assertRaisesRegexp(ValueError, "Incompatible input: "):
s_sparse_tensor._from_tensor_list(flat_tensor)
with self.assertRaisesRegexp(ValueError, "Incompatible input: "):
s_sparse_tensor._from_tensor_list(flat_nest)
with self.assertRaisesRegexp(
ValueError, "Expected 2 flat values in NestedStructure but got 1."):
s_nest._from_tensor_list(flat_tensor)
with self.assertRaisesRegexp(
ValueError, "Expected 2 flat values in NestedStructure but got 1."):
s_nest._from_tensor_list(flat_sparse_tensor)
def testIncompatibleNestedStructure(self):
# Define three mutually incompatible nested values/structures, and assert
# that:
# 1. Using one structure to flatten a value with an incompatible structure
# fails.
# 2. Using one structure to restructure a flattened value with an
# incompatible structure fails.
value_0 = {
"a": constant_op.constant(37.0),
"b": constant_op.constant([1, 2, 3])
}
s_0 = type_spec.type_spec_from_value(value_0)
flat_s_0 = s_0._to_tensor_list(value_0)
# `value_1` has compatible nested structure with `value_0`, but different
# classes.
value_1 = {
"a":
constant_op.constant(37.0),
"b":
sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1])
}
s_1 = type_spec.type_spec_from_value(value_1)
flat_s_1 = s_1._to_tensor_list(value_1)
# `value_2` has incompatible nested structure with `value_0` and `value_1`.
value_2 = {
"a":
constant_op.constant(37.0),
"b": (sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]))
}
s_2 = type_spec.type_spec_from_value(value_2)
flat_s_2 = s_2._to_tensor_list(value_2)
with self.assertRaisesRegexp(
ValueError, ".*SparseTensor.* not compatible with the nested structure "
".*TensorSpec"):
s_0._to_tensor_list(value_1)
with self.assertRaisesRegexp(
ValueError, ".*SparseTensor.*SparseTensor.* not compatible with the "
"nested structure .*TensorSpec"):
s_0._to_tensor_list(value_2)
with self.assertRaisesRegexp(
ValueError, ".*Tensor.* not compatible with the nested structure "
".*SparseTensorSpec"):
s_1._to_tensor_list(value_0)
with self.assertRaisesRegexp(
ValueError, ".*SparseTensor.*SparseTensor.* not compatible with the "
"nested structure .*TensorSpec"):
s_0._to_tensor_list(value_2)
# NOTE(mrry): The repr of the dictionaries is not sorted, so the regexp
# needs to account for "a" coming before or after "b". It might be worth
# adding a deterministic repr for these error messages (among other
# improvements).
with self.assertRaisesRegexp(
ValueError,
".*Tensor.*Tensor.* not compatible with the nested structure "
".*(TensorSpec.*SparseTensorSpec.*SparseTensorSpec|"
"SparseTensorSpec.*SparseTensorSpec.*TensorSpec)"):
s_2._to_tensor_list(value_0)
with self.assertRaisesRegexp(
ValueError, "(Tensor.*SparseTensor|SparseTensor.*Tensor).* "
"not compatible with the nested structure .*"
"(TensorSpec.*SparseTensorSpec.*SparseTensorSpec|"
"SparseTensorSpec.*SparseTensorSpec.*TensorSpec)"):
s_2._to_tensor_list(value_1)
with self.assertRaisesRegexp(ValueError, r"Incompatible input:"):
s_0._from_tensor_list(flat_s_1)
with self.assertRaisesRegexp(
ValueError, "Expected 2 flat values in NestedStructure but got 3."):
s_0._from_tensor_list(flat_s_2)
with self.assertRaisesRegexp(ValueError, "Incompatible input: "):
s_1._from_tensor_list(flat_s_0)
with self.assertRaisesRegexp(
ValueError, "Expected 2 flat values in NestedStructure but got 3."):
s_1._from_tensor_list(flat_s_2)
with self.assertRaisesRegexp(
ValueError, "Expected 3 flat values in NestedStructure but got 2."):
s_2._from_tensor_list(flat_s_0)
with self.assertRaisesRegexp(
ValueError, "Expected 3 flat values in NestedStructure but got 2."):
s_2._from_tensor_list(flat_s_1)
@parameterized.named_parameters(
("Tensor", dtypes.float32, tensor_shape.scalar(), ops.Tensor,
structure.TensorStructure(dtypes.float32, [])),
("SparseTensor", dtypes.int32, tensor_shape.matrix(
2, 2), sparse_tensor.SparseTensor,
structure.SparseTensorStructure(dtypes.int32, [2, 2])),
("TensorArray_0", dtypes.int32, tensor_shape.as_shape(
[None, True, 2, 2]), tensor_array_ops.TensorArray,
structure.TensorArrayStructure(
dtypes.int32, [2, 2], dynamic_size=None, infer_shape=True)),
("TensorArray_1", dtypes.int32, tensor_shape.as_shape(
[True, None, 2, 2]), tensor_array_ops.TensorArray,
structure.TensorArrayStructure(
dtypes.int32, [2, 2], dynamic_size=True, infer_shape=None)),
("TensorArray_2", dtypes.int32, tensor_shape.as_shape(
[True, False, 2, 2]), tensor_array_ops.TensorArray,
structure.TensorArrayStructure(
dtypes.int32, [2, 2], dynamic_size=True, infer_shape=False)),
("RaggedTensor", dtypes.int32, tensor_shape.matrix(2, None),
structure.RaggedTensorStructure(dtypes.int32, [2, None], 1),
structure.RaggedTensorStructure(dtypes.int32, [2, None], 1)),
("Nested", {
"a": dtypes.float32,
"b": (dtypes.int32, dtypes.string)
}, {
"a": tensor_shape.scalar(),
"b": (tensor_shape.matrix(2, 2), tensor_shape.scalar())
}, {
"a": ops.Tensor,
"b": (sparse_tensor.SparseTensor, ops.Tensor)
},
structure.NestedStructure({
"a":
structure.TensorStructure(dtypes.float32, []),
"b": (structure.SparseTensorStructure(dtypes.int32, [2, 2]),
structure.TensorStructure(dtypes.string, []))
})),
)
def testConvertLegacyStructure(self, output_types, output_shapes,
output_classes, expected_structure):
actual_structure = structure.convert_legacy_structure(
output_types, output_shapes, output_classes)
self.assertEqual(actual_structure, expected_structure)
def testNestedNestedStructure(self):
# Although `Structure.from_value()` will not construct one, a nested
# structure containing nested `NestedStructure` objects can occur if a
# structure is constructed manually.
s = structure.NestedStructure(
(structure.TensorStructure(dtypes.int64, []),
structure.NestedStructure(
(structure.TensorStructure(dtypes.float32, []),
structure.TensorStructure(dtypes.string, [])))))
int64_t = constant_op.constant(37, dtype=dtypes.int64)
float32_t = constant_op.constant(42.0)
string_t = constant_op.constant("Foo")
nested_tensors = (int64_t, (float32_t, string_t))
tensor_list = s._to_tensor_list(nested_tensors)
for expected, actual in zip([int64_t, float32_t, string_t], tensor_list):
self.assertIs(expected, actual)
(actual_int64_t, (actual_float32_t, actual_string_t)) = s._from_tensor_list(
tensor_list)
self.assertIs(int64_t, actual_int64_t)
self.assertIs(float32_t, actual_float32_t)
self.assertIs(string_t, actual_string_t)
(actual_int64_t, (actual_float32_t, actual_string_t)) = (
s._from_compatible_tensor_list(tensor_list))
self.assertIs(int64_t, actual_int64_t)
self.assertIs(float32_t, actual_float32_t)
self.assertIs(string_t, actual_string_t)
@parameterized.named_parameters(
("Tensor", structure.TensorStructure(dtypes.float32, []), 32,
structure.TensorStructure(dtypes.float32, [32])),
("TensorUnknown", structure.TensorStructure(dtypes.float32, []), None,
structure.TensorStructure(dtypes.float32, [None])),
("SparseTensor", structure.SparseTensorStructure(dtypes.float32, [None]),
32, structure.SparseTensorStructure(dtypes.float32, [32, None])),
("SparseTensorUnknown",
structure.SparseTensorStructure(dtypes.float32, [4]), None,
structure.SparseTensorStructure(dtypes.float32, [None, 4])),
("RaggedTensor",
structure.RaggedTensorStructure(dtypes.float32, [2, None], 1), 32,
structure.RaggedTensorStructure(dtypes.float32, [32, 2, None], 2)),
("RaggedTensorUnknown",
structure.RaggedTensorStructure(dtypes.float32, [4, None], 1), None,
structure.RaggedTensorStructure(dtypes.float32, [None, 4, None], 2)),
("Nested", structure.NestedStructure({
"a": structure.TensorStructure(dtypes.float32, []),
"b": (structure.SparseTensorStructure(dtypes.int32, [2, 2]),
structure.TensorStructure(dtypes.string, []))}), 128,
structure.NestedStructure({
"a": structure.TensorStructure(dtypes.float32, [128]),
"b": (structure.SparseTensorStructure(dtypes.int32, [128, 2, 2]),
structure.TensorStructure(dtypes.string, [128]))})),
)
def testBatch(self, element_structure, batch_size,
expected_batched_structure):
batched_structure = element_structure._batch(batch_size)
self.assertEqual(batched_structure, expected_batched_structure)
@parameterized.named_parameters(
("Tensor", structure.TensorStructure(dtypes.float32, [32]),
structure.TensorStructure(dtypes.float32, [])),
("TensorUnknown", structure.TensorStructure(dtypes.float32, [None]),
structure.TensorStructure(dtypes.float32, [])),
("SparseTensor",
structure.SparseTensorStructure(dtypes.float32, [32, None]),
structure.SparseTensorStructure(dtypes.float32, [None])),
("SparseTensorUnknown",
structure.SparseTensorStructure(dtypes.float32, [None, 4]),
structure.SparseTensorStructure(dtypes.float32, [4])),
("RaggedTensor",
structure.RaggedTensorStructure(dtypes.float32, [32, None, None], 2),
structure.RaggedTensorStructure(dtypes.float32, [None, None], 1)),
("RaggedTensorUnknown",
structure.RaggedTensorStructure(dtypes.float32, [None, None, None], 2),
structure.RaggedTensorStructure(dtypes.float32, [None, None], 1)),
("Nested", structure.NestedStructure({
"a": structure.TensorStructure(dtypes.float32, [128]),
"b": (structure.SparseTensorStructure(dtypes.int32, [128, 2, 2]),
structure.TensorStructure(dtypes.string, [None]))}),
structure.NestedStructure({
"a": structure.TensorStructure(dtypes.float32, []),
"b": (structure.SparseTensorStructure(dtypes.int32, [2, 2]),
structure.TensorStructure(dtypes.string, []))})),
)
def testUnbatch(self, element_structure, expected_unbatched_structure):
unbatched_structure = element_structure._unbatch()
self.assertEqual(unbatched_structure, expected_unbatched_structure)
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
("Tensor", lambda: constant_op.constant([[1.0, 2.0], [3.0, 4.0]]),
lambda: constant_op.constant([1.0, 2.0])),
("SparseTensor", lambda: sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 1]], values=[13, 27], dense_shape=[2, 2]),
lambda: sparse_tensor.SparseTensor(
indices=[[0]], values=[13], dense_shape=[2])),
("RaggedTensor",
lambda: ragged_factory_ops.constant([[[1]], [[2]]]),
lambda: ragged_factory_ops.constant([[1]])),
("Nest", lambda: (
constant_op.constant([[1.0, 2.0], [3.0, 4.0]]),
sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 1]], values=[13, 27], dense_shape=[2, 2])),
lambda: (constant_op.constant([1.0, 2.0]), sparse_tensor.SparseTensor(
indices=[[0]], values=[13], dense_shape=[2]))),
)
def testToBatchedTensorList(self, value_fn, element_0_fn):
batched_value = value_fn()
s = type_spec.type_spec_from_value(batched_value)
batched_tensor_list = s._to_batched_tensor_list(batched_value)
# The batch dimension is 2 for all of the test cases.
# NOTE(mrry): `tf.shape()` does not currently work for the DT_VARIANT
# tensors in which we store sparse tensors.
for t in batched_tensor_list:
if t.dtype != dtypes.variant:
self.assertEqual(2, self.evaluate(array_ops.shape(t)[0]))
# Test that the 0th element from the unbatched tensor is equal to the
# expected value.
expected_element_0 = self.evaluate(element_0_fn())
unbatched_s = s._unbatch()
actual_element_0 = unbatched_s._from_tensor_list(
[t[0] for t in batched_tensor_list])
for expected, actual in zip(
nest.flatten(expected_element_0), nest.flatten(actual_element_0)):
if sparse_tensor.is_sparse(expected):
self.assertSparseValuesEqual(expected, actual)
elif ragged_tensor.is_ragged(expected):
self.assertRaggedEqual(expected, actual)
else:
self.assertAllEqual(expected, actual)
# pylint: enable=g-long-lambda
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/util/structure_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers to traverse the Dataset dependency structure."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
def obtain_all_variant_tensor_ops(dataset):
"""Given an input dataset, finds all dataset ops used for construction.
A series of transformations created this dataset; each transformation includes
zero or more Dataset ops, each of which produces a dataset variant tensor. This
method returns all of them.
Args:
dataset: Dataset to find variant tensors for.
Returns:
A list of variant_tensor producing dataset ops used to construct this
dataset.
"""
all_variant_tensor_ops = []
bfs_q = Queue.Queue()
bfs_q.put(dataset._variant_tensor.op) # pylint: disable=protected-access
visited = []
while not bfs_q.empty():
op = bfs_q.get()
visited.append(op)
# We look for all ops that produce variant tensors as output. This is a bit
# of overkill but the other dataset _inputs() traversal strategies can't
# cover the case of function inputs that capture dataset variants.
# TODO(b/120873778): Make this more efficient.
if op.outputs[0].dtype == dtypes.variant:
all_variant_tensor_ops.append(op)
for i in op.inputs:
input_op = i.op
if input_op not in visited:
bfs_q.put(input_op)
return all_variant_tensor_ops
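# Editor's sketch (hypothetical, not part of the original module): a minimal
# usage example for `obtain_all_variant_tensor_ops`. It assumes graph mode,
# because the traversal starts from `dataset._variant_tensor.op`, which is not
# available on eager tensors. Kept as an uncalled helper so importing this
# module has no side effects.
def _obtain_all_variant_tensor_ops_example():
  from tensorflow.python.data.ops import dataset_ops  # pylint: disable=g-import-not-at-top
  dataset = dataset_ops.Dataset.range(10).map(lambda x: x + 1).batch(2)
  # Expected to include the ops producing the Batch, Map, and Range dataset
  # variants, discovered breadth-first from the output op.
  return obtain_all_variant_tensor_ops(dataset)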
|
tensorflow-master
|
tensorflow/python/data/util/traverse.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dataset options utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.util import options
from tensorflow.python.platform import test
class _TestOptions(options.OptionsBase):
x = options.create_option(
name="x",
ty=int,
docstring="the answer to everything",
default_factory=lambda: 42)
y = options.create_option(
name="y", ty=float, docstring="a tasty pie", default_factory=lambda: 3.14)
class _NestedTestOptions(options.OptionsBase):
opts = options.create_option(
name="opts", ty=_TestOptions, docstring="nested options")
class OptionsTest(test.TestCase):
def testDocumentation(self):
self.assertEqual(_TestOptions.x.__doc__, "the answer to everything")
self.assertEqual(_TestOptions.y.__doc__, "a tasty pie")
def testCreateOption(self):
opts = _TestOptions()
self.assertEqual(opts.x, 42)
self.assertEqual(opts.y, 3.14)
self.assertIsInstance(opts.x, int)
self.assertIsInstance(opts.y, float)
opts.x = 0
self.assertEqual(opts.x, 0)
with self.assertRaises(TypeError):
opts.x = 3.14
opts.y = 0.0
self.assertEqual(opts.y, 0.0)
with self.assertRaises(TypeError):
opts.y = 42
def testMergeOptions(self):
options1, options2 = _TestOptions(), _TestOptions()
with self.assertRaises(ValueError):
options.merge_options()
merged_options = options.merge_options(options1, options2)
self.assertEqual(merged_options.x, 42)
self.assertEqual(merged_options.y, 3.14)
options1.x = 0
options2.y = 0.0
merged_options = options.merge_options(options1, options2)
self.assertEqual(merged_options.x, 0)
self.assertEqual(merged_options.y, 0.0)
def testMergeNestedOptions(self):
options1, options2 = _NestedTestOptions(), _NestedTestOptions()
merged_options = options.merge_options(options1, options2)
self.assertEqual(merged_options.opts, None)
options1.opts = _TestOptions()
merged_options = options.merge_options(options1, options2)
self.assertEqual(merged_options.opts, _TestOptions())
options2.opts = _TestOptions()
merged_options = options.merge_options(options1, options2)
self.assertEqual(merged_options.opts, _TestOptions())
options1.opts.x = 0
options2.opts.y = 0.0
merged_options = options.merge_options(options1, options2)
self.assertEqual(merged_options.opts.x, 0)
self.assertEqual(merged_options.opts.y, 0.0)
def testMergeOptionsInvalid(self):
with self.assertRaises(TypeError):
options.merge_options(0)
options1, options2 = _TestOptions(), _NestedTestOptions()
with self.assertRaises(TypeError):
options.merge_options(options1, options2)
def testNoSpuriousAttrs(self):
test_options = _TestOptions()
with self.assertRaises(AttributeError):
test_options.wrong_attr = True
with self.assertRaises(AttributeError):
_ = test_options.wrong_attr
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/util/options_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for describing the structure of a `tf.data` type."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.util import nest
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util.tf_export import tf_export
# Define backwards-compatibility wrappers for using TypeSpec and its subclasses
# to replace Structure and its subclasses. Note that the constructor argument
# order is different in many cases -- in particular, TypeSpec follows TensorSpec
# and uses the order (shape, dtype); but most Structure subclasses use the
# order (dtype, shape).
#
# TODO(b/133606651) Update tf.data to use TypeSpec directly, and then remove
# these compatibility wrappers.
Structure = type_spec.TypeSpec
# pylint: disable=invalid-name
@tf_export("data.experimental.TensorStructure")
def TensorStructure(dtype, shape):
return tensor_spec.TensorSpec(shape, dtype)
@tf_export("data.experimental.SparseTensorStructure")
def SparseTensorStructure(dtype, shape):
return sparse_tensor.SparseTensorSpec(shape, dtype)
@tf_export("data.experimental.TensorArrayStructure")
def TensorArrayStructure(dtype, element_shape, dynamic_size, infer_shape):
return tensor_array_ops.TensorArraySpec(element_shape, dtype,
dynamic_size, infer_shape)
@tf_export("data.experimental.RaggedTensorStructure")
def RaggedTensorStructure(dtype, shape, ragged_rank):
return ragged_tensor.RaggedTensorSpec(shape, dtype, ragged_rank)
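# Editor's sketch (hypothetical, not part of the original module): the wrappers
# above only reorder constructor arguments, so each pair below builds the same
# spec. Kept as an uncalled helper so importing this module has no side effects.
def _compat_wrapper_example():
  from tensorflow.python.framework import dtypes  # pylint: disable=g-import-not-at-top
  assert TensorStructure(dtypes.float32, [3]) == tensor_spec.TensorSpec(
      [3], dtypes.float32)
  assert SparseTensorStructure(dtypes.int32, [2, 2]) == (
      sparse_tensor.SparseTensorSpec([2, 2], dtypes.int32))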
def normalize_tensors(tensors):
"""Converts a nested structure of tensor-like objects to tensors.
* `SparseTensor`-like inputs are converted to `SparseTensor`.
* `RaggedTensor`-like inputs are converted to `RaggedTensor`.
* `TensorArray` and `Dataset` inputs are passed through.
* Everything else is converted to a dense `Tensor`.
Args:
tensors: A nested structure of tensor-like objects, lists,
`SparseTensor`, `SparseTensorValue`, `RaggedTensor`, or `TensorArray` objects.
Returns:
A nested structure of `Tensor`, `SparseTensor`, `RaggedTensor`, or
`TensorArray` objects.
"""
flat_tensors = nest.flatten(tensors)
prepared = []
with ops.name_scope("normalize_tensors"):
# Imported here to avoid circular dependency
from tensorflow.python.data.ops import dataset_ops # pylint: disable=g-import-not-at-top
for i, t in enumerate(flat_tensors):
spec = type_spec.type_spec_from_value(t)
if isinstance(spec, sparse_tensor.SparseTensorSpec):
prepared.append(
sparse_tensor.SparseTensor.from_value(t))
elif isinstance(spec, ragged_tensor.RaggedTensorSpec):
prepared.append(
ragged_tensor.convert_to_tensor_or_ragged_tensor(
t, name="component_%d" % i))
elif isinstance(spec, tensor_array_ops.TensorArraySpec):
prepared.append(t)
elif isinstance(spec, dataset_ops.DatasetStructure):
prepared.append(t)
else:
prepared.append(ops.convert_to_tensor(t, name="component_%d" % i))
return nest.pack_sequence_as(tensors, prepared)
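# Editor's sketch (hypothetical, not part of the original module): a small
# illustration of `normalize_tensors` on a mixed nest. Uncalled helper so
# importing this module stays side-effect free.
def _normalize_tensors_example():
  value = {"dense": [1, 2, 3],
           "sparse": sparse_tensor.SparseTensorValue([[0]], [1], [1])}
  normalized = normalize_tensors(value)
  # normalized["dense"] is a dense `Tensor`; normalized["sparse"] is a
  # `SparseTensor` rebuilt from the `SparseTensorValue`.
  return normalized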
def convert_legacy_structure(output_types, output_shapes, output_classes):
"""Returns a `Structure` that represents the given legacy structure.
This method provides a way to convert from the existing `Dataset` and
`Iterator` structure-related properties to a `Structure` object. A "legacy"
structure is represented by the `tf.data.Dataset.output_types`,
`tf.data.Dataset.output_shapes`, and `tf.data.Dataset.output_classes`
properties.
TODO(b/110122868): Remove this function once `Structure` is used throughout
`tf.data`.
Args:
output_types: A nested structure of `tf.DType` objects corresponding to
each component of a structured value.
output_shapes: A nested structure of `tf.TensorShape` objects
corresponding to each component of a structured value.
output_classes: A nested structure of Python `type` objects corresponding
to each component of a structured value.
Returns:
A `Structure`.
Raises:
TypeError: If a structure cannot be built from the arguments because one of
the component classes in `output_classes` is not supported.
"""
flat_types = nest.flatten(output_types)
flat_shapes = nest.flatten(output_shapes)
flat_classes = nest.flatten(output_classes)
flat_ret = []
for flat_type, flat_shape, flat_class in zip(flat_types, flat_shapes,
flat_classes):
if isinstance(flat_class, type_spec.TypeSpec):
flat_ret.append(flat_class)
elif issubclass(flat_class, sparse_tensor.SparseTensor):
flat_ret.append(SparseTensorStructure(flat_type, flat_shape))
elif issubclass(flat_class, ops.Tensor):
flat_ret.append(TensorStructure(flat_type, flat_shape))
elif issubclass(flat_class, tensor_array_ops.TensorArray):
# We sneaked the dynamic_size and infer_shape into the legacy shape.
flat_ret.append(
TensorArrayStructure(
flat_type, flat_shape[2:],
dynamic_size=tensor_shape.dimension_value(flat_shape[0]),
infer_shape=tensor_shape.dimension_value(flat_shape[1])))
else:
# NOTE(mrry): Since legacy structures produced by iterators only
# comprise Tensors, SparseTensors, and nests, we do not need to
# support all structure types here.
raise TypeError(
"Could not build a structure for output class %r" % (flat_class,))
ret = nest.pack_sequence_as(output_classes, flat_ret)
if isinstance(ret, type_spec.TypeSpec):
return ret
else:
return NestedStructure(ret)
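# Editor's sketch (hypothetical, not part of the original module): converting a
# legacy (types, shapes, classes) triple into a `Structure`. Uncalled helper so
# importing this module has no side effects.
def _convert_legacy_structure_example():
  from tensorflow.python.framework import dtypes  # pylint: disable=g-import-not-at-top
  output_types = {"a": dtypes.float32, "b": dtypes.int32}
  output_shapes = {"a": tensor_shape.TensorShape([]),
                   "b": tensor_shape.TensorShape([2, 2])}
  output_classes = {"a": ops.Tensor, "b": sparse_tensor.SparseTensor}
  # Yields a `NestedStructure` wrapping a `TensorSpec` for "a" and a
  # `SparseTensorSpec` for "b".
  return convert_legacy_structure(output_types, output_shapes, output_classes)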
# TODO(b/133606651) Update the tf.data code to use nests of TypeSpec rather
# than NestedStructure; and then delete this class.
@tf_export("data.experimental.NestedStructure")
class NestedStructure(type_spec.BatchableTypeSpec):
"""Represents a nested structure in which each leaf is a `TypeSpec`."""
# NOTE(edloper): This class makes extensive use of non-public TypeSpec
# methods, so we disable the protected-access lint warning once here.
# pylint: disable=protected-access
__slots__ = ["_nested_structure", "_flat_nested_structure",
"__flat_tensor_specs"]
def __init__(self, nested_structure):
self._nested_structure = nested_structure
self._flat_nested_structure = nest.flatten(nested_structure)
self.__flat_tensor_specs = []
for s in self._flat_nested_structure:
if not isinstance(s, type_spec.TypeSpec):
raise TypeError("nested_structure must be a (potentially nested) tuple "
"or dictionary of TypeSpec objects.")
self.__flat_tensor_specs.extend(s._flat_tensor_specs)
value_type = property(lambda self: type(self._nested_structure))
def _serialize(self):
return self._nested_structure
@classmethod
def _deserialize(cls, nested_structure):
return cls(nested_structure)
def most_specific_compatible_type(self, other):
if type(self) is not type(other):
raise ValueError("Incompatible types")
return self._deserialize(
nest.map_structure(lambda a, b: a.most_specific_compatible_type(b),
self._nested_structure, other._nested_structure))
def __eq__(self, other):
if not isinstance(other, NestedStructure):
return False
try:
nest.assert_same_structure(self._nested_structure,
other._nested_structure)
except (ValueError, TypeError):
return False
return (nest.flatten(self._nested_structure) ==
nest.flatten(other._nested_structure))
def __hash__(self):
return hash(tuple(nest.flatten(self._nested_structure)))
def is_compatible_with(self, other):
if not isinstance(other, NestedStructure):
return False
try:
nest.assert_same_structure(self._nested_structure,
other._nested_structure)
except (ValueError, TypeError):
return False
# pylint: disable=g-complex-comprehension
return all(
substructure.is_compatible_with(other_substructure)
for substructure, other_substructure in zip(
nest.flatten(self._nested_structure),
nest.flatten(other._nested_structure)))
_component_specs = property(lambda self: self._nested_structure)
_flat_tensor_specs = property(lambda self: self.__flat_tensor_specs)
def _to_components(self, value):
return nest.map_structure_up_to(
self._nested_structure, lambda t, v: t._to_components(v),
self._nested_structure, value)
def _from_components(self, value):
return nest.map_structure_up_to(
self._nested_structure, lambda t, v: t._from_components(v),
self._nested_structure, value)
def _to_tensor_list(self, value):
return self.__value_to_tensors(
value, lambda struct, val: struct._to_tensor_list(val))
def _to_batched_tensor_list(self, value):
return self.__value_to_tensors(
value, lambda struct, val: struct._to_batched_tensor_list(val))
def __value_to_tensors(self, value, to_tensor_list_fn):
ret = []
try:
flat_value = nest.flatten_up_to(self._nested_structure, value)
except (ValueError, TypeError):
raise ValueError("The value %r is not compatible with the nested "
"structure %r." % (value, self._nested_structure))
for sub_value, structure in zip(flat_value, self._flat_nested_structure):
if not structure.is_compatible_with(
type_spec.type_spec_from_value(sub_value)):
raise ValueError("Component value %r is not compatible with the nested "
"structure %r." % (sub_value, structure))
ret.extend(to_tensor_list_fn(structure, sub_value))
return ret
def _from_tensor_list(self, value):
return self.__tensors_to_value(
value, lambda struct, val: struct._from_tensor_list(val))
def _from_compatible_tensor_list(self, value):
return self.__tensors_to_value(
value, lambda struct, val: struct._from_compatible_tensor_list(val))
def __tensors_to_value(self, flat_value, from_tensor_list_fn):
if len(flat_value) != len(self._flat_tensor_specs):
raise ValueError("Expected %d flat values in NestedStructure but got %d."
% (len(self._flat_tensor_specs), len(flat_value)))
flat_ret = []
i = 0
for structure in self._flat_nested_structure:
num_flat_values = len(structure._flat_tensor_specs)
sub_value = flat_value[i:i + num_flat_values]
flat_ret.append(from_tensor_list_fn(structure, sub_value))
i += num_flat_values
return nest.pack_sequence_as(self._nested_structure, flat_ret)
@staticmethod
def from_value(value):
flat_nested_structure = [
type_spec.type_spec_from_value(sub_value)
for sub_value in nest.flatten(value)
]
return NestedStructure(nest.pack_sequence_as(value, flat_nested_structure))
def _to_legacy_output_types(self):
return nest.map_structure(
lambda s: s._to_legacy_output_types(), self._nested_structure)
def _to_legacy_output_shapes(self):
return nest.map_structure(
lambda s: s._to_legacy_output_shapes(), self._nested_structure)
def _to_legacy_output_classes(self):
return nest.map_structure(
lambda s: s._to_legacy_output_classes(), self._nested_structure)
def _batch(self, batch_size):
return NestedStructure(nest.map_structure(
lambda s: s._batch(batch_size), self._nested_structure))
def _unbatch(self):
return NestedStructure(nest.map_structure(
lambda s: s._unbatch(), self._nested_structure))
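# Editor's sketch (hypothetical, not part of the original module): round-tripping
# a value through a manually constructed `NestedStructure`. Uncalled helper so
# importing this module has no side effects.
def _nested_structure_example():
  from tensorflow.python.framework import constant_op  # pylint: disable=g-import-not-at-top
  from tensorflow.python.framework import dtypes  # pylint: disable=g-import-not-at-top
  spec = NestedStructure({
      "a": TensorStructure(dtypes.float32, []),
      "b": TensorStructure(dtypes.int32, [3]),
  })
  value = {"a": constant_op.constant(1.0),
           "b": constant_op.constant([1, 2, 3])}
  flat = spec._to_tensor_list(value)  # Two tensors, flattened in key order.
  return spec._from_tensor_list(flat)  # Repacked as {"a": ..., "b": ...}.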
type_spec.register_type_spec_from_value_converter(
tuple, NestedStructure.from_value, allow_subclass=True)
type_spec.register_type_spec_from_value_converter(
dict, NestedStructure.from_value, allow_subclass=True)
# Re-register SparseTensorValue -- it's a subclass of tuple, but we don't
# want the NestedStructure registration to take precedence.
type_spec.register_type_spec_from_value_converter(
sparse_tensor.SparseTensorValue,
sparse_tensor.SparseTensorSpec.from_value)
|
tensorflow-master
|
tensorflow/python/data/util/structure.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python dataset sparse tensor utility functitons."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import sparse_ops
def any_sparse(classes):
"""Checks for sparse tensor.
Args:
classes: a structure of objects that identify the dataset item classes
Returns:
`True` if `classes` contains a sparse tensor type and `False` otherwise.
"""
return any(c is sparse_tensor.SparseTensor for c in nest.flatten(classes))
def as_dense_shapes(shapes, classes):
"""Converts sparse tensor shapes to their physical shapes.
Args:
shapes: a structure of shapes to convert.
classes: a structure of objects that identify the dataset item classes
Returns:
a structure matching the nested structure of `shapes`, containing
`tensor_shape.unknown_shape()` at positions where `classes` contains
`tf.SparseTensor` and matching contents of `shapes` otherwise
"""
ret = nest.pack_sequence_as(shapes, [
tensor_shape.unknown_shape() if c is sparse_tensor.SparseTensor else shape
for shape, c in zip(nest.flatten(shapes), nest.flatten(classes))
])
return ret
def as_dense_types(types, classes):
"""Converts sparse tensor types to `dtypes.variant`.
Args:
types: a structure of types to convert.
classes: a structure of objects that identify the dataset item classes
Returns:
a structure matching the nested structure of `types`, containing
`dtypes.variant` at positions where `classes` contains `tf.SparseTensor` and
matching contents of `types` otherwise
"""
ret = nest.pack_sequence_as(types, [
dtypes.variant if c is sparse_tensor.SparseTensor else ty
for ty, c in zip(nest.flatten(types), nest.flatten(classes))
])
return ret
def deserialize_sparse_tensors(tensors, types, shapes, classes):
"""Deserializes sparse tensors.
Args:
tensors: a structure of tensors to deserialize.
types: a structure that holds information about types of `tensors`
shapes: a structure that holds information about shapes of `tensors`
classes: a structure of objects that identify the dataset item classes
Returns:
`tensors` with any serialized sparse tensors replaced by their deserialized
version.
"""
ret = nest.pack_sequence_as(types, [
sparse_ops.deserialize_sparse(tensor, dtype=ty, rank=shape.ndims)
if c is sparse_tensor.SparseTensor else tensor
for (tensor, ty, shape, c) in zip(
nest.flatten(tensors), nest.flatten(types), nest.flatten(shapes),
nest.flatten(classes))
])
return ret
def get_classes(tensors):
"""Gets classes for a structure of tensors.
Args:
tensors: the tensor structure to get classes for.
Returns:
a structure matching the nested structure of `tensors`, containing
`tf.SparseTensor` at positions where `tensors` contains a sparse tensor and
`tf.Tensor` otherwise
"""
return nest.pack_sequence_as(tensors, [
sparse_tensor.SparseTensor
if isinstance(tensor, sparse_tensor.SparseTensor) else ops.Tensor
for tensor in nest.flatten(tensors)
])
def serialize_many_sparse_tensors(tensors):
"""Serializes many sparse tensors into a batch.
Args:
tensors: a tensor structure to serialize.
Returns:
`tensors` with any sparse tensors replaced by the serialized batch.
"""
ret = nest.pack_sequence_as(tensors, [
sparse_ops.serialize_many_sparse(tensor, out_type=dtypes.variant)
if sparse_tensor.is_sparse(tensor) else tensor
for tensor in nest.flatten(tensors)
])
return ret
def serialize_sparse_tensors(tensors):
"""Serializes sparse tensors.
Args:
tensors: a tensor structure to serialize.
Returns:
`tensors` with any sparse tensors replaced by their serialized version.
"""
ret = nest.pack_sequence_as(tensors, [
sparse_ops.serialize_sparse(tensor, out_type=dtypes.variant)
if isinstance(tensor, sparse_tensor.SparseTensor) else tensor
for tensor in nest.flatten(tensors)
])
return ret
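# Editor's sketch (hypothetical, not part of the original module): how the
# helpers above combine into a serialize/deserialize round trip for a structure
# that mixes sparse and dense components. Uncalled helper so importing this
# module has no side effects.
def _sparse_round_trip_example():
  st = sparse_tensor.SparseTensor(
      indices=[[0, 0]], values=[1], dense_shape=[2, 2])
  dense = ops.convert_to_tensor([1, 2, 3])
  tensors = (st, dense)
  classes = get_classes(tensors)  # (SparseTensor, Tensor)
  serialized = serialize_sparse_tensors(tensors)  # sparse component -> variant
  types = (dtypes.int32, dtypes.int32)
  shapes = (tensor_shape.TensorShape([2, 2]), tensor_shape.TensorShape([3]))
  # Restores the `SparseTensor` and leaves the dense component untouched.
  return deserialize_sparse_tensors(serialized, types, shapes, classes)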
|
tensorflow-master
|
tensorflow/python/data/util/sparse.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.data.util import nest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
class NestTest(test.TestCase):
def testFlattenAndPack(self):
structure = ((3, 4), 5, (6, 7, (9, 10), 8))
flat = ["a", "b", "c", "d", "e", "f", "g", "h"]
self.assertEqual(nest.flatten(structure), [3, 4, 5, 6, 7, 9, 10, 8])
self.assertEqual(
nest.pack_sequence_as(structure, flat), (("a", "b"), "c",
("d", "e", ("f", "g"), "h")))
point = collections.namedtuple("Point", ["x", "y"])
structure = (point(x=4, y=2), ((point(x=1, y=0),),))
flat = [4, 2, 1, 0]
self.assertEqual(nest.flatten(structure), flat)
restructured_from_flat = nest.pack_sequence_as(structure, flat)
self.assertEqual(restructured_from_flat, structure)
self.assertEqual(restructured_from_flat[0].x, 4)
self.assertEqual(restructured_from_flat[0].y, 2)
self.assertEqual(restructured_from_flat[1][0][0].x, 1)
self.assertEqual(restructured_from_flat[1][0][0].y, 0)
self.assertEqual([5], nest.flatten(5))
self.assertEqual([np.array([5])], nest.flatten(np.array([5])))
self.assertEqual("a", nest.pack_sequence_as(5, ["a"]))
self.assertEqual(
np.array([5]), nest.pack_sequence_as("scalar", [np.array([5])]))
with self.assertRaisesRegexp(ValueError, "Structure is a scalar"):
nest.pack_sequence_as("scalar", [4, 5])
with self.assertRaisesRegexp(TypeError, "flat_sequence"):
nest.pack_sequence_as([4, 5], "bad_sequence")
with self.assertRaises(ValueError):
nest.pack_sequence_as([5, 6, [7, 8]], ["a", "b", "c"])
def testFlattenDictOrder(self):
"""`flatten` orders dicts by key, including OrderedDicts."""
ordered = collections.OrderedDict([("d", 3), ("b", 1), ("a", 0), ("c", 2)])
plain = {"d": 3, "b": 1, "a": 0, "c": 2}
ordered_flat = nest.flatten(ordered)
plain_flat = nest.flatten(plain)
self.assertEqual([0, 1, 2, 3], ordered_flat)
self.assertEqual([0, 1, 2, 3], plain_flat)
def testPackDictOrder(self):
"""Packing orders dicts by key, including OrderedDicts."""
ordered = collections.OrderedDict([("d", 0), ("b", 0), ("a", 0), ("c", 0)])
plain = {"d": 0, "b": 0, "a": 0, "c": 0}
seq = [0, 1, 2, 3]
ordered_reconstruction = nest.pack_sequence_as(ordered, seq)
plain_reconstruction = nest.pack_sequence_as(plain, seq)
self.assertEqual(
collections.OrderedDict([("d", 3), ("b", 1), ("a", 0), ("c", 2)]),
ordered_reconstruction)
self.assertEqual({"d": 3, "b": 1, "a": 0, "c": 2}, plain_reconstruction)
def testFlattenAndPackWithDicts(self):
# A nice messy mix of tuples, lists, dicts, and `OrderedDict`s.
named_tuple = collections.namedtuple("A", ("b", "c"))
mess = (
"z",
named_tuple(3, 4),
{
"c": (
1,
collections.OrderedDict([
("b", 3),
("a", 2),
]),
),
"b": 5
},
17
)
flattened = nest.flatten(mess)
self.assertEqual(flattened, ["z", 3, 4, 5, 1, 2, 3, 17])
structure_of_mess = (
14,
named_tuple("a", True),
{
"c": (
0,
collections.OrderedDict([
("b", 9),
("a", 8),
]),
),
"b": 3
},
"hi everybody",
)
unflattened = nest.pack_sequence_as(structure_of_mess, flattened)
self.assertEqual(unflattened, mess)
# Check also that the OrderedDict was created, with the correct key order.
unflattened_ordered_dict = unflattened[2]["c"][1]
self.assertIsInstance(unflattened_ordered_dict, collections.OrderedDict)
self.assertEqual(list(unflattened_ordered_dict.keys()), ["b", "a"])
def testFlattenSparseValue(self):
st = sparse_tensor.SparseTensorValue([[0]], [0], [1])
single_value = st
list_of_values = [st, st, st]
nest_of_values = ((st), ((st), (st)))
dict_of_values = {"foo": st, "bar": st, "baz": st}
self.assertEqual([st], nest.flatten(single_value))
self.assertEqual([[st, st, st]], nest.flatten(list_of_values))
self.assertEqual([st, st, st], nest.flatten(nest_of_values))
self.assertEqual([st, st, st], nest.flatten(dict_of_values))
def testFlattenRaggedValue(self):
rt = ragged_factory_ops.constant_value([[[0]], [[1]]])
single_value = rt
list_of_values = [rt, rt, rt]
nest_of_values = ((rt), ((rt), (rt)))
dict_of_values = {"foo": rt, "bar": rt, "baz": rt}
self.assertEqual([rt], nest.flatten(single_value))
self.assertEqual([[rt, rt, rt]], nest.flatten(list_of_values))
self.assertEqual([rt, rt, rt], nest.flatten(nest_of_values))
self.assertEqual([rt, rt, rt], nest.flatten(dict_of_values))
def testIsSequence(self):
self.assertFalse(nest.is_sequence("1234"))
self.assertFalse(nest.is_sequence([1, 3, [4, 5]]))
self.assertTrue(nest.is_sequence(((7, 8), (5, 6))))
self.assertFalse(nest.is_sequence([]))
self.assertFalse(nest.is_sequence(set([1, 2])))
ones = array_ops.ones([2, 3])
self.assertFalse(nest.is_sequence(ones))
self.assertFalse(nest.is_sequence(math_ops.tanh(ones)))
self.assertFalse(nest.is_sequence(np.ones((4, 5))))
self.assertTrue(nest.is_sequence({"foo": 1, "bar": 2}))
self.assertFalse(
nest.is_sequence(sparse_tensor.SparseTensorValue([[0]], [0], [1])))
self.assertFalse(
nest.is_sequence(ragged_factory_ops.constant_value([[[0]], [[1]]])))
def testAssertSameStructure(self):
structure1 = (((1, 2), 3), 4, (5, 6))
structure2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
structure_different_num_elements = ("spam", "eggs")
structure_different_nesting = (((1, 2), 3), 4, 5, (6,))
structure_dictionary = {"foo": 2, "bar": 4, "baz": {"foo": 5, "bar": 6}}
structure_dictionary_diff_nested = {
"foo": 2,
"bar": 4,
"baz": {
"foo": 5,
"baz": 6
}
}
nest.assert_same_structure(structure1, structure2)
nest.assert_same_structure("abc", 1.0)
nest.assert_same_structure("abc", np.array([0, 1]))
nest.assert_same_structure("abc", constant_op.constant([0, 1]))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(structure1, structure_different_num_elements)
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure((0, 1), np.array([0, 1]))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(0, (0, 1))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(structure1, structure_different_nesting)
named_type_0 = collections.namedtuple("named_0", ("a", "b"))
named_type_1 = collections.namedtuple("named_1", ("a", "b"))
self.assertRaises(TypeError, nest.assert_same_structure, (0, 1),
named_type_0("a", "b"))
nest.assert_same_structure(named_type_0(3, 4), named_type_0("a", "b"))
self.assertRaises(TypeError, nest.assert_same_structure,
named_type_0(3, 4), named_type_1(3, 4))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(named_type_0(3, 4), named_type_0((3,), 4))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(((3,), 4), (3, (4,)))
structure1_list = {"a": ((1, 2), 3), "b": 4, "c": (5, 6)}
structure2_list = {"a": ((1, 2), 3), "b": 4, "d": (5, 6)}
with self.assertRaisesRegexp(TypeError,
"don't have the same sequence type"):
nest.assert_same_structure(structure1, structure1_list)
nest.assert_same_structure(structure1, structure2, check_types=False)
nest.assert_same_structure(structure1, structure1_list, check_types=False)
with self.assertRaisesRegexp(ValueError, "don't have the same set of keys"):
nest.assert_same_structure(structure1_list, structure2_list)
with self.assertRaisesRegexp(ValueError, "don't have the same set of keys"):
nest.assert_same_structure(structure_dictionary,
structure_dictionary_diff_nested)
nest.assert_same_structure(
structure_dictionary,
structure_dictionary_diff_nested,
check_types=False)
nest.assert_same_structure(
structure1_list, structure2_list, check_types=False)
def testMapStructure(self):
structure1 = (((1, 2), 3), 4, (5, 6))
structure2 = (((7, 8), 9), 10, (11, 12))
structure1_plus1 = nest.map_structure(lambda x: x + 1, structure1)
nest.assert_same_structure(structure1, structure1_plus1)
self.assertAllEqual(
[2, 3, 4, 5, 6, 7],
nest.flatten(structure1_plus1))
structure1_plus_structure2 = nest.map_structure(
lambda x, y: x + y, structure1, structure2)
self.assertEqual(
(((1 + 7, 2 + 8), 3 + 9), 4 + 10, (5 + 11, 6 + 12)),
structure1_plus_structure2)
self.assertEqual(3, nest.map_structure(lambda x: x - 1, 4))
self.assertEqual(7, nest.map_structure(lambda x, y: x + y, 3, 4))
with self.assertRaisesRegexp(TypeError, "callable"):
nest.map_structure("bad", structure1_plus1)
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, 3, (3,))
with self.assertRaisesRegexp(TypeError, "same sequence type"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), {"a": (3, 4), "b": 5})
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)))
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)),
check_types=False)
with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"):
nest.map_structure(lambda x: None, structure1, foo="a")
with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"):
nest.map_structure(lambda x: None, structure1, check_types=False, foo="a")
def testAssertShallowStructure(self):
inp_ab = ("a", "b")
inp_abc = ("a", "b", "c")
expected_message = (
"The two structures don't have the same sequence length. Input "
"structure has length 2, while shallow structure has length 3.")
with self.assertRaisesRegexp(ValueError, expected_message):
nest.assert_shallow_structure(inp_abc, inp_ab)
inp_ab1 = ((1, 1), (2, 2))
inp_ab2 = {"a": (1, 1), "b": (2, 2)}
expected_message = (
"The two structures don't have the same sequence type. Input structure "
"has type <(type|class) 'tuple'>, while shallow structure has type "
"<(type|class) 'dict'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
nest.assert_shallow_structure(inp_ab2, inp_ab1)
nest.assert_shallow_structure(inp_ab2, inp_ab1, check_types=False)
inp_ab1 = {"a": (1, 1), "b": {"c": (2, 2)}}
inp_ab2 = {"a": (1, 1), "b": {"d": (2, 2)}}
expected_message = (
r"The two structures don't have the same keys. Input "
r"structure has keys \['c'\], while shallow structure has "
r"keys \['d'\].")
with self.assertRaisesRegexp(ValueError, expected_message):
nest.assert_shallow_structure(inp_ab2, inp_ab1)
inp_ab = collections.OrderedDict([("a", 1), ("b", (2, 3))])
inp_ba = collections.OrderedDict([("b", (2, 3)), ("a", 1)])
nest.assert_shallow_structure(inp_ab, inp_ba)
def testFlattenUpTo(self):
input_tree = (((2, 2), (3, 3)), ((4, 9), (5, 5)))
shallow_tree = ((True, True), (False, True))
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [(2, 2), (3, 3), (4, 9), (5, 5)])
self.assertEqual(flattened_shallow_tree, [True, True, False, True])
input_tree = ((("a", 1), (("b", 2), (("c", 3), (("d", 4))))))
shallow_tree = (("level_1", ("level_2", ("level_3", ("level_4")))))
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
input_tree_flattened = nest.flatten(input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[("a", 1), ("b", 2), ("c", 3), ("d", 4)])
self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4])
## Shallow non-list edge-case.
# Using iterable elements.
input_tree = ["input_tree"]
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = ("input_tree_0", "input_tree_1")
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = (0,)
shallow_tree = 9
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = (0, 1)
shallow_tree = 9
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Both non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = 0
shallow_tree = 0
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Input non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = ("shallow_tree",)
expected_message = ("If shallow structure is a sequence, input must also "
"be a sequence. Input has type: <(type|class) 'str'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, list(shallow_tree))
input_tree = "input_tree"
shallow_tree = ("shallow_tree_9", "shallow_tree_8")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, list(shallow_tree))
# Using non-iterable elements.
input_tree = 0
shallow_tree = (9,)
expected_message = ("If shallow structure is a sequence, input must also "
"be a sequence. Input has type: <(type|class) 'int'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, list(shallow_tree))
input_tree = 0
shallow_tree = (9, 8)
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, list(shallow_tree))
# Using dict.
input_tree = {"a": ((2, 2), (3, 3)), "b": ((4, 9), (5, 5))}
shallow_tree = {"a": (True, True), "b": (False, True)}
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [(2, 2), (3, 3), (4, 9), (5, 5)])
self.assertEqual(flattened_shallow_tree, [True, True, False, True])
def testMapStructureUpTo(self):
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = nest.map_structure_up_to(
inp_val, lambda val, ops: (val + ops.add) * ops.mul, inp_val, inp_ops)
self.assertEqual(out.a, 6)
self.assertEqual(out.b, 15)
data_list = ((2, 4, 6, 8), ((1, 3, 5, 7, 9), (3, 5, 7)))
name_list = ("evens", ("odds", "primes"))
out = nest.map_structure_up_to(
name_list, lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
self.assertEqual(out, ("first_4_evens", ("first_5_odds", "first_3_primes")))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/util/nest_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with user input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.util import convert
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class ConvertTest(test.TestCase):
def testInteger(self):
resp = convert.optional_param_to_tensor("foo", 3)
self.assertEqual(3, self.evaluate(resp))
def testIntegerDefault(self):
resp = convert.optional_param_to_tensor("foo", None)
self.assertEqual(0, self.evaluate(resp))
def testStringDefault(self):
resp = convert.optional_param_to_tensor("bar", None, "default",
dtypes.string)
self.assertEqual(compat.as_bytes("default"), self.evaluate(resp))
def testString(self):
resp = convert.optional_param_to_tensor("bar", "value", "default",
dtypes.string)
self.assertEqual(compat.as_bytes("value"), self.evaluate(resp))
def testPartialShapeToTensorKnownDimension(self):
self.assertAllEqual([1],
self.evaluate(
convert.partial_shape_to_tensor(
tensor_shape.TensorShape([1]))))
self.assertAllEqual([1], self.evaluate(
convert.partial_shape_to_tensor((1,))))
self.assertAllEqual([1], self.evaluate(
convert.partial_shape_to_tensor([1])))
self.assertAllEqual([1],
self.evaluate(
convert.partial_shape_to_tensor(
constant_op.constant([1], dtype=dtypes.int64))))
@test_util.run_deprecated_v1
def testPartialShapeToTensorUnknownDimension(self):
self.assertAllEqual([-1],
self.evaluate(
convert.partial_shape_to_tensor(
tensor_shape.TensorShape([None]))))
self.assertAllEqual([-1],
self.evaluate(convert.partial_shape_to_tensor((None,))))
self.assertAllEqual([-1],
self.evaluate(convert.partial_shape_to_tensor([None])))
self.assertAllEqual([-1],
self.evaluate(convert.partial_shape_to_tensor([-1])))
self.assertAllEqual([-1],
self.evaluate(
convert.partial_shape_to_tensor(
constant_op.constant([-1],
dtype=dtypes.int64))))
with self.assertRaisesRegexp(
ValueError, r"The given shape .* must be a 1-D tensor of tf.int64 "
r"values, but the shape was \(2, 2\)."):
convert.partial_shape_to_tensor(constant_op.constant(
[[1, 1], [1, 1]], dtype=dtypes.int64))
with self.assertRaisesRegexp(
TypeError, r"The given shape .* must be a 1-D tensor of tf.int64 "
r"values, but the element type was float32."):
convert.partial_shape_to_tensor(constant_op.constant([1., 1.]))
def testPartialShapeToTensorMultipleDimensions(self):
self.assertAllEqual([3, 6],
self.evaluate(
convert.partial_shape_to_tensor(
tensor_shape.TensorShape([3, 6]))))
self.assertAllEqual([3, 6],
self.evaluate(convert.partial_shape_to_tensor((3, 6))))
self.assertAllEqual([3, 6],
self.evaluate(convert.partial_shape_to_tensor([3, 6])))
self.assertAllEqual([3, 6],
self.evaluate(
convert.partial_shape_to_tensor(
constant_op.constant([3, 6],
dtype=dtypes.int64))))
self.assertAllEqual([3, -1],
self.evaluate(
convert.partial_shape_to_tensor(
tensor_shape.TensorShape([3, None]))))
self.assertAllEqual([3, -1],
self.evaluate(
convert.partial_shape_to_tensor((3, None))))
self.assertAllEqual([3, -1],
self.evaluate(
convert.partial_shape_to_tensor([3, None])))
self.assertAllEqual([3, -1],
self.evaluate(
convert.partial_shape_to_tensor(
constant_op.constant([3, -1],
dtype=dtypes.int64))))
self.assertAllEqual([-1, -1],
self.evaluate(
convert.partial_shape_to_tensor(
tensor_shape.TensorShape([None, None]))))
self.assertAllEqual([-1, -1],
self.evaluate(
convert.partial_shape_to_tensor((None, None))))
self.assertAllEqual([-1, -1],
self.evaluate(
convert.partial_shape_to_tensor([None, None])))
self.assertAllEqual([-1, -1],
self.evaluate(
convert.partial_shape_to_tensor(
constant_op.constant([-1, -1],
dtype=dtypes.int64))))
def testPartialShapeToTensorScalar(self):
self.assertAllEqual([],
self.evaluate(
convert.partial_shape_to_tensor(
tensor_shape.TensorShape([]))))
self.assertAllEqual([], self.evaluate(convert.partial_shape_to_tensor(())))
self.assertAllEqual([], self.evaluate(convert.partial_shape_to_tensor([])))
self.assertAllEqual([],
self.evaluate(
convert.partial_shape_to_tensor(
constant_op.constant([], dtype=dtypes.int64))))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/util/convert_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for generating Tensor-valued random seeds."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
def get_seed(seed):
"""Returns the local seeds an operation should use given an op-specific seed.
See `tf.compat.v1.get_seed` for more details. This wrapper adds support for
the case where `seed` may be a tensor.
Args:
seed: An integer or a `tf.int64` scalar tensor.
Returns:
A tuple of two `tf.int64` scalar tensors that should be used for the local
seed of the calling dataset.
"""
seed, seed2 = random_seed.get_seed(seed)
if seed is None:
seed = constant_op.constant(0, dtype=dtypes.int64, name="seed")
else:
seed = ops.convert_to_tensor(seed, dtype=dtypes.int64, name="seed")
if seed2 is None:
seed2 = constant_op.constant(0, dtype=dtypes.int64, name="seed2")
else:
with ops.name_scope("seed2") as scope:
seed2 = ops.convert_to_tensor(seed2, dtype=dtypes.int64)
seed2 = array_ops.where(
math_ops.logical_and(
math_ops.equal(seed, 0), math_ops.equal(seed2, 0)),
constant_op.constant(2**31 - 1, dtype=dtypes.int64),
seed2,
name=scope)
return seed, seed2
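# A minimal usage sketch (an addition for illustration, not part of the
# original module): `get_seed` always yields two scalar `tf.int64` tensors.
# `None` components returned by `tf.compat.v1.get_seed` are replaced with
# zero-valued constants, and a derived seed2 of zero is remapped to 2**31 - 1
# when the seed is also zero, as implemented above.
def _example_get_seed_usage():
  fixed_seed, fixed_seed2 = get_seed(42)        # deterministic pipeline seeds
  default_seed, default_seed2 = get_seed(None)  # no op-level seed supplied
  return fixed_seed, fixed_seed2, default_seed, default_seed2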
|
tensorflow-master
|
tensorflow/python/data/util/random_seed.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class SparseTest(test.TestCase):
def testAnySparse(self):
test_cases = (
{
"classes": (),
"expected": False
},
{
"classes": (ops.Tensor),
"expected": False
},
{
"classes": (((ops.Tensor))),
"expected": False
},
{
"classes": (ops.Tensor, ops.Tensor),
"expected": False
},
{
"classes": (ops.Tensor, sparse_tensor.SparseTensor),
"expected": True
},
{
"classes": (sparse_tensor.SparseTensor, sparse_tensor.SparseTensor),
"expected":
True
},
{
"classes": (sparse_tensor.SparseTensor, ops.Tensor),
"expected": True
},
{
"classes": (((sparse_tensor.SparseTensor))),
"expected": True
},
)
for test_case in test_cases:
self.assertEqual(
sparse.any_sparse(test_case["classes"]), test_case["expected"])
def assertShapesEqual(self, a, b):
for a, b in zip(nest.flatten(a), nest.flatten(b)):
self.assertEqual(a.ndims, b.ndims)
if a.ndims is None:
continue
for c, d in zip(a.as_list(), b.as_list()):
self.assertEqual(c, d)
def testAsDenseShapes(self):
test_cases = (
{
"types": (),
"classes": (),
"expected": ()
},
{
"types": tensor_shape.scalar(),
"classes": ops.Tensor,
"expected": tensor_shape.scalar()
},
{
"types": tensor_shape.scalar(),
"classes": sparse_tensor.SparseTensor,
"expected": tensor_shape.unknown_shape()
},
{
"types": (tensor_shape.scalar()),
"classes": (ops.Tensor),
"expected": (tensor_shape.scalar())
},
{
"types": (tensor_shape.scalar()),
"classes": (sparse_tensor.SparseTensor),
"expected": (tensor_shape.unknown_shape())
},
{
"types": (tensor_shape.scalar(), ()),
"classes": (ops.Tensor, ()),
"expected": (tensor_shape.scalar(), ())
},
{
"types": ((), tensor_shape.scalar()),
"classes": ((), ops.Tensor),
"expected": ((), tensor_shape.scalar())
},
{
"types": (tensor_shape.scalar(), ()),
"classes": (sparse_tensor.SparseTensor, ()),
"expected": (tensor_shape.unknown_shape(), ())
},
{
"types": ((), tensor_shape.scalar()),
"classes": ((), sparse_tensor.SparseTensor),
"expected": ((), tensor_shape.unknown_shape())
},
{
"types": (tensor_shape.scalar(), (), tensor_shape.scalar()),
"classes": (ops.Tensor, (), ops.Tensor),
"expected": (tensor_shape.scalar(), (), tensor_shape.scalar())
},
{
"types": (tensor_shape.scalar(), (), tensor_shape.scalar()),
"classes": (sparse_tensor.SparseTensor, (),
sparse_tensor.SparseTensor),
"expected": (tensor_shape.unknown_shape(), (),
tensor_shape.unknown_shape())
},
{
"types": ((), tensor_shape.scalar(), ()),
"classes": ((), ops.Tensor, ()),
"expected": ((), tensor_shape.scalar(), ())
},
{
"types": ((), tensor_shape.scalar(), ()),
"classes": ((), sparse_tensor.SparseTensor, ()),
"expected": ((), tensor_shape.unknown_shape(), ())
},
)
for test_case in test_cases:
self.assertShapesEqual(
sparse.as_dense_shapes(test_case["types"], test_case["classes"]),
test_case["expected"])
def testAsDenseTypes(self):
test_cases = (
{
"types": (),
"classes": (),
"expected": ()
},
{
"types": dtypes.int32,
"classes": ops.Tensor,
"expected": dtypes.int32
},
{
"types": dtypes.int32,
"classes": sparse_tensor.SparseTensor,
"expected": dtypes.variant
},
{
"types": (dtypes.int32),
"classes": (ops.Tensor),
"expected": (dtypes.int32)
},
{
"types": (dtypes.int32),
"classes": (sparse_tensor.SparseTensor),
"expected": (dtypes.variant)
},
{
"types": (dtypes.int32, ()),
"classes": (ops.Tensor, ()),
"expected": (dtypes.int32, ())
},
{
"types": ((), dtypes.int32),
"classes": ((), ops.Tensor),
"expected": ((), dtypes.int32)
},
{
"types": (dtypes.int32, ()),
"classes": (sparse_tensor.SparseTensor, ()),
"expected": (dtypes.variant, ())
},
{
"types": ((), dtypes.int32),
"classes": ((), sparse_tensor.SparseTensor),
"expected": ((), dtypes.variant)
},
{
"types": (dtypes.int32, (), dtypes.int32),
"classes": (ops.Tensor, (), ops.Tensor),
"expected": (dtypes.int32, (), dtypes.int32)
},
{
"types": (dtypes.int32, (), dtypes.int32),
"classes": (sparse_tensor.SparseTensor, (),
sparse_tensor.SparseTensor),
"expected": (dtypes.variant, (), dtypes.variant)
},
{
"types": ((), dtypes.int32, ()),
"classes": ((), ops.Tensor, ()),
"expected": ((), dtypes.int32, ())
},
{
"types": ((), dtypes.int32, ()),
"classes": ((), sparse_tensor.SparseTensor, ()),
"expected": ((), dtypes.variant, ())
},
)
for test_case in test_cases:
self.assertEqual(
sparse.as_dense_types(test_case["types"], test_case["classes"]),
test_case["expected"])
def testGetClasses(self):
s = sparse_tensor.SparseTensor(indices=[[0]], values=[1], dense_shape=[1])
d = ops.Tensor
t = sparse_tensor.SparseTensor
test_cases = (
{
"classes": (),
"expected": ()
},
{
"classes": s,
"expected": t
},
{
"classes": constant_op.constant([1]),
"expected": d
},
{
"classes": (s),
"expected": (t)
},
{
"classes": (constant_op.constant([1])),
"expected": (d)
},
{
"classes": (s, ()),
"expected": (t, ())
},
{
"classes": ((), s),
"expected": ((), t)
},
{
"classes": (constant_op.constant([1]), ()),
"expected": (d, ())
},
{
"classes": ((), constant_op.constant([1])),
"expected": ((), d)
},
{
"classes": (s, (), constant_op.constant([1])),
"expected": (t, (), d)
},
{
"classes": ((), s, ()),
"expected": ((), t, ())
},
{
"classes": ((), constant_op.constant([1]), ()),
"expected": ((), d, ())
},
)
for test_case in test_cases:
self.assertEqual(
sparse.get_classes(test_case["classes"]), test_case["expected"])
def assertSparseValuesEqual(self, a, b):
if not isinstance(a, sparse_tensor.SparseTensor):
self.assertFalse(isinstance(b, sparse_tensor.SparseTensor))
self.assertEqual(a, b)
return
self.assertTrue(isinstance(b, sparse_tensor.SparseTensor))
with self.cached_session():
self.assertAllEqual(a.eval().indices, self.evaluate(b).indices)
self.assertAllEqual(a.eval().values, self.evaluate(b).values)
self.assertAllEqual(a.eval().dense_shape, self.evaluate(b).dense_shape)
@test_util.run_deprecated_v1
def testSerializeDeserialize(self):
test_cases = (
(),
sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
sparse_tensor.SparseTensor(
indices=[[0, 0], [3, 4]], values=[1, -1], dense_shape=[4, 5]),
(sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1])),
(sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]), ()),
((),
sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1])),
)
for expected in test_cases:
classes = sparse.get_classes(expected)
shapes = nest.map_structure(lambda _: tensor_shape.TensorShape(None),
classes)
types = nest.map_structure(lambda _: dtypes.int32, classes)
actual = sparse.deserialize_sparse_tensors(
sparse.serialize_sparse_tensors(expected), types, shapes,
sparse.get_classes(expected))
nest.assert_same_structure(expected, actual)
for a, e in zip(nest.flatten(actual), nest.flatten(expected)):
self.assertSparseValuesEqual(a, e)
@test_util.run_deprecated_v1
def testSerializeManyDeserialize(self):
test_cases = (
(),
sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
sparse_tensor.SparseTensor(
indices=[[0, 0], [3, 4]], values=[1, -1], dense_shape=[4, 5]),
(sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1])),
(sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]), ()),
((),
sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1])),
)
for expected in test_cases:
classes = sparse.get_classes(expected)
shapes = nest.map_structure(lambda _: tensor_shape.TensorShape(None),
classes)
types = nest.map_structure(lambda _: dtypes.int32, classes)
actual = sparse.deserialize_sparse_tensors(
sparse.serialize_many_sparse_tensors(expected), types, shapes,
sparse.get_classes(expected))
nest.assert_same_structure(expected, actual)
for a, e in zip(nest.flatten(actual), nest.flatten(expected)):
self.assertSparseValuesEqual(a, e)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/util/sparse_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Verify that memory usage is minimal in eager mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import multi_device_iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
# memory_profiler might not be available in the OSS version of TensorFlow.
try:
import memory_profiler # pylint:disable=g-import-not-at-top
except ImportError:
memory_profiler = None
@test_util.run_all_in_graph_and_eager_modes
class MemoryCleanupTest(test_base.DatasetTestBase):
def assertNotIncreasingMemory(self,
f,
num_iters=100000,
increase_threshold_absolute_mb=10):
"""Assert memory usage doesn't increase beyond given threshold for f."""
with context.eager_mode():
# Warm up.
f()
# Wait for background threads to start up and allocate their memory before
# taking the baseline measurement.
# FIXME: The nature of this test leaves few other options. Maybe there
# is a better way to do this.
time.sleep(4)
initial = memory_profiler.memory_usage(-1)[0]
for _ in six.moves.range(num_iters):
f()
increase = memory_profiler.memory_usage(-1)[0] - initial
logging.info("Memory increase observed: %f MB" % increase)
assert increase < increase_threshold_absolute_mb, (
"Increase is too high. Initial memory usage: %f MB. Increase: %f MB. "
"Maximum allowed increase: %f") % (initial, increase,
increase_threshold_absolute_mb)
@test_util.run_v1_only("b/121264236")
def testEagerMemoryUsageWithReset(self):
if not context.executing_eagerly():
self.skipTest("Only eager mode test")
if memory_profiler is None:
self.skipTest("memory_profiler required to run this test")
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"])
def f():
self.evaluate(multi_device_iterator.get_next())
multi_device_iterator._eager_reset()
self.assertNotIncreasingMemory(
f, num_iters=100, increase_threshold_absolute_mb=350)
@test_util.run_v1_only("b/121264236")
def testEagerMemoryUsageWithRecreation(self):
if not context.executing_eagerly():
self.skipTest("Only eager mode test")
if memory_profiler is None:
self.skipTest("memory_profiler required to run this test")
dataset = dataset_ops.Dataset.range(10)
def f():
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"])
self.evaluate(multi_device_iterator.get_next())
del multi_device_iterator
# TODO(b/123316347): Reduce threshold once bug is fixed.
self.assertNotIncreasingMemory(
f, num_iters=100, increase_threshold_absolute_mb=500)
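# A minimal sketch (added for illustration only, not part of the original
# test) of the measurement primitive the assertions above rely on:
# `memory_profiler.memory_usage(-1)` samples the resident memory of the
# current process in MiB, so the difference between samples taken before and
# after a workload approximates the memory that the workload retained.
def _example_memory_delta_mb(workload, num_iters=10):
  if memory_profiler is None:
    return None
  before = memory_profiler.memory_usage(-1)[0]
  for _ in range(num_iters):
    workload()
  return memory_profiler.memory_usage(-1)[0] - before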
if __name__ == "__main__":
ops.enable_eager_execution(
config=config_pb2.ConfigProto(device_count={"CPU": 3, "GPU": 1}))
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/memory_cleanup_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.unbatch()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
@test_util.run_all_in_graph_and_eager_modes
class UnbatchTest(test_base.DatasetTestBase, parameterized.TestCase):
def testUnbatchWithUnknownRankInput(self):
dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3]).unbatch()
self.assertDatasetProduces(dataset, range(4))
def testUnbatchScalarDataset(self):
data = tuple([math_ops.range(10) for _ in range(3)])
data = dataset_ops.Dataset.from_tensor_slices(data)
expected_types = (dtypes.int32,) * 3
data = data.batch(2)
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
data = data.unbatch()
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
self.assertDatasetProduces(data, [(i,) * 3 for i in range(10)])
def testUnbatchNestedDataset(self):
data = dataset_ops.Dataset.from_tensors(
[dataset_ops.Dataset.range(10) for _ in range(10)])
data = data.unbatch().flat_map(lambda x: x)
self.assertDatasetProduces(data, list(range(10)) * 10)
def testUnbatchDatasetWithStrings(self):
data = tuple([math_ops.range(10) for _ in range(3)])
data = dataset_ops.Dataset.from_tensor_slices(data)
data = data.map(lambda x, y, z: (x, string_ops.as_string(y), z))
expected_types = (dtypes.int32, dtypes.string, dtypes.int32)
data = data.batch(2)
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
data = data.unbatch()
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
self.assertDatasetProduces(
data, [(i, compat.as_bytes(str(i)), i) for i in range(10)])
def testUnbatchDatasetWithSparseTensor(self):
st = sparse_tensor.SparseTensorValue(
indices=[[i, i] for i in range(10)],
values=list(range(10)),
dense_shape=[10, 10])
data = dataset_ops.Dataset.from_tensors(st)
data = data.unbatch()
data = data.batch(5)
data = data.unbatch()
expected_output = [
sparse_tensor.SparseTensorValue([[i]], [i], [10]) for i in range(10)
]
self.assertDatasetProduces(data, expected_output=expected_output)
def testUnbatchDatasetWithDenseSparseAndRaggedTensor(self):
st = sparse_tensor.SparseTensorValue(
indices=[[i, i] for i in range(10)],
values=list(range(10)),
dense_shape=[10, 10])
rt = ragged_factory_ops.constant_value([[[0]], [[1]], [[2]], [[3]], [[4]],
[[5]], [[6]], [[7]], [[8]], [[9]]])
data = dataset_ops.Dataset.from_tensors((list(range(10)), st, rt))
data = data.unbatch()
data = data.batch(5)
data = data.unbatch()
expected_output = [(i, sparse_tensor.SparseTensorValue([[i]], [i], [10]),
ragged_factory_ops.constant_value([[i]]))
for i in range(10)]
self.assertDatasetProduces(
data, expected_output=expected_output)
def testUnbatchDatasetWithRaggedTensor(self):
rt = ragged_factory_ops.constant_value([[[0]], [[1]], [[2]], [[3]], [[4]],
[[5]], [[6]], [[7]], [[8]], [[9]]])
data = dataset_ops.Dataset.from_tensors(rt)
data = data.unbatch()
data = data.batch(5)
data = data.batch(2)
data = data.unbatch()
expected_output = [
ragged_factory_ops.constant_value([[[0]], [[1]], [[2]], [[3]], [[4]]]),
ragged_factory_ops.constant_value([[[5]], [[6]], [[7]], [[8]], [[9]]]),
]
self.assertDatasetProduces(
data, expected_output=expected_output)
def testUnbatchSingleElementTupleDataset(self):
data = tuple([(math_ops.range(10),) for _ in range(3)])
data = dataset_ops.Dataset.from_tensor_slices(data)
expected_types = ((dtypes.int32,),) * 3
data = data.batch(2)
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
data = data.unbatch()
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
self.assertDatasetProduces(data, [((i,),) * 3 for i in range(10)])
def testUnbatchMultiElementTupleDataset(self):
data = tuple([(math_ops.range(10 * i, 10 * i + 10),
array_ops.fill([10], "hi")) for i in range(3)])
data = dataset_ops.Dataset.from_tensor_slices(data)
expected_types = ((dtypes.int32, dtypes.string),) * 3
data = data.batch(2)
self.assertAllEqual(expected_types,
dataset_ops.get_legacy_output_types(data))
data = data.unbatch()
self.assertAllEqual(expected_types,
dataset_ops.get_legacy_output_types(data))
self.assertDatasetProduces(
data,
[((i, b"hi"), (10 + i, b"hi"), (20 + i, b"hi")) for i in range(10)])
def testUnbatchEmpty(self):
data = dataset_ops.Dataset.from_tensors(
(constant_op.constant([]), constant_op.constant([], shape=[0, 4]),
constant_op.constant([], shape=[0, 4, 0])))
data = data.unbatch()
self.assertDatasetProduces(data, [])
def testUnbatchStaticShapeMismatch(self):
data = dataset_ops.Dataset.from_tensors((np.arange(7), np.arange(8),
np.arange(9)))
with self.assertRaises(ValueError):
data.unbatch()
# Note: dynamic shape mismatch is a graph-specific test.
@test_util.run_deprecated_v1
def testSkipEagerUnbatchDynamicShapeMismatch(self):
ph1 = array_ops.placeholder(dtypes.int32, shape=[None])
ph2 = array_ops.placeholder(dtypes.int32, shape=None)
data = dataset_ops.Dataset.from_tensors((ph1, ph2))
data = data.unbatch()
iterator = dataset_ops.make_initializable_iterator(data)
next_element = iterator.get_next()
with self.cached_session() as sess:
# Mismatch in the 0th dimension.
sess.run(
iterator.initializer,
feed_dict={
ph1: np.arange(7).astype(np.int32),
ph2: np.arange(8).astype(np.int32)
})
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(next_element)
# No 0th dimension (i.e. scalar value) for one component.
sess.run(
iterator.initializer,
feed_dict={
ph1: np.arange(7).astype(np.int32),
ph2: 7
})
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(next_element)
def testUnbatchDatasetWithUintDtypes(self):
components = (
np.tile(np.array([[0], [1], [2], [3]], dtype=np.uint8), 2),
np.tile(np.array([[1], [2], [3], [256]], dtype=np.uint16), 2),
np.tile(np.array([[2], [3], [4], [65536]], dtype=np.uint32), 2),
np.tile(np.array([[3], [4], [5], [4294967296]], dtype=np.uint64), 2),
)
expected_types = (dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64)
expected_output = [tuple([c[i] for c in components]) for i in range(4)]
data = dataset_ops.Dataset.from_tensor_slices(components)
data = data.batch(2)
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
data = data.unbatch()
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
self.assertDatasetProduces(data, expected_output)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/unbatch_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.cache()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import shutil
import tempfile
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class FileCacheTest(test_base.DatasetTestBase):
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
self.cache_prefix = path.join(self.tmp_dir, "cache")
def tearDown(self):
if self.tmp_dir:
shutil.rmtree(self.tmp_dir, ignore_errors=True)
def testCacheDatasetPassthrough(self):
components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
np.array([9.0, 10.0, 11.0, 12.0]))
def dataset_fn(count=5, filename=None):
repeat_dataset = (
dataset_ops.Dataset.from_tensor_slices(components).repeat(count))
if filename:
return repeat_dataset.cache(filename)
else:
return repeat_dataset
self.assertEqual(
tuple([c.shape[1:] for c in components]),
dataset_ops.get_legacy_output_shapes(dataset_fn()))
get_next = self.getNext(dataset_fn())
# First run without caching to collect the "ground truth".
elements = []
for _ in range(20):
elements.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Assert that the cached dataset has the same elements as the
# "ground truth".
get_next = self.getNext(dataset_fn(filename=self.cache_prefix))
cached_elements = []
for _ in range(20):
cached_elements.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertAllEqual(elements, cached_elements)
# Re-initialize with an empty upstream (to throw errors.OutOfRangeError
# if we didn't use the cache).
get_next = self.getNext(dataset_fn(count=0, filename=self.cache_prefix))
replayed_elements = []
for _ in range(20):
replayed_elements.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(cached_elements, replayed_elements)
# Re-initialize with an empty upstream and a missing cache file (should
# throw errors.OutOfRangeError immediately).
get_next = self.getNext(
dataset_fn(count=0, filename=self.cache_prefix + "nonsense"))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testConcurrentWriters(self):
components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
np.array([9.0, 10.0, 11.0, 12.0]))
cache_dataset1 = (
dataset_ops.Dataset.from_tensor_slices(components).cache(
self.cache_prefix))
cache_dataset2 = (
dataset_ops.Dataset.from_tensor_slices(components).cache(
self.cache_prefix))
get_next1 = self.getNext(cache_dataset1)
get_next2 = self.getNext(cache_dataset2)
self.evaluate(get_next1()) # this should succeed
with self.assertRaises(errors.AlreadyExistsError):
self.evaluate(get_next2())
self.evaluate(get_next1()) # this should continue to succeed
def testConcurrentReaders(self):
components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
np.array([9.0, 10.0, 11.0, 12.0]))
cache_dataset1 = (
dataset_ops.Dataset.from_tensor_slices(components).cache(
self.cache_prefix))
cache_dataset2 = (
dataset_ops.Dataset.from_tensor_slices(components).cache(
self.cache_prefix))
get_next1 = self.getNext(cache_dataset1)
get_next2 = self.getNext(cache_dataset2)
elements = []
for _ in range(4):
elements.append(self.evaluate(get_next1()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next1())
# Re-initialize
get_next1 = self.getNext(cache_dataset1, requires_initialization=True)
get_next2 = self.getNext(cache_dataset2, requires_initialization=True)
# Reading concurrently should succeed.
elements_itr1 = []
elements_itr2 = []
elements_itr2.append(self.evaluate(get_next2()))
elements_itr1.append(self.evaluate(get_next1()))
elements_itr2.append(self.evaluate(get_next2()))
elements_itr1.append(self.evaluate(get_next1()))
# Intentionally reversing the order
elements_itr1.append(self.evaluate(get_next1()))
elements_itr2.append(self.evaluate(get_next2()))
elements_itr1.append(self.evaluate(get_next1()))
elements_itr2.append(self.evaluate(get_next2()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next2())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next1())
self.assertAllEqual(elements, elements_itr1)
self.assertAllEqual(elements, elements_itr2)
def testReadingPastEndOfSequence(self):
dataset = dataset_ops.Dataset.range(10).cache(self.cache_prefix)
dataset = dataset.map(lambda a: a).batch(4).repeat(2)
expected_output = [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]] * 2
self.assertDatasetProduces(dataset, expected_output)
@test_util.run_all_in_graph_and_eager_modes
class MemoryCacheTest(test_base.DatasetTestBase):
def testCacheDatasetPassthrough(self):
with ops.device("cpu:0"):
repeat_count = variables.Variable(constant_op.constant(10, dtypes.int64))
dataset = dataset_ops.Dataset.range(3).flat_map(
lambda x: dataset_ops.Dataset.from_tensors(x).repeat(repeat_count))
cached_dataset = dataset.cache().repeat(2)
uncached_dataset = dataset.repeat(2)
self.evaluate(repeat_count.initializer)
# Needs to be initializable to capture the variable.
cached_next = self.getNext(cached_dataset, requires_initialization=True)
uncached_next = self.getNext(
uncached_dataset, requires_initialization=True)
for i in range(3):
for _ in range(10):
self.assertEqual(self.evaluate(cached_next()), i)
self.assertEqual(self.evaluate(uncached_next()), i)
self.evaluate(repeat_count.assign(0))
# The uncached iterator should now be empty.
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(uncached_next())
# The cached iterator replays from cache.
for i in range(3):
for _ in range(10):
self.assertEqual(self.evaluate(cached_next()), i)
# The cached iterator should now be empty.
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(cached_next())
def testEmptyCacheReading(self):
components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
np.array([9.0, 10.0, 11.0, 12.0]))
repeat_dataset = (
dataset_ops.Dataset.from_tensor_slices(components).repeat(0))
cache_dataset = repeat_dataset.cache()
# Reading from a cache built over an empty input dataset should produce no
# elements.
self.assertDatasetProduces(cache_dataset, expected_output=[])
def testConcurrentReaders(self):
dataset = dataset_ops.Dataset.range(5).cache()
d1 = dataset.map(lambda x: x + 1)
d2 = dataset.map(lambda x: x + 6)
get_next1 = self.getNext(d1)
self.assertEqual(1, self.evaluate(get_next1()))
self.assertEqual(2, self.evaluate(get_next1()))
self.assertEqual(3, self.evaluate(get_next1()))
get_next2 = self.getNext(d2)
self.assertEqual(6, self.evaluate(get_next2()))
self.assertEqual(7, self.evaluate(get_next2()))
self.assertEqual(4, self.evaluate(get_next1())) # interleave execution
self.assertEqual([8, 5],
[self.evaluate(get_next2()),
self.evaluate(get_next1())])
self.assertEqual(9, self.evaluate(get_next2()))
self.assertEqual(10, self.evaluate(get_next2()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next2())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next1())
def testCacheTakeRepeat(self):
dataset = dataset_ops.Dataset.range(10).cache().take(5).repeat(2)
expected_output = [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
self.assertDatasetProduces(dataset, expected_output=expected_output)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/cache_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.filter()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.kernel_tests import filter_test_base
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class FilterTest(filter_test_base.FilterTestBase):
def apply_filter(self, input_dataset, predicate):
return input_dataset.filter(predicate)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/filter_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.reduce()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ReduceTest(test_base.DatasetTestBase, parameterized.TestCase):
def testSum(self):
for i in range(10):
ds = dataset_ops.Dataset.range(1, i + 1)
result = ds.reduce(np.int64(0), lambda x, y: x + y)
self.assertEqual(((i + 1) * i) // 2, self.evaluate(result))
def testSumTuple(self):
def reduce_fn(state, value):
v1, v2 = value
return state + v1 + v2
for i in range(10):
ds = dataset_ops.Dataset.range(1, i + 1)
ds = dataset_ops.Dataset.zip((ds, ds))
result = ds.reduce(constant_op.constant(0, dtype=dtypes.int64), reduce_fn)
self.assertEqual(((i + 1) * i), self.evaluate(result))
def testSumAndCount(self):
def reduce_fn(state, value):
s, c = state
return s + value, c + 1
for i in range(10):
ds = dataset_ops.Dataset.range(1, i + 1)
result = ds.reduce((constant_op.constant(0, dtype=dtypes.int64),
constant_op.constant(0, dtype=dtypes.int64)),
reduce_fn)
s, c = self.evaluate(result)
self.assertEqual(((i + 1) * i) // 2, s)
self.assertEqual(i, c)
@test_util.run_v1_only("graph-mode specific test")
def testSkipEagerSquareUsingPlaceholder(self):
delta = array_ops.placeholder(dtype=dtypes.int64)
def reduce_fn(state, _):
return state + delta
for i in range(10):
ds = dataset_ops.Dataset.range(1, i + 1)
result = ds.reduce(np.int64(0), reduce_fn)
with self.cached_session() as sess:
square = sess.run(result, feed_dict={delta: i})
self.assertEqual(i * i, square)
def testSparse(self):
def reduce_fn(_, value):
return value
def make_sparse_fn(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
for i in range(10):
ds = dataset_ops.Dataset.from_tensors(make_sparse_fn(i+1))
result = ds.reduce(make_sparse_fn(0), reduce_fn)
self.assertSparseValuesEqual(make_sparse_fn(i + 1), self.evaluate(result))
def testNested(self):
def reduce_fn(state, value):
state["dense"] += value["dense"]
state["sparse"] = value["sparse"]
return state
def make_sparse_fn(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
def map_fn(i):
return {"dense": math_ops.cast(i, dtype=dtypes.int64),
"sparse": make_sparse_fn(math_ops.cast(i, dtype=dtypes.int64))}
for i in range(10):
ds = dataset_ops.Dataset.range(1, i + 1).map(map_fn)
result = ds.reduce(map_fn(0), reduce_fn)
result = self.evaluate(result)
self.assertEqual(((i + 1) * i) // 2, result["dense"])
self.assertSparseValuesEqual(make_sparse_fn(i), result["sparse"])
def testDatasetSideEffect(self):
counter_var = variables.Variable(0)
def increment_fn(x):
counter_var.assign_add(1)
return x
def dataset_fn():
return dataset_ops.Dataset.range(10).map(increment_fn)
def reduce_fn(state, value):
return state + value
@function.defun
def fn():
_ = dataset_fn().reduce(np.int64(0), reduce_fn)
return "hello"
self.evaluate(counter_var.initializer)
self.assertEqual(self.evaluate(fn()), b"hello")
self.assertEqual(self.evaluate(counter_var), 10)
def testSideEffect(self):
counter_var = variables.Variable(0)
def dataset_fn():
return dataset_ops.Dataset.range(10)
def reduce_fn(state, value):
counter_var.assign_add(1)
return state + value
@function.defun
def fn():
_ = dataset_fn().reduce(np.int64(0), reduce_fn)
return "hello"
self.evaluate(counter_var.initializer)
self.assertEqual(self.evaluate(fn()), b"hello")
self.assertEqual(self.evaluate(counter_var), 10)
def testAutomaticControlDependencies(self):
counter_var = variables.Variable(1)
def dataset_fn():
return dataset_ops.Dataset.range(1)
def reduce1_fn(state, value):
counter_var.assign(counter_var + 1)
return state + value
def reduce2_fn(state, value):
counter_var.assign(counter_var * 2)
return state + value
@function.defun
def fn():
_ = dataset_fn().reduce(np.int64(0), reduce1_fn)
_ = dataset_fn().reduce(np.int64(0), reduce2_fn)
return "hello"
self.evaluate(counter_var.initializer)
self.assertEqual(self.evaluate(fn()), b"hello")
self.assertEqual(self.evaluate(counter_var), 4)
def testStateOnGPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPUs available.")
state = constant_op.constant(0, dtype=dtypes.int64)
def reduce_fn(state, value):
with ops.device("/gpu:0"):
return state + value
for i in range(10):
ds = dataset_ops.Dataset.range(1, i + 1)
result = ds.reduce(state, reduce_fn)
self.assertEqual(((i + 1) * i) // 2, self.evaluate(result))
@test_util.run_v1_only("graph-mode specific test")
def testSkipEagerCancellation(self):
ds = dataset_ops.Dataset.from_tensors(1).repeat()
result = ds.reduce(0, lambda x, y: x + y)
with self.cached_session() as sess:
# The `result` op is guaranteed not to complete before it is cancelled,
# because the dataset being reduced is infinite.
thread = self.checkedThread(self.assert_op_cancelled, args=(result,))
thread.start()
time.sleep(0.2)
sess.close()
thread.join()
def testInvalidFunction(self):
ds = dataset_ops.Dataset.range(5)
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(ds.reduce(0, lambda _, __: ()))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/reduce_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.flat_map()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.ragged import ragged_conversion_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
@test_util.run_all_in_graph_and_eager_modes
class FlatMapTest(test_base.DatasetTestBase):
# pylint: disable=g-long-lambda
def testFlatMapDataset(self):
repeats = [1, 2, 3, 4, 5, 0, 1]
components = np.array(repeats, dtype=np.int64)
dataset = dataset_ops.Dataset.from_tensor_slices(components).flat_map(
lambda x: dataset_ops.Dataset.from_tensors([x]).repeat(x))
expected_output = []
for i in repeats:
expected_output.extend([[i]] * i)
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testNestedFlatMapDataset(self):
repeats = [[1, 2], [3, 4], [5, 0], [1, 7]]
components = np.array(repeats, dtype=np.int64)
dataset = dataset_ops.Dataset.from_tensor_slices(components).flat_map(
lambda x: dataset_ops.Dataset.from_tensor_slices(x).flat_map(
lambda y: dataset_ops.Dataset.from_tensors(y).repeat(y))
)
expected_output = []
for row in repeats:
for i in row:
expected_output.extend([i] * i)
self.assertDatasetProduces(dataset, expected_output=expected_output)
# Note: no eager mode coverage; this is a session-specific test.
@test_util.run_deprecated_v1
def testSkipEagerSharedResourceNestedFlatMapDataset(self):
repeats = [[1, 2], [3, 4], [5, 0], [1, 7]]
components = np.array(repeats, dtype=np.int64)
iterator = (
dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_tensor_slices(components).flat_map(
lambda x: dataset_ops.Dataset.from_tensor_slices(x).flat_map(
lambda y: dataset_ops.Dataset.from_tensors(y).repeat(y))),
shared_name="shared_flat_map_iterator"))
init_op = iterator.initializer
get_next = iterator.get_next()
# Create two concurrent sessions that share the same iterator
# resource on the same server, and verify that a random
# interleaving of `Session.run(get_next)` calls on the two
# sessions yields the expected result.
server = server_lib.Server.create_local_server()
with session.Session(server.target) as sess1:
with session.Session(server.target) as sess2:
for _ in range(3):
sess = random.choice([sess1, sess2])
sess.run(init_op)
for row in repeats:
for i in row:
for _ in range(i):
sess = random.choice([sess1, sess2])
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess = random.choice([sess1, sess2])
sess.run(get_next)
def testMapDict(self):
dataset = dataset_ops.Dataset.range(10).map(
lambda x: {"foo": x * 2, "bar": x ** 2}).flat_map(
lambda d: dataset_ops.Dataset.from_tensors(
d["foo"]).repeat(d["bar"]))
get_next = self.getNext(dataset)
for i in range(10):
for _ in range(i**2):
self.assertEqual(i * 2, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testSparse(self):
def _map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])
def _flat_map_fn(x):
return dataset_ops.Dataset.from_tensor_slices(
sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))
dataset = dataset_ops.Dataset.range(10).map(_map_fn).flat_map(_flat_map_fn)
expected_output = []
for i in range(10):
for j in range(2):
expected_output.append([i, 0] if j % 2 == 0 else [0, -i])
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testTensorArray(self):
def _map_fn(i):
i = math_ops.cast(i, dtypes.int32)
return (
tensor_array_ops.TensorArray(
dtype=dtypes.int32, element_shape=(), size=i)
.unstack(math_ops.range(i)))
def _flat_map_fn(x):
self.assertIsInstance(x, tensor_array_ops.TensorArray)
return dataset_ops.Dataset.from_tensor_slices(x.stack())
dataset = dataset_ops.Dataset.range(10).map(_map_fn).flat_map(_flat_map_fn)
expected_output = []
for i in range(10):
for j in range(i):
expected_output.append(j)
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testRagged(self):
def _map_fn(i):
return ragged_tensor.RaggedTensor.from_tensor(i * [[1], [-1]])
def _flat_map_fn(x):
return dataset_ops.Dataset.from_tensor_slices(
ragged_conversion_ops.to_tensor(x))
dataset = dataset_ops.Dataset.range(10).map(_map_fn).flat_map(_flat_map_fn)
expected_output = []
for i in range(10):
expected_output.append([i])
expected_output.append([-i])
self.assertDatasetProduces(dataset, expected_output=expected_output)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/flat_map_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.list_files()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import shutil
import tempfile
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.util import compat
@test_util.run_all_in_graph_and_eager_modes
class ListFilesTest(test_base.DatasetTestBase):
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmp_dir, ignore_errors=True)
def _touchTempFiles(self, filenames):
for filename in filenames:
open(path.join(self.tmp_dir, filename), 'a').close()
# Note: in eager mode this fails with the same assertion error as the
# initializer does in graph mode.
@test_util.run_deprecated_v1
def testSkipEagerEmptyDirectory(self):
dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*'))
self.assertDatasetProduces(dataset, expected_output=[])
def testSimpleDirectory(self):
filenames = ['a', 'b', 'c']
self._touchTempFiles(filenames)
dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*'))
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(path.join(self.tmp_dir, filename))
for filename in filenames
],
assert_items_equal=True)
def testSimpleDirectoryNotShuffled(self):
filenames = ['b', 'c', 'a']
self._touchTempFiles(filenames)
dataset = dataset_ops.Dataset.list_files(
path.join(self.tmp_dir, '*'), shuffle=False)
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(path.join(self.tmp_dir, filename))
for filename in sorted(filenames)
])
def testFixedSeedResultsInRepeatableOrder(self):
filenames = ['a', 'b', 'c']
self._touchTempFiles(filenames)
dataset = dataset_ops.Dataset.list_files(
path.join(self.tmp_dir, '*'), shuffle=True, seed=37)
expected_filenames = [
compat.as_bytes(path.join(self.tmp_dir, filename))
for filename in filenames
]
all_actual_filenames = []
for _ in range(3):
actual_filenames = []
next_element = self.getNext(dataset, requires_initialization=True)
try:
while True:
actual_filenames.append(self.evaluate(next_element()))
except errors.OutOfRangeError:
pass
all_actual_filenames.append(actual_filenames)
# Each run should produce the same set of filenames, though possibly in a
# different order from `expected_filenames`.
self.assertItemsEqual(expected_filenames, all_actual_filenames[0])
# However, the different runs should produce filenames in the same order
# as each other.
self.assertEqual(all_actual_filenames[0], all_actual_filenames[1])
self.assertEqual(all_actual_filenames[0], all_actual_filenames[2])
def testEmptyDirectoryInitializer(self):
def dataset_fn():
return dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*'))
self.assertDatasetProduces(
dataset_fn(),
expected_error=(errors.InvalidArgumentError,
'No files matched pattern'),
requires_initialization=True)
def testSimpleDirectoryInitializer(self):
filenames = ['a', 'b', 'c']
self._touchTempFiles(filenames)
dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*'))
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(path.join(self.tmp_dir, filename))
for filename in filenames
],
assert_items_equal=True)
def testFileSuffixes(self):
filenames = ['a.txt', 'b.py', 'c.py', 'd.pyc']
self._touchTempFiles(filenames)
dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*.py'))
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(path.join(self.tmp_dir, filename))
for filename in filenames[1:-1]
],
assert_items_equal=True)
def testFileMiddles(self):
filenames = ['a.txt', 'b.py', 'c.pyc']
self._touchTempFiles(filenames)
dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*.py*'))
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(path.join(self.tmp_dir, filename))
for filename in filenames[1:]
],
assert_items_equal=True)
def testNoShuffle(self):
filenames = ['a', 'b', 'c']
self._touchTempFiles(filenames)
# Repeat the list twice and ensure that the order is the same each time.
# NOTE(mrry): This depends on an implementation detail of `list_files()`,
# which is that the list of files is captured when the iterator is
# initialized. Otherwise, or if e.g. the iterator were initialized more than
# once, it's possible that the non-determinism of `tf.matching_files()`
# would cause this test to fail. However, it serves as a useful confirmation
# that the `shuffle=False` argument is working as intended.
# TODO(b/73959787): Provide some ordering guarantees so that this test is
# more meaningful.
dataset = dataset_ops.Dataset.list_files(
path.join(self.tmp_dir, '*'), shuffle=False).repeat(2)
next_element = self.getNext(dataset)
expected_filenames = []
actual_filenames = []
for filename in filenames * 2:
expected_filenames.append(
compat.as_bytes(path.join(self.tmp_dir, filename)))
actual_filenames.append(compat.as_bytes(self.evaluate(next_element())))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
self.assertItemsEqual(expected_filenames, actual_filenames)
self.assertEqual(actual_filenames[:len(filenames)],
actual_filenames[len(filenames):])
def testMultiplePatternsAsList(self):
filenames = ['a.txt', 'b.py', 'c.py', 'd.pyc']
self._touchTempFiles(filenames)
patterns = [path.join(self.tmp_dir, pat) for pat in ['*.py', '*.txt']]
dataset = dataset_ops.Dataset.list_files(patterns)
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(path.join(self.tmp_dir, filename))
for filename in filenames[:-1]
],
assert_items_equal=True)
def testMultiplePatternsAsTensor(self):
filenames = ['a.txt', 'b.py', 'c.py', 'd.pyc']
self._touchTempFiles(filenames)
dataset = dataset_ops.Dataset.list_files(
[path.join(self.tmp_dir, pat) for pat in ['*.py', '*.txt']])
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(path.join(self.tmp_dir, filename))
for filename in filenames[:-1]
],
assert_items_equal=True)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/list_files_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.range()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class RangeTest(test_base.DatasetTestBase):
def testStop(self):
dataset = dataset_ops.Dataset.range(5)
self.assertDatasetProduces(dataset, expected_output=range(5))
def testStartStop(self):
start, stop = 2, 5
dataset = dataset_ops.Dataset.range(start, stop)
self.assertDatasetProduces(dataset, expected_output=range(2, 5))
def testStartStopStep(self):
start, stop, step = 2, 10, 2
dataset = dataset_ops.Dataset.range(start, stop, step)
self.assertDatasetProduces(dataset, expected_output=range(2, 10, 2))
def testZeroStep(self):
start, stop, step = 2, 10, 0
with self.assertRaises(errors.InvalidArgumentError):
dataset = dataset_ops.Dataset.range(start, stop, step)
self.evaluate(dataset._variant_tensor)
def testNegativeStep(self):
start, stop, step = 2, 10, -1
dataset = dataset_ops.Dataset.range(start, stop, step)
self.assertDatasetProduces(dataset, expected_output=range(2, 10, -1))
def testStopLessThanStart(self):
start, stop = 10, 2
dataset = dataset_ops.Dataset.range(start, stop)
self.assertDatasetProduces(dataset, expected_output=range(10, 2))
def testStopLessThanStartWithPositiveStep(self):
start, stop, step = 10, 2, 2
dataset = dataset_ops.Dataset.range(start, stop, step)
self.assertDatasetProduces(dataset, expected_output=range(10, 2, 2))
def testStopLessThanStartWithNegativeStep(self):
start, stop, step = 10, 2, -1
dataset = dataset_ops.Dataset.range(start, stop, step)
self.assertDatasetProduces(dataset, expected_output=range(10, 2, -1))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/range_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Iterator`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.compat import compat as forward_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.data.util import structure
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
class IteratorTest(test.TestCase, parameterized.TestCase):
@test_util.deprecated_graph_mode_only
def testNoGradients(self):
component = constant_op.constant([1.])
side = constant_op.constant(0.)
add = lambda x: x + side
dataset = dataset_ops.Dataset.from_tensor_slices(component).map(add)
value = dataset_ops.make_one_shot_iterator(dataset).get_next()
self.assertIsNone(gradients_impl.gradients(value, component)[0])
self.assertIsNone(gradients_impl.gradients(value, side)[0])
self.assertIsNone(gradients_impl.gradients(value, [component, side])[0])
@test_util.deprecated_graph_mode_only
def testCapturingStateInOneShotRaisesException(self):
var = variables.Variable(37.0, name="myvar")
dataset = (
dataset_ops.Dataset.from_tensor_slices([0.0, 1.0, 2.0])
.map(lambda x: x + var))
with self.assertRaisesRegexp(
ValueError, r"`Dataset.make_one_shot_iterator\(\)` does not support "
"datasets that capture stateful objects.+myvar"):
dataset_ops.make_one_shot_iterator(dataset)
@test_util.deprecated_graph_mode_only
def testOneShotIterator(self):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(14))
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.cached_session() as sess:
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@test_util.deprecated_graph_mode_only
def testOneShotIteratorCaptureByValue(self):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
tensor_components = tuple([ops.convert_to_tensor(c) for c in components])
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.from_tensor_slices(tensor_components)
.map(_map_fn).repeat(14))
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.cached_session() as sess:
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testOneShotIteratorInsideContainer(self):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
def within_container():
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.from_tensor_slices(components)
.map(_map_fn).repeat(14))
return iterator.get_next()
server = server_lib.Server.create_local_server()
# Create two iterators within unique containers, and run them to
# make sure that the resources aren't shared.
#
# The test below would fail if cname were the same across both
# sessions.
for j in range(2):
with session.Session(server.target) as sess:
cname = "iteration%d" % j
with ops.container(cname):
get_next = within_container()
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@test_util.deprecated_graph_mode_only
def testOneShotIteratorNonBlocking(self):
dataset = dataset_ops.Dataset.from_tensors([1, 2, 3]).map(lambda x: x * x)
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
# Create a session with a single thread to ensure that the
# one-shot iterator initializer does not deadlock.
config = config_pb2.ConfigProto(
inter_op_parallelism_threads=1, use_per_session_threads=True)
with session.Session(config=config) as sess:
self.assertAllEqual([1, 4, 9], sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
# Test with multiple threads invoking the one-shot iterator concurrently.
with session.Session(config=config) as sess:
results = []
def consumer_thread():
try:
results.append(sess.run(next_element))
except errors.OutOfRangeError:
results.append(None)
num_threads = 8
threads = [
self.checkedThread(consumer_thread) for _ in range(num_threads)
]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(num_threads, len(results))
self.assertEqual(num_threads - 1,
len([None for r in results if r is None]))
self.assertAllEqual([[1, 4, 9]], [r for r in results if r is not None])
@test_util.deprecated_graph_mode_only
def testOneShotIteratorInitializerFails(self):
# Define a dataset whose initialization will always fail.
dataset = dataset_ops.Dataset.from_tensors(
array_ops.check_numerics(
constant_op.constant(1.0) / constant_op.constant(0.0), "oops"))
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
with self.cached_session() as sess:
with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
sess.run(next_element)
# Test that subsequent attempts to use the iterator also fail.
with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
sess.run(next_element)
with self.cached_session() as sess:
def consumer_thread():
with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
sess.run(next_element)
num_threads = 8
threads = [
self.checkedThread(consumer_thread) for _ in range(num_threads)
]
for t in threads:
t.start()
for t in threads:
t.join()
@test_util.deprecated_graph_mode_only
def testSimpleSharedResource(self):
components = (np.array(1, dtype=np.int64),
np.array([1, 2, 3], dtype=np.int64),
np.array(37.0, dtype=np.float64))
server = server_lib.Server.create_local_server()
# Create two non-overlapping sessions that share the same iterator
# resource on the same server, and verify that an action of the
# first session (initializing the iterator) is visible in the
# second session.
with ops.Graph().as_default():
iterator = (
dataset_ops.Dataset.from_tensors(components)
.map(lambda x, y, z: (x, y, z)).make_initializable_iterator(
shared_name="shared_iterator"))
init_op = iterator.initializer
get_next = iterator.get_next()
with session.Session(server.target) as sess:
sess.run(init_op)
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Re-initialize the iterator in the first session.
sess.run(init_op)
with ops.Graph().as_default():
# Re-define the iterator manually, without defining any of the
# functions in this graph, to ensure that we are not
# accidentally redefining functions with the same names in the
# new graph.
iterator = iterator_ops.Iterator.from_structure(
shared_name="shared_iterator",
output_types=(dtypes.int64, dtypes.int64, dtypes.float64),
output_shapes=([], [3], []))
get_next = iterator.get_next()
with session.Session(server.target) as sess:
# Use the iterator without re-initializing in the second session.
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@test_util.deprecated_graph_mode_only
def testNotInitializedError(self):
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_tensors(components))
get_next = iterator.get_next()
with self.cached_session() as sess:
with self.assertRaisesRegexp(errors.FailedPreconditionError,
"iterator has not been initialized"):
sess.run(get_next)
@test_util.deprecated_graph_mode_only
def testReinitializableIterator(self):
dataset_3 = dataset_ops.Dataset.from_tensors(
constant_op.constant([1, 2, 3]))
dataset_4 = dataset_ops.Dataset.from_tensors(
constant_op.constant([4, 5, 6, 7]))
iterator = iterator_ops.Iterator.from_structure(dataset_3.output_types,
[None])
dataset_3_init_op = iterator.make_initializer(dataset_3)
dataset_4_init_op = iterator.make_initializer(dataset_4)
get_next = iterator.get_next()
self.assertEqual(dataset_3.output_types, iterator.output_types)
self.assertEqual(dataset_4.output_types, iterator.output_types)
self.assertEqual(
[None], dataset_ops.get_legacy_output_shapes(iterator).as_list())
with self.cached_session() as sess:
# The iterator is initially uninitialized.
with self.assertRaises(errors.FailedPreconditionError):
sess.run(get_next)
# Initialize with one dataset.
sess.run(dataset_3_init_op)
self.assertAllEqual([1, 2, 3], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Initialize with a different dataset.
sess.run(dataset_4_init_op)
self.assertAllEqual([4, 5, 6, 7], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Reinitialize with the first dataset.
sess.run(dataset_3_init_op)
self.assertAllEqual([1, 2, 3], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@test_util.deprecated_graph_mode_only
def testReinitializableIteratorWithFunctions(self):
def g():
for i in range(10):
yield i
iterator = iterator_ops.Iterator.from_structure(dtypes.int64, [])
next_element = iterator.get_next()
with self.cached_session() as sess:
dataset_1 = dataset_ops.Dataset.from_generator(
g, output_types=dtypes.int64)
sess.run(iterator.make_initializer(dataset_1))
for expected in range(10):
self.assertEqual(expected, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
dataset_2 = dataset_ops.Dataset.from_generator(
g, output_types=dtypes.int64)
sess.run(iterator.make_initializer(dataset_2))
for expected in range(10):
self.assertEqual(expected, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testReinitializableIteratorStaticErrors(self):
# Non-matching structure for types and shapes.
with self.assertRaises(TypeError):
iterator = iterator_ops.Iterator.from_structure(
(dtypes.int64, dtypes.float64), [None])
# Test validation of dataset argument.
iterator = iterator_ops.Iterator.from_structure((dtypes.int64,
dtypes.float64))
# Incompatible structure.
with self.assertRaises(ValueError):
iterator.make_initializer(
dataset_ops.Dataset.from_tensors(((constant_op.constant(
[1, 2, 3], dtype=dtypes.int64),), (constant_op.constant(
[4., 5., 6., 7.], dtype=dtypes.float64),))))
# Incompatible types.
with self.assertRaises(TypeError):
iterator.make_initializer(
dataset_ops.Dataset.from_tensors(
(constant_op.constant([1, 2, 3], dtype=dtypes.int32),
constant_op.constant([4., 5., 6., 7.], dtype=dtypes.float32))))
# Incompatible shapes.
iterator = iterator_ops.Iterator.from_structure(
(dtypes.int64, dtypes.float64), ([None], []))
with self.assertRaises(TypeError):
iterator.make_initializer(
dataset_ops.Dataset.from_tensors(
(constant_op.constant([1, 2, 3], dtype=dtypes.int64),
constant_op.constant([4., 5., 6., 7.], dtype=dtypes.float64))))
@test_util.deprecated_graph_mode_only
def testIteratorStringHandle(self):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
dataset_4 = dataset_ops.Dataset.from_tensor_slices([10, 20, 30, 40])
iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3)
iterator_4 = dataset_ops.make_one_shot_iterator(dataset_4)
handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
feedable_iterator = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dataset_ops.get_legacy_output_types(dataset_3),
dataset_ops.get_legacy_output_shapes(dataset_3))
next_element = feedable_iterator.get_next()
self.assertTrue(dataset_ops.get_structure(dataset_3).is_compatible_with(
dataset_ops.get_structure(feedable_iterator)))
self.assertTrue(dataset_ops.get_structure(dataset_4).is_compatible_with(
dataset_ops.get_structure(feedable_iterator)))
with self.cached_session() as sess:
iterator_3_handle = sess.run(iterator_3.string_handle())
iterator_4_handle = sess.run(iterator_4.string_handle())
self.assertEqual(10,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(1,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(20,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(2,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(30,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(3,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(40,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
with self.assertRaises(errors.OutOfRangeError):
sess.run(
next_element, feed_dict={handle_placeholder: iterator_3_handle})
with self.assertRaises(errors.OutOfRangeError):
sess.run(
next_element, feed_dict={handle_placeholder: iterator_4_handle})
@test_util.deprecated_graph_mode_only
def testIteratorStringHandleFuture(self):
with forward_compat.forward_compatibility_horizon(2018, 8, 4):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
dataset_4 = dataset_ops.Dataset.from_tensor_slices([10, 20, 30, 40])
iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3)
iterator_4 = dataset_ops.make_one_shot_iterator(dataset_4)
handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
feedable_iterator = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dataset_ops.get_legacy_output_types(dataset_3),
dataset_ops.get_legacy_output_shapes(dataset_3))
next_element = feedable_iterator.get_next()
self.assertTrue(dataset_ops.get_structure(dataset_3).is_compatible_with(
dataset_ops.get_structure(feedable_iterator)))
self.assertTrue(dataset_ops.get_structure(dataset_4).is_compatible_with(
dataset_ops.get_structure(feedable_iterator)))
with self.cached_session() as sess:
iterator_3_handle = sess.run(iterator_3.string_handle())
iterator_4_handle = sess.run(iterator_4.string_handle())
self.assertEqual(
10,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(
1,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(
20,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(
2,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(
30,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(
3,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(
40,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
with self.assertRaises(errors.OutOfRangeError):
sess.run(
next_element, feed_dict={handle_placeholder: iterator_3_handle})
with self.assertRaises(errors.OutOfRangeError):
sess.run(
next_element, feed_dict={handle_placeholder: iterator_4_handle})
@test_util.deprecated_graph_mode_only
def testIteratorStringHandleReuseTensorObject(self):
dataset = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
one_shot_iterator = dataset_ops.make_one_shot_iterator(dataset)
initializable_iterator = dataset_ops.make_initializable_iterator(dataset)
structure_iterator = iterator_ops.Iterator.from_structure(
dataset.output_types)
created_ops = len(ops.get_default_graph().get_operations())
self.assertIs(one_shot_iterator.string_handle(),
one_shot_iterator.string_handle())
self.assertIs(initializable_iterator.string_handle(),
initializable_iterator.string_handle())
self.assertIs(structure_iterator.string_handle(),
structure_iterator.string_handle())
# Assert that getting the (default) string handle creates no ops.
self.assertEqual(created_ops, len(ops.get_default_graph().get_operations()))
# Specifying an explicit name will create a new op.
handle_with_name = one_shot_iterator.string_handle(name="foo")
self.assertEqual("foo", handle_with_name.op.name)
self.assertIsNot(one_shot_iterator.string_handle(), handle_with_name)
handle_with_same_name = one_shot_iterator.string_handle(name="foo")
self.assertEqual("foo_1", handle_with_same_name.op.name)
self.assertIsNot(handle_with_name, handle_with_same_name)
@test_util.deprecated_graph_mode_only
def testIteratorStringHandleError(self):
dataset_int_scalar = (
dataset_ops.Dataset.from_tensor_slices([1, 2, 3]).repeat())
dataset_float_vector = (dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0]))
handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
feedable_int_scalar = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dtypes.int32, [])
feedable_int_vector = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dtypes.int32, [None])
feedable_int_any = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dtypes.int32)
with self.cached_session() as sess:
handle_int_scalar = sess.run(dataset_ops.make_one_shot_iterator(
dataset_int_scalar).string_handle())
handle_float_vector = sess.run(dataset_ops.make_one_shot_iterator(
dataset_float_vector).string_handle())
self.assertEqual(1,
sess.run(
feedable_int_scalar.get_next(),
feed_dict={handle_placeholder: handle_int_scalar}))
self.assertEqual(2,
sess.run(
feedable_int_any.get_next(),
feed_dict={handle_placeholder: handle_int_scalar}))
with self.assertRaises(errors.InvalidArgumentError):
print(sess.run(
feedable_int_vector.get_next(),
feed_dict={handle_placeholder: handle_int_scalar}))
with self.assertRaises(errors.InvalidArgumentError):
print(sess.run(
feedable_int_vector.get_next(),
feed_dict={handle_placeholder: handle_float_vector}))
@test_util.deprecated_graph_mode_only
def testRemoteIteratorUsingRemoteCallOpDirectSession(self):
worker_config = config_pb2.ConfigProto()
worker_config.device_count["CPU"] = 3
with ops.device("/job:localhost/replica:0/task:0/cpu:1"):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3)
iterator_3_handle = iterator_3.string_handle()
@function.Defun(dtypes.string)
def _remote_fn(h):
remote_iterator = iterator_ops.Iterator.from_string_handle(
h, dataset_ops.get_legacy_output_types(dataset_3),
dataset_ops.get_legacy_output_shapes(dataset_3))
return remote_iterator.get_next()
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
target_placeholder = array_ops.placeholder(dtypes.string, shape=[])
remote_op = functional_ops.remote_call(
args=[iterator_3_handle],
Tout=[dtypes.int32],
f=_remote_fn,
target=target_placeholder)
with self.session(config=worker_config) as sess:
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
})
self.assertEqual(elem, [1])
# Fails when target is cpu:2 where the resource is not located.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:2"
})
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
})
self.assertEqual(elem, [2])
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
})
self.assertEqual(elem, [3])
with self.assertRaises(errors.OutOfRangeError):
sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
})
@test_util.deprecated_graph_mode_only
def testRemoteIteratorUsingRemoteCallOpMultiWorkers(self):
s1 = server_lib.Server.create_local_server()
s2 = server_lib.Server.create_local_server()
s3 = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
workers = cluster_def.job.add()
workers.name = "worker"
workers.tasks[0] = s1.target[len("grpc://"):]
workers.tasks[1] = s2.target[len("grpc://"):]
client = cluster_def.job.add()
client.name = "client"
client.tasks[0] = s3.target[len("grpc://"):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
worker_devices = [
"/job:worker/replica:0/task:%d/cpu:0" % i for i in range(2)
]
itr_handles = []
for device in worker_devices:
with ops.device(device):
src = dataset_ops.Dataset.from_tensor_slices([device])
itr = dataset_ops.make_one_shot_iterator(src)
itr_handles.append(itr.string_handle())
targets = dataset_ops.Dataset.from_tensor_slices(worker_devices)
handles = dataset_ops.Dataset.from_tensor_slices(itr_handles)
@function.Defun(dtypes.string)
def loading_func(h):
remote_itr = iterator_ops.Iterator.from_string_handle(
h, dataset_ops.get_legacy_output_types(itr),
dataset_ops.get_legacy_output_shapes(itr))
return remote_itr.get_next()
def map_fn(target, handle):
return functional_ops.remote_call(
args=[handle], Tout=[dtypes.string], f=loading_func, target=target)
with ops.device("/job:client"):
client_dataset = dataset_ops.Dataset.zip((targets, handles)).map(map_fn)
itr = dataset_ops.make_initializable_iterator(client_dataset)
n = itr.get_next()
with session.Session(s3.target, config=config) as sess:
sess.run(itr.initializer)
expected_values = worker_devices
for expected in expected_values:
self.assertEqual((compat.as_bytes(expected),), sess.run(n))
with self.assertRaises(errors.OutOfRangeError):
sess.run(n)
@test_util.deprecated_graph_mode_only
def testRemoteIteratorUsingRemoteCallOpDirectSessionGPUCPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3)
iterator_3_handle = iterator_3.string_handle()
def _encode_raw(byte_array):
return bytes(bytearray(byte_array))
@function.Defun(dtypes.uint8)
def _remote_fn(h):
handle = script_ops.py_func(_encode_raw, [h], dtypes.string)
remote_iterator = iterator_ops.Iterator.from_string_handle(
handle, dataset_ops.get_legacy_output_types(dataset_3),
dataset_ops.get_legacy_output_shapes(dataset_3))
return remote_iterator.get_next()
with ops.device("/job:localhost/replica:0/task:0/device:GPU:0"):
target_placeholder = array_ops.placeholder(dtypes.string, shape=[])
iterator_3_handle_uint8 = parsing_ops.decode_raw(
input_bytes=iterator_3_handle, out_type=dtypes.uint8)
remote_op = functional_ops.remote_call(
args=[iterator_3_handle_uint8],
Tout=[dtypes.int32],
f=_remote_fn,
target=target_placeholder)
with self.cached_session() as sess:
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
})
self.assertEqual(elem, [1])
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
})
self.assertEqual(elem, [2])
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
})
self.assertEqual(elem, [3])
with self.assertRaises(errors.OutOfRangeError):
sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
})
@test_util.deprecated_graph_mode_only
def testIncorrectIteratorRestore(self):
def _path():
return os.path.join(self.get_temp_dir(), "iterator")
def _save_op(iterator_resource):
iterator_state_variant = gen_dataset_ops.serialize_iterator(
iterator_resource)
save_op = io_ops.write_file(
_path(), parsing_ops.serialize_tensor(iterator_state_variant))
return save_op
def _restore_op(iterator_resource):
iterator_state_variant = parsing_ops.parse_tensor(
io_ops.read_file(_path()), dtypes.variant)
restore_op = gen_dataset_ops.deserialize_iterator(iterator_resource,
iterator_state_variant)
return restore_op
def _build_range_dataset_graph():
start = 1
stop = 10
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(start, stop))
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = _save_op(iterator._iterator_resource)
restore_op = _restore_op(iterator._iterator_resource)
return init_op, get_next, save_op, restore_op
def _build_reader_dataset_graph():
filenames = ["test"] # Does not exist but we don't care in this test.
iterator = dataset_ops.make_initializable_iterator(
readers.FixedLengthRecordDataset(filenames, 1, 0, 0))
init_op = iterator.initializer
get_next_op = iterator.get_next()
save_op = _save_op(iterator._iterator_resource)
restore_op = _restore_op(iterator._iterator_resource)
return init_op, get_next_op, save_op, restore_op
# Saving iterator for RangeDataset graph.
with ops.Graph().as_default() as g:
init_op, _, save_op, _ = _build_range_dataset_graph()
with self.session(graph=g) as sess:
sess.run(init_op)
sess.run(save_op)
# Attempt to restore the saved iterator into an IteratorResource of
# incompatible type. An iterator of RangeDataset has output type int64,
# while an iterator of FixedLengthRecordDataset has output type string.
# So an InvalidArgumentError should be raised by
# IteratorResource::set_iterator.
with ops.Graph().as_default() as g:
_, _, _, restore_op = _build_reader_dataset_graph()
with self.session(graph=g) as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(restore_op)
@test_util.deprecated_graph_mode_only
def testRepeatedGetNextWarning(self):
iterator = dataset_ops.make_one_shot_iterator(dataset_ops.Dataset.range(10))
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
for _ in range(100):
iterator.get_next()
self.assertEqual(100 - iterator_ops.GET_NEXT_CALL_WARNING_THRESHOLD, len(w))
for warning in w:
self.assertIn(
iterator_ops.GET_NEXT_CALL_WARNING_MESSAGE, str(warning.message))
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
("Tensor", lambda: constant_op.constant(37.0),
structure.TensorStructure(dtypes.float32, []),
ops.Tensor, dtypes.float32, []),
("SparseTensor", lambda: sparse_tensor.SparseTensor(
indices=[[0]], values=constant_op.constant([0], dtype=dtypes.int32),
dense_shape=[1]),
structure.SparseTensorStructure(dtypes.int32, [1]),
sparse_tensor.SparseTensor, dtypes.int32, [1]),
("Nest", lambda: {
"a": constant_op.constant(37.0),
"b": (constant_op.constant(["Foo"]), constant_op.constant("Bar"))},
structure.NestedStructure({
"a": structure.TensorStructure(dtypes.float32, []),
"b": (structure.TensorStructure(dtypes.string, [1]),
structure.TensorStructure(dtypes.string, []))}),
{"a": ops.Tensor, "b": (ops.Tensor, ops.Tensor)},
{"a": dtypes.float32, "b": (dtypes.string, dtypes.string)},
{"a": [], "b": ([1], [])}),
)
def testIteratorStructure(self, tf_value_fn, expected_element_structure,
expected_output_classes, expected_output_types,
expected_output_shapes):
tf_value = tf_value_fn()
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.from_tensors(tf_value))
self.assertTrue(expected_element_structure.is_compatible_with(
iterator._element_structure))
self.assertTrue(iterator._element_structure.is_compatible_with(
expected_element_structure))
self.assertEqual(expected_output_classes,
dataset_ops.get_legacy_output_classes(iterator))
self.assertEqual(expected_output_types,
dataset_ops.get_legacy_output_types(iterator))
self.assertEqual(expected_output_shapes,
dataset_ops.get_legacy_output_shapes(iterator))
def testIteratorGetNextName(self):
with ops.Graph().as_default():
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.from_tensors(37.0))
next_element = iterator.get_next(name="overridden_name")
self.assertEqual("overridden_name", next_element.op.name)
@parameterized.named_parameters(
("Async", context.ASYNC),
("Sync", context.SYNC),
)
def testIteratorEagerIteration(self, execution_mode):
with context.eager_mode(), context.execution_mode(execution_mode):
val = 0
dataset = dataset_ops.Dataset.range(10)
iterator = iter(dataset)
for foo in iterator:
self.assertEqual(val, foo.numpy())
val += 1
@test_util.run_v2_only
def testIteratorV2Function(self):
queue = data_flow_ops.FIFOQueue(10, dtypes.int64)
@def_function.function
def fn():
dataset = dataset_ops.Dataset.range(10)
iterator = iter(dataset)
for _ in range(10):
queue.enqueue(next(iterator))
fn()
for i in range(10):
self.assertEqual(queue.dequeue().numpy(), i)
@test_util.run_v2_only
def testIteratorV2FunctionError(self):
# In this test we verify that a function that raises an error ends up
# properly deallocating the iterator resource.
queue = data_flow_ops.FIFOQueue(10, dtypes.int64)
queue.enqueue(0)
def init_fn(n):
return n
def next_fn(_):
ds = dataset_ops.Dataset.range(0)
return next(iter(ds))
def finalize_fn(n):
queue.enqueue(0)
return n
@def_function.function
def fn():
dataset = dataset_ops._GeneratorDataset(1, init_fn, next_fn, finalize_fn)
iterator = iter(dataset)
next(iterator)
with self.assertRaises(errors.OutOfRangeError):
fn()
self.assertEqual(queue.size().numpy(), 2)
if __name__ == "__main__":
test.main()
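# --- Illustrative sketch, not part of the original test file ---
# A minimal standalone example, assuming TF 2.x with the `tf.compat.v1` graph
# API, contrasting the three iterator flavors exercised above: one-shot (no
# init op), initializable (explicit init op), and reinitializable (a single
# iterator fed by several datasets through separate initializer ops).
import tensorflow.compat.v1 as tf1

def iterator_flavors_demo():
  tf1.disable_eager_execution()
  train = tf1.data.Dataset.range(3)
  valid = tf1.data.Dataset.range(100, 103)

  one_shot = tf1.data.make_one_shot_iterator(train)
  initializable = tf1.data.make_initializable_iterator(train)
  reinit = tf1.data.Iterator.from_structure(
      tf1.data.get_output_types(train), tf1.data.get_output_shapes(train))
  train_init = reinit.make_initializer(train)
  valid_init = reinit.make_initializer(valid)

  next_one_shot = one_shot.get_next()
  next_initializable = initializable.get_next()
  next_reinit = reinit.get_next()
  with tf1.Session() as sess:
    print([sess.run(next_one_shot) for _ in range(3)])       # [0, 1, 2]
    sess.run(initializable.initializer)                      # required first
    print([sess.run(next_initializable) for _ in range(3)])  # [0, 1, 2]
    sess.run(train_init)
    print([sess.run(next_reinit) for _ in range(3)])         # [0, 1, 2]
    sess.run(valid_init)
    print([sess.run(next_reinit) for _ in range(3)])         # [100, 101, 102]

# iterator_flavors_demo()  # uncomment to run as a standalone script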
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/iterator_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.from_sparse_tensor_slices()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
@test_util.run_v1_only("deprecated API, no eager or V2 test coverage")
class FromSparseTensorSlicesTest(test_base.DatasetTestBase):
def testFromSparseTensorSlices(self):
"""Test a dataset based on slices of a `tf.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
get_next = sparse_tensor.SparseTensor(*iterator.get_next())
with self.cached_session() as sess:
slices = [[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []]
# Test with sparse tensor in the appropriate order.
indices = np.array(
[[i, j] for i in range(len(slices)) for j in range(len(slices[i]))])
values = np.array([val for s in slices for val in s])
dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])
sparse_feed = sparse_tensor.SparseTensorValue(indices, values,
dense_shape)
sess.run(init_op, feed_dict={st: sparse_feed})
for i, s in enumerate(slices):
results = sess.run(get_next)
self.assertAllEqual(s, results.values)
expected_indices = np.array(
[[j] for j in range(len(slices[i]))]).reshape([-1, 1])
self.assertAllEqual(expected_indices, results.indices)
self.assertAllEqual(dense_shape[1:], results.dense_shape)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test with sparse tensor in the reverse order, which is not
# currently supported.
reverse_order_indices = indices[::-1, :]
reverse_order_values = values[::-1]
sparse_feed = sparse_tensor.SparseTensorValue(
reverse_order_indices, reverse_order_values, dense_shape)
with self.assertRaises(errors.UnimplementedError):
sess.run(init_op, feed_dict={st: sparse_feed})
# Test with an empty sparse tensor.
empty_indices = np.empty((0, 4), dtype=np.int64)
empty_values = np.empty((0,), dtype=np.float64)
empty_dense_shape = [0, 4, 37, 9]
sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values,
empty_dense_shape)
sess.run(init_op, feed_dict={st: sparse_feed})
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
if __name__ == "__main__":
test.main()
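# --- Illustrative sketch, not part of the original test file ---
# A minimal numpy-only helper, mirroring how the test above builds its
# `SparseTensorValue` feed from a ragged list of rows. The test additionally
# pads the last dimension of `dense_shape` by one, which is equally valid for
# a sparse shape; this sketch uses the tight shape instead.
import numpy as np

def rows_to_sparse_components(rows):
  # Flatten a ragged list of rows into the (indices, values, dense_shape)
  # triple expected by `SparseTensor` / `SparseTensorValue`.
  indices = np.array(
      [[i, j] for i, row in enumerate(rows) for j in range(len(row))])
  values = np.array([v for row in rows for v in row])
  dense_shape = np.array([len(rows), max(len(row) for row in rows)])
  return indices, values, dense_shape

print(rows_to_sparse_components([[1., 2., 3.], [1.], [], [1., 2.]]))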
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/from_sparse_tensor_slices_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Checkpoint tests for `tf.data.Iterator`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training.tracking import util as trackable_utils
@test_util.run_all_in_graph_and_eager_modes
class IteratorCheckpointingTest(test_base.DatasetTestBase):
def testSaveRestoreOneShotIterator(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
dataset = dataset_ops.Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6]).map(
math_ops.square).batch(2)
iterator = iter(dataset) if context.executing_eagerly(
) else dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next if context.executing_eagerly(
) else functools.partial(self.evaluate, iterator.get_next())
checkpoint = trackable_utils.Checkpoint(iterator=iterator)
self.assertAllEqual([1, 4], get_next())
save_path = checkpoint.save(checkpoint_prefix)
self.assertAllEqual([9, 16], get_next())
self.assertAllEqual([25, 36], get_next())
checkpoint.restore(save_path).run_restore_ops()
self.assertAllEqual([9, 16], get_next())
self.assertAllEqual([25, 36], get_next())
with self.assertRaises(errors.OutOfRangeError):
get_next()
def testSaveRestoreMultipleIterator(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
dataset = dataset_ops.Dataset.from_tensor_slices(
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
dataset = dataset.map(math_ops.square).batch(2)
iterator_1 = iter(dataset) if context.executing_eagerly(
) else dataset_ops.make_one_shot_iterator(dataset)
get_next_1 = iterator_1.get_next if context.executing_eagerly(
) else functools.partial(self.evaluate, iterator_1.get_next())
iterator_2 = iter(dataset) if context.executing_eagerly(
) else dataset_ops.make_one_shot_iterator(dataset)
get_next_2 = iterator_2.get_next if context.executing_eagerly(
) else functools.partial(self.evaluate, iterator_2.get_next())
dataset_2 = dataset_ops.Dataset.range(10)
iterator_3 = iter(dataset_2) if context.executing_eagerly(
) else dataset_ops.make_one_shot_iterator(dataset_2)
get_next_3 = iterator_3.get_next if context.executing_eagerly(
) else functools.partial(self.evaluate, iterator_3.get_next())
checkpoint = trackable_utils.Checkpoint(
iterator_1=iterator_1, iterator_2=iterator_2, iterator_3=iterator_3)
self.assertAllEqual([1, 4], get_next_1())
self.assertAllEqual(0, get_next_3())
self.assertAllEqual(1, get_next_3())
self.assertAllEqual(2, get_next_3())
save_path = checkpoint.save(checkpoint_prefix)
self.assertAllEqual([1, 4], get_next_2())
self.assertAllEqual([9, 16], get_next_2())
self.assertAllEqual(3, get_next_3())
checkpoint.restore(save_path).run_restore_ops()
self.assertAllEqual([9, 16], get_next_1())
self.assertAllEqual([1, 4], get_next_2())
self.assertAllEqual(3, get_next_3())
def testRestoreExhaustedIterator(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
dataset = dataset_ops.Dataset.range(3)
iterator = iter(dataset) if context.executing_eagerly(
) else dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next if context.executing_eagerly(
) else functools.partial(self.evaluate, iterator.get_next())
checkpoint = trackable_utils.Checkpoint(iterator=iterator)
self.assertAllEqual(0, get_next())
self.assertAllEqual(1, get_next())
save_path = checkpoint.save(checkpoint_prefix)
self.assertAllEqual(2, get_next())
checkpoint.restore(save_path).run_restore_ops()
self.assertAllEqual(2, get_next())
save_path = checkpoint.save(checkpoint_prefix)
checkpoint.restore(save_path).run_restore_ops()
with self.assertRaises(errors.OutOfRangeError):
get_next()
def testRestoreInReconstructedIteratorInitializable(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
dataset = dataset_ops.Dataset.range(10)
iterator = iter(dataset) if context.executing_eagerly(
) else dataset_ops.make_initializable_iterator(dataset)
get_next = iterator.get_next
checkpoint = trackable_utils.Checkpoint(iterator=iterator)
for i in range(5):
checkpoint.restore(
checkpoint_management.latest_checkpoint(
checkpoint_directory)).initialize_or_restore()
for j in range(2):
self.assertEqual(i * 2 + j, self.evaluate(get_next()))
checkpoint.save(file_prefix=checkpoint_prefix)
if __name__ == "__main__":
test.main()
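# --- Illustrative sketch, not part of the original test file ---
# A minimal standalone example, assuming TF 2.x eager execution, of saving and
# restoring a dataset iterator with `tf.train.Checkpoint`: restoring rewinds
# the iterator to the position it had when the checkpoint was written.
import os
import tempfile
import tensorflow as tf

dataset = tf.data.Dataset.range(5)
iterator = iter(dataset)
ckpt = tf.train.Checkpoint(iterator=iterator)
prefix = os.path.join(tempfile.mkdtemp(), "it")

print(next(iterator).numpy())   # 0
path = ckpt.save(prefix)        # checkpoint taken after one element consumed
print(next(iterator).numpy())   # 1
print(next(iterator).numpy())   # 2
ckpt.restore(path)              # rewind to the saved position
print(next(iterator).numpy())   # 1 again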
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/iterator_checkpoint_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.shuffle()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ShuffleTest(test_base.DatasetTestBase, parameterized.TestCase):
def testShuffleDataset(self):
components = (
np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
np.array([9.0, 10.0, 11.0, 12.0])
)
def dataset_fn(count=5, buffer_size=None, seed=0):
repeat_dataset = (
dataset_ops.Dataset.from_tensor_slices(components).repeat(count))
if buffer_size:
shuffle_dataset = repeat_dataset.shuffle(buffer_size, seed)
self.assertEqual(
tuple([c.shape[1:] for c in components]),
dataset_ops.get_legacy_output_shapes(shuffle_dataset))
return shuffle_dataset
else:
return repeat_dataset
# First run without shuffling to collect the "ground truth".
get_next = self.getNext(dataset_fn())
unshuffled_elements = []
for _ in range(20):
unshuffled_elements.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Assert that the shuffled dataset has the same elements as the
# "ground truth".
get_next = self.getNext(dataset_fn(buffer_size=100, seed=37))
shuffled_elements = []
for _ in range(20):
shuffled_elements.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertAllEqual(sorted(unshuffled_elements), sorted(shuffled_elements))
# Assert that shuffling twice with the same seeds gives the same sequence.
get_next = self.getNext(dataset_fn(buffer_size=100, seed=37))
reshuffled_elements_same_seed = []
for _ in range(20):
reshuffled_elements_same_seed.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(shuffled_elements, reshuffled_elements_same_seed)
# Assert that shuffling twice with a different seed gives a different
# permutation of the same elements.
get_next = self.getNext(dataset_fn(buffer_size=100, seed=137))
reshuffled_elements_different_seed = []
for _ in range(20):
reshuffled_elements_different_seed.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertNotEqual(shuffled_elements, reshuffled_elements_different_seed)
self.assertAllEqual(
sorted(shuffled_elements), sorted(reshuffled_elements_different_seed))
# Assert that the shuffled dataset has the same elements as the
# "ground truth" when the buffer size is smaller than the input
# dataset.
get_next = self.getNext(dataset_fn(buffer_size=2, seed=37))
reshuffled_elements_small_buffer = []
for _ in range(20):
reshuffled_elements_small_buffer.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertAllEqual(
sorted(unshuffled_elements), sorted(reshuffled_elements_small_buffer))
# Test the case of shuffling an empty dataset.
get_next = self.getNext(dataset_fn(count=0, buffer_size=100, seed=37))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@test_util.run_deprecated_v1
def testSkipEagerSeedZero(self):
"""Test for same behavior when the seed is a Python or Tensor zero."""
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.range(10).shuffle(10, seed=0))
get_next = iterator.get_next()
elems = []
with self.cached_session() as sess:
for _ in range(10):
elems.append(sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
seed_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10).shuffle(10, seed=seed_placeholder))
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(iterator.initializer, feed_dict={seed_placeholder: 0})
for elem in elems:
self.assertEqual(elem, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testDefaultArguments(self):
components = [0, 1, 2, 3, 4]
dataset = dataset_ops.Dataset.from_tensor_slices(components).shuffle(
5).repeat()
get_next = self.getNext(dataset)
counts = collections.defaultdict(lambda: 0)
for _ in range(10):
for _ in range(5):
counts[self.evaluate(get_next())] += 1
for i in range(5):
self.assertEqual(10, counts[i])
@parameterized.named_parameters(
("Reshuffle", True),
("NoReshuffle", False),
)
def testReshuffle(self, reshuffle):
dataset = dataset_ops.Dataset.range(10).shuffle(
10, reshuffle_each_iteration=reshuffle).repeat(2)
next_element = self.getNext(dataset)
first_epoch = []
for _ in range(10):
first_epoch.append(self.evaluate(next_element()))
second_epoch = []
for _ in range(10):
second_epoch.append(self.evaluate(next_element()))
self.assertEqual(first_epoch == second_epoch, not reshuffle)
@parameterized.named_parameters(
("ReshuffleGraphLevelSeed", True, 38, None),
("ReshuffleOpLevelSeed", True, None, 42),
("ReshuffleGraphAndOpLevelSeed", True, 38, 42),
("NoReshuffleGraphLevelSeed", False, 38, None),
("NoReshuffleOpLevelSeed", False, None, 42),
("NoReshuffleGraphAndOpLevelSeed", False, 38, 42),
)
def testSkipEagerShuffleSeed(self, reshuffle, graph_level_seed,
op_level_seed):
results = []
for _ in range(2):
with ops.Graph().as_default() as g:
random_seed.set_random_seed(graph_level_seed)
dataset = dataset_ops.Dataset.range(10).shuffle(
10, seed=op_level_seed, reshuffle_each_iteration=reshuffle).repeat(
3)
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
run_results = []
with self.session(graph=g) as sess:
for _ in range(30):
run_results.append(sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
results.append(run_results)
self.assertAllEqual(results[0], results[1])
# TODO(b/117581999): this fails in eager mode because results[0] equals
# results[1]; debug.
@parameterized.named_parameters(
("ReshuffleOneShot", True, False),
("ReshuffleInitializable", True, True),
("NoReshuffleOneShot", False, False),
("NoReshuffleInitializable", False, True),
)
def testSkipEagerMultipleIterators(self, reshuffle, initializable):
with ops.Graph().as_default() as g:
dataset = dataset_ops.Dataset.range(100).shuffle(
10, reshuffle_each_iteration=reshuffle).repeat(3)
if initializable:
iterators = [dataset_ops.make_initializable_iterator(dataset)
for _ in range(2)]
else:
iterators = [dataset_ops.make_one_shot_iterator(dataset)
for _ in range(2)]
results = []
with self.session(graph=g) as sess:
for iterator in iterators:
if initializable:
sess.run(iterator.initializer)
next_element = iterator.get_next()
run_results = []
for _ in range(300):
run_results.append(sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
results.append(run_results)
self.assertNotEqual(results[0], results[1])
if __name__ == "__main__":
test.main()
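# --- Illustrative sketch, not part of the original test file ---
# A minimal standalone example, assuming TF 2.x eager execution, of how `seed`
# and `reshuffle_each_iteration` interact: with reshuffling disabled, every
# epoch repeats the same permutation; with it enabled, each epoch is a fresh
# permutation of the same elements.
import tensorflow as tf

def two_epochs(reshuffle):
  ds = tf.data.Dataset.range(10).shuffle(
      10, seed=37, reshuffle_each_iteration=reshuffle).repeat(2)
  values = [int(x) for x in ds]
  return values[:10], values[10:]

first, second = two_epochs(reshuffle=False)
assert first == second                    # identical order in both epochs

first, second = two_epochs(reshuffle=True)
assert sorted(first) == sorted(second)    # same elements, order may differ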
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/shuffle_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.TextLineDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.util import compat
try:
import psutil # pylint: disable=g-import-not-at-top
psutil_import_succeeded = True
except ImportError:
psutil_import_succeeded = False
@test_util.run_all_in_graph_and_eager_modes
class TextLineDatasetTest(test_base.DatasetTestBase):
def _lineText(self, f, l):
return compat.as_bytes("%d: %d" % (f, l))
def _createFiles(self,
num_files,
num_lines,
crlf=False,
compression_type=None):
filenames = []
for i in range(num_files):
fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i)
filenames.append(fn)
contents = []
for j in range(num_lines):
contents.append(self._lineText(i, j))
# Always include a newline after the record unless it is the last record
# in the file; for the last record, only the first file (i == 0) gets a
# trailing newline, so both trailing-newline layouts are exercised.
if j + 1 != num_lines or i == 0:
contents.append(b"\r\n" if crlf else b"\n")
contents = b"".join(contents)
if not compression_type:
with open(fn, "wb") as f:
f.write(contents)
elif compression_type == "GZIP":
with gzip.GzipFile(fn, "wb") as f:
f.write(contents)
elif compression_type == "ZLIB":
contents = zlib.compress(contents)
with open(fn, "wb") as f:
f.write(contents)
else:
raise ValueError("Unsupported compression_type", compression_type)
return filenames
def _testTextLineDataset(self, compression_type=None):
test_filenames = self._createFiles(
2, 5, crlf=True, compression_type=compression_type)
def dataset_fn(filenames, num_epochs, batch_size=None):
repeat_dataset = readers.TextLineDataset(
filenames, compression_type=compression_type).repeat(num_epochs)
if batch_size:
return repeat_dataset.batch(batch_size)
return repeat_dataset
# Basic test: read from file 0.
expected_output = [self._lineText(0, i) for i in range(5)]
self.assertDatasetProduces(
dataset_fn([test_filenames[0]], 1), expected_output=expected_output)
# Basic test: read from file 1.
self.assertDatasetProduces(
dataset_fn([test_filenames[1]], 1),
expected_output=[self._lineText(1, i) for i in range(5)])
# Basic test: read from both files.
expected_output = [self._lineText(0, i) for i in range(5)]
expected_output.extend([self._lineText(1, i) for i in range(5)])
self.assertDatasetProduces(
dataset_fn(test_filenames, 1), expected_output=expected_output)
# Test repeated iteration through both files.
expected_output = [self._lineText(0, i) for i in range(5)]
expected_output.extend([self._lineText(1, i) for i in range(5)])
self.assertDatasetProduces(
dataset_fn(test_filenames, 10), expected_output=expected_output * 10)
# Test batched and repeated iteration through both files.
self.assertDatasetProduces(
dataset_fn(test_filenames, 10, 5),
expected_output=[[self._lineText(0, i) for i in range(5)],
[self._lineText(1, i) for i in range(5)]] * 10)
def testTextLineDatasetParallelRead(self):
test_filenames = self._createFiles(10, 10)
files = dataset_ops.Dataset.from_tensor_slices(test_filenames).repeat(10)
expected_output = []
for j in range(10):
expected_output.extend([self._lineText(j, i) for i in range(10)])
dataset = readers.TextLineDataset(files, num_parallel_reads=4)
self.assertDatasetProduces(
dataset, expected_output=expected_output * 10, assert_items_equal=True)
def testTextLineDatasetNoCompression(self):
self._testTextLineDataset()
def testTextLineDatasetGzipCompression(self):
self._testTextLineDataset(compression_type="GZIP")
def testTextLineDatasetZlibCompression(self):
self._testTextLineDataset(compression_type="ZLIB")
def testTextLineDatasetBuffering(self):
test_filenames = self._createFiles(2, 5, crlf=True)
repeat_dataset = readers.TextLineDataset(test_filenames, buffer_size=10)
expected_output = []
for j in range(2):
expected_output.extend([self._lineText(j, i) for i in range(5)])
self.assertDatasetProduces(repeat_dataset, expected_output=expected_output)
def testIteratorResourceCleanup(self):
filename = os.path.join(self.get_temp_dir(), "text.txt")
with open(filename, "wt") as f:
for i in range(3):
f.write("%d\n" % (i,))
with context.eager_mode():
first_iterator = iter(readers.TextLineDataset(filename))
self.assertEqual(b"0", next(first_iterator).numpy())
second_iterator = iter(readers.TextLineDataset(filename))
self.assertEqual(b"0", next(second_iterator).numpy())
# Eager kernel caching is based on op attributes, which include the
# Dataset's output shape. Create a kernel with a different output shape to
# check that the two kernels don't create resources with the same names.
different_kernel_iterator = iter(
readers.TextLineDataset(filename).repeat().batch(16))
self.assertEqual([16], next(different_kernel_iterator).shape)
# Remove our references to the Python Iterator objects, which (assuming no
# reference cycles) is enough to trigger DestroyResourceOp and close the
# partially-read files.
del first_iterator
del second_iterator
del different_kernel_iterator
if not psutil_import_succeeded:
self.skipTest(
"psutil is required to check that we've closed our files.")
open_files = psutil.Process().open_files()
self.assertNotIn(filename, [open_file.path for open_file in open_files])
if __name__ == "__main__":
test.main()
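# --- Illustrative sketch, not part of the original test file ---
# A minimal standalone example, assuming TF 2.x eager execution, of reading a
# GZIP-compressed text file line by line with `tf.data.TextLineDataset`; as in
# the tests above, both LF and CRLF line endings are stripped from the output.
import gzip
import os
import tempfile
import tensorflow as tf

path = os.path.join(tempfile.mkdtemp(), "lines.txt.gz")
with gzip.open(path, "wb") as f:
  f.write(b"first\r\nsecond\r\nthird\n")

dataset = tf.data.TextLineDataset(path, compression_type="GZIP")
print([line.numpy() for line in dataset])  # [b'first', b'second', b'third']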
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/text_line_dataset_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.from_tensor_slices()."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class FromTensorSlicesTest(test_base.DatasetTestBase):
def testFromTensorSlices(self):
"""Test a dataset that represents the slices from a tuple of tensors."""
components = (
np.tile(np.array([[1], [2], [3], [4]]), 20), np.tile(
np.array([[12], [13], [14], [15]]), 22),
np.array([37.0, 38.0, 39.0, 40.0])
)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
get_next = self.getNext(dataset)
self.assertEqual(
[c.shape[1:] for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
for i in range(4):
results = self.evaluate(get_next())
for component, result_component in zip(components, results):
self.assertAllEqual(component[i], result_component)
with self.assertRaises(errors.OutOfRangeError):
results = self.evaluate(get_next())
def testFromTensorSlicesDataset(self):
dss = [dataset_ops.Dataset.range(10) for _ in range(10)]
ds = dataset_ops.Dataset.from_tensor_slices(dss)
ds = ds.flat_map(lambda x: x)
self.assertDatasetProduces(ds, expected_output=list(range(10)) * 10)
def testFromTensorSlicesDatasetInFunction(self):
dss = [dataset_ops.Dataset.range(10) for _ in range(10)]
ds = dataset_ops.Dataset.from_tensors(dss)
ds = ds.flat_map(dataset_ops.Dataset.from_tensor_slices)
ds = ds.flat_map(lambda x: x)
self.assertDatasetProduces(ds, expected_output=list(range(10)) * 10)
def testFromTensorSlicesSparse(self):
"""Test a dataset that represents the slices from a tuple of tensors."""
components = (sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 0], [2, 0]]),
values=np.array([0, 0, 0]),
dense_shape=np.array([3, 1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1], [2, 2]]),
values=np.array([1, 2, 3]),
dense_shape=np.array([3, 3])))
dataset = dataset_ops.Dataset.from_tensor_slices(components)
self.assertEqual(
[tensor_shape.TensorShape(c.dense_shape[1:]) for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
expected = [
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([1]),
dense_shape=np.array([3]))),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[1]]),
values=np.array([2]),
dense_shape=np.array([3]))),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[2]]),
values=np.array([3]),
dense_shape=np.array([3]))),
]
self.assertDatasetProduces(dataset, expected_output=expected)
def testFromTensorSlicesMixed(self):
"""Test a dataset that represents the slices from a tuple of tensors."""
components = (np.tile(np.array([[1], [2], [3]]), 20),
np.tile(np.array([[12], [13], [14]]), 22),
np.array([37.0, 38.0, 39.0]),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 0], [2, 0]]),
values=np.array([0, 0, 0]),
dense_shape=np.array([3, 1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1], [2, 2]]),
values=np.array([1, 2, 3]),
dense_shape=np.array([3, 3])))
dataset = dataset_ops.Dataset.from_tensor_slices(components)
get_next = self.getNext(dataset)
self.assertEqual([
tensor_shape.TensorShape(c.dense_shape[1:])
if sparse_tensor.is_sparse(c) else c.shape[1:] for c in components
], [shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
expected = [
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([1]),
dense_shape=np.array([3]))),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[1]]),
values=np.array([2]),
dense_shape=np.array([3]))),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[2]]),
values=np.array([3]),
dense_shape=np.array([3]))),
]
for i in range(3):
results = self.evaluate(get_next())
for component, result_component in zip(
(list(zip(*components[:3]))[i] + expected[i]), results):
if sparse_tensor.is_sparse(component):
self.assertSparseValuesEqual(component, result_component)
else:
self.assertAllEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testFromTensorSlicesWithDict(self):
components = {"foo": [1, 2, 3], "bar": [[4.0], [5.0], [6.0]]}
dataset = dataset_ops.Dataset.from_tensor_slices(components)
get_next = self.getNext(dataset)
self.assertEqual(dtypes.int32,
dataset_ops.get_legacy_output_types(dataset)["foo"])
self.assertEqual(dtypes.float32,
dataset_ops.get_legacy_output_types(dataset)["bar"])
self.assertEqual((), dataset_ops.get_legacy_output_shapes(dataset)["foo"])
self.assertEqual((1,), dataset_ops.get_legacy_output_shapes(dataset)["bar"])
for i in range(3):
results = self.evaluate(get_next())
self.assertEqual(components["foo"][i], results["foo"])
self.assertEqual(components["bar"][i], results["bar"])
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testFromTensorSlicesRagged(self):
components = (
ragged_factory_ops.constant_value([[[0]], [[1]], [[2]]]),
ragged_factory_ops.constant_value([[[3]], [[4]], [[5]]]),
)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
expected = [(ragged_factory_ops.constant_value([[0]]),
ragged_factory_ops.constant_value([[3]])),
(ragged_factory_ops.constant_value([[1]]),
ragged_factory_ops.constant_value([[4]])),
(ragged_factory_ops.constant_value([[2]]),
ragged_factory_ops.constant_value([[5]]))]
self.assertDatasetProduces(dataset, expected_output=expected)
def testFromTensorSlicesMixedRagged(self):
    components = (np.tile(np.array([[1], [2], [3]]), 20),
                  np.tile(np.array([[12], [13], [14]]), 22),
                  np.array([37.0, 38.0, 39.0]),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 0], [2, 0]]),
values=np.array([0, 0, 0]),
dense_shape=np.array([3, 1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1], [2, 2]]),
values=np.array([1, 2, 3]),
dense_shape=np.array([3, 3])),
ragged_factory_ops.constant_value([[[0]], [[1]], [[2]]]))
dataset = dataset_ops.Dataset.from_tensor_slices(components)
get_next = self.getNext(dataset)
expected = [
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([1]),
             dense_shape=np.array([3])),
         ragged_factory_ops.constant_value([[0]])),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[1]]),
values=np.array([2]),
             dense_shape=np.array([3])),
         ragged_factory_ops.constant_value([[1]])),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[2]]),
values=np.array([3]),
             dense_shape=np.array([3])),
         ragged_factory_ops.constant_value([[2]])),
]
for i in range(3):
results = self.evaluate(get_next())
for component, result_component in zip(
(list(zip(*components[:3]))[i] + expected[i]), results):
if sparse_tensor.is_sparse(component):
self.assertSparseValuesEqual(component, result_component)
elif ragged_tensor.is_ragged(component):
self.assertRaggedEqual(component, result_component)
else:
self.assertAllEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testFromTensorSlicesWithUintDtypes(self):
components = (
np.tile(np.array([[0], [1]], dtype=np.uint8), 2),
np.tile(np.array([[2], [256]], dtype=np.uint16), 2),
np.tile(np.array([[4], [65536]], dtype=np.uint32), 2),
np.tile(np.array([[8], [4294967296]], dtype=np.uint64), 2),
)
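    # Each component contains a value (256, 65536, 4294967296) that does not
    # fit in the next-smaller unsigned type, exercising the full range of each
    # dtype checked below.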
expected_types = (dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64)
expected_output = [tuple([c[i] for c in components]) for i in range(2)]
dataset = dataset_ops.Dataset.from_tensor_slices(components)
self.assertEqual(expected_types,
dataset_ops.get_legacy_output_types(dataset))
self.assertDatasetProduces(dataset, expected_output)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/from_tensor_slices_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.interleave()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
def _interleave(lists, cycle_length, block_length):
"""Reference implementation of interleave used for testing.
Args:
lists: a list of lists to interleave
cycle_length: the length of the interleave cycle
block_length: the length of the interleave block
Yields:
Elements of `lists` interleaved in the order determined by `cycle_length`
and `block_length`.
"""
num_open = 0
# `all_iterators` acts as a queue of iterators over each element of `lists`.
all_iterators = [iter(l) for l in lists]
# `open_iterators` are the iterators whose elements are currently being
# interleaved.
open_iterators = []
if cycle_length == dataset_ops.AUTOTUNE:
cycle_length = multiprocessing.cpu_count()
for i in range(cycle_length):
if all_iterators:
open_iterators.append(all_iterators.pop(0))
num_open += 1
else:
open_iterators.append(None)
while num_open or all_iterators:
for i in range(cycle_length):
if open_iterators[i] is None:
if all_iterators:
open_iterators[i] = all_iterators.pop(0)
num_open += 1
else:
continue
for _ in range(block_length):
try:
yield next(open_iterators[i])
except StopIteration:
open_iterators[i] = None
num_open -= 1
break
def _repeat(values, count):
"""Produces a list of lists suitable for testing interleave.
Args:
values: for each element `x` the result contains `[x] * x`
count: determines how many times to repeat `[x] * x` in the result
Returns:
A list of lists of values suitable for testing interleave.
"""
return [[value] * value for value in np.tile(values, count)]
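# A quick illustration of the two helpers above (not part of the original
# tests): _repeat([2, 3], 1) produces [[2, 2], [3, 3, 3]], and interleaving
# those lists with cycle_length=2 and block_length=1 alternates between them
# one element at a time until both are exhausted:
#
#   list(_interleave(_repeat([2, 3], 1), cycle_length=2, block_length=1))
#   # -> [2, 3, 2, 3, 3]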
@test_util.run_all_in_graph_and_eager_modes
class InterleaveTest(test_base.DatasetTestBase, parameterized.TestCase):
@parameterized.named_parameters(
("1", [4, 5, 6], 1, 1, [
4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 4, 4, 4, 4, 5, 5, 5, 5,
5, 6, 6, 6, 6, 6, 6
]),
("2", [4, 5, 6], 2, 1, [
4, 5, 4, 5, 4, 5, 4, 5, 5, 6, 6, 4, 6, 4, 6, 4, 6, 4, 6, 5, 6, 5, 6,
5, 6, 5, 6, 5, 6, 6
]),
("3", [4, 5, 6], 2, 3, [
4, 4, 4, 5, 5, 5, 4, 5, 5, 6, 6, 6, 4, 4, 4, 6, 6, 6, 4, 5, 5, 5, 6,
6, 6, 5, 5, 6, 6, 6
]),
("4", [4, 5, 6], 7, 2, [
4, 4, 5, 5, 6, 6, 4, 4, 5, 5, 6, 6, 4, 4, 5, 5, 6, 6, 4, 4, 5, 5, 6,
6, 5, 6, 6, 5, 6, 6
]),
("5", [4, 0, 6], 2, 1,
[4, 4, 6, 4, 6, 4, 6, 6, 4, 6, 4, 6, 4, 4, 6, 6, 6, 6, 6, 6]),
)
def testPythonImplementation(self, input_values, cycle_length, block_length,
expected_elements):
input_lists = _repeat(input_values, 2)
for expected, produced in zip(
expected_elements, _interleave(input_lists, cycle_length,
block_length)):
self.assertEqual(expected, produced)
@parameterized.named_parameters(
("1", np.int64([4, 5, 6]), 1, 3, None),
("2", np.int64([4, 5, 6]), 1, 3, 1),
("3", np.int64([4, 5, 6]), 2, 1, None),
("4", np.int64([4, 5, 6]), 2, 1, 1),
("5", np.int64([4, 5, 6]), 2, 1, 2),
("6", np.int64([4, 5, 6]), 2, 3, None),
("7", np.int64([4, 5, 6]), 2, 3, 1),
("8", np.int64([4, 5, 6]), 2, 3, 2),
("9", np.int64([4, 5, 6]), 7, 2, None),
("10", np.int64([4, 5, 6]), 7, 2, 1),
("11", np.int64([4, 5, 6]), 7, 2, 3),
("12", np.int64([4, 5, 6]), 7, 2, 5),
("13", np.int64([4, 5, 6]), 7, 2, 7),
("14", np.int64([4, 5, 6]), dataset_ops.AUTOTUNE, 3, None),
("15", np.int64([4, 5, 6]), dataset_ops.AUTOTUNE, 3, 1),
("16", np.int64([]), 2, 3, None),
("17", np.int64([0, 0, 0]), 2, 3, None),
("18", np.int64([4, 0, 6]), 2, 3, None),
("19", np.int64([4, 0, 6]), 2, 3, 1),
("20", np.int64([4, 0, 6]), 2, 3, 2),
)
def testInterleaveDataset(self, input_values, cycle_length, block_length,
num_parallel_calls):
count = 2
dataset = dataset_ops.Dataset.from_tensor_slices(input_values).repeat(
count).interleave(
lambda x: dataset_ops.Dataset.from_tensors(x).repeat(x),
cycle_length, block_length, num_parallel_calls)
expected_output = [
element for element in _interleave(
_repeat(input_values, count), cycle_length, block_length)
]
self.assertDatasetProduces(dataset, expected_output)
@parameterized.named_parameters(
("1", np.float32([1., np.nan, 2., np.nan, 3.]), 1, 3, None),
("2", np.float32([1., np.nan, 2., np.nan, 3.]), 1, 3, 1),
("3", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 1, None),
("4", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 1, 1),
("5", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 1, 2),
("6", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 3, None),
("7", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 3, 1),
("8", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 3, 2),
("9", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, None),
("10", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, 1),
("11", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, 3),
("12", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, 5),
("13", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, 7),
)
def testInterleaveDatasetError(self, input_values, cycle_length, block_length,
num_parallel_calls):
dataset = dataset_ops.Dataset.from_tensor_slices(input_values).map(
lambda x: array_ops.check_numerics(x, "message")).interleave(
dataset_ops.Dataset.from_tensors, cycle_length, block_length,
num_parallel_calls)
get_next = self.getNext(dataset)
for value in input_values:
if np.isnan(value):
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
else:
self.assertEqual(value, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testInterleaveSparse(self):
def _map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])
def _interleave_fn(x):
return dataset_ops.Dataset.from_tensor_slices(
sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))
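    # Densifying the 2x2 sparse value for index `i` gives rows [i, 0] and
    # [0, -i]; slicing it therefore contributes those two rows per element.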
dataset = dataset_ops.Dataset.range(10).map(_map_fn).interleave(
_interleave_fn, cycle_length=1)
get_next = self.getNext(dataset)
for i in range(10):
for j in range(2):
expected = [i, 0] if j % 2 == 0 else [0, -i]
self.assertAllEqual(expected, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@parameterized.named_parameters(
("1", np.int64([4, 5, 6]), 1, 3, 1),
("2", np.int64([4, 5, 6]), 2, 1, 1),
("3", np.int64([4, 5, 6]), 2, 1, 2),
("4", np.int64([4, 5, 6]), 2, 3, 1),
("5", np.int64([4, 5, 6]), 2, 3, 2),
("6", np.int64([4, 5, 6]), 7, 2, 1),
("7", np.int64([4, 5, 6]), 7, 2, 3),
("8", np.int64([4, 5, 6]), 7, 2, 5),
("9", np.int64([4, 5, 6]), 7, 2, 7),
("10", np.int64([4, 5, 6]), dataset_ops.AUTOTUNE, 3, 1),
("11", np.int64([4, 0, 6]), 2, 3, 1),
("12", np.int64([4, 0, 6]), 2, 3, 2),
)
def testSloppyInterleaveDataset(self, input_values, cycle_length,
block_length, num_parallel_calls):
count = 2
dataset = dataset_ops.Dataset.from_tensor_slices(input_values).repeat(
count).interleave(
lambda x: dataset_ops.Dataset.from_tensors(x).repeat(x),
cycle_length, block_length, num_parallel_calls)
options = dataset_ops.Options()
options.experimental_deterministic = False
dataset = dataset.with_options(options)
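    # With determinism disabled the elements may be produced in any order, so
    # the outputs are compared as sorted lists below rather than element-wise.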
expected_output = [
element for element in _interleave(
_repeat(input_values, count), cycle_length, block_length)
]
get_next = self.getNext(dataset)
actual_output = []
for _ in range(len(expected_output)):
actual_output.append(self.evaluate(get_next()))
    self.assertAllEqual(sorted(expected_output), sorted(actual_output))
def testInterleaveMap(self):
dataset = dataset_ops.Dataset.range(100)
def interleave_fn(x):
dataset = dataset_ops.Dataset.from_tensors(x)
return dataset.map(lambda x: x + x)
dataset = dataset.interleave(interleave_fn, cycle_length=5)
dataset = dataset.interleave(interleave_fn, cycle_length=5)
self.assertDatasetProduces(dataset, [4 * x for x in range(100)])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/interleave_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.take()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class TakeTest(test_base.DatasetTestBase):
def testTakeTensorDataset(self):
components = (np.arange(10),)
def do_test(count):
dataset = dataset_ops.Dataset.from_tensor_slices(components).take(count)
self.assertEqual(
[c.shape[1:] for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
num_output = min(count, 10) if count != -1 else 10
self.assertDatasetProduces(
dataset, [tuple(components[0][i:i + 1]) for i in range(num_output)])
# Take fewer than input size
do_test(4)
# Take more than input size
do_test(25)
# Take all of input
do_test(-1)
# Take nothing
do_test(0)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/take_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import optional_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
# For testing deserialization of Datasets represented as functions
class _RevivedDataset(dataset_ops.DatasetV2):
def __init__(self, variant, element_structure):
self._structure = element_structure
super(_RevivedDataset, self).__init__(variant)
def _inputs(self):
return []
@property
def _element_structure(self):
return self._structure
@test_util.run_all_in_graph_and_eager_modes
class DatasetTest(test_base.DatasetTestBase, parameterized.TestCase):
def testAsSerializedGraph(self):
dataset = dataset_ops.Dataset.range(10)
graph = graph_pb2.GraphDef().FromString(
self.evaluate(dataset._as_serialized_graph()))
    self.assertTrue(any(node.op == "RangeDataset" for node in graph.node))
def testAsFunctionWithMap(self):
if not context.executing_eagerly():
self.skipTest("Only works executing eagerly")
with ops.device("CPU"):
original_dataset = dataset_ops.Dataset.range(5).map(lambda x: x * 2)
fn = original_dataset._trace_variant_creation()
variant = fn()
revived_dataset = _RevivedDataset(
variant, original_dataset._element_structure)
self.assertDatasetProduces(revived_dataset, range(0, 10, 2))
def testAsFunctionWithMapInFlatMap(self):
if not context.executing_eagerly():
self.skipTest("Only works executing eagerly")
with ops.device("CPU"):
original_dataset = dataset_ops.Dataset.range(5).flat_map(
lambda x: dataset_ops.Dataset.range(5).map(lambda x: x * 2))
fn = original_dataset._trace_variant_creation()
variant = fn()
revived_dataset = _RevivedDataset(
variant, original_dataset._element_structure)
self.assertDatasetProduces(revived_dataset, list(original_dataset))
@staticmethod
def make_apply_fn(dataset):
def apply_fn(dataset):
def _apply_fn(dataset):
return dataset.cache()
return dataset.apply(_apply_fn)
return apply_fn
@staticmethod
def make_gen():
def gen():
yield 42
return gen
@staticmethod
def make_interleave_fn(dataset, num_parallel_calls=None):
def interleave_fn(dataset):
return dataset.interleave(
lambda x: dataset_ops.Dataset.range(0),
cycle_length=2,
num_parallel_calls=num_parallel_calls)
return interleave_fn
@parameterized.named_parameters(
("FixedLengthRecord",
lambda: readers.FixedLengthRecordDataset("", 42)),
("FromGenerator",
lambda: dataset_ops.Dataset.from_generator(
DatasetTest.make_gen(), dtypes.int32),
1),
("FromTensors", lambda: dataset_ops.Dataset.from_tensors([42])),
("FromTensorSlices", lambda: dataset_ops.Dataset.from_tensors([42])),
("Range", lambda: dataset_ops.Dataset.range(10)),
("TextLine", lambda: readers.TextLineDataset("")),
("TFRecord", lambda: readers.TFRecordDataset(""), 1),
)
def testDatasetSimpleSourceInputs(self, dataset_fn, num_inputs=0):
self.assertLen(dataset_fn()._inputs(), num_inputs)
@test_util.run_v1_only("deprecated API, no eager or V2 test coverage")
def testDatasetComplexSourceInputs(self):
dataset_fn = dataset_ops.Dataset.from_sparse_tensor_slices(
sparse_tensor.SparseTensor(
indices=np.array([[0, 0], [1, 0], [2, 0]]),
values=np.array([0, 0, 0]),
dense_shape=np.array([3, 1])))
self.assertEmpty(dataset_fn._inputs())
@parameterized.named_parameters(
("Batch",
lambda x: x.batch(10),
lambda: dataset_ops.Dataset.range(0)),
("Cache",
lambda x: x.cache(),
lambda: dataset_ops.Dataset.range(0)),
("Filter",
lambda x: x.filter(lambda x: True),
lambda: dataset_ops.Dataset.range(0)),
("FlatMap",
lambda x: x.flat_map(lambda x: dataset_ops.Dataset.range(0)),
lambda: dataset_ops.Dataset.range(0)),
("Map",
lambda x: x.map(lambda x: x),
lambda: dataset_ops.Dataset.range(0)),
("PaddedBatch",
lambda x: x.padded_batch(10, []),
lambda: dataset_ops.Dataset.range(0)),
("ParallelMap",
lambda x: x.map(lambda x: x, num_parallel_calls=2),
lambda: dataset_ops.Dataset.range(0)),
("Repeat",
lambda x: x.repeat(),
lambda: dataset_ops.Dataset.range(0)),
("Shuffle",
lambda x: x.shuffle(10),
lambda: dataset_ops.Dataset.range(0)),
("Skip",
lambda x: x.skip(1),
lambda: dataset_ops.Dataset.range(0)),
("Take",
lambda x: x.take(1),
lambda: dataset_ops.Dataset.range(0)),
("Window",
lambda x: x.window(10),
lambda: dataset_ops.Dataset.range(0)),
)
def testUnaryTransformationInputs(self, dataset_fn, input_dataset_fn):
input_dataset = input_dataset_fn()
self.assertEqual([input_dataset], dataset_fn(input_dataset)._inputs())
def testUnaryTransformationInputsApply(self):
input_dataset = dataset_ops.Dataset.range(0)
dataset_fn = self.make_apply_fn(dataset_ops.Dataset.range(0))
self.assertEqual([input_dataset], dataset_fn(input_dataset)._inputs())
@parameterized.named_parameters(
("ParallelInterleave",
[lambda: dataset_ops.Dataset.range(0), 2],
lambda: dataset_ops.Dataset.range(0)),
("Interleave",
[lambda: dataset_ops.Dataset.range(0), None],
lambda: dataset_ops.Dataset.range(0)),
)
def testUnaryTransformationInputsWithInterleaveFn(
self, interleave_fn_args, input_dataset_fn):
input_dataset = input_dataset_fn()
dataset_fn = self.make_interleave_fn(*interleave_fn_args)
self.assertEqual([input_dataset], dataset_fn(input_dataset)._inputs())
def testNoWarnings(self):
with test.mock.patch.object(warnings, "warn") as mock_log:
dataset_fn = self.make_interleave_fn(dataset_ops.Dataset.range(10))
dataset_fn(dataset_ops.Dataset.range(10))
self.assertEmpty(mock_log.call_args_list)
@parameterized.named_parameters(
("Concatenate", lambda x, y: x.concatenate(y),
lambda: dataset_ops.Dataset.range(0),
lambda: dataset_ops.Dataset.range(1)))
def testBinaryTransformationInputs(self, dataset_fn, input1_fn, input2_fn):
input1 = input1_fn()
input2 = input2_fn()
self.assertEqual([input1, input2], dataset_fn(input1, input2)._inputs())
@parameterized.named_parameters(
("ZipOne",
dataset_ops.Dataset.zip,
lambda: (dataset_ops.Dataset.range(0))),
("ZipNest",
dataset_ops.Dataset.zip,
lambda: (dataset_ops.Dataset.range(0),
(dataset_ops.Dataset.range(1),
dataset_ops.Dataset.range(2)))),
("ZipTuple",
dataset_ops.Dataset.zip,
lambda: (dataset_ops.Dataset.range(0),
dataset_ops.Dataset.range(1))),
)
def testVariadicTransformationInputs(self, dataset_fn, input_datasets_fn):
input_datasets = input_datasets_fn()
self.assertEqual(
nest.flatten(input_datasets),
dataset_fn(input_datasets)._inputs())
def testFunctions(self):
dataset = dataset_ops.Dataset.range(5).map(lambda x: x * 2)
self.assertLen(dataset._functions(), 1)
def testCollectInputs(self):
ds1 = dataset_ops.Dataset.range(0)
ds2 = ds1.concatenate(ds1)
ds3 = dataset_ops.Dataset.zip((ds2, ds1, ds2))
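    # Walking the input graph breadth-first: ds3 expands to (ds2, ds1, ds2),
    # and each ds2 expands to (ds1, ds1), so ds1 is collected 1 + 2 * 2 = 5
    # times, ds2 twice, and ds3 once.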
inputs = []
queue = [ds3]
while queue:
ds = queue[0]
queue = queue[1:]
queue.extend(ds._inputs())
inputs.append(ds)
self.assertEqual(5, inputs.count(ds1))
self.assertEqual(2, inputs.count(ds2))
self.assertEqual(1, inputs.count(ds3))
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
("Tensor", lambda: constant_op.constant(37.0),
structure.TensorStructure(dtypes.float32, [])),
("SparseTensor", lambda: sparse_tensor.SparseTensor(
indices=[[0]], values=constant_op.constant([0], dtype=dtypes.int32),
dense_shape=[1]),
structure.SparseTensorStructure(dtypes.int32, [1])),
("Nest", lambda: {
"a": constant_op.constant(37.0),
"b": (constant_op.constant(["Foo"]), constant_op.constant("Bar"))},
structure.NestedStructure({
"a": structure.TensorStructure(dtypes.float32, []),
"b": (structure.TensorStructure(dtypes.string, [1]),
structure.TensorStructure(dtypes.string, []))})),
("Dataset", lambda: dataset_ops.Dataset.from_tensor_slices(
constant_op.constant([1, 2, 3])),
dataset_ops.DatasetStructure(
structure.TensorStructure(dtypes.int32, []))),
("Optional", lambda: optional_ops.Optional.from_value(37.0),
optional_ops.OptionalStructure(
structure.TensorStructure(dtypes.float32, []))),
)
def testDatasetStructure(self, tf_value_fn, expected_element_structure):
dataset = dataset_ops.Dataset.from_tensors(0).map(lambda _: tf_value_fn())
dataset_structure = type_spec.type_spec_from_value(dataset)
self.assertIsInstance(dataset_structure, dataset_ops.DatasetStructure)
# TODO(b/110122868): Add a public API to `tf.data.Dataset` for accessing
# the element structure.
self.assertTrue(expected_element_structure.is_compatible_with(
dataset_structure._element_structure))
self.assertTrue(dataset_structure._element_structure.is_compatible_with(
expected_element_structure))
self.assertEqual([dtypes.variant], dataset_structure._flat_types)
self.assertEqual([tensor_shape.scalar()], dataset_structure._flat_shapes)
# Assert that the `Dataset` survives a round-trip via _from_tensor_list()
# and _to_tensor_list().
round_trip_dataset = dataset_structure._from_tensor_list(
dataset_structure._to_tensor_list(dataset))
value = tf_value_fn()
if isinstance(value, dataset_ops.Dataset):
      self.assertDatasetsEqual(value,
                               round_trip_dataset.flat_map(lambda x: x))
elif isinstance(value, optional_ops.Optional):
self.assertDatasetProduces(
round_trip_dataset.map(lambda opt: opt.get_value()),
[self.evaluate(value.get_value())],
requires_initialization=True)
else:
self.assertDatasetProduces(
round_trip_dataset, [self.evaluate(tf_value_fn())],
requires_initialization=True)
@test_util.run_v1_only("graph mode specific, no eager or V2 test coverage")
def testSkipEagerSameGraphErrorOneShot(self):
dataset = dataset_ops.Dataset.range(10)
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError, "must be from the same graph"):
dataset = dataset.batch(2)
@test_util.run_v1_only("graph mode specific, no eager or V2 test coverage")
def testSkipEagerSameGraphErrorOneShotSimple(self):
dataset = dataset_ops.Dataset.range(10)
with ops.Graph().as_default():
with test.mock.patch.object(logging, "warning") as mock_log:
_ = dataset_ops.make_one_shot_iterator(dataset)
self.assertRegexpMatches(
str(mock_log.call_args), "Please ensure that all datasets in the "
"pipeline are created in the same graph as the iterator.")
@test_util.run_v1_only("graph mode specific, no eager or V2 test coverage")
def testSkipEagerSameGraphErrorInitializable(self):
dataset = dataset_ops.Dataset.range(10)
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError, "must be from the same graph"):
dataset = dataset.batch(2)
@parameterized.named_parameters(
("Async", context.ASYNC),
("Sync", context.SYNC),
)
def testDatasetEagerIteration(self, execution_mode):
with context.eager_mode(), context.execution_mode(execution_mode):
val = 0
dataset = dataset_ops.Dataset.range(10)
for foo in dataset:
self.assertEqual(val, foo.numpy())
val += 1
def testDatasetAsFunctionArgument(self):
@def_function.function
def _uses_dataset(d):
accumulator = array_ops.zeros([], dtype=dtypes.int64)
for value in d:
accumulator += value
return accumulator
with ops.device("CPU"):
first_dataset = dataset_ops.Dataset.range(10)
self.assertEqual(45, self.evaluate(_uses_dataset(first_dataset)))
second_dataset = dataset_ops.Dataset.range(11)
self.assertEqual(55, self.evaluate(_uses_dataset(second_dataset)))
first_concrete = _uses_dataset.get_concrete_function(first_dataset)
# The dataset should not be a captured input
self.assertEmpty(first_concrete.graph.captures)
# The two datasets have the same structure and so should re-use a trace.
self.assertIs(first_concrete,
_uses_dataset.get_concrete_function(second_dataset))
# With a different structure we should use a different trace.
self.assertIsNot(
first_concrete,
_uses_dataset.get_concrete_function(
dataset_ops.Dataset.zip((first_dataset, second_dataset))))
def testLimitedRetracing(self):
trace_count = [0]
@def_function.function
def f(ds):
trace_count[0] += 1
counter = np.int64(0)
for elem in ds:
counter += elem
return counter
dataset = dataset_ops.Dataset.range(5)
dataset2 = dataset_ops.Dataset.range(10)
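    # The two datasets differ only in length, not in element structure, so the
    # traced function should be reused and `trace_count` should stay at 1.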
for _ in range(10):
self.assertEqual(self.evaluate(f(dataset)), 10)
self.assertEqual(self.evaluate(f(dataset2)), 45)
self.assertEqual(trace_count[0], 1)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/dataset_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.prefetch()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class PrefetchTest(test_base.DatasetTestBase, parameterized.TestCase):
@parameterized.parameters((-1), (0), (5))
def testBufferSize(self, buffer_size):
dataset = dataset_ops.Dataset.range(10).prefetch(buffer_size=buffer_size)
self.assertDatasetProduces(dataset, expected_output=range(10))
@parameterized.parameters((-2), (-42))
def testInvalidBufferSize(self, buffer_size):
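    # Buffer sizes of -1 (which requests auto-tuning), 0, and positive values
    # are accepted by testBufferSize above; any other negative value should be
    # rejected once the dataset op is evaluated.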
with self.assertRaises(errors.InvalidArgumentError):
dataset = dataset_ops.Dataset.range(10).prefetch(buffer_size=buffer_size)
self.evaluate(dataset._variant_tensor)
@parameterized.parameters(*[(buffer_size, slack_period)
for buffer_size in (-1, None, 0, 5)
for slack_period in (1, 8)])
def testPrefetchWithSlack(self, buffer_size, slack_period):
dataset = dataset_ops.Dataset.range(100)
dataset = dataset_ops.PrefetchDataset(
dataset, buffer_size, slack_period=slack_period)
self.assertDatasetProduces(dataset, expected_output=range(100))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/prefetch_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.enumerate()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class EnumerateTest(test_base.DatasetTestBase):
def testEnumerate(self):
components = (["a", "b"], [1, 2], [37.0, 38])
start = constant_op.constant(20, dtype=dtypes.int64)
dataset = dataset_ops.Dataset.from_tensor_slices(components).enumerate(
start)
self.assertEqual(dtypes.int64,
dataset_ops.get_legacy_output_types(dataset)[0])
dataset_output_shapes = dataset_ops.get_legacy_output_shapes(dataset)
self.assertEqual((), dataset_output_shapes[0])
self.assertEqual([tensor_shape.TensorShape([])] * 3,
[shape for shape in dataset_output_shapes[1]])
self.assertDatasetProduces(dataset, [(20, (b"a", 1, 37.0)),
(21, (b"b", 2, 38.0))])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/enumerate_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.concatenate()."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ConcatenateTest(test_base.DatasetTestBase):
def testConcatenateDataset(self):
input_components = (
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 15),
np.array([37.0, 38.0, 39.0, 40.0]))
to_concatenate_components = (
np.tile(np.array([[1], [2], [3], [4], [5]]), 20),
np.tile(np.array([[12], [13], [14], [15], [16]]), 15),
np.array([37.0, 38.0, 39.0, 40.0, 41.0]))
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
to_concatenate_components)
concatenated = input_dataset.concatenate(dataset_to_concatenate)
self.assertEqual(
dataset_ops.get_legacy_output_shapes(concatenated),
(tensor_shape.TensorShape([20]), tensor_shape.TensorShape([15]),
tensor_shape.TensorShape([])))
get_next = self.getNext(concatenated)
for i in range(9):
result = self.evaluate(get_next())
if i < 4:
for component, result_component in zip(input_components, result):
self.assertAllEqual(component[i], result_component)
else:
for component, result_component in zip(to_concatenate_components,
result):
self.assertAllEqual(component[i - 4], result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testConcatenateDatasetDifferentShape(self):
input_components = (
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 4))
to_concatenate_components = (
np.tile(np.array([[1], [2], [3], [4], [5]]), 20),
np.tile(np.array([[12], [13], [14], [15], [16]]), 15))
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
to_concatenate_components)
concatenated = input_dataset.concatenate(dataset_to_concatenate)
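    # The first components share the static shape [20], which is preserved,
    # while the second components have shapes [4] and [15], so the mismatched
    # dimension is relaxed to None in the concatenated dataset.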
self.assertEqual(
[ts.as_list()
for ts in nest.flatten(
dataset_ops.get_legacy_output_shapes(concatenated))],
[[20], [None]])
get_next = self.getNext(concatenated)
for i in range(9):
result = self.evaluate(get_next())
if i < 4:
for component, result_component in zip(input_components, result):
self.assertAllEqual(component[i], result_component)
else:
for component, result_component in zip(to_concatenate_components,
result):
self.assertAllEqual(component[i - 4], result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testConcatenateDatasetDifferentStructure(self):
input_components = (
np.tile(np.array([[1], [2], [3], [4]]), 5),
np.tile(np.array([[12], [13], [14], [15]]), 4))
to_concatenate_components = (
np.tile(np.array([[1], [2], [3], [4], [5]]), 20),
np.tile(np.array([[12], [13], [14], [15], [16]]), 15),
np.array([37.0, 38.0, 39.0, 40.0, 41.0]))
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
to_concatenate_components)
with self.assertRaisesRegexp(TypeError, "have different types"):
input_dataset.concatenate(dataset_to_concatenate)
def testConcatenateDatasetDifferentKeys(self):
input_components = {
"foo": np.array([[1], [2], [3], [4]]),
"bar": np.array([[12], [13], [14], [15]])
}
to_concatenate_components = {
"foo": np.array([[1], [2], [3], [4]]),
"baz": np.array([[5], [6], [7], [8]])
}
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
to_concatenate_components)
with self.assertRaisesRegexp(TypeError, "have different types"):
input_dataset.concatenate(dataset_to_concatenate)
def testConcatenateDatasetDifferentType(self):
input_components = (
np.tile(np.array([[1], [2], [3], [4]]), 5),
np.tile(np.array([[12], [13], [14], [15]]), 4))
to_concatenate_components = (
np.tile(np.array([[1.0], [2.0], [3.0], [4.0]]), 5),
np.tile(np.array([[12], [13], [14], [15]]), 15))
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
to_concatenate_components)
with self.assertRaisesRegexp(TypeError, "have different types"):
input_dataset.concatenate(dataset_to_concatenate)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/concatenate_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.FixedLengthRecordDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import readers
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.util import compat
@test_util.run_all_in_graph_and_eager_modes
class FixedLengthRecordDatasetTest(test_base.DatasetTestBase):
def setUp(self):
super(FixedLengthRecordDatasetTest, self).setUp()
self._num_files = 2
self._num_records = 7
self._header_bytes = 5
self._record_bytes = 3
self._footer_bytes = 2
def _record(self, f, r):
return compat.as_bytes(str(f * 2 + r) * self._record_bytes)
def _createFiles(self, compression_type=None):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
filenames.append(fn)
contents = []
contents.append(b"H" * self._header_bytes)
for j in range(self._num_records):
contents.append(self._record(i, j))
contents.append(b"F" * self._footer_bytes)
contents = b"".join(contents)
if not compression_type:
with open(fn, "wb") as f:
f.write(contents)
elif compression_type == "GZIP":
with gzip.GzipFile(fn, "wb") as f:
f.write(contents)
elif compression_type == "ZLIB":
contents = zlib.compress(contents)
with open(fn, "wb") as f:
f.write(contents)
else:
raise ValueError("Unsupported compression_type", compression_type)
return filenames
def _testFixedLengthRecordDataset(self, compression_type=None):
test_filenames = self._createFiles(compression_type=compression_type)
def dataset_fn(filenames, num_epochs, batch_size=None):
repeat_dataset = readers.FixedLengthRecordDataset(
filenames,
self._record_bytes,
self._header_bytes,
self._footer_bytes,
compression_type=compression_type).repeat(num_epochs)
if batch_size:
return repeat_dataset.batch(batch_size)
return repeat_dataset
# Basic test: read from file 0.
self.assertDatasetProduces(
dataset_fn([test_filenames[0]], 1),
expected_output=[
self._record(0, i) for i in range(self._num_records)
])
# Basic test: read from file 1.
self.assertDatasetProduces(
dataset_fn([test_filenames[1]], 1),
expected_output=[
self._record(1, i) for i in range(self._num_records)
])
# Basic test: read from both files.
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
self.assertDatasetProduces(
dataset_fn(test_filenames, 1), expected_output=expected_output)
# Test repeated iteration through both files.
get_next = self.getNext(dataset_fn(test_filenames, 10))
for _ in range(10):
for j in range(self._num_files):
for i in range(self._num_records):
self.assertEqual(self._record(j, i), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test batched and repeated iteration through both files.
get_next = self.getNext(dataset_fn(test_filenames, 10, self._num_records))
for _ in range(10):
for j in range(self._num_files):
self.assertAllEqual(
[self._record(j, i) for i in range(self._num_records)],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testFixedLengthRecordDatasetNoCompression(self):
self._testFixedLengthRecordDataset()
def testFixedLengthRecordDatasetGzipCompression(self):
self._testFixedLengthRecordDataset(compression_type="GZIP")
def testFixedLengthRecordDatasetZlibCompression(self):
self._testFixedLengthRecordDataset(compression_type="ZLIB")
def testFixedLengthRecordDatasetBuffering(self):
test_filenames = self._createFiles()
dataset = readers.FixedLengthRecordDataset(
test_filenames,
self._record_bytes,
self._header_bytes,
self._footer_bytes,
buffer_size=10)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testFixedLengthRecordDatasetParallelRead(self):
test_filenames = self._createFiles()
dataset = readers.FixedLengthRecordDataset(
test_filenames,
self._record_bytes,
self._header_bytes,
self._footer_bytes,
buffer_size=10,
num_parallel_reads=4)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
self.assertDatasetProduces(dataset, expected_output=expected_output,
assert_items_equal=True)
def testFixedLengthRecordDatasetWrongSize(self):
test_filenames = self._createFiles()
dataset = readers.FixedLengthRecordDataset(
test_filenames,
self._record_bytes + 1, # Incorrect record length.
self._header_bytes,
self._footer_bytes,
buffer_size=10)
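    # Each file holds a 5-byte header, 7 records of 3 bytes, and a 2-byte
    # footer, so the body is 21 bytes; 21 is not a multiple of the incorrect
    # record length of 4, which should produce the error matched below.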
self.assertDatasetProduces(
dataset,
expected_error=(
errors.InvalidArgumentError,
r"Excluding the header \(5 bytes\) and footer \(2 bytes\), input "
r"file \".*fixed_length_record.0.txt\" has body length 21 bytes, "
r"which is not an exact multiple of the record length \(4 bytes\).")
)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/fixed_length_record_dataset_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.shard()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_v1_only("deprecated API, no eager or V2 test coverage")
class ShardTest(test_base.DatasetTestBase):
def testSimpleCase(self):
dataset = dataset_ops.Dataset.range(10).shard(5, 2)
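    # shard(5, 2) keeps the elements whose position modulo 5 equals 2, so only
    # 2 and 7 remain from range(10).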
self.assertDatasetProduces(dataset, expected_output=[2, 7])
def testNestedData(self):
dataset_a = dataset_ops.Dataset.range(10)
dataset_b = dataset_ops.Dataset.range(10, 0, -1)
dataset = dataset_ops.Dataset.zip((dataset_a, dataset_b)).shard(5, 2)
self.assertDatasetProduces(dataset, expected_output=[(2, 8), (7, 3)])
def testOffsetZero(self):
dataset = dataset_ops.Dataset.range(10).shard(5, 0)
self.assertDatasetProduces(dataset, expected_output=[0, 5])
def testOffsetGreaterNumShards(self):
with self.assertRaises(errors.InvalidArgumentError):
dataset = dataset_ops.Dataset.range(10).shard(5, 7)
self.evaluate(self.getNext(dataset)())
def testNegativeOffset(self):
with self.assertRaises(errors.InvalidArgumentError):
dataset = dataset_ops.Dataset.range(10).shard(5, -3)
self.evaluate(self.getNext(dataset)())
def testNegativeNumShards(self):
with self.assertRaises(errors.InvalidArgumentError):
dataset = dataset_ops.Dataset.range(10).shard(-3, 1)
self.evaluate(self.getNext(dataset)())
def testZeroNumShards(self):
with self.assertRaises(errors.InvalidArgumentError):
dataset = dataset_ops.Dataset.range(10).shard(0, 1)
self.evaluate(self.getNext(dataset)())
def testIteratorEndsBeforeFirstElem(self):
dataset = dataset_ops.Dataset.range(1).shard(5, 2)
self.assertDatasetProduces(dataset, expected_output=[])
def testLargerWorkerPool(self):
dataset = dataset_ops.Dataset.range(10).shard(7, 5)
self.assertDatasetProduces(dataset, expected_output=[5])
def testIndexEqualsNumShards(self):
dataset = dataset_ops.Dataset.range(10).shard(5, 4)
self.assertDatasetProduces(dataset, expected_output=[4, 9])
def testIndexEqualsNumShards2(self):
dataset = dataset_ops.Dataset.range(10).shard(4, 3)
self.assertDatasetProduces(dataset, expected_output=[3, 7])
def testNumShardsLargerThanDataset(self):
dataset = dataset_ops.Dataset.range(10).shard(20, 5)
self.assertDatasetProduces(dataset, expected_output=[5])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/shard_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Checkpoint tests for `tf.data.Dataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
class DatasetCheckpointTest(test_base.DatasetTestBase):
def tearDown(self):
# Remove all checkpoint files.
prefix = self._iterator_checkpoint_prefix()
pattern = prefix + "*"
files = gfile.Glob(pattern)
    # Note: `map` is lazy under Python 3, so remove each file explicitly.
    for f in files:
      gfile.Remove(f)
def _iterator_checkpoint_prefix(self):
return os.path.join(self.get_temp_dir(), "iterator")
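  # The helpers below checkpoint an iterator by hand: `serialize_iterator`
  # turns the iterator state into a variant tensor, which is serialized and
  # written to a file, and the restore path parses the file back into a
  # variant and feeds it to `deserialize_iterator`.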
def _save_op(self, iterator_resource):
iterator_state_variant = gen_dataset_ops.serialize_iterator(
iterator_resource)
save_op = io_ops.write_file(
self._iterator_checkpoint_prefix(),
parsing_ops.serialize_tensor(iterator_state_variant))
return save_op
def _restore_op(self, iterator_resource):
iterator_state_variant = parsing_ops.parse_tensor(
io_ops.read_file(self._iterator_checkpoint_prefix()), dtypes.variant)
restore_op = gen_dataset_ops.deserialize_iterator(iterator_resource,
iterator_state_variant)
return restore_op
def testSaveRestore(self):
def _build_graph(start, stop):
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(start, stop))
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = self._save_op(iterator._iterator_resource)
restore_op = self._restore_op(iterator._iterator_resource)
return init_op, get_next, save_op, restore_op
# Saving and restoring in different sessions.
start = 2
stop = 10
break_point = 5
with ops.Graph().as_default() as g:
init_op, get_next, save_op, _ = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
for i in range(start, break_point):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
with ops.Graph().as_default() as g:
init_op, get_next, _, restore_op = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(init_op)
sess.run(restore_op)
for i in range(break_point, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Saving and restoring in same session.
with ops.Graph().as_default() as g:
init_op, get_next, save_op, restore_op = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
for i in range(start, break_point):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
sess.run(restore_op)
for i in range(break_point, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testRestoreWithoutBuildingDatasetGraph(self):
def _build_graph(start, stop, num_epochs):
dataset = dataset_ops.Dataset.range(start, stop).repeat(num_epochs)
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = self._save_op(iterator._iterator_resource)
restore_op = self._restore_op(iterator._iterator_resource)
return init_op, get_next, save_op, restore_op
# Saving and restoring in different sessions.
start = 2
stop = 10
num_epochs = 5
break_point = 5
break_epoch = 3
with ops.Graph().as_default() as g:
init_op, get_next, save_op, _ = _build_graph(start, stop, num_epochs)
with self.session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
for _ in range(break_epoch):
for i in range(start, stop):
self.assertEqual(i, sess.run(get_next))
for i in range(start, break_point):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
with ops.Graph().as_default() as g:
# Create an empty IteratorResource and restore the Iterator into it.
output_types = dtypes.int64
output_shapes = tensor_shape.scalar()
iterator = iterator_ops.Iterator.from_structure(output_types,
output_shapes)
restore_op = self._restore_op(iterator._iterator_resource)
get_next = iterator.get_next()
with self.session(graph=g) as sess:
sess.run(restore_op)
for i in range(break_point, stop):
self.assertEqual(i, sess.run(get_next))
for _ in range(break_epoch + 1, num_epochs):
for i in range(start, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testRestoreInModifiedGraph(self):
def _build_graph(start, stop):
dataset = dataset_ops.Dataset.range(start, stop)
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = self._save_op(iterator._iterator_resource)
restore_op = self._restore_op(iterator._iterator_resource)
return init_op, get_next, save_op, restore_op
# Saving and restoring in different sessions.
start = 2
stop = 10
stop_1 = 8
break_point = 5
with ops.Graph().as_default() as g:
init_op, get_next, save_op, _ = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
for i in range(start, break_point):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
with ops.Graph().as_default() as g:
# Intentionally build a graph with a different value for stop to make sure
# the original dataset graph is actually getting loaded.
init_op, get_next, _, restore_op = _build_graph(start, stop_1)
with self.session(graph=g) as sess:
sess.run(restore_op)
for i in range(break_point, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testInitThenRestore(self):
# Note: Calling init_op before restore_op is redundant. This test just makes
# sure we do not fail if restore is called on an already initialized
# iterator resource.
def _build_graph(start, stop):
dataset = dataset_ops.Dataset.range(start, stop)
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = self._save_op(iterator._iterator_resource)
restore_op = self._restore_op(iterator._iterator_resource)
return init_op, get_next, save_op, restore_op
# Saving and restoring in different sessions.
start = 2
stop = 10
break_point = 5
with ops.Graph().as_default() as g:
init_op, get_next, save_op, _ = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
for i in range(start, break_point):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
with ops.Graph().as_default() as g:
init_op, get_next, _, restore_op = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(init_op)
sess.run(restore_op)
for i in range(break_point, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMultipleSaves(self):
def _build_graph(start, stop):
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(start, stop))
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = self._save_op(iterator._iterator_resource)
restore_op = self._restore_op(iterator._iterator_resource)
return init_op, get_next, save_op, restore_op
start = 2
stop = 10
break_point1 = 5
break_point2 = 7
with ops.Graph().as_default() as g:
init_op, get_next, save_op, _ = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
for i in range(start, break_point1):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
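    # Restore from the first checkpoint in a fresh graph, advance the iterator
    # to break_point2, and save a second checkpoint.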
with ops.Graph().as_default() as g:
init_op, get_next, save_op, restore_op = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(restore_op)
for i in range(break_point1, break_point2):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
break_point2 = 7
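    # Restore from the second checkpoint and consume the remaining elements.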
with ops.Graph().as_default() as g:
init_op, get_next, save_op, restore_op = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(restore_op)
for i in range(break_point2, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testSaveRestoreWithRepeat(self):
def _build_graph(start, stop, num_epochs):
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(start, stop).repeat(num_epochs))
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = self._save_op(iterator._iterator_resource)
restore_op = self._restore_op(iterator._iterator_resource)
return init_op, get_next, save_op, restore_op
start = 2
stop = 10
num_epochs = 5
break_range = 5
break_epoch = 3
with ops.Graph().as_default() as g:
init_op, get_next, save_op, restore_op = _build_graph(
start, stop, num_epochs)
with self.session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
        # Note: No checkpoint has been saved yet, so running `restore_op`
        # raises a NotFoundError.
with self.assertRaises(errors.NotFoundError):
sess.run(restore_op)
for _ in range(break_epoch - 1):
for i in range(start, stop):
self.assertEqual(i, sess.run(get_next))
for i in range(start, break_range):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
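    # Restore in a fresh graph: the iterator resumes mid-epoch at break_range
    # and then runs the remaining epochs to completion.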
with ops.Graph().as_default() as g:
init_op, get_next, _, restore_op = _build_graph(start, stop, num_epochs)
with self.session(graph=g) as sess:
sess.run(restore_op)
for i in range(break_range, stop):
self.assertEqual(i, sess.run(get_next))
for _ in range(break_epoch, num_epochs):
for i in range(start, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testSaveRestoreExhaustedIterator(self):
def _build_graph(start, stop, num_epochs):
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(start, stop).repeat(num_epochs))
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = self._save_op(iterator._iterator_resource)
restore_op = self._restore_op(iterator._iterator_resource)
return init_op, get_next, save_op, restore_op
start = 2
stop = 10
num_epochs = 5
with ops.Graph().as_default() as g:
init_op, get_next, save_op, restore_op = _build_graph(
start, stop, num_epochs)
with self.session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
        # Note: No checkpoint has been saved yet, so running `restore_op`
        # raises a NotFoundError.
with self.assertRaises(errors.NotFoundError):
sess.run(restore_op)
for _ in range(num_epochs):
for i in range(start, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
sess.run(save_op)
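    # Restoring the exhausted iterator in a fresh graph should immediately
    # raise OutOfRangeError.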
with ops.Graph().as_default() as g:
init_op, get_next, _, restore_op = _build_graph(start, stop, num_epochs)
with self.session(graph=g) as sess:
sess.run(restore_op)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/dataset_checkpoint_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Optional`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import optional_ops
from tensorflow.python.data.util import structure
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class OptionalTest(test_base.DatasetTestBase, parameterized.TestCase):
def testFromValue(self):
opt = optional_ops.Optional.from_value(constant_op.constant(37.0))
self.assertTrue(self.evaluate(opt.has_value()))
self.assertEqual(37.0, self.evaluate(opt.get_value()))
def testFromStructuredValue(self):
opt = optional_ops.Optional.from_value({
"a": constant_op.constant(37.0),
"b": (constant_op.constant(["Foo"]), constant_op.constant("Bar"))
})
self.assertTrue(self.evaluate(opt.has_value()))
self.assertEqual({
"a": 37.0,
"b": ([b"Foo"], b"Bar")
}, self.evaluate(opt.get_value()))
def testFromSparseTensor(self):
st_0 = sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0], dtype=np.int64),
dense_shape=np.array([1]))
st_1 = sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1]]),
values=np.array([-1., 1.], dtype=np.float32),
dense_shape=np.array([2, 2]))
opt = optional_ops.Optional.from_value((st_0, st_1))
self.assertTrue(self.evaluate(opt.has_value()))
val_0, val_1 = opt.get_value()
for expected, actual in [(st_0, val_0), (st_1, val_1)]:
self.assertAllEqual(expected.indices, self.evaluate(actual.indices))
self.assertAllEqual(expected.values, self.evaluate(actual.values))
self.assertAllEqual(expected.dense_shape,
self.evaluate(actual.dense_shape))
def testFromNone(self):
value_structure = structure.TensorStructure(dtypes.float32, [])
opt = optional_ops.Optional.none_from_structure(value_structure)
self.assertTrue(opt.value_structure.is_compatible_with(value_structure))
self.assertFalse(
opt.value_structure.is_compatible_with(
structure.TensorStructure(dtypes.float32, [1])))
self.assertFalse(
opt.value_structure.is_compatible_with(
structure.TensorStructure(dtypes.int32, [])))
self.assertFalse(self.evaluate(opt.has_value()))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(opt.get_value())
def testAddN(self):
devices = ["/cpu:0"]
if test_util.is_gpu_available():
devices.append("/gpu:0")
for device in devices:
with ops.device(device):
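        # `add_n` on optional variant tensors adds the wrapped values
        # pointwise; adding two value-less optionals yields a value-less
        # optional.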
# With value
opt1 = optional_ops.Optional.from_value((1.0, 2.0))
opt2 = optional_ops.Optional.from_value((3.0, 4.0))
add_tensor = math_ops.add_n([opt1._variant_tensor,
opt2._variant_tensor])
add_opt = optional_ops._OptionalImpl(add_tensor, opt1.value_structure)
self.assertAllEqual(self.evaluate(add_opt.get_value()), (4.0, 6.0))
# Without value
opt_none1 = optional_ops.Optional.none_from_structure(
opt1.value_structure)
opt_none2 = optional_ops.Optional.none_from_structure(
opt2.value_structure)
add_tensor = math_ops.add_n([opt_none1._variant_tensor,
opt_none2._variant_tensor])
add_opt = optional_ops._OptionalImpl(add_tensor,
opt_none1.value_structure)
self.assertFalse(self.evaluate(add_opt.has_value()))
def testNestedAddN(self):
devices = ["/cpu:0"]
if test_util.is_gpu_available():
devices.append("/gpu:0")
for device in devices:
with ops.device(device):
opt1 = optional_ops.Optional.from_value([1, 2.0])
opt2 = optional_ops.Optional.from_value([3, 4.0])
opt3 = optional_ops.Optional.from_value((5.0, opt1._variant_tensor))
opt4 = optional_ops.Optional.from_value((6.0, opt2._variant_tensor))
add_tensor = math_ops.add_n([opt3._variant_tensor,
opt4._variant_tensor])
add_opt = optional_ops._OptionalImpl(add_tensor, opt3.value_structure)
self.assertEqual(self.evaluate(add_opt.get_value()[0]), 11.0)
inner_add_opt = optional_ops._OptionalImpl(add_opt.get_value()[1],
opt1.value_structure)
self.assertAllEqual(inner_add_opt.get_value(), [4, 6.0])
def testZerosLike(self):
devices = ["/cpu:0"]
if test_util.is_gpu_available():
devices.append("/gpu:0")
for device in devices:
with ops.device(device):
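        # `zeros_like` on an optional variant zeroes out the wrapped value
        # while preserving whether or not the optional has a value.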
# With value
opt = optional_ops.Optional.from_value((1.0, 2.0))
zeros_tensor = array_ops.zeros_like(opt._variant_tensor)
zeros_opt = optional_ops._OptionalImpl(zeros_tensor,
opt.value_structure)
self.assertAllEqual(self.evaluate(zeros_opt.get_value()),
(0.0, 0.0))
# Without value
opt_none = optional_ops.Optional.none_from_structure(
opt.value_structure)
zeros_tensor = array_ops.zeros_like(opt_none._variant_tensor)
zeros_opt = optional_ops._OptionalImpl(zeros_tensor,
opt_none.value_structure)
self.assertFalse(self.evaluate(zeros_opt.has_value()))
def testNestedZerosLike(self):
devices = ["/cpu:0"]
if test_util.is_gpu_available():
devices.append("/gpu:0")
for device in devices:
with ops.device(device):
opt1 = optional_ops.Optional.from_value(1.0)
opt2 = optional_ops.Optional.from_value(opt1._variant_tensor)
zeros_tensor = array_ops.zeros_like(opt2._variant_tensor)
zeros_opt = optional_ops._OptionalImpl(zeros_tensor,
opt2.value_structure)
inner_zeros_opt = optional_ops._OptionalImpl(zeros_opt.get_value(),
opt1.value_structure)
self.assertEqual(self.evaluate(inner_zeros_opt.get_value()), 0.0)
def testCopyToGPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
with ops.device("/cpu:0"):
optional_with_value = optional_ops.Optional.from_value(
(constant_op.constant(37.0), constant_op.constant("Foo"),
constant_op.constant(42)))
optional_none = optional_ops.Optional.none_from_structure(
structure.TensorStructure(dtypes.float32, []))
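    # Copying the variant tensors to the GPU via `identity` should preserve
    # both the wrapped values and the value-less state of the optionals.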
with ops.device("/gpu:0"):
gpu_optional_with_value = optional_ops._OptionalImpl(
array_ops.identity(optional_with_value._variant_tensor),
optional_with_value.value_structure)
gpu_optional_none = optional_ops._OptionalImpl(
array_ops.identity(optional_none._variant_tensor),
optional_none.value_structure)
gpu_optional_with_value_has_value = gpu_optional_with_value.has_value()
gpu_optional_with_value_values = gpu_optional_with_value.get_value()
gpu_optional_none_has_value = gpu_optional_none.has_value()
self.assertTrue(self.evaluate(gpu_optional_with_value_has_value))
self.assertEqual((37.0, b"Foo", 42),
self.evaluate(gpu_optional_with_value_values))
self.assertFalse(self.evaluate(gpu_optional_none_has_value))
def testNestedCopyToGPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
with ops.device("/cpu:0"):
optional_with_value = optional_ops.Optional.from_value(
(constant_op.constant(37.0), constant_op.constant("Foo"),
constant_op.constant(42)))
optional_none = optional_ops.Optional.none_from_structure(
structure.TensorStructure(dtypes.float32, []))
nested_optional = optional_ops.Optional.from_value(
(optional_with_value._variant_tensor, optional_none._variant_tensor,
1.0))
with ops.device("/gpu:0"):
gpu_nested_optional = optional_ops._OptionalImpl(
array_ops.identity(nested_optional._variant_tensor),
nested_optional.value_structure)
gpu_nested_optional_has_value = gpu_nested_optional.has_value()
gpu_nested_optional_values = gpu_nested_optional.get_value()
self.assertTrue(self.evaluate(gpu_nested_optional_has_value))
inner_with_value = optional_ops._OptionalImpl(
gpu_nested_optional_values[0], optional_with_value.value_structure)
inner_none = optional_ops._OptionalImpl(
gpu_nested_optional_values[1], optional_none.value_structure)
self.assertEqual((37.0, b"Foo", 42),
self.evaluate(inner_with_value.get_value()))
self.assertFalse(self.evaluate(inner_none.has_value()))
self.assertEqual(1.0, self.evaluate(gpu_nested_optional_values[2]))
def _assertElementValueEqual(self, expected, actual):
if isinstance(expected, dict):
self.assertItemsEqual(list(expected.keys()), list(actual.keys()))
for k in expected.keys():
self._assertElementValueEqual(expected[k], actual[k])
elif isinstance(expected, sparse_tensor.SparseTensorValue):
self.assertAllEqual(expected.indices, actual.indices)
self.assertAllEqual(expected.values, actual.values)
self.assertAllEqual(expected.dense_shape, actual.dense_shape)
else:
self.assertAllEqual(expected, actual)
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
("Tensor", lambda: constant_op.constant(37.0),
structure.TensorStructure(dtypes.float32, [])),
("SparseTensor", lambda: sparse_tensor.SparseTensor(
indices=[[0, 1]],
values=constant_op.constant([0], dtype=dtypes.int32),
dense_shape=[10, 10]),
structure.SparseTensorStructure(dtypes.int32, [10, 10])),
("Nest", lambda: {
"a": constant_op.constant(37.0),
"b": (constant_op.constant(["Foo"]), constant_op.constant("Bar"))},
structure.NestedStructure({
"a": structure.TensorStructure(dtypes.float32, []),
"b": (structure.TensorStructure(dtypes.string, [1]),
structure.TensorStructure(dtypes.string, []))})),
("Optional", lambda: optional_ops.Optional.from_value(37.0),
optional_ops.OptionalStructure(
structure.TensorStructure(dtypes.float32, []))),
)
def testOptionalStructure(self, tf_value_fn, expected_value_structure):
tf_value = tf_value_fn()
opt = optional_ops.Optional.from_value(tf_value)
self.assertTrue(
expected_value_structure.is_compatible_with(opt.value_structure))
self.assertTrue(
opt.value_structure.is_compatible_with(expected_value_structure))
opt_structure = type_spec.type_spec_from_value(opt)
self.assertIsInstance(opt_structure, optional_ops.OptionalStructure)
self.assertTrue(opt_structure.is_compatible_with(opt_structure))
self.assertTrue(opt_structure._value_structure.is_compatible_with(
expected_value_structure))
self.assertEqual([dtypes.variant], opt_structure._flat_types)
self.assertEqual([tensor_shape.scalar()], opt_structure._flat_shapes)
    # An OptionalStructure is never compatible with the structure of a
    # non-optional value.
non_optional_structure = type_spec.type_spec_from_value(
constant_op.constant(42.0))
self.assertFalse(opt_structure.is_compatible_with(non_optional_structure))
# Assert that the optional survives a round-trip via _from_tensor_list()
# and _to_tensor_list().
round_trip_opt = opt_structure._from_tensor_list(
opt_structure._to_tensor_list(opt))
if isinstance(tf_value, optional_ops.Optional):
self._assertElementValueEqual(
self.evaluate(tf_value.get_value()),
self.evaluate(round_trip_opt.get_value().get_value()))
else:
self._assertElementValueEqual(
self.evaluate(tf_value),
self.evaluate(round_trip_opt.get_value()))
@parameterized.named_parameters(
("Tensor", np.array([1, 2, 3], dtype=np.int32),
lambda: constant_op.constant([4, 5, 6], dtype=dtypes.int32), True),
("SparseTensor", sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 1]],
values=np.array([-1., 1.], dtype=np.float32), dense_shape=[2, 2]),
lambda: sparse_tensor.SparseTensor(
indices=[[0, 1], [1, 0]], values=[37.0, 42.0], dense_shape=[2, 2]),
False),
("Nest", {"a": np.array([1, 2, 3], dtype=np.int32),
"b": sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 1]],
values=np.array([-1., 1.], dtype=np.float32),
dense_shape=[2, 2])},
lambda: {"a": constant_op.constant([4, 5, 6], dtype=dtypes.int32),
"b": sparse_tensor.SparseTensor(
indices=[[0, 1], [1, 0]], values=[37.0, 42.0],
dense_shape=[2, 2])}, False),
)
def testIteratorGetNextAsOptional(self, np_value, tf_value_fn,
works_on_gpu):
if not works_on_gpu and test.is_gpu_available():
self.skipTest("Test case not yet supported on GPU.")
ds = dataset_ops.Dataset.from_tensors(np_value).repeat(3)
if context.executing_eagerly():
iterator = dataset_ops.make_one_shot_iterator(ds)
# For each element of the dataset, assert that the optional evaluates to
# the expected value.
for _ in range(3):
next_elem = iterator_ops.get_next_as_optional(iterator)
self.assertIsInstance(next_elem, optional_ops.Optional)
self.assertTrue(
next_elem.value_structure.is_compatible_with(
type_spec.type_spec_from_value(tf_value_fn())))
self.assertTrue(next_elem.has_value())
self._assertElementValueEqual(np_value, next_elem.get_value())
# After exhausting the iterator, `next_elem.has_value()` will evaluate to
# false, and attempting to get the value will fail.
for _ in range(2):
next_elem = iterator_ops.get_next_as_optional(iterator)
self.assertFalse(self.evaluate(next_elem.has_value()))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(next_elem.get_value())
else:
iterator = dataset_ops.make_initializable_iterator(ds)
next_elem = iterator_ops.get_next_as_optional(iterator)
self.assertIsInstance(next_elem, optional_ops.Optional)
self.assertTrue(
next_elem.value_structure.is_compatible_with(
type_spec.type_spec_from_value(tf_value_fn())))
# Before initializing the iterator, evaluating the optional fails with
# a FailedPreconditionError. This is only relevant in graph mode.
elem_has_value_t = next_elem.has_value()
elem_value_t = next_elem.get_value()
with self.assertRaises(errors.FailedPreconditionError):
self.evaluate(elem_has_value_t)
with self.assertRaises(errors.FailedPreconditionError):
self.evaluate(elem_value_t)
# Now we initialize the iterator.
self.evaluate(iterator.initializer)
# For each element of the dataset, assert that the optional evaluates to
# the expected value.
for _ in range(3):
elem_has_value, elem_value = self.evaluate(
[elem_has_value_t, elem_value_t])
self.assertTrue(elem_has_value)
self._assertElementValueEqual(np_value, elem_value)
# After exhausting the iterator, `next_elem.has_value()` will evaluate to
# false, and attempting to get the value will fail.
for _ in range(2):
self.assertFalse(self.evaluate(elem_has_value_t))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(elem_value_t)
def testFunctionBoundaries(self):
@def_function.function
def get_optional():
x = constant_op.constant(1.0)
opt = optional_ops.Optional.from_value(x)
# TODO(skyewm): support returning Optionals from functions?
return opt._variant_tensor
# TODO(skyewm): support Optional arguments?
@def_function.function
def consume_optional(opt_tensor):
value_structure = structure.TensorStructure(dtypes.float32, [])
opt = optional_ops._OptionalImpl(opt_tensor, value_structure)
return opt.get_value()
opt_tensor = get_optional()
val = consume_optional(opt_tensor)
self.assertEqual(self.evaluate(val), 1.0)
def testLimitedRetracing(self):
trace_count = [0]
@def_function.function
def f(opt):
trace_count[0] += 1
return opt.get_value()
opt1 = optional_ops.Optional.from_value(constant_op.constant(37.0))
opt2 = optional_ops.Optional.from_value(constant_op.constant(42.0))
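    # Both optionals have the same value structure, so they map to the same
    # TypeSpec and `f` should be traced only once.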
for _ in range(10):
self.assertEqual(self.evaluate(f(opt1)), 37.0)
self.assertEqual(self.evaluate(f(opt2)), 42.0)
self.assertEqual(trace_count[0], 1)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/optional_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.map()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import threading
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.experimental.ops import threading_options
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
def _make_coordinated_sloppy_dataset(num_elements, num_parallel_calls):
"""Produces a dataset iterator and events to control the order of elements.
Args:
num_elements: the number of input elements
num_parallel_calls: the degree of map parallelism
Returns:
A dataset iterator (represented as `get_next` op) and events that can be
used to control the order of output elements.
"""
# Set up threading events used to sequence when items are produced that
# are subsequently interleaved. These events allow us to deterministically
# simulate slowdowns and force sloppiness.
coordination_events = {i: threading.Event() for i in range(num_elements)}
def map_py_fn(x):
coordination_events[x].wait()
coordination_events[x].clear()
return x * x
def map_fn(x):
return script_ops.py_func(map_py_fn, [x], x.dtype)
options = dataset_ops.Options()
options.experimental_deterministic = False
dataset = dataset_ops.Dataset.range(num_elements).map(
map_fn, num_parallel_calls).with_options(options)
return dataset, coordination_events
# TODO(jsimsa): Add tests for `map_with_legacy_function`.
@test_util.run_all_in_graph_and_eager_modes
class MapTest(test_base.DatasetTestBase, parameterized.TestCase):
def _buildMapDataset(self, components, count):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
dataset = dataset_ops.Dataset.from_tensor_slices(components).map(
_map_fn).repeat(count)
self.assertEqual(
[c.shape[1:] for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
return dataset
def testMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
# Test single-threaded access to the iterator.
get_next = self.getNext(self._buildMapDataset(components, 14))
for _ in range(14):
for i in range(7):
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# TODO(b/117581999): add eager coverage, different threads run in graph
# context.
@test_util.run_v1_only("b/120545219")
def testSkipEagerMapDatasetMultithreaded(self):
# Test multi-threaded access to the same iterator.
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
get_next = self.getNext(self._buildMapDataset(components, 18))
results = []
with self.cached_session() as sess:
def iterator_thread():
while True:
try:
results.append(sess.run(get_next()))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread) for _ in range(8)]
for t in threads:
t.start()
for t in threads:
t.join()
      # `results` will contain the elements of components**2, each repeated
      # 18 times, but in a non-deterministic order. Sort the results, and
      # assert that each element of components**2 is produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
def _buildParallelMapDataset(self, components, count, num_parallel_calls,
output_buffer_size):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
dataset = dataset_ops.Dataset.from_tensor_slices(components).map(
_map_fn, num_parallel_calls=num_parallel_calls).prefetch(
output_buffer_size).repeat(count)
self.assertEqual(
[c.shape[1:] for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
return dataset
def testParallelMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> ParallelMapDataset(square_3) ->
# RepeatDataset(count).
def do_test(num_parallel_calls, output_buffer_size):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
# Test single-threaded access to the iterator.
get_next = self.getNext(
self._buildParallelMapDataset(components, 14, num_parallel_calls,
output_buffer_size))
for _ in range(14):
for i in range(7):
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
for num_parallel_calls_val, output_buffer_size_val in [(1, 1), (1, 2), (2,
2),
(2, 4), (8, 8),
(8, 16)]:
do_test(num_parallel_calls_val, output_buffer_size_val)
# TODO(b/117581999): add eager coverage, different threads run in graph
# context.
@test_util.run_v1_only("b/120545219")
def testSkipEagerParallelMapDatasetMultithreaded(self):
def do_test(num_parallel_calls, output_buffer_size):
# Test multi-threaded access to the same iterator.
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
get_next = self.getNext(
self._buildParallelMapDataset(components, 18, num_parallel_calls,
output_buffer_size))
results = []
with self.cached_session() as sess:
def iterator_thread():
while True:
try:
results.append(sess.run(get_next()))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread)
for _ in range(64)]
for t in threads:
t.start()
for t in threads:
t.join()
        # `results` will contain the elements of components**2, each repeated
        # 18 times, but in a non-deterministic order. Sort the results, and
        # assert that each element of components**2 is produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
for num_parallel_calls_val, output_buffer_size_val in [
(1, 1), (1, 2), (2, 2), (2, 4), (8, 8), (8, 16)]:
do_test(num_parallel_calls_val, output_buffer_size_val)
def testImplicitDisposeParallelMapDataset(self):
# Tests whether a parallel map dataset will be cleaned up correctly when
# the pipeline does not run it until exhaustion.
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(1000).
components = (np.arange(1000),
np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
np.array(37.0) * np.arange(1000))
dataset = self._buildParallelMapDataset(components, 1000, 100, 100)
# NOTE(mrry): Also test that the prefetching thread is cancelled correctly.
dataset = dataset.prefetch(100)
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
def testParallelMapUnspecifiedOutputSize(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2))
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
def testParallelMapError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2))
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testPrefetchError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"))
.prefetch(2))
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testCaptureIterator(self):
def _build_ds(iterator):
def _map_fn(x):
get_next = iterator.get_next()
return x * get_next
return dataset_ops.Dataset.range(10).map(_map_fn)
def _build_graph():
if context.executing_eagerly():
captured_iterator = iter(dataset_ops.Dataset.range(10))
else:
captured_iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10))
ds = _build_ds(captured_iterator)
return captured_iterator, ds
captured_iter, ds = _build_graph()
if not context.executing_eagerly():
self.evaluate(captured_iter.initializer)
get_next = self.getNext(ds, requires_initialization=True)
for i in range(10):
self.assertEqual(i * i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testCaptureHashTable(self):
# NOTE(mrry): We must use the V2 variants of `HashTable`
# etc. because these produce a `tf.resource`-typed output that is
# compatible with the in-graph function implementation.
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
input_sentences = dataset_ops.Dataset.from_tensor_slices(
["brain brain tank salad surgery", "surgery brain"])
dataset = input_sentences.map(lambda x: string_ops.string_split([x]).values
).map(table.lookup)
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(table.initializer)
self.evaluate(get_next())
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@test_util.run_v1_only("b/123904513")
def testCaptureQueue(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
dataset = dataset_ops.Dataset.from_tensors(0).repeat(
-1).map(lambda _: queue.dequeue())
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(enqueue_op)
self.evaluate(close_op)
for element in elements:
self.assertEqual(element, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# TODO(b/117581999): Possible deadlock in eager mode, debug.
@test_util.run_v1_only("b/120545219")
def testSkipEagerCaptureSameResourceMultipleTimes(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
queue_2 = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
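    # Both FIFOQueues use the same shared_name, so they refer to a single
    # underlying queue resource; each map call therefore dequeues two
    # consecutive elements from that one queue.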
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
dataset = dataset_ops.Dataset.from_tensors(0).repeat(
-1).map(lambda _: (queue.dequeue(), queue_2.dequeue()))
self.evaluate(enqueue_op)
self.evaluate(close_op)
get_next = self.getNext(dataset, requires_initialization=True)
for i in range(100):
self.assertCountEqual([elements[i * 2], elements[i * 2 + 1]],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testSeededStatefulOperatorIsProperlyStateful(self):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(
10).map(lambda _: random_ops.random_uniform((), seed=11)).batch(2)
get_next = self.getNext(dataset, requires_initialization=True)
random_values = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values.extend(self.evaluate(get_next()))
self.assertLen(random_values, 10)
self.assertGreater(np.abs(np.diff(random_values)).max(), 1e-6)
get_next = self.getNext(dataset, requires_initialization=True)
random_values_2 = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values_2.extend(self.evaluate(get_next()))
    # Randomness is repeatable given the same seed.
self.assertAllClose(random_values, random_values_2)
def testStatefulMapKeepsStateAcrossIterators(self):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10).map(
lambda _: random_ops.random_uniform((), seed=11)).repeat(1000).batch(10)
get_next = self.getNext(dataset)
random_values = self.evaluate(get_next())
# Assert that one of the next 99 batches yielded by the iterator is
# different from the first.
i = 0
while i < 99:
if np.any(random_values != self.evaluate(get_next())):
break
i += 1
self.assertLess(i, 99)
def testStatefulOperationInShortCircuit(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
def increment_fn(x):
counter_var.assign_add(1)
return x
dataset = dataset_ops.Dataset.range(10).map(increment_fn)
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(counter_var.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(counter_var))
self.assertEqual(i, self.evaluate(get_next()))
self.assertEqual(10, self.evaluate(counter_var))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(10, self.evaluate(counter_var))
def testMapDict(self):
dataset = dataset_ops.Dataset.range(10).map(
lambda x: {"foo": x * 2, "bar": x**2}).map(
lambda d: d["foo"] + d["bar"])
self.assertDatasetProduces(
dataset, expected_output=[i * 2 + i**2 for i in range(10)])
def testMapNamedtuple(self, count=10):
# construct dataset of tuples
labels = dataset_ops.Dataset.range(count)
images = labels.map(lambda l: -l)
dataset_tuple = dataset_ops.Dataset.zip((labels, images))
# convert dataset of tuples to dataset of namedtuples
example = namedtuple("Example", ["label", "image"])
dataset_namedtuple = dataset_tuple.map(example)
def preprocess_tuple(label, image):
image = 2 * image
return label, image
def preprocess_namedtuple(example):
return example._replace(image=2 * example.image)
# preprocess both datasets
dataset_tuple = dataset_tuple.map(preprocess_tuple)
dataset_namedtuple = dataset_namedtuple.map(preprocess_namedtuple)
next_tuple = self.getNext(dataset_tuple)
next_namedtuple = self.getNext(dataset_namedtuple)
# make sure both datasets contain the same data
for i in range(count):
tuple_, namedtuple_ = self.evaluate([next_tuple(), next_namedtuple()])
self.assertEqual(tuple_, namedtuple_)
self.assertEqual(tuple_, (i, -2 * i))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_namedtuple())
def testUseStepContainerInMap(self):
row = np.arange(6)
dataset = dataset_ops.Dataset.from_tensors(
row).map(lambda elems: map_fn.map_fn(lambda x: x * x, elems))
self.assertDatasetProduces(dataset, expected_output=[row**2])
def testCaseAndCondInMap(self):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
def defaults_two():
return control_flow_ops.cond(
math_ops.equal(math_ops.mod(x, 2), 0),
multiply,
divide,
name="cond_mult")
pred_fn_pairs = {
math_ops.logical_or(math_ops.equal(y, 2), math_ops.equal(y, 3)):
defaults_two,
}
return control_flow_ops.case(
pred_fn_pairs, default=multiply, exclusive=True)
def build_dataset(row, num):
dataset = dataset_ops.Dataset.from_tensor_slices(
row).map(lambda x: control_map_fn(x, num))
return self.getNext(dataset)
row = np.arange(6)
for num in [2, 3, 4]:
get_next = build_dataset(row, num)
for i in range(6):
self.assertEqual(
(i // 2 if i % 2 else i * 2) if (num == 2 or num == 3) else i * 2,
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testCaseInWhileInMap(self):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
pred_fn_pairs = {
math_ops.logical_or(math_ops.equal(y, 2), math_ops.equal(y, 3)):
divide,
}
return control_flow_ops.case(
pred_fn_pairs, default=multiply, exclusive=True)
def build_dataset(row, num):
# pylint: disable=g-long-lambda
dataset = dataset_ops.Dataset.from_tensors(
row).map(lambda elems: map_fn.map_fn(
lambda x: control_map_fn(x, num), elems))
return self.getNext(dataset)
row = np.arange(6)
for num in [2, 3, 4]:
get_next = build_dataset(row, num)
self.assertAllEqual(
[x // 2 if (num == 2 or num == 3) else x * 2 for x in row],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testCaseAndCondInWhileInMap(self):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
def defaults_two():
return control_flow_ops.cond(
math_ops.equal(math_ops.mod(x, 2), 0),
multiply,
divide,
name="cond_mult")
pred_fn_pairs = {
math_ops.logical_or(math_ops.equal(y, 2), math_ops.equal(y, 3)):
defaults_two,
}
return control_flow_ops.case(
pred_fn_pairs, default=multiply, exclusive=True)
row = np.arange(6)
num = 2
# pylint: disable=g-long-lambda
dataset = dataset_ops.Dataset.from_tensors(
row).map(lambda elems: map_fn.map_fn(
lambda x: control_map_fn(x, num), elems))
# pylint: enable=g-long-lambda
get_next = self.getNext(dataset)
self.assertAllEqual([(x // 2 if x % 2 else x * 2) if
(num == 2 or num == 3) else x * 2 for x in row],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testNestedListMapDataset(self):
dataset = dataset_ops.Dataset.from_tensors(
[0, 1, 2]).repeat(10).map(lambda a: ([a[1], a[0] + a[2]], a[1]))
expected_output = [(np.array([1, 2]), 1)] * 10
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testPrefetch(self):
# We will use this event to test that `_map_py_func()` has been
# invoked a certain number of times (6 times, to be exact) after
# consuming fewer elements from the iterator.
ev = threading.Event()
set_event_during_invocation = 5
def _map_py_func(x):
if x == set_event_during_invocation:
ev.set()
return x * x
def _map_fn(x):
return script_ops.py_func(_map_py_func, [x], x.dtype)
def do_test(buffer_size):
dataset = dataset_ops.Dataset.range(100).map(_map_fn).prefetch(
buffer_size)
get_next = self.getNext(dataset)
# Simple test that prefetch yields the expected values in the
# expected order.
for i in range(100):
self.assertEqual(i * i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
for buffer_size in [1, 10, 100, 1000]:
do_test(buffer_size)
# We can indirectly observe that varying the buffer size has the
# intended effect by observing when `ev` is set (on the 6th
# invocation of `_map_py_func()`).
# NOTE(mrry): We do not test with `buffer_size ==
# set_event_during_invocation`, because we must consume at least
# one element to start the prefetching.
def do_test_ev(buffer_size):
dataset = dataset_ops.Dataset.range(100).map(_map_fn).prefetch(
buffer_size)
get_next = self.getNext(dataset)
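      # The prefetch buffer lets the producer run roughly `buffer_size`
      # elements ahead of the consumer, so the 6th invocation of
      # `_map_py_func()` (which sets `ev`) should be observable only after
      # consuming this many elements: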
event_will_be_set_after_consuming = (
set_event_during_invocation - buffer_size + 1)
ev.clear()
for i in range(event_will_be_set_after_consuming):
self.assertFalse(ev.is_set())
self.assertEqual(i * i, self.evaluate(get_next()))
ev.wait()
for i in range(event_will_be_set_after_consuming, 100):
self.assertEqual(i * i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
for buffer_size in range(1, set_event_during_invocation):
do_test_ev(buffer_size)
def testReturnList(self):
dataset = dataset_ops.Dataset.range(
10).map(lambda x: [x, constant_op.constant(37.0)])
self.assertDatasetProduces(
dataset, expected_output=[(i, 37.0) for i in range(10)])
def testMultiOutputPyFunc(self):
# The `tf.py_func()` op returns a list of tensors for its outputs.
def _map_fn(x_tensor):
def _map_py_func(x):
return x, np.array(37.0, dtype=np.float64)
return script_ops.py_func(
_map_py_func, [x_tensor], [dtypes.int64, dtypes.float64])
dataset = dataset_ops.Dataset.range(10).map(_map_fn)
self.assertDatasetProduces(
dataset, expected_output=[(i, 37.0) for i in range(10)])
def testSparse(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
dataset = dataset_ops.Dataset.range(10).map(_sparse)
self.assertDatasetProduces(
dataset, expected_output=[_sparse(i) for i in range(10)])
def testSparseChain(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
def _check(i):
self.assertTrue(sparse_tensor.is_sparse(i))
return sparse_ops.sparse_concat(0, [i, i])
dataset = dataset_ops.Dataset.range(10).map(_sparse).map(_check)
self.assertDatasetProduces(
dataset,
expected_output=[self.evaluate(_check(_sparse(i))) for i in range(10)])
def testTensorArray(self):
def _tensor_array(i):
i = math_ops.cast(i, dtypes.int32)
return (
tensor_array_ops.TensorArray(dtypes.int32, element_shape=(), size=i)
.unstack(math_ops.range(i, dtype=dtypes.int32)))
dataset = dataset_ops.Dataset.range(10).map(_tensor_array)
self.assertDatasetProduces(
dataset, expected_output=[list(range(i)) for i in range(10)])
def testTensorArrayChain(self):
def _tensor_array(i):
i = math_ops.cast(i, dtypes.int32)
return (
tensor_array_ops.TensorArray(dtypes.int32, element_shape=(), size=i)
.unstack(math_ops.range(i, dtype=dtypes.int32)))
def _check(x):
self.assertIsInstance(x, tensor_array_ops.TensorArray)
return x.identity()
dataset = dataset_ops.Dataset.range(10).map(_tensor_array).map(_check)
self.assertDatasetProduces(
dataset,
expected_output=[list(range(i)) for i in range(10)])
def testRagged(self):
def _ragged(i):
return ragged_tensor.RaggedTensor.from_tensor(i * [[1]])
dataset = dataset_ops.Dataset.range(5).map(_ragged)
self.assertDatasetProduces(
dataset,
expected_output=[ragged_factory_ops.constant([[i]]) for i in range(5)])
def testRaggedChain(self):
def _ragged(i):
return ragged_tensor.RaggedTensor.from_tensor(i * [[1]])
def _concat(i):
self.assertTrue(ragged_tensor.is_ragged(i))
return ragged_concat_ops.concat([i, i], 0)
dataset = dataset_ops.Dataset.range(10).map(_ragged).map(_concat)
self.assertDatasetProduces(
dataset,
expected_output=[
self.evaluate(_concat(ragged_factory_ops.constant([[i]])))
for i in range(10)
])
@test_util.run_v1_only("b/123904513")
def testParallelMapOutOfRangeError(self):
def raising_py_func(i):
if i == 100:
raise StopIteration()
else:
return i
dataset = dataset_ops.Dataset.range(105).map(
lambda x: script_ops.py_func(raising_py_func, [x], dtypes.int64),
num_parallel_calls=2)
get_next = self.getNext(dataset)
for i in range(100):
self.assertEqual(i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testConstantOutput(self):
dataset = dataset_ops.Dataset.range(10).map(lambda x: [x, "hello", 10])
self.assertDatasetProduces(dataset, [(i, b"hello", 10) for i in range(10)])
def testWarnOnLookupTable(self):
def collecting_function(x):
_ = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(["a"], [1.]), 0.0, name="t1")
return x
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map(collecting_function)
# NOTE(mrry): Python 3 prints other warnings in addition to the one we are
# testing, so we search for the expected warning.
self.assertGreaterEqual(len(w), 1)
found_warning = False
for warning in w:
if ("Creating resources inside a function passed to Dataset.map() is "
"not supported." in str(warning)):
found_warning = True
break
self.assertTrue(found_warning)
@test_util.run_v1_only("map_with_legacy_function v1 only")
def testWarnOnLookupTableLegacyFunction(self):
def collecting_function(x):
_ = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(["a"], [1.]), 0.0, name="t1")
return x
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map_with_legacy_function(
collecting_function)
# NOTE(mrry): Python 3 prints other warnings in addition to the one we are
# testing, so we search for the expected warning.
self.assertGreaterEqual(len(w), 1)
found_warning = False
for warning in w:
if ("Creating resources inside a function passed to Dataset.map() is "
"not supported." in str(warning)):
found_warning = True
break
self.assertTrue(found_warning)
def testWarnOnSeedFromOuterGraph(self):
with ops.Graph().as_default() as g:
g.seed = 10
warnings.simplefilter("always")
      # The map function doesn't use a seed, so no warning is generated.
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map(math_ops.square)
found_warning = False
for warning in w:
if ("Explicitly set the seed in the function if this is not the "
"intended behavior" in str(warning)):
found_warning = True
break
self.assertFalse(found_warning)
def random_func(x):
x = math_ops.add(x, 1)
random_ops.random_shuffle([x, math_ops.square(x)])
return x
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map(random_func)
self.assertGreaterEqual(len(w), 1)
found_warning = False
for warning in w:
if ("Explicitly set the seed in the function if this is not the "
"intended behavior" in str(warning)):
found_warning = True
break
self.assertTrue(found_warning)
def random_func_seeded(x):
ops.get_default_graph().seed = None
random_ops.random_shuffle(x)
return x
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).batch(2).map(random_func_seeded)
found_warning = False
for warning in w:
if ("Explicitly set the seed in the function if this is not the "
"intended behavior" in str(warning)):
found_warning = True
break
self.assertFalse(found_warning)
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).batch(
2).map(lambda x: random_ops.random_shuffle(x, seed=37))
found_warning = False
for warning in w:
if ("Explicitly set the seed in the function if this is not the "
"intended behavior" in str(warning)):
found_warning = True
break
self.assertFalse(found_warning)
def testNestedDatasetMap(self):
# TODO(b/110122868): When iterators can yield a `tf.data.Dataset`, remove
# the `get_single_element()` call.
dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0]).map(
dataset_ops.Dataset.from_tensor_slices).map(
lambda ds: ds.batch(3)).flat_map(lambda x: x)
self.assertDatasetProduces(dataset, expected_output=[[1.0, 2.0, 3.0]])
def testReturnValueError(self):
dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])
with self.assertRaisesRegexp(
TypeError, r"Unsupported return value from function passed to "
r"Dataset.map\(\): None."):
_ = dataset.map(lambda x: None)
def testBrokenFunctionErrorOnInitialization(self):
dataset = dataset_ops.Dataset.from_tensor_slices([1.0, 2.0, 3.0])
def broken_function(_):
"""A function deliberately designed to fail on instantiation."""
value = []
tensor_value = attr_value_pb2.AttrValue()
tensor_value.tensor.CopyFrom(
tensor_util.make_tensor_proto(
value, dtype=dtypes.float32, shape=[0], verify_shape=False))
dtype_value = attr_value_pb2.AttrValue(type=dtypes.int32.as_datatype_enum)
# Create a "Const" op with a `tf.float32` value and a `tf.int32` type
# attr.
const_tensor = ops.get_default_graph().create_op(
"Const", [], [dtypes.int32],
attrs={
"value": tensor_value,
"dtype": dtype_value
},
name="BrokenConst").outputs[0]
return const_tensor
dataset = dataset.map(broken_function)
self.assertDatasetProduces(
dataset, expected_error=(errors.InvalidArgumentError, "BrokenConst"))
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
("Map", lambda dataset, func:
dataset_ops.MapDataset(dataset, func, use_inter_op_parallelism=False)),
("ParallelMap", lambda dataset, func:
dataset_ops.ParallelMapDataset(dataset, func, num_parallel_calls=1,
use_inter_op_parallelism=False)),
)
def testNoInterOpParallelism(self, make_dataset_fn):
dataset = dataset_ops.Dataset.from_tensors(0)
def _get_tid():
return np.int64(threading.current_thread().ident)
def _map_fn(_):
tids = []
for _ in range(10):
tids.append(script_ops.py_func(_get_tid, [], dtypes.int64))
return tids
dataset = make_dataset_fn(dataset, _map_fn)
get_next = self.getNext(dataset)
tids = self.evaluate(get_next())
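    # With inter-op parallelism disabled, all ten py_func calls run on the
    # same thread, so every captured thread id should equal the first.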
self.assertTrue(all(tids[0] == tid for tid in tids))
# pylint: enable=g-long-lambda
@parameterized.named_parameters(
("SequentialIdentity", None, lambda x: x, None),
("SequentialReplicate", None, lambda x: (x, x), None),
("SequentialSwap", (None, None), lambda x, y: (y, x), None),
("SequentialProject", (None, None), lambda x, y: x, None),
("ParallelIdentity", None, lambda x: x, 10),
("ParallelReplicate", None, lambda x: (x, x), 10),
("ParallelSwap", (None, None), lambda x, y: (y, x), 10),
("ParallelProject", (None, None), lambda x, y: x, 10),
)
def testShortCircuit(self, structure, map_fn, num_parallel_calls):
dataset = self.structuredDataset(structure).repeat().map(
map_fn, num_parallel_calls=num_parallel_calls)
get_next = self.getNext(dataset)
if isinstance(structure, tuple):
expected = map_fn(*self.evaluate(self.structuredElement(structure)))
else:
expected = map_fn(self.evaluate(self.structuredElement(structure)))
self.assertEqual(expected, self.evaluate(get_next()))
@parameterized.named_parameters(
("Sequential", None),
("Parallel", 10),
)
def testShortCircuitCapturedInput(self, num_parallel_calls):
captured_t = variables.Variable(42)
dataset = self.structuredDataset(None).repeat().map(
lambda x: captured_t, num_parallel_calls=num_parallel_calls)
self.evaluate(variables.global_variables_initializer())
get_next = self.getNext(dataset, requires_initialization=True)
self.assertEqual(42, self.evaluate(get_next()))
@parameterized.named_parameters(
("1", 1, 1),
("2", 10, 1),
("3", 10, 10),
("4", 100, 1),
("5", 100, 10),
("6", 100, 100),
)
def testSloppyInterleaveInOrder(self, num_elements, num_parallel_calls):
dataset, coordination_events = _make_coordinated_sloppy_dataset(
num_elements, num_parallel_calls)
options = dataset_ops.Options()
options.experimental_threading = threading_options.ThreadingOptions()
options.experimental_threading.private_threadpool_size = (
num_parallel_calls + 1)
dataset = dataset.with_options(options)
get_next = self.getNext(dataset, requires_initialization=True)
for i in range(num_elements):
coordination_events[i].set()
self.assertEqual(i * i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@parameterized.named_parameters(
("1", 10, 10),
("2", 100, 10),
("3", 100, 100),
)
def testSloppyInterleaveOutOfOrder(self, num_elements, num_parallel_calls):
dataset, coordination_events = _make_coordinated_sloppy_dataset(
num_elements, num_parallel_calls)
options = dataset_ops.Options()
options.experimental_threading = threading_options.ThreadingOptions()
options.experimental_threading.private_threadpool_size = (
num_parallel_calls + 1)
dataset = dataset.with_options(options)
get_next = self.getNext(dataset, requires_initialization=True)
    elements = list(range(num_elements))
for i in [1, 4, 7]:
elements[i], elements[i + 1] = elements[i + 1], elements[i]
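    # Releasing the events in this permuted order forces the sloppy
    # (non-deterministic) map to produce its outputs out of order.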
for element in elements:
coordination_events[element].set()
self.assertEqual(element * element, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@parameterized.named_parameters(
("Map", None),
("ParallelMap", 12),
)
def testPreserveCardinality(self, num_parallel_calls):
def py_fn(_):
raise StopIteration()
dataset = dataset_ops.DatasetV2.from_tensors(0).map(
lambda x: script_ops.py_func(py_fn, [x], dtypes.int64),
num_parallel_calls=num_parallel_calls)
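    # `map` preserves the cardinality of its input, so a StopIteration raised
    # inside the map function surfaces as an InvalidArgumentError instead of
    # silently ending the dataset.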
get_next = self.getNext(dataset)
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
# NOTE: collection test is specific to graph mode only, no eager coverage.
@test_util.run_v1_only("graph specific test")
def testSkipEagerCollectionCopy(self):
w = variable_scope.get_variable("w", [])
self.assertIn(w, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
def func(x):
self.assertIn(w, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
return x
dataset = dataset_ops.Dataset.from_tensors(constant_op.constant(1.0))
dataset.map(func)
# TODO(shivaniagarwal): separate out `map` and `map_with_legacy_function` tests
# as the latter would not work in v2.
@test_util.run_all_in_graph_and_eager_modes
class MapWithCapturedVariableTests(test_base.DatasetTestBase,
parameterized.TestCase):
  # TODO(b/126553094): `map` doesn't work with a variable defined inside the
  # function in eager mode; Graph tensors may leak out of the function-building
  # context because variables are created in an init_scope.
@test_util.run_v1_only("b/126553094")
def testSkipEagerCreateVariableInsideFunctionWithGetter(self):
def func(_):
with variable_scope.variable_scope(
"variable", reuse=variable_scope.AUTO_REUSE):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
return counter_var.assign_add(1)
    # NOTE: In the legacy function, the variable getter captures the resource
    # by value.
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
with self.assertRaisesWithPredicateMatch(
AttributeError, "'Tensor' object has no attribute 'assign_add'"):
dataset.map_with_legacy_function(func)
dataset = dataset.map(func)
self.evaluate(variables.global_variables_initializer())
get_next = self.getNext(dataset, requires_initialization=True)
for i in range(10):
self.assertEqual(i + 1, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@parameterized.named_parameters(
("MapLegacyFunction",
lambda dataset, func: dataset.map_with_legacy_function(func)),
("Map", lambda dataset, func: dataset.map(func)),
)
@test_util.run_v1_only("map_with_legacy_function is only available in v1.")
def testCaptureVariable(self, transformation_function):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = transformation_function(
dataset, lambda _: counter_var.assign_add(1))
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(counter_var.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(counter_var))
self.assertEqual(i + 1, self.evaluate(get_next()))
self.assertEqual(10, self.evaluate(counter_var))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(10, self.evaluate(counter_var))
# NOTE: no need to explicitly initialize variables in eager mode.
@parameterized.named_parameters(
("MapLegacyFunction",
lambda dataset, func: dataset.map_with_legacy_function(func)),
("Map", lambda dataset, func: dataset.map(func)),
)
@test_util.run_v1_only("this test is meant to run in graph mode only.")
def testSkipEagerCaptureUninitializedVariableError(self,
transformation_function):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = transformation_function(
dataset, lambda _: counter_var.assign_add(1))
get_next = self.getNext(dataset, requires_initialization=True)
with self.assertRaises(errors.NotFoundError):
self.evaluate(get_next())
# TODO(b/121264236): add eager mode coverage when we have multi-device setup.
@parameterized.named_parameters(
("MapLegacyFunction",
lambda dataset, func: dataset.map_with_legacy_function(func)),
("Map", lambda dataset, func: dataset.map(func)),
)
@test_util.run_v1_only("b/121264236")
def testSkipEagerCaptureConstantsWithConflictingDevices(
self, transformation_function):
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.cached_session(config=config):
with ops.device("/device:CPU:0"):
a = constant_op.constant(3.0)
with ops.device("/device:CPU:1"):
b = constant_op.constant(5.0)
def func(_):
return math_ops.add(a, b)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = transformation_function(dataset, func)
expected_output = [8.0] * 10
self.assertDatasetProduces(dataset, expected_output=expected_output)
# TODO(b/121264236): add eager mode coverage when we have multi-device setup.
@test_util.run_v1_only("b/121264236")
def testSkipEagerRefVariablesWithMultipleDevices(self):
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.cached_session(config=config):
def func(_):
with ops.device("/device:CPU:0"):
a = variables.VariableV1(3.0)
with ops.device("/device:CPU:1"):
b = variables.VariableV1(5.0)
return math_ops.add(a, b)
# NOTE: Use the legacy function implementation, as the eager function would
# convert RefVariables to ResourceVariables.
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = dataset.map_with_legacy_function(func)
self.evaluate(variables.global_variables_initializer())
expected_output = [8.0] * 10
self.assertDatasetProduces(
dataset,
expected_output=expected_output,
requires_initialization=True)
# TODO(b/121264236): add eager mode coverage when we have multi-device setup.
@test_util.run_v1_only("b/121264236")
def testSkipEagerResourceVariablesWithMultipleDevices(self):
config = config_pb2.ConfigProto(device_count={"CPU": 3})
def func(_):
with variable_scope.variable_scope(
"variable", reuse=variable_scope.AUTO_REUSE):
with ops.device("/device:CPU:0"):
a_var = variable_scope.get_variable(
"a", (), dtypes.int32, use_resource=True)
a_var = math_ops.add(a_var, 1)
with ops.device("/device:CPU:1"):
b_var = variable_scope.get_variable(
"b", (), dtypes.int32, use_resource=True)
return math_ops.add(a_var, b_var)
g_1 = ops.Graph()
with self.session(config=config, graph=g_1):
# The MapDataset node ends up with two ResourceVariable inputs, one on
# device CPU:0 and the other on device CPU:1.
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = dataset.map(func)
self.evaluate(variables.global_variables_initializer())
expected_output = [1] * 10
self.assertDatasetProduces(
dataset,
expected_output=expected_output,
requires_initialization=True)
g_2 = ops.Graph()
with self.session(config=config, graph=g_2):
# In the legacy Defun, the variable is captured by value, hence there is no
# colocation error.
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = dataset.map_with_legacy_function(func)
self.evaluate(variables.global_variables_initializer())
expected_output = [1] * 10
self.assertDatasetProduces(
dataset,
expected_output=expected_output,
requires_initialization=True)
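# Illustrative note (not part of the original test): with `map`, the MapDataset
# node receives the ResourceVariable handles on CPU:0 and CPU:1 as inputs, so
# both devices feed the same dataset op; with `map_with_legacy_function`, the
# variables are captured by value when the legacy Defun is traced, so no
# cross-device colocation constraint is created.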
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/map_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.from_tensors()."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class FromTensorsTest(test_base.DatasetTestBase):
def testFromTensors(self):
"""Test a dataset that represents a single tuple of tensors."""
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEqual(
[c.shape for c in components],
nest.flatten(dataset_ops.get_legacy_output_shapes(dataset)))
self.assertDatasetProduces(dataset, expected_output=[components])
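# Illustrative sketch (not part of the original test): `from_tensors` wraps the
# components into a single element, whereas `from_tensor_slices` slices them
# along the first dimension.
#
#   dataset_ops.Dataset.from_tensors([1, 2, 3])        # one element: [1, 2, 3]
#   dataset_ops.Dataset.from_tensor_slices([1, 2, 3])  # three elements: 1, 2, 3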
def testFromTensorsDataset(self):
"""Test a dataset that represents a dataset."""
dataset = dataset_ops.Dataset.from_tensors(dataset_ops.Dataset.range(10))
dataset = dataset.flat_map(lambda x: x)
self.assertDatasetProduces(dataset, expected_output=range(10))
def testFromTensorsTensorArray(self):
"""Test a dataset that represents a TensorArray."""
components = (
tensor_array_ops.TensorArray(dtypes.float32, element_shape=(), size=2)
.unstack([1.0, 2.0]))
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertDatasetProduces(
dataset, expected_output=[[1.0, 2.0]], requires_initialization=True)
def testFromTensorsSparse(self):
"""Test a dataset that represents a single tuple of tensors."""
components = (sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1]]),
values=np.array([-1, 1]),
dense_shape=np.array([2, 2])))
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEqual(
[tensor_shape.TensorShape(c.dense_shape) for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
self.assertDatasetProduces(dataset, expected_output=[components])
def testFromTensorsMixed(self):
"""Test an dataset that represents a single tuple of tensors."""
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0),
sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1]]),
values=np.array([-1, 1]),
dense_shape=np.array([2, 2])))
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEqual([
tensor_shape.TensorShape(c.dense_shape)
if sparse_tensor.is_sparse(c) else c.shape for c in components
], [shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
self.assertDatasetProduces(dataset, expected_output=[components])
def testFromTensorsRagged(self):
components = (
ragged_factory_ops.constant_value([[[0]], [[1]], [[2]]]),
ragged_factory_ops.constant_value([[[3]], [[4]], [[5]]]),
)
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertDatasetProduces(dataset, expected_output=[components])
def testFromTensorsMixedRagged(self):
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0),
sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1]]),
values=np.array([-1, 1]),
dense_shape=np.array([2, 2])),
ragged_factory_ops.constant_value([[[0]], [[1]], [[2]]]))
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertDatasetProduces(dataset, expected_output=[components])
# pylint: disable=g-long-lambda,unnecessary-lambda
def testNestedStructure(self):
components = (np.array([1, 2, 3], dtype=np.int64),
(np.array([4., 5.]), np.array([6., 7.])),
np.array([8, 9, 10], dtype=np.int64))
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEqual((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64),
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual(([3], ([2], [2]), [3]),
dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.shuffle(10, 10)
self.assertEqual((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64),
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual(([3], ([2], [2]), [3]),
dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.repeat(-1)
self.assertEqual((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64),
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual(([3], ([2], [2]), [3]),
dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.filter(lambda x, y, z: True)
self.assertEqual((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64),
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual(([3], ([2], [2]), [3]),
dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.take(5)
self.assertEqual((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64),
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual(([3], ([2], [2]), [3]),
dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.map(lambda x, y, z: ((x, z), (y[0], y[1])))
self.assertEqual(((dtypes.int64, dtypes.int64),
(dtypes.float64, dtypes.float64)),
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual((([3], [3]), ([2], [2])),
dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.flat_map(
lambda x, y: dataset_ops.Dataset.from_tensors(((x[0], x[1]),
(y[0], y[1])))
)
self.assertEqual(((dtypes.int64, dtypes.int64),
(dtypes.float64, dtypes.float64)),
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual((([3], [3]), ([2], [2])),
dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.batch(32)
self.assertEqual(((dtypes.int64, dtypes.int64),
(dtypes.float64, dtypes.float64)),
dataset_ops.get_legacy_output_types(dataset))
dataset_output_shapes = dataset_ops.get_legacy_output_shapes(dataset)
self.assertEqual((([None, 3], [None, 3]), ([None, 2], [None, 2])),
nest.pack_sequence_as(dataset_output_shapes, [
s.as_list()
for s in nest.flatten(dataset_output_shapes)
]))
# Define a separate set of components with matching leading
# dimension for the from-slices constructor.
components_for_slices = (np.array([1, 2, 3], dtype=np.int64),
(np.array([4., 5., 6.]), np.array([7., 8., 9.])),
np.array([10, 11, 12], dtype=np.int64))
dataset = dataset_ops.Dataset.from_tensor_slices(components_for_slices)
self.assertEqual((dtypes.int64,
(dtypes.float64, dtypes.float64), dtypes.int64),
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual(([], ([], []), []),
dataset_ops.get_legacy_output_shapes(dataset))
# TODO(b/117581999): more specific shapes in eager mode.
@test_util.run_deprecated_v1
def testSkipEagerNestedStructure(self):
components = (np.array([1, 2, 3], dtype=np.int64), (np.array([4., 5.]),
np.array([6., 7.])),
np.array([8, 9, 10], dtype=np.int64))
dataset = dataset_ops.Dataset.from_tensors(components)
dataset = dataset.map(lambda x, y, z: ((x, z), (y[0], y[1])))
dataset = dataset.flat_map(
lambda x, y: dataset_ops.Dataset.from_tensors(
((x[0], x[1]), (y[0], y[1])))).batch(32)
get_next = self.getNext(dataset)
(w, x), (y, z) = get_next()
self.assertEqual(dtypes.int64, w.dtype)
self.assertEqual(dtypes.int64, x.dtype)
self.assertEqual(dtypes.float64, y.dtype)
self.assertEqual(dtypes.float64, z.dtype)
self.assertEqual([None, 3], w.shape.as_list())
self.assertEqual([None, 3], x.shape.as_list())
self.assertEqual([None, 2], y.shape.as_list())
self.assertEqual([None, 2], z.shape.as_list())
get_next = self.getNext(dataset)
(w, x), (y, z) = get_next()
self.assertEqual(dtypes.int64, w.dtype)
self.assertEqual(dtypes.int64, x.dtype)
self.assertEqual(dtypes.float64, y.dtype)
self.assertEqual(dtypes.float64, z.dtype)
self.assertEqual([None, 3], w.shape.as_list())
self.assertEqual([None, 3], x.shape.as_list())
self.assertEqual([None, 2], y.shape.as_list())
self.assertEqual([None, 2], z.shape.as_list())
def testNestedDict(self):
components = {"a": {"aa": 1, "ab": [2.0, 2.0]}, "b": [3, 3, 3]}
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEqual(dtypes.int32,
dataset_ops.get_legacy_output_types(dataset)["a"]["aa"])
self.assertEqual(dtypes.float32,
dataset_ops.get_legacy_output_types(dataset)["a"]["ab"])
self.assertEqual(dtypes.int32,
dataset_ops.get_legacy_output_types(dataset)["b"])
self.assertEqual([],
dataset_ops.get_legacy_output_shapes(dataset)["a"]["aa"])
self.assertEqual([2],
dataset_ops.get_legacy_output_shapes(dataset)["a"]["ab"])
self.assertEqual([3],
dataset_ops.get_legacy_output_shapes(dataset)["b"])
def testNonSequenceNestedStructure(self):
components = np.array([1, 2, 3], dtype=np.int64)
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEqual(dtypes.int64,
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual([3], dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.filter(
lambda x: math_ops.reduce_all(math_ops.equal(x, components)))
self.assertEqual(dtypes.int64,
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual([3], dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.map(lambda x: array_ops.stack([x, x]))
self.assertEqual(dtypes.int64,
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual([2, 3], dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.flat_map(
lambda x: dataset_ops.Dataset.from_tensor_slices(x))
self.assertEqual(dtypes.int64,
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual([3], dataset_ops.get_legacy_output_shapes(dataset))
get_next = self.getNext(dataset)
self.assertEqual(dtypes.int64, get_next().dtype)
self.assertEqual([3], get_next().shape)
# TODO(b/121264236): needs mechanism for multiple device in eager mode.
def testSkipEagerSplitPipeline(self):
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
dataset = dataset_ops.Dataset.from_tensors(0)
# Define a pipeline that attempts to use variables on two
# different devices.
#
# Initialize the variables before creating the iterator, to avoid the
# placement algorithm overriding the DT_RESOURCE colocation constraints.
with ops.device("/cpu:0"):
var_0 = resource_variable_ops.ResourceVariable(initial_value=1)
dataset = dataset.map(lambda x: x + var_0.read_value())
sess.run(var_0.initializer)
with ops.device("/cpu:1"):
var_1 = resource_variable_ops.ResourceVariable(initial_value=1)
dataset = dataset.map(lambda x: x + var_1.read_value())
sess.run(var_1.initializer)
iterator = dataset_ops.make_initializable_iterator(dataset)
sess.run(iterator.initializer)
self.assertEqual(sess.run(iterator.get_next()), 2)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/from_tensors_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Iterator` using distributed sessions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import lookup as lookup_ops
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class IteratorClusterTest(test.TestCase):
@test_util.run_v1_only("b/120545219")
def testRemoteIteratorWithoutRemoteCallFail(self):
worker_config = config_pb2.ConfigProto()
worker_config.device_count["CPU"] = 2
worker, _ = test_util.create_local_cluster(
1, 1, worker_config=worker_config)
with ops.device("/job:worker/replica:0/task:0/cpu:1"):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3)
iterator_3_handle = iterator_3.string_handle()
with ops.device("/job:worker/replica:0/task:0/cpu:0"):
remote_it = iterator_ops.Iterator.from_string_handle(
iterator_3_handle, dataset_ops.get_legacy_output_types(dataset_3),
dataset_ops.get_legacy_output_shapes(dataset_3))
get_next_op = remote_it.get_next()
with session.Session(worker[0].target) as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next_op)
def _testRemoteIteratorHelper(self, device0, device1, target):
with ops.device(device1):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3)
iterator_3_handle = iterator_3.string_handle()
@function.Defun(dtypes.string)
def _remote_fn(h):
remote_iterator = iterator_ops.Iterator.from_string_handle(
h, dataset_ops.get_legacy_output_types(dataset_3),
dataset_ops.get_legacy_output_shapes(dataset_3))
return remote_iterator.get_next()
with ops.device(device0):
target_placeholder = array_ops.placeholder(dtypes.string, shape=[])
remote_op = functional_ops.remote_call(
args=[iterator_3_handle],
Tout=[dtypes.int32],
f=_remote_fn,
target=target_placeholder)
with session.Session(target) as sess:
elem = sess.run(remote_op, feed_dict={target_placeholder: device1})
self.assertEqual(elem, [1])
# Fails when target is cpu:0 where the resource is not located.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(remote_op, feed_dict={target_placeholder: device0})
elem = sess.run(iterator_3.get_next())
self.assertEqual(elem, [2])
elem = sess.run(remote_op, feed_dict={target_placeholder: device1})
self.assertEqual(elem, [3])
with self.assertRaises(errors.OutOfRangeError):
sess.run(remote_op, feed_dict={target_placeholder: device1})
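# Illustrative sketch (not part of the original test): the pattern above is to
# create the iterator on the device that owns the data, export it as a string
# handle, and fetch elements remotely with `remote_call`, which must target the
# device that actually holds the iterator resource.
#
#   remote_op = functional_ops.remote_call(
#       args=[iterator_3_handle], Tout=[dtypes.int32], f=_remote_fn,
#       target=target_placeholder)   # feed target_placeholder with device1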
@test_util.run_v1_only("b/120545219")
def testRemoteIteratorUsingRemoteCallOp(self):
worker_config = config_pb2.ConfigProto()
worker_config.device_count["CPU"] = 2
worker, _ = test_util.create_local_cluster(
1, 1, worker_config=worker_config)
self._testRemoteIteratorHelper("/job:worker/replica:0/task:0/cpu:0",
"/job:worker/replica:0/task:0/cpu:1",
worker[0].target)
@test_util.run_v1_only("b/120545219")
def testRemoteIteratorUsingRemoteCallOpCrossProcess(self):
workers, _ = test_util.create_local_cluster(2, 1)
self._testRemoteIteratorHelper("/job:worker/replica:0/task:0/cpu:0",
"/job:worker/replica:0/task:1/cpu:0",
workers[0].target)
@test_util.run_v1_only("b/120545219")
def testCaptureHashTableInSharedIterator(self):
worker, _ = test_util.create_local_cluster(1, 1)
# NOTE(mrry): We must use the V2 variants of `HashTable`
# etc. because these produce a `tf.resource`-typed output that is
# compatible with the in-graph function implementation.
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(keys, values),
default_val,
shared_name="shared_table")
input_sentences = dataset_ops.Dataset.from_tensor_slices(
["brain brain tank salad surgery", "surgery brain"])
iterator = (
input_sentences.map(lambda x: string_ops.string_split([x]).values).map(
table.lookup)
.make_initializable_iterator(shared_name="shared_iterator"))
init_op = iterator.initializer
get_next = iterator.get_next()
with session.Session(worker[0].target) as sess:
sess.run(table.initializer)
sess.run(init_op)
self.assertAllEqual([0, 0, -1, 1, 2], sess.run(get_next))
with session.Session(worker[0].target) as sess:
self.assertAllEqual([2, 0], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@test_util.run_v1_only("b/120545219")
def testImplicitDisposeParallelMapDataset(self):
# Tests whether a parallel map dataset will be cleaned up correctly when
# the pipeline does not run it until exhaustion.
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(None) -> PrefetchDataset(100).
worker, _ = test_util.create_local_cluster(1, 1)
components = (np.arange(1000),
np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
np.array(37.0) * np.arange(1000))
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
dataset = (
dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(None).prefetch(10000))
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
with session.Session(worker[0].target) as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/iterator_cluster_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Options`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import optimization_options
from tensorflow.python.data.experimental.ops import stats_options
from tensorflow.python.data.experimental.ops import threading_options
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class OptionsTest(test_base.DatasetTestBase):
def testOptionsDefault(self):
ds = dataset_ops.Dataset.range(0)
self.assertEqual(dataset_ops.Options(), ds.options())
def testOptionsOnce(self):
options = dataset_ops.Options()
ds = dataset_ops.Dataset.range(0).with_options(options).cache()
self.assertEqual(options, ds.options())
def testOptionsTwiceSame(self):
options = dataset_ops.Options()
options.experimental_optimization.autotune = True
ds = dataset_ops.Dataset.range(0).with_options(options).with_options(
options)
self.assertEqual(options, ds.options())
def testOptionsTwiceDifferent(self):
options1 = dataset_ops.Options()
options1.experimental_optimization.autotune = True
options2 = dataset_ops.Options()
options2.experimental_deterministic = False
ds = dataset_ops.Dataset.range(0).with_options(options1).with_options(
options2)
self.assertTrue(ds.options().experimental_optimization.autotune)
# Explicitly check that the flag is False, since assertFalse also accepts None.
self.assertIs(ds.options().experimental_deterministic, False)
def testOptionsTwiceDifferentError(self):
options1 = dataset_ops.Options()
options1.experimental_optimization.autotune = True
options2 = dataset_ops.Options()
options2.experimental_optimization.autotune = False
with self.assertRaisesRegexp(ValueError,
"Cannot merge incompatible values"):
dataset_ops.Dataset.range(0).with_options(options1).with_options(options2)
def testOptionsMergeOptionsFromMultipleInputs(self):
options1 = dataset_ops.Options()
options1.experimental_optimization.autotune = True
options2 = dataset_ops.Options()
options2.experimental_deterministic = True
ds = dataset_ops.Dataset.zip(
(dataset_ops.Dataset.range(0).with_options(options1),
dataset_ops.Dataset.range(0).with_options(options2)))
self.assertTrue(ds.options().experimental_optimization.autotune)
self.assertTrue(ds.options().experimental_deterministic)
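# Illustrative sketch (not part of the original test): options applied at
# different points of a pipeline, or on the inputs of a multi-input dataset
# such as `zip`, are merged; only options set to conflicting values fail, as
# testOptionsTwiceDifferentError shows.
#
#   ds = dataset_ops.Dataset.range(10).with_options(options1)
#   ds = ds.with_options(options2)  # fine as long as the two do not conflict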
def testOptionsHaveDefaults(self):
options1 = dataset_ops.Options()
options2 = dataset_ops.Options()
self.assertIsNot(options1.experimental_optimization,
options2.experimental_optimization)
self.assertIsNot(options1.experimental_stats,
options2.experimental_stats)
self.assertIsNot(options1.experimental_threading,
options2.experimental_threading)
self.assertEquals(options1.experimental_optimization,
optimization_options.OptimizationOptions())
self.assertEquals(options1.experimental_stats,
stats_options.StatsOptions())
self.assertEquals(options1.experimental_threading,
threading_options.ThreadingOptions())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/options_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.zip()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ZipTest(test_base.DatasetTestBase):
def testZipDataset(self):
def dataset_fn(components):
datasets = tuple([
dataset_ops.Dataset.from_tensor_slices(component)
for component in components
])
return dataset_ops.Dataset.zip(datasets)
equal_length_components = [
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 22),
np.array([37.0, 38.0, 39.0, 40.0])
]
get_next = self.getNext(dataset_fn(equal_length_components))
for i in range(4):
results = self.evaluate(get_next())
for component, result_component in zip(equal_length_components, results):
self.assertAllEqual(component[i], result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
variable_length_components = [[1, 2, 3, 4], [1, 2, 3, 4, 5], [1.0, 2.0]]
get_next = self.getNext(dataset_fn(variable_length_components))
for i in range(2):
results = self.evaluate(get_next())
for component, result_component in zip(variable_length_components,
results):
self.assertAllEqual(component[i], result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
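# Illustrative sketch (not part of the original test): as exercised with
# `variable_length_components` above, `Dataset.zip` stops as soon as the
# shortest input is exhausted, analogous to Python's built-in zip().
#
#   a = dataset_ops.Dataset.range(4)
#   b = dataset_ops.Dataset.range(2)
#   dataset_ops.Dataset.zip((a, b))  # produces exactly two elements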
def testNestedZipDataset(self):
equal_length_components = [
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 22),
np.array([37.0, 38.0, 39.0, 40.0])
]
datasets = [
dataset_ops.Dataset.from_tensor_slices(component)
for component in equal_length_components
]
dataset = dataset_ops.Dataset.zip((datasets[0], (datasets[1], datasets[2])))
self.assertEqual(
dataset_ops.get_legacy_output_shapes(dataset),
(tensor_shape.TensorShape([20]),
(tensor_shape.TensorShape([22]), tensor_shape.TensorShape([]))))
get_next = self.getNext(dataset)
for i in range(4):
result1, (result2, result3) = self.evaluate(get_next())
self.assertAllEqual(equal_length_components[0][i], result1)
self.assertAllEqual(equal_length_components[1][i], result2)
self.assertAllEqual(equal_length_components[2][i], result3)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/zip_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.TFRecordDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import python_io
from tensorflow.python.platform import test
from tensorflow.python.util import compat
@test_util.run_all_in_graph_and_eager_modes
class TFRecordDatasetTest(test_base.DatasetTestBase):
def setUp(self):
super(TFRecordDatasetTest, self).setUp()
self._num_files = 2
self._num_records = 7
self.test_filenames = self._createFiles()
def dataset_fn(self,
filenames,
compression_type="",
num_epochs=1,
batch_size=None):
repeat_dataset = readers.TFRecordDataset(
filenames, compression_type).repeat(num_epochs)
if batch_size:
return repeat_dataset.batch(batch_size)
return repeat_dataset
def _record(self, f, r):
return compat.as_bytes("Record %d of file %d" % (r, f))
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = python_io.TFRecordWriter(fn)
for j in range(self._num_records):
writer.write(self._record(i, j))
writer.close()
return filenames
def testReadOneEpoch(self):
# Basic test: read from file 0.
dataset = self.dataset_fn(self.test_filenames[0])
self.assertDatasetProduces(
dataset,
expected_output=[self._record(0, i) for i in range(self._num_records)])
# Basic test: read from file 1.
dataset = self.dataset_fn(self.test_filenames[1])
self.assertDatasetProduces(
dataset,
expected_output=[self._record(1, i) for i in range(self._num_records)])
# Basic test: read from both files.
dataset = self.dataset_fn(self.test_filenames)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testReadTenEpochs(self):
dataset = self.dataset_fn(self.test_filenames, num_epochs=10)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
self.assertDatasetProduces(dataset, expected_output=expected_output * 10)
def testReadTenEpochsOfBatches(self):
dataset = self.dataset_fn(
self.test_filenames, num_epochs=10, batch_size=self._num_records)
expected_output = []
for j in range(self._num_files):
expected_output.append(
[self._record(j, i) for i in range(self._num_records)])
self.assertDatasetProduces(dataset, expected_output=expected_output * 10)
def testReadZlibFiles(self):
zlib_files = []
for i, fn in enumerate(self.test_filenames):
with open(fn, "rb") as f:
cdata = zlib.compress(f.read())
zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
with open(zfn, "wb") as f:
f.write(cdata)
zlib_files.append(zfn)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
dataset = self.dataset_fn(zlib_files, compression_type="ZLIB")
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testReadGzipFiles(self):
gzip_files = []
for i, fn in enumerate(self.test_filenames):
with open(fn, "rb") as f:
gzfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
with gzip.GzipFile(gzfn, "wb") as gzf:
gzf.write(f.read())
gzip_files.append(gzfn)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
dataset = self.dataset_fn(gzip_files, compression_type="GZIP")
self.assertDatasetProduces(dataset, expected_output=expected_output)
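# Illustrative sketch (not part of the original test): the `compression_type`
# passed to `TFRecordDataset` must match how the files were written; the tests
# above compress plain record files after the fact with zlib and gzip.
#
#   readers.TFRecordDataset(plain_files)                           # no compression
#   readers.TFRecordDataset(zlib_files, compression_type="ZLIB")
#   readers.TFRecordDataset(gzip_files, compression_type="GZIP")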
def testReadWithBuffer(self):
one_mebibyte = 2**20
dataset = readers.TFRecordDataset(
self.test_filenames, buffer_size=one_mebibyte)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testReadFromDatasetOfFiles(self):
files = dataset_ops.Dataset.from_tensor_slices(self.test_filenames)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
dataset = readers.TFRecordDataset(files)
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testReadTenEpochsFromDatasetOfFilesInParallel(self):
files = dataset_ops.Dataset.from_tensor_slices(
self.test_filenames).repeat(10)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
dataset = readers.TFRecordDataset(files, num_parallel_reads=4)
self.assertDatasetProduces(
dataset, expected_output=expected_output * 10, assert_items_equal=True)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/tf_record_dataset_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.skip()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class SkipTest(test_base.DatasetTestBase):
def testSkipTensorDataset(self):
components = (np.arange(10),)
def do_test(count):
dataset = dataset_ops.Dataset.from_tensor_slices(components).skip(count)
self.assertEqual(
[c.shape[1:] for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
start_range = min(count, 10) if count != -1 else 10
self.assertDatasetProduces(
dataset,
[tuple(components[0][i:i + 1]) for i in range(start_range, 10)])
# Skip fewer than the input size: we should skip
# the first 4 elements and then read the rest.
do_test(4)
# Skip more than input size: get nothing.
do_test(25)
# Skip exactly input size.
do_test(10)
# Set -1 for 'count': skip the entire dataset.
do_test(-1)
# Skip nothing
do_test(0)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/skip_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.filter_with_legacy_function()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.kernel_tests import filter_test_base
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_v1_only("filter_with_legacy_function only available in TF 1.x")
class FilterWithLegacyFunctionTest(filter_test_base.FilterTestBase):
def apply_filter(self, input_dataset, predicate):
return input_dataset.filter_with_legacy_function(predicate)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/filter_with_legacy_function_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.data.Dataset.from_generator()."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class DatasetConstructorTest(test_base.DatasetTestBase):
def _testFromGenerator(self, generator, elem_sequence, num_repeats,
output_types=None):
if output_types is None:
output_types = dtypes.int64
dataset = dataset_ops.Dataset.from_generator(
generator, output_types=output_types).repeat(num_repeats).prefetch(5)
self.assertDatasetProduces(
dataset,
elem_sequence * num_repeats,
requires_initialization=True,
num_test_iterations=2)
def _testFromGeneratorOneShot(self, generator, elem_sequence, num_repeats):
dataset = dataset_ops.Dataset.from_generator(
generator, output_types=dtypes.int64).repeat(num_repeats).prefetch(5)
self.assertDatasetProduces(
dataset, elem_sequence * num_repeats, num_test_iterations=2)
def testFromGeneratorUsingFunction(self):
def generator():
for i in range(1, 100):
yield [i] * i
elem_sequence = list(generator())
self._testFromGenerator(generator, elem_sequence, 1)
self._testFromGenerator(generator, elem_sequence, 5)
self._testFromGeneratorOneShot(generator, elem_sequence, 1)
self._testFromGeneratorOneShot(generator, elem_sequence, 5)
def testFromGeneratorUsingList(self):
generator = lambda: [[i] * i for i in range(1, 100)]
elem_sequence = list(generator())
self._testFromGenerator(generator, elem_sequence, 1)
self._testFromGenerator(generator, elem_sequence, 5)
def testFromGeneratorUsingNdarray(self):
generator = lambda: np.arange(100, dtype=np.int64)
elem_sequence = list(generator())
self._testFromGenerator(generator, elem_sequence, 1, output_types=np.int64)
self._testFromGenerator(generator, elem_sequence, 5, output_types=np.int64)
def testFromGeneratorUsingGeneratorExpression(self):
# NOTE(mrry): Generator *expressions* are not repeatable (or in
# general reusable), because they eagerly evaluate the `for`
# expression as `iter(range(1, 100))` and discard the means of
# reconstructing `range(1, 100)`. Wrapping the generator
# expression in a `lambda` makes it repeatable.
generator = lambda: ([i] * i for i in range(1, 100))
elem_sequence = list(generator())
self._testFromGenerator(generator, elem_sequence, 1)
self._testFromGenerator(generator, elem_sequence, 5)
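# Illustrative sketch (not part of the original test): wrapping the generator
# expression in a lambda, as above, lets `from_generator` build a fresh
# generator for every repeat, whereas a bare generator expression would be
# exhausted after one pass.
#
#   gen_fn = lambda: ([i] * i for i in range(1, 100))  # repeatable factory
#   dataset_ops.Dataset.from_generator(gen_fn, output_types=dtypes.int64)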
def testFromMultipleConcurrentGenerators(self):
num_inner_repeats = 5
num_outer_repeats = 100
def generator():
for i in range(1, 10):
yield ([i] * i, [i, i ** 2, i ** 3])
input_list = list(generator())
# The interleave transformation is essentially a flat map that
# draws from multiple input datasets concurrently (in a cyclic
# fashion). By placing `Dataset.from_generator()` inside an
# interleave, we test its behavior when multiple iterators are
# active at the same time; by additionally prefetching inside the
# interleave, we create the possibility of parallel (modulo GIL)
# invocations to several iterators created by the same dataset.
def interleave_fn(_):
return (dataset_ops.Dataset.from_generator(
generator, output_types=(dtypes.int64, dtypes.int64),
output_shapes=([None], [3]))
.repeat(num_inner_repeats).prefetch(5))
dataset = dataset_ops.Dataset.range(num_outer_repeats).interleave(
interleave_fn, cycle_length=10, block_length=len(input_list))
get_next = self.getNext(dataset)
for _ in range(num_inner_repeats * num_outer_repeats):
for elem in input_list:
val0, val1 = self.evaluate(get_next())
self.assertAllEqual(elem[0], val0)
self.assertAllEqual(elem[1], val1)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# TODO(b/67868766): Reenable this when the source of flakiness is discovered.
def _testFromGeneratorsRunningInParallel(self):
num_parallel_iterators = 3
# Define shared state that multiple iterator instances will access to
# demonstrate their concurrent activity.
lock = threading.Lock()
condition = threading.Condition(lock)
next_ticket = [0] # GUARDED_BY(lock)
def generator():
# NOTE(mrry): We yield one element before the barrier, because
# the current implementation of `Dataset.interleave()` must
# fetch one element from each incoming dataset to start the
# prefetching.
yield 0
# Define a barrier that `num_parallel_iterators` iterators must enter
# before any can proceed. Demonstrates that multiple iterators may be
# active at the same time.
condition.acquire()
ticket = next_ticket[0]
next_ticket[0] += 1
if ticket == num_parallel_iterators - 1:
# The last iterator to join the barrier notifies the others.
condition.notify_all()
else:
# Wait until the last iterator enters the barrier.
while next_ticket[0] < num_parallel_iterators:
condition.wait()
condition.release()
yield 1
# As in `testFromMultipleConcurrentGenerators()`, we use a combination of
# `Dataset.interleave()` and `Dataset.prefetch()` to cause multiple
# iterators to be active concurrently.
def interleave_fn(_):
return dataset_ops.Dataset.from_generator(
generator, output_types=dtypes.int64, output_shapes=[]).prefetch(2)
dataset = dataset_ops.Dataset.range(num_parallel_iterators).interleave(
interleave_fn, cycle_length=num_parallel_iterators, block_length=1)
get_next = self.getNext(dataset)
for elem in [0, 1]:
for _ in range(num_parallel_iterators):
self.assertAllEqual(elem, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testFromGeneratorImplicitConversion(self):
def generator():
yield [1]
yield [2]
yield [3]
for dtype in [dtypes.int8, dtypes.int32, dtypes.int64]:
dataset = dataset_ops.Dataset.from_generator(
generator, output_types=dtype, output_shapes=[1])
get_next = self.getNext(dataset)
for expected in [[1], [2], [3]]:
next_val = self.evaluate(get_next())
self.assertEqual(dtype.as_numpy_dtype, next_val.dtype)
self.assertAllEqual(expected, next_val)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testFromGeneratorString(self):
def generator():
yield "foo"
yield b"bar"
yield u"baz"
dataset = dataset_ops.Dataset.from_generator(
generator, output_types=dtypes.string, output_shapes=[])
self.assertDatasetProduces(
dataset, expected_output=[b"foo", b"bar", b"baz"])
def testFromGeneratorTypeError(self):
def generator():
yield np.array([1, 2, 3], dtype=np.int64)
yield np.array([4, 5, 6], dtype=np.int64)
yield "ERROR"
yield np.array([7, 8, 9], dtype=np.int64)
dataset = dataset_ops.Dataset.from_generator(
generator, output_types=dtypes.int64, output_shapes=[3])
get_next = self.getNext(dataset)
self.assertAllEqual([1, 2, 3], self.evaluate(get_next()))
self.assertAllEqual([4, 5, 6], self.evaluate(get_next()))
with self.assertRaisesOpError("The expected type was int64"):
self.evaluate(get_next())
self.assertAllEqual([7, 8, 9], self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testFromGeneratorShapeError(self):
def generator():
yield np.array([1, 2, 3], dtype=np.int64)
yield np.array([4, 5, 6], dtype=np.int64)
yield np.array([7, 8, 9, 10], dtype=np.int64)
yield np.array([11, 12, 13], dtype=np.int64)
dataset = dataset_ops.Dataset.from_generator(
generator, output_types=dtypes.int64, output_shapes=[3])
get_next = self.getNext(dataset)
self.assertAllEqual([1, 2, 3], self.evaluate(get_next()))
self.assertAllEqual([4, 5, 6], self.evaluate(get_next()))
with self.assertRaisesOpError(r"element of shape \(3,\) was expected"):
self.evaluate(get_next())
self.assertAllEqual([11, 12, 13], self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testFromGeneratorStructureError(self):
def generator():
yield 1, 2
yield 3, 4
yield 5
yield 6, 7, 8
yield 9, 10
dataset = dataset_ops.Dataset.from_generator(
generator, output_types=(dtypes.int64, dtypes.int64))
get_next = self.getNext(dataset)
self.assertEqual((1, 2), self.evaluate(get_next()))
self.assertEqual((3, 4), self.evaluate(get_next()))
with self.assertRaisesOpError(
r"The expected structure was \(tf\.int64, tf\.int64\)"):
self.evaluate(get_next())
with self.assertRaisesOpError(
r"The expected structure was \(tf\.int64, tf\.int64\)"):
self.evaluate(get_next())
self.assertEqual((9, 10), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
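# Illustrative note (not part of the original tests): as the type, shape and
# structure tests above show, an element that does not match the declared
# output signature makes that particular get_next() call fail with an op-level
# error, but the generator itself is not torn down; subsequent elements can
# still be retrieved until the generator is exhausted.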
def testFromGeneratorHeterogeneous(self):
def generator():
yield 1
yield [2, 3]
dataset = dataset_ops.Dataset.from_generator(
generator, output_types=dtypes.int64)
self.assertDatasetProduces(dataset, expected_output=[1, [2, 3]])
def testFromGeneratorStopShort(self):
def generator():
yield 0
yield 1
yield 2
dataset = dataset_ops.Dataset.from_generator(
generator, output_types=dtypes.int64)
get_next = self.getNext(dataset)
self.assertAllEqual(0, self.evaluate(get_next()))
self.assertAllEqual(1, self.evaluate(get_next()))
def testFromGeneratorDestructorCalled(self):
# Use an `Event` to signal that the generator has been deleted.
event = threading.Event()
class GeneratorWrapper(object):
def __iter__(self):
return self
def next(self):
return self.__next__()
def __next__(self):
return 42
def __del__(self):
event.set()
dataset = dataset_ops.Dataset.from_generator(
GeneratorWrapper, output_types=dtypes.int64).take(2)
get_next = self.getNext(dataset)
self.assertAllEqual(42, self.evaluate(get_next()))
self.assertAllEqual(42, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `GeneratorWrapper` object is destroyed when the
# iterator terminates (and the generator iterator is deleted).
self.assertTrue(event.is_set())
def testFromGeneratorWithArgs(self):
def flat_map_fn(elem):
def generator_with_arg(n):
for _ in range(n):
yield np.array(n, dtype=np.int64)
return dataset_ops.Dataset.from_generator(
generator_with_arg, output_types=dtypes.int64, output_shapes=(),
args=(elem,))
dataset = dataset_ops.Dataset.range(5).flat_map(flat_map_fn)
self.assertDatasetProduces(
dataset, expected_output=[1, 2, 2, 3, 3, 3, 4, 4, 4, 4])
def testFromGeneratorWithTwoArgs(self):
def flat_map_fn(elem, message):
def generator_with_arg(n, msg):
for i in range(n):
yield i, msg
return dataset_ops.Dataset.from_generator(
generator_with_arg, output_types=(dtypes.int64, dtypes.string),
output_shapes=((), ()), args=(elem, message))
dataset = dataset_ops.Dataset.zip(
(dataset_ops.Dataset.range(5),
dataset_ops.Dataset.from_tensors("Hi!").repeat(None)
)).flat_map(flat_map_fn)
self.assertDatasetProduces(
dataset,
expected_output=[(0, b"Hi!"), (0, b"Hi!"), (1, b"Hi!"), (0, b"Hi!"),
(1, b"Hi!"), (2, b"Hi!"), (0, b"Hi!"), (1, b"Hi!"),
(2, b"Hi!"), (3, b"Hi!")])
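# Illustrative sketch (not part of the original test): the tensors passed via
# `args` are evaluated and handed to the generator callable as concrete
# (NumPy) values each time a generator is created, which is what makes
# `from_generator` usable inside `flat_map` as above.
#
#   dataset_ops.Dataset.from_generator(
#       generator_with_arg, output_types=(dtypes.int64, dtypes.string),
#       output_shapes=((), ()), args=(elem, message))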
def testGeneratorDatasetFinalizeFunctionCalled(self):
# NOTE(mrry): This test exercises the internal `_GeneratorDataset`,
# which affords more control over what the finalize function can do than
# the `Dataset.from_generator()` wrapper.
# Use an `Event` to signal that the finalize function has been called.
event = threading.Event()
def finalize_fn(_):
def finalize_py_func():
event.set()
return 0
return script_ops.py_func(finalize_py_func, [], [dtypes.int64],
stateful=True)
dummy = constant_op.constant(37)
dataset = dataset_ops._GeneratorDataset(dummy, lambda x: x, lambda x: x,
finalize_fn).take(2)
get_next = self.getNext(dataset)
self.assertAllEqual(37, self.evaluate(get_next()))
self.assertAllEqual(37, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertTrue(event.is_set())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/from_generator_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utilities for tf.data functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python import tf2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import test
class DatasetTestBase(ragged_test_util.RaggedTensorTestCase, test.TestCase):
"""Base class for dataset tests."""
@classmethod
def setUpClass(cls):
if tf2.enabled():
dataset_ops.Dataset = dataset_ops.DatasetV2
else:
dataset_ops.Dataset = dataset_ops.DatasetV1
def assert_op_cancelled(self, op):
with self.assertRaisesRegexp(errors.CancelledError, "was cancelled"):
self.evaluate(op)
def assertSparseValuesEqual(self, a, b):
"""Asserts that two SparseTensors/SparseTensorValues are equal."""
self.assertAllEqual(a.indices, b.indices)
self.assertAllEqual(a.values, b.values)
self.assertAllEqual(a.dense_shape, b.dense_shape)
def getNext(self, dataset, requires_initialization=False):
"""Returns a callable that returns the next element of the dataset.
Example use:
```python
# In both graph and eager modes
dataset = ...
get_next = self.getNext(dataset)
result = self.evaluate(get_next())
```
Args:
dataset: A dataset whose elements will be returned.
requires_initialization: Indicates that when the test is executed in graph
mode, it should use an initializable iterator to iterate through the
dataset (e.g. when it contains stateful nodes). Defaults to False.
Returns:
A callable that returns the next element of `dataset`. Any `TensorArray`
objects `dataset` outputs are stacked.
"""
def ta_wrapper(gn):
def _wrapper():
r = gn()
if isinstance(r, tensor_array_ops.TensorArray):
return r.stack()
else:
return r
return _wrapper
if context.executing_eagerly():
iterator = iter(dataset)
return ta_wrapper(iterator._next_internal) # pylint: disable=protected-access
else:
if requires_initialization:
iterator = dataset_ops.make_initializable_iterator(dataset)
self.evaluate(iterator.initializer)
else:
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next()
return ta_wrapper(lambda: get_next)
def _compareOutputToExpected(self, result_values, expected_values,
assert_items_equal):
if assert_items_equal:
# TODO(shivaniagrawal): add support for nested elements containing sparse
# tensors when needed.
self.assertItemsEqual(result_values, expected_values)
return
for i in range(len(result_values)):
nest.assert_same_structure(result_values[i], expected_values[i])
for result_value, expected_value in zip(
nest.flatten(result_values[i]), nest.flatten(expected_values[i])):
if sparse_tensor.is_sparse(result_value):
self.assertSparseValuesEqual(result_value, expected_value)
elif ragged_tensor.is_ragged(result_value):
self.assertRaggedEqual(result_value, expected_value)
else:
self.assertAllEqual(
result_value,
expected_value,
msg=("Result value: {}. Expected value: {}"
.format(result_value, expected_value)))
def assertDatasetProduces(self,
dataset,
expected_output=None,
expected_shapes=None,
expected_error=None,
requires_initialization=False,
num_test_iterations=1,
assert_items_equal=False,
expected_error_iter=1):
"""Asserts that a dataset produces the expected output / error.
Args:
dataset: A dataset to check for the expected output / error.
expected_output: A list of elements that the dataset is expected to
produce.
expected_shapes: A list of TensorShapes which is expected to match
output_shapes of dataset.
expected_error: A tuple `(type, predicate)` identifying the expected error
`dataset` should raise. The `type` should match the expected exception
type, while `predicate` should either be 1) a unary function that inputs
the raised exception and returns a boolean indicator of success or 2) a
regular expression that is expected to match the error message
partially.
requires_initialization: Indicates that when the test is executed in graph
mode, it should use an initializable iterator to iterate through the
dataset (e.g. when it contains stateful nodes). Defaults to False.
num_test_iterations: Number of times `dataset` will be iterated. Defaults
to 1.
assert_items_equal: Tests expected_output has (only) the same elements
regardless of order.
expected_error_iter: How many times to iterate before expecting an error,
if an error is expected.
"""
    self.assertTrue(
        expected_error is not None or expected_output is not None,
        "Exactly one of expected_output or expected_error should be provided.")
if expected_error:
self.assertTrue(
expected_output is None,
"Exactly one of expected_output or expected error should be provided."
)
with self.assertRaisesWithPredicateMatch(expected_error[0],
expected_error[1]):
get_next = self.getNext(
dataset, requires_initialization=requires_initialization)
for _ in range(expected_error_iter):
self.evaluate(get_next())
return
if expected_shapes:
self.assertEqual(expected_shapes,
dataset_ops.get_legacy_output_shapes(dataset))
self.assertGreater(num_test_iterations, 0)
for _ in range(num_test_iterations):
get_next = self.getNext(
dataset, requires_initialization=requires_initialization)
result = []
for _ in range(len(expected_output)):
result.append(self.evaluate(get_next()))
self._compareOutputToExpected(result, expected_output, assert_items_equal)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def assertDatasetsEqual(self, dataset1, dataset2):
"""Checks that datasets are equal. Supports both graph and eager mode."""
self.assertTrue(dataset_ops.get_structure(dataset1).is_compatible_with(
dataset_ops.get_structure(dataset2)))
self.assertTrue(dataset_ops.get_structure(dataset2).is_compatible_with(
dataset_ops.get_structure(dataset1)))
flattened_types = nest.flatten(
dataset_ops.get_legacy_output_types(dataset1))
next1 = self.getNext(dataset1)
next2 = self.getNext(dataset2)
while True:
try:
op1 = self.evaluate(next1())
except errors.OutOfRangeError:
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next2())
break
op2 = self.evaluate(next2())
op1 = nest.flatten(op1)
op2 = nest.flatten(op2)
assert len(op1) == len(op2)
for i in range(len(op1)):
if sparse_tensor.is_sparse(op1[i]):
self.assertSparseValuesEqual(op1[i], op2[i])
elif ragged_tensor.is_ragged(op1[i]):
self.assertRaggedEqual(op1[i], op2[i])
elif flattened_types[i] == dtypes.string:
self.assertAllEqual(op1[i], op2[i])
else:
self.assertAllClose(op1[i], op2[i])
def assertDatasetsRaiseSameError(self,
dataset1,
dataset2,
exception_class,
replacements=None):
"""Checks that datasets raise the same error on the first get_next call."""
if replacements is None:
replacements = []
next1 = self.getNext(dataset1)
next2 = self.getNext(dataset2)
try:
self.evaluate(next1())
raise ValueError(
"Expected dataset to raise an error of type %s, but it did not." %
repr(exception_class))
except exception_class as e:
expected_message = e.message
for old, new, count in replacements:
expected_message = expected_message.replace(old, new, count)
      # Check that the first segment of the error messages is the same.
with self.assertRaisesRegexp(exception_class,
re.escape(expected_message)):
self.evaluate(next2())
def structuredDataset(self, structure, shape=None, dtype=dtypes.int64):
"""Returns a singleton dataset with the given structure."""
if shape is None:
shape = []
if structure is None:
return dataset_ops.Dataset.from_tensors(
array_ops.zeros(shape, dtype=dtype))
else:
return dataset_ops.Dataset.zip(
tuple([
self.structuredDataset(substructure, shape, dtype)
for substructure in structure
]))
def structuredElement(self, structure, shape=None, dtype=dtypes.int64):
"""Returns an element with the given structure."""
if shape is None:
shape = []
if structure is None:
return array_ops.zeros(shape, dtype=dtype)
else:
return tuple([
self.structuredElement(substructure, shape, dtype)
for substructure in structure
])
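# A minimal usage sketch, not part of the original file: a hypothetical test
# case built on the helpers above (`getNext`, `assertDatasetProduces`); the
# class and method names are illustrative only.
class _ExampleDatasetTest(DatasetTestBase):
  def testRangeProducesExpectedValues(self):
    dataset = dataset_ops.Dataset.range(3)
    # Checks the produced values in order and that iterating further raises
    # `errors.OutOfRangeError`.
    self.assertDatasetProduces(dataset, expected_output=[0, 1, 2])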
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/test_base.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.MultiDeviceIterator`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import multi_device_iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class MultiDeviceIteratorTest(test_base.DatasetTestBase,
parameterized.TestCase):
@parameterized.parameters(0, 1, 42,)
@test_util.run_v1_only("b/121264236")
def testInitOnly(self, num_inits):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"])
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.test_session(config=config):
for _ in range(num_inits):
self.evaluate(multi_device_iterator.initializer)
@test_util.run_v1_only("b/121264236")
def testBasic(self):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"])
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.test_session(config=config):
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@test_util.run_v1_only("b/121264236")
def testOneOnSameDevice(self):
with ops.device("/cpu:0"):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:0", "/cpu:1"])
config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=config):
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@test_util.run_v1_only("b/121264236")
def testRepeatDevices(self):
with ops.device("/cpu:0"):
dataset = dataset_ops.Dataset.range(20)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2", "/cpu:1", "/cpu:2"])
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.test_session(config=config):
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 20, 4):
elements = multi_device_iterator.get_next()
elem_on_1, elem_on_2, elem_on_3, elem_on_4 = elements
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
self.assertEqual(i + 2, self.evaluate(elem_on_3))
self.assertEqual(i + 3, self.evaluate(elem_on_4))
with self.assertRaises(errors.OutOfRangeError):
elements = multi_device_iterator.get_next()
elem_on_1, elem_on_2, elem_on_3, elem_on_4 = elements
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
self.evaluate(elem_on_3)
self.evaluate(elem_on_4)
@test_util.run_v1_only("b/121264236")
def testNotFullyDivisible(self):
dataset = dataset_ops.Dataset.range(9)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"])
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.test_session(config=config):
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 8, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
elem_on_1 = multi_device_iterator.get_next("/cpu:1")
self.assertEqual(8, self.evaluate(elem_on_1))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@test_util.run_v1_only("b/121264236")
def testGetNextAsOptional(self):
if context.executing_eagerly():
return
dataset = dataset_ops.Dataset.range(9)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"])
elem_on_1, elem_on_2 = multi_device_iterator.get_next_as_optional()
elem_on_1_has_value_t = elem_on_1.has_value()
elem_on_1_t = elem_on_1.get_value()
elem_on_2_has_value_t = elem_on_2.has_value()
elem_on_2_t = elem_on_2.get_value()
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.test_session(config=config) as sess:
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 8, 2):
elem_on_1_has_value, elem_on_1_value = sess.run(
[elem_on_1_has_value_t, elem_on_1_t])
self.assertTrue(elem_on_1_has_value)
self.assertEqual(i, elem_on_1_value)
elem_on_2_has_value, elem_on_2_value = sess.run(
[elem_on_2_has_value_t, elem_on_2_t])
self.assertTrue(elem_on_2_has_value)
self.assertEqual(i + 1, elem_on_2_value)
elem_on_1_has_value, elem_on_1_value = sess.run(
[elem_on_1_has_value_t, elem_on_1_t])
self.assertTrue(elem_on_1_has_value)
self.assertEqual(8, elem_on_1_value)
self.assertFalse(self.evaluate(elem_on_1_has_value_t))
self.assertFalse(self.evaluate(elem_on_2_has_value_t))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(elem_on_1_t)
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(elem_on_2_t)
@test_util.run_v1_only("b/121264236")
def testUneven(self):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"], max_buffer_size=4)
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.test_session(config=config):
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1 = multi_device_iterator.get_next("/cpu:1")
self.assertEqual(i, self.evaluate(elem_on_1))
for i in range(0, 10, 2):
elem_on_2 = multi_device_iterator.get_next("/cpu:2")
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@test_util.run_v1_only("b/121264236")
def testMultipleInitializationsGraph(self):
if context.executing_eagerly():
return
with ops.device("/cpu:0"):
epoch = array_ops.placeholder(dtypes.int64, shape=[])
dataset1 = dataset_ops.Dataset.from_tensors(epoch).repeat(1000)
dataset2 = dataset_ops.Dataset.range(1000)
dataset = dataset_ops.Dataset.zip((dataset1, dataset2))
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"], prefetch_buffer_size=4)
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
init_op = multi_device_iterator.initializer
config = config_pb2.ConfigProto(device_count={"CPU": 3})
pool = config.session_inter_op_thread_pool.add()
pool.num_threads = 2
with session.Session(config=config) as sess:
for i in range(1000):
sess.run(init_op, feed_dict={epoch: i})
self.assertEqual([(i, 0), (i, 1)], self.evaluate([elem_on_1,
elem_on_2]))
@test_util.run_v1_only("b/121264236")
def testMultipleInitializationsEager(self):
if not context.executing_eagerly():
return
with ops.device("/cpu:0"):
dataset1 = dataset_ops.Dataset.range(1000)
dataset2 = dataset_ops.Dataset.range(1000)
dataset = dataset_ops.Dataset.zip((dataset1, dataset2))
for _ in range(5):
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"], prefetch_buffer_size=4)
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual([(0, 0), (1, 1)], self.evaluate([elem_on_1, elem_on_2]))
@test_util.run_v1_only("b/121264236")
def testBasicGpu(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/gpu:0"])
config = config_pb2.ConfigProto(device_count={"CPU": 2, "GPU": 1})
with self.test_session(config=config):
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@test_util.run_v1_only("b/121264236")
def testUnevenGpu(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/gpu:0"], max_buffer_size=4)
config = config_pb2.ConfigProto(device_count={"CPU": 2, "GPU": 1})
with self.test_session(config=config):
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1 = multi_device_iterator.get_next("/cpu:1")
self.assertEqual(i, self.evaluate(elem_on_1))
for i in range(0, 10, 2):
elem_on_2 = multi_device_iterator.get_next("/gpu:0")
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@test_util.run_v1_only("b/121264236")
def testGetNextAsOptionalGpu(self):
    if not test_util.is_gpu_available() or context.executing_eagerly():
      self.skipTest("No GPU available or executing eagerly")
dataset = dataset_ops.Dataset.range(9)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/gpu:0"])
elem_on_1, elem_on_2 = multi_device_iterator.get_next_as_optional()
elem_on_1_has_value_t = elem_on_1.has_value()
elem_on_1_t = elem_on_1.get_value()
elem_on_2_has_value_t = elem_on_2.has_value()
elem_on_2_t = elem_on_2.get_value()
config = config_pb2.ConfigProto(device_count={"CPU": 2, "GPU": 1})
with self.test_session(config=config) as sess:
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 8, 2):
elem_on_1_has_value, elem_on_1_value = sess.run(
[elem_on_1_has_value_t, elem_on_1_t])
self.assertTrue(elem_on_1_has_value)
self.assertEqual(i, elem_on_1_value)
elem_on_2_has_value, elem_on_2_value = sess.run(
[elem_on_2_has_value_t, elem_on_2_t])
self.assertTrue(elem_on_2_has_value)
self.assertEqual(i + 1, elem_on_2_value)
elem_on_1_has_value, elem_on_1_value = sess.run(
[elem_on_1_has_value_t, elem_on_1_t])
self.assertTrue(elem_on_1_has_value)
self.assertEqual(8, elem_on_1_value)
self.assertFalse(self.evaluate(elem_on_1_has_value_t))
self.assertFalse(self.evaluate(elem_on_2_has_value_t))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(elem_on_1_t)
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(elem_on_2_t)
@test_util.run_v1_only("b/121264236")
def testOptimization(self):
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.apply(optimization.assert_next(["MemoryCacheImpl"]))
dataset = dataset.skip(0) # this should be optimized away
dataset = dataset.cache()
options = dataset_ops.Options()
options.experimental_optimization.noop_elimination = True
dataset = dataset.with_options(options)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"])
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.test_session(config=config):
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
if __name__ == "__main__":
ops.enable_eager_execution(
config=config_pb2.ConfigProto(device_count={"CPU": 3, "GPU": 1}))
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/multi_device_iterator_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.repeat()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class RepeatTest(test_base.DatasetTestBase):
def testRepeatTensorDataset(self):
"""Test a dataset that repeats its input multiple times."""
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
    # `count` controls the number of repetitions; `do_test` below builds a
    # fresh dataset for each value so that finite, empty, and infinite
    # repetition can all be exercised.
def do_test(count):
dataset = dataset_ops.Dataset.from_tensors(components).repeat(count)
self.assertEqual(
[c.shape for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
self.assertDatasetProduces(dataset, [components] * count)
# Test a finite repetition.
do_test(3)
    # Test a different finite repetition.
do_test(7)
# Test an empty repetition.
do_test(0)
# Test an infinite repetition.
# NOTE(mrry): There's not a good way to test that the sequence
# actually is infinite.
dataset = dataset_ops.Dataset.from_tensors(components).repeat(-1)
self.assertEqual(
[c.shape for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
get_next = self.getNext(dataset)
for _ in range(17):
results = self.evaluate(get_next())
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
def testRepeatRepeatTensorDataset(self):
"""Test the composition of repeat datasets."""
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
inner_count, outer_count = 7, 14
dataset = dataset_ops.Dataset.from_tensors(components).repeat(
inner_count).repeat(outer_count)
self.assertEqual(
[c.shape for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
self.assertDatasetProduces(dataset,
[components] * (inner_count * outer_count))
def testRepeatEmptyDataset(self):
"""Test that repeating an empty dataset does not hang."""
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10).skip(10).repeat(-1)
self.assertDatasetProduces(dataset, [])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/repeat_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.window()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class WindowTest(test_base.DatasetTestBase, parameterized.TestCase):
@parameterized.named_parameters(
("1", 20, 14, 7, 1),
("2", 20, 17, 9, 1),
("3", 20, 14, 14, 1),
("4", 20, 10, 14, 1),
("5", 20, 14, 19, 1),
("6", 20, 4, 1, 2),
("7", 20, 2, 1, 6),
("8", 20, 4, 7, 2),
("9", 20, 2, 7, 6),
("10", 1, 10, 4, 1),
("11", 0, 10, 4, 1),
("12", 20, 14, 7, 1, False),
("13", 20, 17, 9, 1, False),
("14", 20, 14, 14, 1, False),
("15", 20, 10, 14, 1, False),
("16", 20, 14, 19, 1, False),
("17", 20, 4, 1, 2, False),
("18", 20, 2, 1, 6, False),
("19", 20, 4, 7, 2, False),
("20", 20, 2, 7, 6, False),
("21", 1, 10, 4, 1, False),
("22", 0, 10, 4, 1, False),
)
def testWindowDataset(self, count, size, shift, stride, drop_remainder=True):
"""Tests a dataset that slides a window its input elements."""
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
def _flat_map_fn(x, y, z):
return dataset_ops.Dataset.zip((x.batch(batch_size=size),
y.batch(batch_size=size),
z.batch(batch_size=size)))
dataset = dataset_ops.Dataset.from_tensor_slices(components).map(
_map_fn).repeat(count).window(
size=size,
shift=shift,
stride=stride,
drop_remainder=drop_remainder).flat_map(_flat_map_fn)
get_next = self.getNext(dataset)
self.assertEqual([[None] + list(c.shape[1:]) for c in components],
[ts.as_list() for ts in nest.flatten(
dataset_ops.get_legacy_output_shapes(dataset))])
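    # Each window spans (size - 1) * stride + 1 input elements and consecutive
    # windows start `shift` elements apart, so with count * 7 inputs the number
    # of complete windows is given by the expression below.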
num_full_batches = max(0,
(count * 7 - ((size - 1) * stride + 1)) // shift + 1)
for i in range(num_full_batches):
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
for j in range(size):
self.assertAllEqual(component[(i * shift + j * stride) % 7]**2,
result_component[j])
if not drop_remainder:
num_partial_batches = (count * 7) // shift + (
(count * 7) % shift > 0) - num_full_batches
for i in range(num_partial_batches):
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
remaining = (count * 7) - ((num_full_batches + i) * shift)
num_elements = remaining // stride + ((remaining % stride) > 0)
for j in range(num_elements):
self.assertAllEqual(
component[((num_full_batches + i) * shift + j * stride) % 7]**2,
result_component[j])
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@parameterized.named_parameters(
("1", 14, 0, 3, 1),
("2", 14, 3, 0, 1),
("3", 14, 3, 3, 0),
)
def testWindowDatasetInvalid(self, count, size, shift, stride):
with self.assertRaises(errors.InvalidArgumentError):
ds = dataset_ops.Dataset.range(10).map(lambda x: x).repeat(count).window(
size=size, shift=shift,
stride=stride).flat_map(lambda x: x.batch(batch_size=size))
self.evaluate(ds._variant_tensor)
def testWindowSparse(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=[[0]], values=(i * [1]), dense_shape=[1])
dataset = dataset_ops.Dataset.range(10).map(_sparse).window(
size=5, shift=3,
drop_remainder=True).flat_map(lambda x: x.batch(batch_size=5))
num_batches = (10 - 5) // 3 + 1
expected_output = [
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]],
values=[i * 3, i * 3 + 1, i * 3 + 2, i * 3 + 3, i * 3 + 4],
dense_shape=[5, 1]) for i in range(num_batches)
]
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testWindowSparseWithDifferentDenseShapes(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=array_ops.expand_dims(
math_ops.range(i, dtype=dtypes.int64), 1),
values=array_ops.fill([math_ops.cast(i, dtypes.int32)], i),
dense_shape=[i])
dataset = dataset_ops.Dataset.range(10).map(_sparse).window(
size=5, shift=3,
drop_remainder=True).flat_map(lambda x: x.batch(batch_size=5))
expected_output = []
num_batches = (10 - 5) // 3 + 1
for i in range(num_batches):
expected_indices = []
expected_values = []
for j in range(5):
for k in range(i * 3 + j):
expected_indices.append([j, k])
expected_values.append(i * 3 + j)
expected_output.append(
sparse_tensor.SparseTensorValue(
indices=expected_indices,
values=expected_values,
dense_shape=[5, i * 3 + 5 - 1]))
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testNestedWindowSparse(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=[[0]], values=(i * [1]), dense_shape=[1])
dataset = dataset_ops.Dataset.range(10).map(_sparse).window(
size=4, shift=2,
drop_remainder=True).flat_map(lambda x: x.batch(batch_size=4)).window(
size=3, shift=1,
drop_remainder=True).flat_map(lambda x: x.batch(batch_size=3))
expected_output = [
sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
[1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
[2, 2, 0], [2, 3, 0]],
values=[0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7],
dense_shape=[3, 4, 1]),
sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
[1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
[2, 2, 0], [2, 3, 0]],
values=[2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9],
dense_shape=[3, 4, 1])
]
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testWindowShapeError(self):
def generator():
yield [1.0, 2.0, 3.0]
yield [4.0, 5.0, 6.0]
yield [7.0, 8.0, 9.0, 10.0]
dataset = dataset_ops.Dataset.from_generator(
generator, dtypes.float32, output_shapes=[None]).window(
size=3, shift=1).flat_map(lambda x: x.batch(batch_size=3))
self.assertDatasetProduces(
dataset,
expected_error=(
errors.InvalidArgumentError,
r"Cannot batch tensors with different shapes in component 0. "
r"First element had shape \[3\] and element 2 had shape \[4\]."))
def testWindowIgnoreErrors(self):
input_values = np.float32([1., np.nan, 2., np.nan, 3.])
dataset = dataset_ops.Dataset.from_tensor_slices(input_values).map(
lambda x: array_ops.check_numerics(x, "message")).window(
size=2, shift=2, stride=2,
drop_remainder=True).flat_map(lambda x: x.batch(batch_size=2))
self.assertDatasetProduces(
dataset, expected_output=[np.float32([1., 2.]),
np.float32([2., 3.])])
def testNestedOutput(self):
if not context.executing_eagerly():
self.skipTest("self.evaluate() does not work with a dataset")
dataset = dataset_ops.Dataset.range(100)
dataset = dataset_ops.Dataset.zip((dataset, dataset)).window(10)
for i, nested_dataset in enumerate(dataset):
x, y = nested_dataset
self.assertDatasetProduces(x, range(i*10, (i+1)*10))
self.assertDatasetProduces(y, range(i*10, (i+1)*10))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/window_test.py
|
# -*- coding: utf-8 -*-
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.padded_batch()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
def _random_seq_lens(count):
return np.random.randint(20, size=(count,)).astype(np.int32)
@test_util.run_all_in_graph_and_eager_modes
class PaddedBatchTest(test_base.DatasetTestBase, parameterized.TestCase):
@parameterized.named_parameters(
('default_padding', _random_seq_lens(32), 4, [-1], False),
('constant_padding', _random_seq_lens(32), 4, [25], False),
('uneven_with_remainder', _random_seq_lens(34), 4, [-1], False),
('uneven_without_remainder', _random_seq_lens(34), 4, [-1], True),
)
def testPaddedBatchDataset(self, seq_lens, batch_size, padded_shapes,
drop_remainder):
"""Tests the padded batch dataset logic for various input configurations.
Args:
seq_lens: the input sequence lengths
batch_size: the batch size
padded_shapes: the padded shapes to use
      drop_remainder: whether a smaller batch size should be produced if batch
        size does not divide the number of inputs evenly
"""
dataset = dataset_ops.Dataset.from_tensor_slices(seq_lens).map(
lambda x: array_ops.fill([x], x)).padded_batch(
batch_size=batch_size,
drop_remainder=drop_remainder,
padded_shapes=padded_shapes)
num_full_batches = len(seq_lens) // batch_size
get_next = self.getNext(dataset)
for i in range(num_full_batches):
result = self.evaluate(get_next())
padded_len = padded_shapes[0]
if padded_len is None or padded_len == -1:
padded_len = np.max(result) if result.size > 0 else 0
self.assertEqual((batch_size, padded_len), result.shape)
for j in range(batch_size):
seq_len = seq_lens[(i * batch_size) + j]
self.assertAllEqual(result[j, :seq_len], [seq_len] * seq_len)
self.assertAllEqual(result[j, seq_len:], [0] * (padded_len - seq_len))
if not drop_remainder and len(seq_lens) % batch_size > 0:
result = self.evaluate(get_next())
padded_len = np.max(result) if result.size > 0 else 0
self.assertEqual((len(seq_lens) % batch_size, padded_len), result.shape)
for j in range(len(seq_lens) % batch_size):
seq_len = seq_lens[num_full_batches * batch_size + j]
self.assertAllEqual(result[j, :seq_len], [seq_len] * seq_len)
self.assertAllEqual(result[j, seq_len:], [0] * (padded_len - seq_len))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@test_util.run_deprecated_v1
def testPaddedBatchShortPadding(self):
dataset = (
dataset_ops.Dataset.from_tensor_slices(
[6, 5, 5, 5, 5]).map(lambda x: array_ops.fill([x], x)).padded_batch(
batch_size=4, padded_shapes=[5]))
self.assertDatasetProduces(
dataset, expected_error=(errors.DataLossError, ''))
def testPaddedBatchEmptyTensors(self):
dataset = (
dataset_ops.Dataset.from_tensor_slices(
[0, 0, 0, 0]).map(lambda x: array_ops.fill([x], x)).padded_batch(
batch_size=4, padded_shapes=[-1]))
self.assertDatasetProduces(dataset, expected_output=[[[], [], [], []]])
def testPaddedBatchDatasetNonDefaultPadding(self):
def fill_tuple(x):
filled = array_ops.fill([x], x)
return (filled, string_ops.as_string(filled))
random_seq_lens = np.random.randint(20, size=(32,)).astype(np.int32)
dataset = (
dataset_ops.Dataset.from_tensor_slices(random_seq_lens).map(fill_tuple)
.padded_batch(
4, padded_shapes=([-1], [-1]), padding_values=(-1, '<end>')))
get_next = self.getNext(dataset)
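    # 32 input sequences batched 4 at a time yield exactly 8 full batches.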
for i in range(8):
result = self.evaluate(get_next())
padded_len = np.max(result[0])
self.assertEqual((4, padded_len), result[0].shape)
self.assertEqual((4, padded_len), result[1].shape)
for j in range(4):
seq_len = random_seq_lens[(i * 4) + j]
self.assertAllEqual(result[0][j, :seq_len], [seq_len] * seq_len)
self.assertAllEqual(result[0][j, seq_len:],
[-1] * (padded_len - seq_len))
self.assertAllEqual(result[1][j, :seq_len],
[compat.as_bytes(str(seq_len))] * seq_len)
self.assertAllEqual(result[1][j, seq_len:],
[b'<end>'] * (padded_len - seq_len))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testPaddedBatchDatasetUnicode(self):
# See GitHub issue 16149
def generator():
data = [[u'Простой', u'тест', u'юникода'],
[u'никогда', u'не', u'бывает', u'простым']]
for seq in data:
yield seq, [0, 1, 2, 3]
dataset = dataset_ops.Dataset.from_generator(
generator, (dtypes.string, dtypes.int32),
(tensor_shape.TensorShape([None]), tensor_shape.TensorShape([None])))
padded_dataset = dataset.padded_batch(
2, padded_shapes=([None], [None]), padding_values=('', 0))
next_element = self.getNext(padded_dataset)
self.evaluate(next_element())
# NOTE: This test is specific to graph mode and is skipped in eager mode.
@test_util.run_deprecated_v1
def testSkipEagerPaddedBatchDatasetShapeSpecifications(self):
int_placeholder = array_ops.placeholder(dtypes.int32)
float_placeholder = array_ops.placeholder(dtypes.float32)
string_placeholder = array_ops.placeholder(dtypes.string)
input_dataset = dataset_ops.Dataset.from_tensors(
(int_placeholder, float_placeholder, string_placeholder))
# Test different ways of specifying the `padded_shapes` argument.
dynamic_padding_from_tensor_shapes = input_dataset.padded_batch(
32,
padded_shapes=(tensor_shape.TensorShape([None]),
tensor_shape.TensorShape([None, None]),
tensor_shape.TensorShape([37])))
dynamic_padding_from_lists = input_dataset.padded_batch(
32, padded_shapes=([None], [None, None], [37]))
dynamic_padding_from_lists_with_minus_one = input_dataset.padded_batch(
32, padded_shapes=([-1], [-1, -1], [37]))
dynamic_padding_from_tensors = input_dataset.padded_batch(
32,
padded_shapes=(constant_op.constant([-1], dtype=dtypes.int64),
constant_op.constant([-1, -1], dtype=dtypes.int64),
constant_op.constant([37], dtype=dtypes.int64)))
for dataset in [
dynamic_padding_from_tensor_shapes, dynamic_padding_from_lists,
dynamic_padding_from_lists_with_minus_one, dynamic_padding_from_tensors
]:
dataset_output_shapes = dataset_ops.get_legacy_output_shapes(dataset)
self.assertEqual([None, None], dataset_output_shapes[0].as_list())
self.assertEqual([None, None, None], dataset_output_shapes[1].as_list())
self.assertEqual([None, 37], dataset_output_shapes[2].as_list())
def testPaddedBatchSparseError(self):
def _map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0]], values=(i * [1]), dense_shape=[1, 1]), i
with self.assertRaises(TypeError):
_ = dataset_ops.Dataset.range(10).map(_map_fn).padded_batch(10)
def testPaddedBatchShapeError(self):
with self.assertRaisesRegexp(
ValueError, r'The padded shape \(1,\) is not compatible with the '
r'corresponding input component shape \(\).'):
_ = dataset_ops.Dataset.range(10).padded_batch(5, padded_shapes=[1])
with self.assertRaisesRegexp(
ValueError, r'The padded shape \(1,\) is not compatible with the '
r'corresponding input component shape \(3,\).'):
_ = dataset_ops.Dataset.from_tensors([1, 2, 3]).padded_batch(
5, padded_shapes=[1])
with self.assertRaisesRegexp(
ValueError, r'Padded shape .* must be a 1-D tensor '
r'of tf.int64 values, but its shape was \(2, 2\).'):
_ = dataset_ops.Dataset.from_tensors([1, 2, 3]).padded_batch(
5, padded_shapes=[[1, 1], [1, 1]])
with self.assertRaisesRegexp(
TypeError, r'Padded shape .* must be a 1-D tensor '
r'of tf.int64 values, but its element type was float32.'):
_ = dataset_ops.Dataset.from_tensors([1, 2, 3]).padded_batch(
5, padded_shapes=constant_op.constant([1., 2., 3.]))
with self.assertRaisesRegexp(
ValueError, r'The padded shape \(1,\) is not compatible with the '
r'corresponding input component shape \(\).'):
shape_as_tensor = constant_op.constant([1], dtype=dtypes.int64)
_ = dataset_ops.Dataset.range(10).padded_batch(
5, padded_shapes=shape_as_tensor)
# NOTE: This test is specific to graph mode and is skipped in eager mode.
@test_util.run_deprecated_v1
def testSkipEagerPaddedBatchShapeError(self):
with self.assertRaisesRegexp(
ValueError,
r'The padded shape \((\?|None), (\?|None)\) is not compatible with the '
r'corresponding input component shape \(\).'):
shape_as_tensor = array_ops.placeholder(dtypes.int64, shape=[2])
_ = dataset_ops.Dataset.range(10).padded_batch(
5, padded_shapes=shape_as_tensor)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/padded_batch_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.filter()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
class FilterTestBase(test_base.DatasetTestBase):
"""Base class for FilterDataset tests."""
def apply_filter(self, input_dataset, predicate):
    raise NotImplementedError("FilterTestBase.apply_filter")
def testFilterDataset(self):
components = (
np.arange(7, dtype=np.int64),
np.array([[1, 2, 3]], dtype=np.int64) * np.arange(
7, dtype=np.int64)[:, np.newaxis],
np.array(37.0, dtype=np.float64) * np.arange(7)
)
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
def do_test(count, modulus): # pylint: disable=missing-docstring
dataset = dataset_ops.Dataset.from_tensor_slices(components).map(
_map_fn).repeat(count)
# pylint: disable=g-long-lambda
dataset = self.apply_filter(
dataset, lambda x, _y, _z: math_ops.equal(
math_ops.mod(x, modulus), 0))
# pylint: enable=g-long-lambda
self.assertEqual(
[c.shape[1:] for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
get_next = self.getNext(dataset)
for _ in range(count):
for i in [x for x in range(7) if x**2 % modulus == 0]:
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
do_test(14, 2)
do_test(4, 18)
# Test an empty dataset.
do_test(0, 1)
def testFilterRange(self):
dataset = dataset_ops.Dataset.range(4)
dataset = self.apply_filter(
dataset, lambda x: math_ops.not_equal(math_ops.mod(x, 3), 2))
self.assertDatasetProduces(dataset, expected_output=[0, 1, 3])
def testFilterDict(self):
dataset = dataset_ops.Dataset.range(10).map(
lambda x: {"foo": x * 2, "bar": x ** 2})
dataset = self.apply_filter(
dataset, lambda d: math_ops.equal(d["bar"] % 2, 0))
dataset = dataset.map(lambda d: d["foo"] + d["bar"])
self.assertDatasetProduces(
dataset,
expected_output=[(i * 2 + i**2) for i in range(10) if not (i**2) % 2])
def testUseStepContainerInFilter(self):
input_data = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int64)
# Define a predicate that returns true for the first element of
# the sequence and not the second, and uses `tf.map_fn()`.
def _predicate(xs):
squared_xs = map_fn.map_fn(lambda x: x * x, xs)
summed = math_ops.reduce_sum(squared_xs)
return math_ops.equal(summed, 1 + 4 + 9)
dataset = dataset_ops.Dataset.from_tensor_slices(
[[1, 2, 3], [4, 5, 6]])
dataset = self.apply_filter(dataset, _predicate)
self.assertDatasetProduces(dataset, expected_output=[input_data[0]])
def testSparse(self):
def _map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1])), i
def _filter_fn(_, i):
return math_ops.equal(i % 2, 0)
dataset = dataset_ops.Dataset.range(10).map(_map_fn)
dataset = self.apply_filter(dataset, _filter_fn)
dataset = dataset.map(lambda x, i: x)
self.assertDatasetProduces(
dataset, expected_output=[_map_fn(i * 2)[0] for i in range(5)])
def testShortCircuit(self):
dataset = dataset_ops.Dataset.zip(
(dataset_ops.Dataset.range(10),
dataset_ops.Dataset.from_tensors(True).repeat(None)
))
dataset = self.apply_filter(dataset, lambda x, y: y)
self.assertDatasetProduces(
dataset, expected_output=[(i, True) for i in range(10)])
def testParallelFilters(self):
dataset = dataset_ops.Dataset.range(10)
dataset = self.apply_filter(dataset, lambda x: math_ops.equal(x % 2, 0))
next_elements = [self.getNext(dataset) for _ in range(10)]
self.assertEqual([0 for _ in range(10)],
self.evaluate(
[next_element() for next_element in next_elements]))
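# A minimal usage sketch, not part of the original file: a hypothetical
# concrete subclass. Real subclasses in the repository override apply_filter,
# typically delegating to `Dataset.filter()` as shown here.
class _ExampleFilterTest(FilterTestBase):
  def apply_filter(self, input_dataset, predicate):
    return input_dataset.filter(predicate)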
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/filter_test_base.py
|
# -*- coding: utf-8 -*-
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.batch()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class BatchTest(test_base.DatasetTestBase, parameterized.TestCase):
@parameterized.named_parameters(
('even', 28, 14, False),
('uneven_with_remainder', 28, 15, False),
('uneven_without_remainder', 28, 15, True),
('empty', 0, 14, False),
)
def testBasic(self, count, batch_size, drop_remainder):
"""Tests the batch dataset logic for various input configurations.
Args:
count: the number of input elements
batch_size: the batch size
      drop_remainder: whether a smaller batch size should be produced if batch
        size does not divide the number of inputs evenly
"""
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(count) -> BatchDataset(batch_size).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
dataset = dataset_ops.Dataset.from_tensor_slices(components).map(
_map_fn).repeat(count).batch(batch_size, drop_remainder)
get_next = self.getNext(dataset)
if drop_remainder:
dim0 = batch_size
else:
dim0 = None
self.assertEqual(
[ts.as_list() for ts in nest.flatten(
dataset_ops.get_legacy_output_shapes(dataset))],
[[dim0] + list(c.shape[1:]) for c in components])
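    # The map/repeat pipeline yields count * 7 elements in total, so the number
    # of complete batches is the integer quotient computed below.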
num_full_batches = (count * 7) // batch_size
for i in range(num_full_batches):
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
for j in range(batch_size):
self.assertAllEqual(component[(i * batch_size + j) % 7]**2,
result_component[j])
if not drop_remainder and (count * 7) % batch_size > 0:
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
for j in range((count * 7) % batch_size):
self.assertAllEqual(
component[(num_full_batches * batch_size + j) % 7]**2,
result_component[j])
with self.assertRaises(errors.OutOfRangeError):
result = self.evaluate(get_next())
def testInvalidBatchSize(self):
with self.assertRaises(errors.InvalidArgumentError):
dataset = (dataset_ops.Dataset.range(10).batch(0))
self.evaluate(dataset._variant_tensor)
def testDataset(self):
def map_fn(i):
return dataset_ops.Dataset.from_tensors(i)
dataset = dataset_ops.Dataset.range(10).map(map_fn).batch(5)
dataset = dataset.map(lambda x: x)
dataset = dataset.unbatch().flat_map(lambda x: x)
self.assertDatasetProduces(dataset, expected_output=range(10))
def testSparse(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=[[0]], values=(i * [1]), dense_shape=[1])
dataset = dataset_ops.Dataset.range(10).map(_sparse).batch(5)
expected_output = [
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]],
values=[i * 5, i * 5 + 1, i * 5 + 2, i * 5 + 3, i * 5 + 4],
dense_shape=[5, 1]) for i in range(2)
]
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testSparseWithDifferentDenseShapes(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=array_ops.expand_dims(
math_ops.range(i, dtype=dtypes.int64), 1),
values=array_ops.fill([math_ops.cast(i, dtypes.int32)], i),
dense_shape=[i])
dataset = dataset_ops.Dataset.range(10).map(_sparse).batch(5)
expected_output = []
for i in range(2):
expected_indices = []
expected_outputs = []
for j in range(5):
for k in range(i * 5 + j):
expected_indices.append([j, k])
expected_outputs.append(i * 5 + j)
expected_output.append(
sparse_tensor.SparseTensorValue(
indices=expected_indices,
values=expected_outputs,
dense_shape=[5, (i + 1) * 5 - 1]))
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testSparseNested(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=[[0]], values=(i * [1]), dense_shape=[1])
dataset = dataset_ops.Dataset.range(10).map(_sparse).batch(5).batch(2)
expected_output = [
sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [0, 4, 0],
[1, 0, 0], [1, 1, 0], [1, 2, 0], [1, 3, 0], [1, 4, 0]],
values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
dense_shape=[2, 5, 1])
]
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testShapeError(self):
def generator():
yield [1.0, 2.0, 3.0]
yield [4.0, 5.0, 6.0]
yield [7.0, 8.0, 9.0, 10.0]
dataset = (
dataset_ops.Dataset.from_generator(
generator, dtypes.float32, output_shapes=[None]).batch(3))
self.assertDatasetProduces(
dataset,
expected_error=(
errors.InvalidArgumentError,
r'Cannot batch tensors with different shapes in component 0. First '
r'element had shape \[3\] and element 2 had shape \[4\].'))
# Ragged Tensors.
def testRagged(self):
def _ragged(i):
return ragged_tensor.RaggedTensor.from_tensor(i * [[1]])
dataset = dataset_ops.Dataset.range(10).map(_ragged).batch(5)
expected_output = [
ragged_factory_ops.constant([[[0]], [[1]], [[2]], [[3]], [[4]]]),
ragged_factory_ops.constant([[[5]], [[6]], [[7]], [[8]], [[9]]])
]
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testRaggedWithDifferentShapes(self):
dataset = dataset_ops.Dataset.range(10).map(ragged_math_ops.range).batch(5)
expected_output = [
ragged_concat_ops.stack([ragged_math_ops.range(i) for i in range(5)]),
ragged_concat_ops.stack(
[ragged_math_ops.range(i) for i in range(5, 10)])
]
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testRaggedNested(self):
def _ragged(i):
return ragged_tensor.RaggedTensor.from_tensor(i * [[1]])
dataset = dataset_ops.Dataset.range(10).map(_ragged).batch(5).batch(2)
expected_output = [
ragged_factory_ops.constant([[[[0]], [[1]], [[2]], [[3]], [[4]]],
[[[5]], [[6]], [[7]], [[8]], [[9]]]])
]
self.assertDatasetProduces(dataset, expected_output=expected_output)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/data/kernel_tests/batch_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.Dataset.filter()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.benchmarks import benchmark_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import array_ops
# TODO(b/119837791): Add eager benchmarks.
class FilterBenchmark(benchmark_base.DatasetBenchmarkBase):
"""Benchmarks for `tf.data.Dataset.filter()`."""
def _benchmark(self, predicate, name):
dataset = (
dataset_ops.Dataset.from_tensors(True).repeat(None).filter(predicate))
self.run_and_report_benchmark(dataset, num_elements=100000, name=name)
def benchmark_simple_function(self):
self._benchmark(array_ops.identity, "simple_function")
def benchmark_return_component_optimization(self):
self._benchmark(lambda x: x, "return_component")
if __name__ == "__main__":
benchmark_base.test.main()
|
tensorflow-master
|
tensorflow/python/data/benchmarks/filter_benchmark.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.Dataset.list_files()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
from os import makedirs
import shutil
import time
import tempfile
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class ListFilesBenchmark(test.Benchmark):
"""Benchmarks for `tf.data.Dataset.list_files()`."""
def benchmark_nested_directories(self):
tmp_dir = tempfile.mkdtemp()
width = 1024
depth = 16
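    # Builds `width` directory trees, each `depth` levels deep; intermediate
    # levels contain .py/.pyc files and only the deepest level contains the
    # .txt/.log files matched by the patterns constructed below.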
for i in range(width):
for j in range(depth):
new_base = path.join(tmp_dir, str(i),
*[str(dir_name) for dir_name in range(j)])
makedirs(new_base)
child_files = ['a.py', 'b.pyc'] if j < depth - 1 else ['c.txt', 'd.log']
for f in child_files:
filename = path.join(new_base, f)
open(filename, 'w').close()
patterns = [
path.join(tmp_dir, path.join(*['**'
for _ in range(depth)]), suffix)
for suffix in ['*.txt', '*.log']
]
deltas = []
iters = 3
for _ in range(iters):
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.list_files(patterns)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
next_element = dataset.make_one_shot_iterator().get_next()
with session.Session() as sess:
sub_deltas = []
while True:
try:
start = time.time()
sess.run(next_element)
end = time.time()
sub_deltas.append(end - start)
except errors.OutOfRangeError:
break
deltas.append(sub_deltas)
median_deltas = np.median(deltas, axis=0)
self.report_benchmark(
iters=iters,
wall_time=np.sum(median_deltas),
extras={
'read first file:':
median_deltas[0],
'read second file:':
median_deltas[1],
'avg time for reading %d more filenames:' %
(len(median_deltas) - 2):
np.average(median_deltas[2:])
},
name='nested_directory(%d*%d)' % (width, depth))
shutil.rmtree(tmp_dir, ignore_errors=True)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/data/benchmarks/list_files_benchmark.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.Dataset.batch()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.benchmarks import benchmark_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import sparse_tensor
class BatchBenchmark(benchmark_base.DatasetBenchmarkBase):
"""Benchmarks for `tf.data.Dataset.batch()`."""
def benchmark_batch_sparse(self):
non_zeros_per_row_values = [0, 1, 5, 10, 100]
batch_size_values = [1, 32, 64, 128, 1024]
for non_zeros_per_row in non_zeros_per_row_values:
tensor = sparse_tensor.SparseTensor(
indices=np.arange(non_zeros_per_row, dtype=np.int64)[:, np.newaxis],
values=np.arange(non_zeros_per_row, dtype=np.int64),
dense_shape=[1000])
for batch_size in batch_size_values:
dataset = dataset_ops.Dataset.from_tensors(tensor).repeat().batch(
batch_size)
self.run_and_report_benchmark(
dataset,
num_elements=100000 // batch_size,
iters=1,
name="sparse_num_elements_%d_batch_size_%d" % (non_zeros_per_row,
batch_size))
def benchmark_batch_dense(self):
for element_exp in [10, 12, 14, 16, 18, 20, 22]:
for batch_exp in [3, 6, 9]:
for parallel_copy in [True, False]:
element_size = 1 << element_exp
batch_size = 1 << batch_exp
dataset = dataset_ops.Dataset.from_tensors(
np.random.rand(element_size)).repeat().batch(batch_size)
options = dataset_ops.Options()
options.experimental_optimization.parallel_batch = parallel_copy
dataset = dataset.with_options(options)
tag = "_parallel" if parallel_copy else ""
self.run_and_report_benchmark(
dataset,
num_elements=(1 << (22 - batch_exp - element_exp // 2)),
iters=1,
name="batch_element_size_%d_batch_size_%d%s" %
(element_size, batch_size, tag))
if __name__ == "__main__":
benchmark_base.test.main()
|
tensorflow-master
|
tensorflow/python/data/benchmarks/batch_benchmark.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utilities for tf.data benchmarking functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.platform import test
# TODO(b/119837791): Add eager benchmarks.
class DatasetBenchmarkBase(test.Benchmark):
"""Base class for dataset benchmarks."""
def run_benchmark(self, dataset, num_elements, iters=1, warmup=True):
"""Benchmarks the dataset.
Runs the dataset `iters` times. In each iteration, the benchmark measures
the time it takes to go through `num_elements` elements of the dataset.
Args:
dataset: Dataset to benchmark.
num_elements: Number of dataset elements to iterate through in each
benchmark iteration.
iters: Number of times to repeat the timing.
warmup: If `True`, warms up the session caches by performing an untimed run.
Returns:
A float, representing the per-element wall time of the dataset in seconds.
This is the median time (with respect to `iters`) it takes for the dataset
to go through `num_elements` elements, divided by `num_elements`.
"""
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
# NOTE: We use `dataset.skip()` to perform the iterations in C++, avoiding
# the overhead of multiple `session.run()` calls. Note that this relies on
# the underlying implementation of `skip`: if it is optimized in the future,
# we will have to change this code.
dataset = dataset.skip(num_elements - 1)
iterator = dataset_ops.make_initializable_iterator(dataset)
next_element = iterator.get_next()
next_element = nest.flatten(next_element)[0]
deltas = []
for _ in range(iters):
with session.Session() as sess:
if warmup:
# Run once to warm up the session caches.
sess.run(iterator.initializer)
sess.run(next_element)
sess.run(iterator.initializer)
start = time.time()
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
return np.median(deltas) / float(num_elements)
def run_and_report_benchmark(self,
dataset,
num_elements,
name,
iters=5,
extras=None,
warmup=True):
# Measure the per-element wall time.
wall_time = self.run_benchmark(dataset, num_elements, iters, warmup)
if extras is None:
extras = {}
extras["num_elements"] = num_elements
self.report_benchmark(
wall_time=wall_time, iters=iters, name=name, extras=extras)
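# Usage sketch (an assumption, not part of the original module): a concrete
# benchmark only needs to build a dataset and call `run_and_report_benchmark`.
# The class name and numbers below are hypothetical.
class _ExampleRangeBenchmark(DatasetBenchmarkBase):
  """Hypothetical benchmark illustrating how the base class is used."""

  def benchmark_example_range(self):
    dataset = dataset_ops.Dataset.range(1000)
    self.run_and_report_benchmark(
        dataset, num_elements=1000, name="example_range")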
|
tensorflow-master
|
tensorflow/python/data/benchmarks/benchmark_base.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utilities for tf.data benchmarking functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import timeit
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import sleep
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.platform import test
class MetaBenchmark(test.Benchmark):
"""Benchmark that compares various ways of running tf.data benchmarks."""
# Note that each of these benchmarks is a separate method so that we can
# run them independently and collect a performance profile.
def setup_fast_dataset(self):
self.num_reps = 15
self.iters = 100000
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
return dataset_ops.Dataset.range(10000**2).with_options(options)
def benchmark_fast_dataset_with_only_cpp_iterations(self):
dataset = self.setup_fast_dataset()
self.run_benchmark_with_only_cpp_iterations(dataset)
def benchmark_fast_dataset_with_session_run(self):
dataset = self.setup_fast_dataset()
self.run_benchmark_with_session_run(dataset)
def benchmark_fast_dataset_with_session_callable(self):
dataset = self.setup_fast_dataset()
self.run_benchmark_with_session_run(dataset, make_callable=True)
def benchmark_fast_dataset_in_eager(self):
with context.eager_mode():
dataset = self.setup_fast_dataset()
self.run_benchmark_in_eager(dataset)
def setup_slow_dataset(self):
dataset = self.setup_fast_dataset()
self.iters = 1000
# sleep for 1e-3s per iteration
return dataset.apply(sleep.sleep(1000))
def benchmark_slow_dataset_with_only_cpp_iterations(self):
dataset = self.setup_slow_dataset()
self.run_benchmark_with_only_cpp_iterations(dataset)
def benchmark_slow_dataset_with_session_run(self):
dataset = self.setup_slow_dataset()
self.run_benchmark_with_session_run(dataset)
def benchmark_slow_dataset_with_session_callable(self):
dataset = self.setup_slow_dataset()
self.run_benchmark_with_session_run(dataset, make_callable=True)
def benchmark_slow_dataset_in_eager(self):
with context.eager_mode():
dataset = self.setup_slow_dataset()
self.run_benchmark_in_eager(dataset)
def report(self, deltas):
# Each `delta` is the time taken for `self.iters` iterations. Divide by the
# number of iterations here to get per-element iteration time.
deltas = np.array(deltas) / self.iters
# Discard the first 5 results from "warming up" the session.
deltas = deltas[5:]
median = np.median(deltas)
mean = np.mean(deltas)
min_val = np.min(deltas)
max_val = np.max(deltas)
extras = {
"iters_per_second": 1 / median,
"median": median,
"mean": mean,
"min": min_val,
"max": max_val,
"num_reps": self.num_reps - 5,
}
self.report_benchmark(wall_time=median, iters=self.iters, extras=extras)
def run_benchmark_in_eager(self, dataset):
deltas = []
for _ in range(self.num_reps):
iterator = iter(dataset)
deltas.append(timeit.timeit(lambda: next(iterator), number=self.iters)) # pylint: disable=cell-var-from-loop
self.report(deltas)
def run_benchmark_with_session_run(self, dataset, make_callable=False):
iterator = dataset_ops.make_initializable_iterator(dataset)
next_element = iterator.get_next()
with session.Session() as sess:
deltas = []
for _ in range(self.num_reps):
if make_callable:
get_next_element = sess.make_callable(next_element)
else:
# Note: session.run(next_element.op) is more performant than
# session.run(next_element) because we avoid the cost of copying the
# tensor from C++ to python.
get_next_element = lambda: sess.run(next_element.op)
sess.run(iterator.initializer)
deltas.append(timeit.timeit(get_next_element, number=self.iters))
self.report(deltas)
def run_benchmark_with_only_cpp_iterations(self, dataset):
"""Benchmarks the dataset with the iterations performed in C++."""
# NOTE: We use `dataset.skip()` to perform the iterations in C++, avoiding
# the overhead of multiple `session.run()` calls. Note that this relies on
# the underlying implementation of `skip`: if it is optimized in the future,
# we will have to change this code.
dataset = dataset.skip(self.iters - 1)
iterator = dataset_ops.make_initializable_iterator(dataset)
next_element = iterator.get_next()
with session.Session() as sess:
deltas = []
for _ in range(self.num_reps):
sess.run(iterator.initializer)
deltas.append(
timeit.timeit(lambda: sess.run(next_element.op), number=1))
self.report(deltas)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/benchmarks/meta_benchmark.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bechmarks for `tf.data.Dataset.map()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.benchmarks import benchmark_base
from tensorflow.python.data.ops import dataset_ops
# TODO(b/119837791): Add eager benchmarks.
class MapBenchmark(benchmark_base.DatasetBenchmarkBase):
"""Benchmarks for `tf.data.Dataset.map()`."""
def benchmark_chain_of_maps(self):
def benchmark_helper(chain_length, map_fn, use_inter_op_parallelism, label):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(None)
for _ in range(chain_length):
dataset = dataset_ops.MapDataset(
dataset, map_fn, use_inter_op_parallelism=use_inter_op_parallelism)
self.run_and_report_benchmark(
dataset,
num_elements=10000,
name="chain_length_%d%s" % (chain_length, label))
chain_lengths = [0, 1, 2, 5, 10, 20, 50]
for chain_length in chain_lengths:
benchmark_helper(chain_length, lambda x: x + 1, True, "")
benchmark_helper(chain_length, lambda x: x + 1, False, "_single_threaded")
benchmark_helper(chain_length, lambda x: x, True, "_short_circuit")
def benchmark_map_fan_out(self):
fan_outs = [1, 2, 5, 10, 20, 50, 100]
def benchmark_helper(fan_out, map_fn, use_inter_op_parallelism, label):
dataset = dataset_ops.Dataset.from_tensors(
tuple(0 for _ in range(fan_out))).repeat(None)
dataset = dataset_ops.MapDataset(
dataset, map_fn, use_inter_op_parallelism=use_inter_op_parallelism)
self.run_and_report_benchmark(
dataset,
num_elements=10000,
name="fan_out_%d%s" % (fan_out, label))
for fan_out in fan_outs:
benchmark_helper(fan_out, lambda *xs: [x + 1 for x in xs], True, "")
benchmark_helper(fan_out, lambda *xs: [x + 1 for x in xs], False,
"_single_threaded")
benchmark_helper(fan_out, lambda *xs: xs, True, "_short_circuit")
if __name__ == "__main__":
benchmark_base.test.main()
|
tensorflow-master
|
tensorflow/python/data/benchmarks/map_benchmark.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.Dataset.range()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.benchmarks import benchmark_base
from tensorflow.python.data.ops import dataset_ops
class RangeBenchmark(benchmark_base.DatasetBenchmarkBase):
"""Benchmarks for `tf.data.Dataset.range()`."""
def benchmark_range(self):
for modeling_enabled in [False, True]:
num_elements = 10000000 if modeling_enabled else 50000000
options = dataset_ops.Options()
options.experimental_optimization.autotune = modeling_enabled
dataset = dataset_ops.Dataset.range(num_elements)
dataset = dataset.with_options(options)
self.run_and_report_benchmark(
dataset,
num_elements=num_elements,
name="modeling_%s" % ("on" if modeling_enabled else "off"))
if __name__ == "__main__":
benchmark_base.test.main()
|
tensorflow-master
|
tensorflow/python/data/benchmarks/range_benchmark.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.Dataset.from_tensor_slices()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.benchmarks import benchmark_base
from tensorflow.python.data.ops import dataset_ops
# TODO(b/119837791): Add eager benchmarks.
class FromTensorSlicesBenchmark(benchmark_base.DatasetBenchmarkBase):
"""Benchmarks for `tf.data.Dataset.from_tensor_slices()`."""
def benchmark_slice_repeat_batch(self):
input_size = 10000
batch_size = 100
num_epochs = 100
num_elements = input_size * num_epochs // batch_size
input_data = np.random.randn(input_size)
dataset = (
dataset_ops.Dataset.from_tensor_slices(input_data).repeat(
num_epochs).batch(batch_size))
self.run_and_report_benchmark(
dataset,
num_elements=num_elements,
name="slice_repeat_batch_input_%d_batch_%d" % (input_size, batch_size))
def benchmark_reshape_slice_repeat(self):
input_size = 10000
reshape_dim = [100, 100]
num_epochs = 100
num_elements = num_epochs * reshape_dim[0]
input_data = np.random.randn(input_size)
dataset = (
dataset_ops.Dataset.from_tensor_slices(
input_data.reshape(*reshape_dim)).repeat(num_epochs))
self.run_and_report_benchmark(
dataset,
num_elements=num_elements,
name="reshape_slice_repeat_input_%d" % input_size,
)
def benchmark_slice_batch_cache_repeat(self):
input_size = 10000
batch_size = 100
num_epochs = 100
num_elements = input_size * num_epochs // batch_size
input_data = np.random.randn(input_size)
dataset = (
dataset_ops.Dataset.from_tensor_slices(input_data).batch(
batch_size).cache().repeat(num_epochs))
self.run_and_report_benchmark(
dataset,
num_elements=num_elements,
name="slice_batch_cache_repeat_input_%d_batch_%d" % (input_size,
batch_size))
if __name__ == "__main__":
benchmark_base.test.main()
|
tensorflow-master
|
tensorflow/python/data/benchmarks/from_tensor_slices_benchmark.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for reader Datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compat import compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import convert
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.util.tf_export import tf_export
# TODO(b/64974358): Increase default buffer size to 256 MB.
_DEFAULT_READER_BUFFER_SIZE_BYTES = 256 * 1024 # 256 KB
def _create_or_validate_filenames_dataset(filenames):
"""Creates (or validates) a dataset of filenames.
Args:
filenames: Either a list or dataset of filenames. If it is a list, it is
converted to a dataset. If it is a dataset, its type and shape are validated.
Returns:
A dataset of filenames.
"""
if isinstance(filenames, dataset_ops.DatasetV2):
if dataset_ops.get_legacy_output_types(filenames) != dtypes.string:
raise TypeError(
"`filenames` must be a `tf.data.Dataset` of `tf.string` elements.")
if not dataset_ops.get_legacy_output_shapes(filenames).is_compatible_with(
tensor_shape.scalar()):
raise TypeError(
"`filenames` must be a `tf.data.Dataset` of scalar `tf.string` "
"elements.")
else:
filenames = ops.convert_to_tensor(filenames, dtype=dtypes.string)
filenames = array_ops.reshape(filenames, [-1], name="flat_filenames")
filenames = dataset_ops.DatasetV2.from_tensor_slices(filenames)
return filenames
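# Illustrative sketch (an assumption, not part of the original module): both a
# Python list of strings and a `tf.data.Dataset` of scalar `tf.string`
# elements are accepted and normalized to a dataset of filenames. The helper
# and filenames below are hypothetical.
def _example_filenames_normalization():
  from_list = _create_or_validate_filenames_dataset(["a.txt", "b.txt"])
  from_dataset = _create_or_validate_filenames_dataset(
      dataset_ops.DatasetV2.from_tensor_slices(["a.txt", "b.txt"]))
  return from_list, from_dataset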
def _create_dataset_reader(dataset_creator, filenames, num_parallel_reads=None):
"""Creates a dataset that reads the given files using the given reader.
Args:
dataset_creator: A function that takes in a single file name and returns a
dataset.
filenames: A `tf.data.Dataset` containing one or more filenames.
num_parallel_reads: (Optional.) The number of files to read in parallel. If
`None`, files are read sequentially.
Returns:
A `Dataset` that reads data from `filenames`.
"""
def read_one_file(filename):
filename = ops.convert_to_tensor(filename, dtypes.string, name="filename")
return dataset_creator(filename)
if num_parallel_reads is None:
return filenames.flat_map(read_one_file)
else:
return ParallelInterleaveDataset(
filenames, read_one_file, cycle_length=num_parallel_reads,
block_length=1, sloppy=False, buffer_output_elements=None,
prefetch_input_elements=None)
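# Usage sketch (an assumption, not part of the original module): with
# `num_parallel_reads=None` the files are read sequentially via `flat_map`;
# otherwise a `ParallelInterleaveDataset` reads that many files concurrently.
# The helper below is hypothetical.
def _example_dataset_reader(filenames_dataset):
  sequential = _create_dataset_reader(_TextLineDataset, filenames_dataset)
  parallel = _create_dataset_reader(
      _TextLineDataset, filenames_dataset, num_parallel_reads=4)
  return sequential, parallel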
class _TextLineDataset(dataset_ops.DatasetSource):
"""A `Dataset` comprising records from one or more text files."""
def __init__(self, filenames, compression_type=None, buffer_size=None):
"""Creates a `TextLineDataset`.
Args:
filenames: A `tf.string` tensor containing one or more filenames.
compression_type: (Optional.) A `tf.string` scalar evaluating to one of
`""` (no compression), `"ZLIB"`, or `"GZIP"`.
buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes
to buffer. A value of 0 results in the default buffering values chosen
based on the compression type.
"""
self._filenames = filenames
self._compression_type = convert.optional_param_to_tensor(
"compression_type",
compression_type,
argument_default="",
argument_dtype=dtypes.string)
self._buffer_size = convert.optional_param_to_tensor(
"buffer_size",
buffer_size,
argument_default=_DEFAULT_READER_BUFFER_SIZE_BYTES)
variant_tensor = gen_dataset_ops.text_line_dataset(
self._filenames, self._compression_type, self._buffer_size)
super(_TextLineDataset, self).__init__(variant_tensor)
@property
def _element_structure(self):
return structure.TensorStructure(dtypes.string, [])
@tf_export("data.TextLineDataset", v1=[])
class TextLineDatasetV2(dataset_ops.DatasetSource):
"""A `Dataset` comprising lines from one or more text files."""
def __init__(self, filenames, compression_type=None, buffer_size=None,
num_parallel_reads=None):
"""Creates a `TextLineDataset`.
Args:
filenames: A `tf.string` tensor or `tf.data.Dataset` containing one or
more filenames.
compression_type: (Optional.) A `tf.string` scalar evaluating to one of
`""` (no compression), `"ZLIB"`, or `"GZIP"`.
buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes
to buffer. A value of 0 results in the default buffering values chosen
based on the compression type.
num_parallel_reads: (Optional.) A `tf.int64` scalar representing the
number of files to read in parallel. If greater than one, the records of
files read in parallel are outputted in an interleaved order. If your
input pipeline is I/O bottlenecked, consider setting this parameter to a
value greater than one to parallelize the I/O. If `None`, files will be
read sequentially.
"""
filenames = _create_or_validate_filenames_dataset(filenames)
self._filenames = filenames
self._compression_type = compression_type
self._buffer_size = buffer_size
def creator_fn(filename):
return _TextLineDataset(filename, compression_type, buffer_size)
self._impl = _create_dataset_reader(creator_fn, filenames,
num_parallel_reads)
variant_tensor = self._impl._variant_tensor # pylint: disable=protected-access
super(TextLineDatasetV2, self).__init__(variant_tensor)
@property
def _element_structure(self):
return structure.TensorStructure(dtypes.string, [])
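# Usage sketch (an assumption, not part of the original module): reading
# gzip-compressed text files with several parallel reads. The helper and
# filenames below are hypothetical.
def _example_text_line_dataset():
  return TextLineDatasetV2(
      ["a.txt.gz", "b.txt.gz"], compression_type="GZIP", num_parallel_reads=4)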
@tf_export(v1=["data.TextLineDataset"])
class TextLineDatasetV1(dataset_ops.DatasetV1Adapter):
"""A `Dataset` comprising lines from one or more text files."""
def __init__(self, filenames, compression_type=None, buffer_size=None,
num_parallel_reads=None):
wrapped = TextLineDatasetV2(filenames, compression_type, buffer_size,
num_parallel_reads)
super(TextLineDatasetV1, self).__init__(wrapped)
__init__.__doc__ = TextLineDatasetV2.__init__.__doc__
@property
def _filenames(self):
return self._dataset._filenames # pylint: disable=protected-access
@_filenames.setter
def _filenames(self, value):
self._dataset._filenames = value # pylint: disable=protected-access
class _TFRecordDataset(dataset_ops.DatasetSource):
"""A `Dataset` comprising records from one or more TFRecord files."""
def __init__(self, filenames, compression_type=None, buffer_size=None):
"""Creates a `TFRecordDataset`.
Args:
filenames: A `tf.string` tensor containing one or more filenames.
compression_type: (Optional.) A `tf.string` scalar evaluating to one of
`""` (no compression), `"ZLIB"`, or `"GZIP"`.
buffer_size: (Optional.) A `tf.int64` scalar representing the number of
bytes in the read buffer. 0 means no buffering.
"""
self._filenames = filenames
self._compression_type = convert.optional_param_to_tensor(
"compression_type",
compression_type,
argument_default="",
argument_dtype=dtypes.string)
self._buffer_size = convert.optional_param_to_tensor(
"buffer_size",
buffer_size,
argument_default=_DEFAULT_READER_BUFFER_SIZE_BYTES)
variant_tensor = gen_dataset_ops.tf_record_dataset(
self._filenames, self._compression_type, self._buffer_size)
super(_TFRecordDataset, self).__init__(variant_tensor)
@property
def _element_structure(self):
return structure.TensorStructure(dtypes.string, [])
class ParallelInterleaveDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that maps a function over its input and flattens the result."""
def __init__(self, input_dataset, map_func, cycle_length, block_length,
sloppy, buffer_output_elements, prefetch_input_elements):
"""See `tf.data.experimental.parallel_interleave()` for details."""
self._input_dataset = input_dataset
self._map_func = dataset_ops.StructuredFunctionWrapper(
map_func, self._transformation_name(), dataset=input_dataset)
if not isinstance(self._map_func.output_structure,
dataset_ops.DatasetStructure):
raise TypeError("`map_func` must return a `Dataset` object.")
self._structure = self._map_func.output_structure._element_structure # pylint: disable=protected-access
self._cycle_length = ops.convert_to_tensor(
cycle_length, dtype=dtypes.int64, name="cycle_length")
self._block_length = ops.convert_to_tensor(
block_length, dtype=dtypes.int64, name="block_length")
self._sloppy = ops.convert_to_tensor(
sloppy, dtype=dtypes.bool, name="sloppy")
self._buffer_output_elements = convert.optional_param_to_tensor(
"buffer_output_elements",
buffer_output_elements,
argument_default=2 * block_length)
self._prefetch_input_elements = convert.optional_param_to_tensor(
"prefetch_input_elements",
prefetch_input_elements,
argument_default=2 * cycle_length)
variant_tensor = ged_ops.experimental_parallel_interleave_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs,
self._cycle_length,
self._block_length,
self._sloppy,
self._buffer_output_elements,
self._prefetch_input_elements,
f=self._map_func.function,
**dataset_ops.flat_structure(self))
super(ParallelInterleaveDataset, self).__init__(input_dataset,
variant_tensor)
def _functions(self):
return [self._map_func]
@property
def _element_structure(self):
return self._structure
def _transformation_name(self):
return "tf.data.experimental.parallel_interleave()"
@tf_export("data.TFRecordDataset", v1=[])
class TFRecordDatasetV2(dataset_ops.DatasetV2):
"""A `Dataset` comprising records from one or more TFRecord files."""
def __init__(self, filenames, compression_type=None, buffer_size=None,
num_parallel_reads=None):
"""Creates a `TFRecordDataset` to read one or more TFRecord files.
Args:
filenames: A `tf.string` tensor or `tf.data.Dataset` containing one or
more filenames.
compression_type: (Optional.) A `tf.string` scalar evaluating to one of
`""` (no compression), `"ZLIB"`, or `"GZIP"`.
buffer_size: (Optional.) A `tf.int64` scalar representing the number of
bytes in the read buffer. If your input pipeline is I/O bottlenecked,
consider setting this parameter to a value of 1-100 MB. If `None`, a
sensible default for both local and remote file systems is used.
num_parallel_reads: (Optional.) A `tf.int64` scalar representing the
number of files to read in parallel. If greater than one, the records of
files read in parallel are outputted in an interleaved order. If your
input pipeline is I/O bottlenecked, consider setting this parameter to a
value greater than one to parallelize the I/O. If `None`, files will be
read sequentially.
Raises:
TypeError: If any argument does not have the expected type.
ValueError: If any argument does not have the expected shape.
"""
filenames = _create_or_validate_filenames_dataset(filenames)
self._filenames = filenames
self._compression_type = compression_type
self._buffer_size = buffer_size
self._num_parallel_reads = num_parallel_reads
def creator_fn(filename):
return _TFRecordDataset(filename, compression_type, buffer_size)
self._impl = _create_dataset_reader(creator_fn, filenames,
num_parallel_reads)
variant_tensor = self._impl._variant_tensor # pylint: disable=protected-access
super(TFRecordDatasetV2, self).__init__(variant_tensor)
def _clone(self,
filenames=None,
compression_type=None,
buffer_size=None,
num_parallel_reads=None):
return TFRecordDatasetV2(filenames or self._filenames,
compression_type or self._compression_type,
buffer_size or self._buffer_size,
num_parallel_reads or self._num_parallel_reads)
def _inputs(self):
return self._impl._inputs() # pylint: disable=protected-access
@property
def _element_structure(self):
return structure.TensorStructure(dtypes.string, [])
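# Usage sketch (an assumption, not part of the original module): a typical
# TFRecord pipeline with a larger read buffer and parallel reads for
# I/O-bound input. The helper and filenames below are hypothetical.
def _example_tf_record_dataset():
  return TFRecordDatasetV2(
      ["shard-00000.tfrecord", "shard-00001.tfrecord"],
      buffer_size=8 * 1024 * 1024,
      num_parallel_reads=2)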
@tf_export(v1=["data.TFRecordDataset"])
class TFRecordDatasetV1(dataset_ops.DatasetV1Adapter):
"""A `Dataset` comprising records from one or more TFRecord files."""
def __init__(self, filenames, compression_type=None, buffer_size=None,
num_parallel_reads=None):
wrapped = TFRecordDatasetV2(
filenames, compression_type, buffer_size, num_parallel_reads)
super(TFRecordDatasetV1, self).__init__(wrapped)
__init__.__doc__ = TFRecordDatasetV2.__init__.__doc__
def _clone(self,
filenames=None,
compression_type=None,
buffer_size=None,
num_parallel_reads=None):
# pylint: disable=protected-access
return TFRecordDatasetV1(
filenames or self._dataset._filenames,
compression_type or self._dataset._compression_type,
buffer_size or self._dataset._buffer_size,
num_parallel_reads or self._dataset._num_parallel_reads)
@property
def _filenames(self):
return self._dataset._filenames # pylint: disable=protected-access
@_filenames.setter
def _filenames(self, value):
self._dataset._filenames = value # pylint: disable=protected-access
class _FixedLengthRecordDataset(dataset_ops.DatasetSource):
"""A `Dataset` of fixed-length records from one or more binary files."""
def __init__(self,
filenames,
record_bytes,
header_bytes=None,
footer_bytes=None,
buffer_size=None,
compression_type=None):
"""Creates a `FixedLengthRecordDataset`.
Args:
filenames: A `tf.string` tensor containing one or more filenames.
record_bytes: A `tf.int64` scalar representing the number of bytes in
each record.
header_bytes: (Optional.) A `tf.int64` scalar representing the number of
bytes to skip at the start of a file.
footer_bytes: (Optional.) A `tf.int64` scalar representing the number of
bytes to ignore at the end of a file.
buffer_size: (Optional.) A `tf.int64` scalar representing the number of
bytes to buffer when reading.
compression_type: (Optional.) A `tf.string` scalar evaluating to one of
`""` (no compression), `"ZLIB"`, or `"GZIP"`.
"""
self._filenames = filenames
self._record_bytes = ops.convert_to_tensor(
record_bytes, dtype=dtypes.int64, name="record_bytes")
self._header_bytes = convert.optional_param_to_tensor(
"header_bytes", header_bytes)
self._footer_bytes = convert.optional_param_to_tensor(
"footer_bytes", footer_bytes)
self._buffer_size = convert.optional_param_to_tensor(
"buffer_size", buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)
self._compression_type = convert.optional_param_to_tensor(
"compression_type",
compression_type,
argument_default="",
argument_dtype=dtypes.string)
if (self._compression_type is not None or
compat.forward_compatible(2018, 11, 30)):
variant_tensor = gen_dataset_ops.fixed_length_record_dataset_v2(
self._filenames, self._header_bytes, self._record_bytes,
self._footer_bytes, self._buffer_size, self._compression_type)
else:
variant_tensor = gen_dataset_ops.fixed_length_record_dataset(
self._filenames, self._header_bytes, self._record_bytes,
self._footer_bytes, self._buffer_size)
super(_FixedLengthRecordDataset, self).__init__(variant_tensor)
@property
def _element_structure(self):
return structure.TensorStructure(dtypes.string, [])
@tf_export("data.FixedLengthRecordDataset", v1=[])
class FixedLengthRecordDatasetV2(dataset_ops.DatasetSource):
"""A `Dataset` of fixed-length records from one or more binary files."""
def __init__(self,
filenames,
record_bytes,
header_bytes=None,
footer_bytes=None,
buffer_size=None,
compression_type=None,
num_parallel_reads=None):
"""Creates a `FixedLengthRecordDataset`.
Args:
filenames: A `tf.string` tensor or `tf.data.Dataset` containing one or
more filenames.
record_bytes: A `tf.int64` scalar representing the number of bytes in
each record.
header_bytes: (Optional.) A `tf.int64` scalar representing the number of
bytes to skip at the start of a file.
footer_bytes: (Optional.) A `tf.int64` scalar representing the number of
bytes to ignore at the end of a file.
buffer_size: (Optional.) A `tf.int64` scalar representing the number of
bytes to buffer when reading.
compression_type: (Optional.) A `tf.string` scalar evaluating to one of
`""` (no compression), `"ZLIB"`, or `"GZIP"`.
num_parallel_reads: (Optional.) A `tf.int64` scalar representing the
number of files to read in parallel. If greater than one, the records of
files read in parallel are outputted in an interleaved order. If your
input pipeline is I/O bottlenecked, consider setting this parameter to a
value greater than one to parallelize the I/O. If `None`, files will be
read sequentially.
"""
filenames = _create_or_validate_filenames_dataset(filenames)
self._filenames = filenames
self._record_bytes = record_bytes
self._header_bytes = header_bytes
self._footer_bytes = footer_bytes
self._buffer_size = buffer_size
self._compression_type = compression_type
def creator_fn(filename):
return _FixedLengthRecordDataset(filename, record_bytes, header_bytes,
footer_bytes, buffer_size,
compression_type)
self._impl = _create_dataset_reader(creator_fn, filenames,
num_parallel_reads)
variant_tensor = self._impl._variant_tensor # pylint: disable=protected-access
super(FixedLengthRecordDatasetV2, self).__init__(variant_tensor)
@property
def _element_structure(self):
return structure.TensorStructure(dtypes.string, [])
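# Usage sketch (an assumption, not part of the original module): fixed-length
# records of one label byte followed by a 32x32x3 image, as in CIFAR-10-style
# binaries. The helper and filename below are hypothetical.
def _example_fixed_length_record_dataset():
  return FixedLengthRecordDatasetV2(
      ["data_batch_1.bin"], record_bytes=1 + 32 * 32 * 3)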
@tf_export(v1=["data.FixedLengthRecordDataset"])
class FixedLengthRecordDatasetV1(dataset_ops.DatasetV1Adapter):
"""A `Dataset` of fixed-length records from one or more binary files."""
def __init__(self,
filenames,
record_bytes,
header_bytes=None,
footer_bytes=None,
buffer_size=None,
compression_type=None,
num_parallel_reads=None):
wrapped = FixedLengthRecordDatasetV2(
filenames, record_bytes, header_bytes, footer_bytes, buffer_size,
compression_type, num_parallel_reads)
super(FixedLengthRecordDatasetV1, self).__init__(wrapped)
__init__.__doc__ = FixedLengthRecordDatasetV2.__init__.__doc__
@property
def _filenames(self):
return self._dataset._filenames # pylint: disable=protected-access
@_filenames.setter
def _filenames(self, value):
self._dataset._filenames = value # pylint: disable=protected-access
# TODO(b/119044825): Until all `tf.data` unit tests are converted to V2, keep
# these aliases in place.
FixedLengthRecordDataset = FixedLengthRecordDatasetV1
TFRecordDataset = TFRecordDatasetV1
TextLineDataset = TextLineDatasetV1
|
tensorflow-master
|
tensorflow/python/data/ops/readers.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for Iterators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import warnings
from tensorflow.python.compat import compat
from tensorflow.python.data.ops import optional_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure as structure_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.training.saver import BaseSaverBuilder
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# NOTE(mrry): It is legitimate to call `Iterator.get_next()` multiple
# times, e.g. when you are distributing different elements to multiple
# devices in a single step. However, a common pitfall arises when
# users call `Iterator.get_next()` in each iteration of their training
# loop. `Iterator.get_next()` adds ops to the graph, and executing
# each op allocates resources (including threads); as a consequence,
# invoking it in every iteration of a training loop causes slowdown
# and eventual resource exhaustion. To guard against this outcome, we
# log a warning when the number of uses crosses a threshold of suspicion.
GET_NEXT_CALL_WARNING_THRESHOLD = 32
GET_NEXT_CALL_WARNING_MESSAGE = (
"An unusually high number of `Iterator.get_next()` calls was detected. "
"This often indicates that `Iterator.get_next()` is being called inside "
"a training loop, which will cause gradual slowdown and eventual resource "
"exhaustion. If this is the case, restructure your code to call "
"`next_element = iterator.get_next()` once outside the loop, and use "
"`next_element` as the input to some computation that is invoked inside "
"the loop.")
# Collection of all IteratorResources in the `Graph`.
GLOBAL_ITERATORS = "iterators"
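# Illustrative sketch of the pattern recommended above (an assumption, not
# part of the original module): create the `get_next()` op once and run it
# repeatedly, instead of calling `get_next()` inside the loop.
def _example_get_next_usage(sess, iterator, num_steps):
  next_element = iterator.get_next()
  for _ in range(num_steps):
    sess.run(next_element)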
def _device_stack_is_empty():
# pylint: disable=protected-access
device_stack = ops.get_default_graph()._device_functions_outer_to_inner
# pylint: enable=protected-access
return not bool(device_stack)
@tf_export(v1=["data.Iterator"])
class Iterator(trackable.Trackable):
"""Represents the state of iterating through a `Dataset`."""
def __init__(self, iterator_resource, initializer, output_types,
output_shapes, output_classes):
"""Creates a new iterator from the given iterator resource.
Note: Most users will not call this initializer directly, and will
instead use `Dataset.make_initializable_iterator()` or
`Dataset.make_one_shot_iterator()`.
Args:
iterator_resource: A `tf.resource` scalar `tf.Tensor` representing the
iterator.
initializer: A `tf.Operation` that should be run to initialize this
iterator.
output_types: A nested structure of `tf.DType` objects corresponding to
each component of an element of this iterator.
output_shapes: A nested structure of `tf.TensorShape` objects
corresponding to each component of an element of this iterator.
output_classes: A nested structure of Python `type` objects corresponding
to each component of an element of this iterator.
"""
self._iterator_resource = iterator_resource
self._initializer = initializer
if (output_types is None or output_shapes is None
or output_classes is None):
raise ValueError("If `structure` is not specified, all of "
"`output_types`, `output_shapes`, and `output_classes`"
" must be specified.")
self._structure = structure_lib.convert_legacy_structure(
output_types, output_shapes, output_classes)
self._string_handle = gen_dataset_ops.iterator_to_string_handle(
self._iterator_resource)
self._get_next_call_count = 0
ops.add_to_collection(GLOBAL_ITERATORS, self._iterator_resource)
@staticmethod
def from_structure(output_types,
output_shapes=None,
shared_name=None,
output_classes=None):
"""Creates a new, uninitialized `Iterator` with the given structure.
This iterator-constructing method can be used to create an iterator that
is reusable with many different datasets.
The returned iterator is not bound to a particular dataset, and it has
no `initializer`. To initialize the iterator, run the operation returned by
`Iterator.make_initializer(dataset)`.
The following is an example:
```python
iterator = Iterator.from_structure(tf.int64, tf.TensorShape([]))
dataset_range = Dataset.range(10)
range_initializer = iterator.make_initializer(dataset_range)
dataset_evens = dataset_range.filter(lambda x: x % 2 == 0)
evens_initializer = iterator.make_initializer(dataset_evens)
# Define a model based on the iterator; in this example, the model_fn
# is expected to take scalar tf.int64 Tensors as input (see
# the definition of 'iterator' above).
prediction, loss = model_fn(iterator.get_next())
# Train for `num_epochs`, where for each epoch, we first iterate over
# dataset_range, and then iterate over dataset_evens.
for _ in range(num_epochs):
# Initialize the iterator to `dataset_range`
sess.run(range_initializer)
while True:
try:
pred, loss_val = sess.run([prediction, loss])
except tf.errors.OutOfRangeError:
break
# Initialize the iterator to `dataset_evens`
sess.run(evens_initializer)
while True:
try:
pred, loss_val = sess.run([prediction, loss])
except tf.errors.OutOfRangeError:
break
```
Args:
output_types: A nested structure of `tf.DType` objects corresponding to
each component of an element of this dataset.
output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects
corresponding to each component of an element of this dataset. If
omitted, each component will have an unconstrained shape.
shared_name: (Optional.) If non-empty, this iterator will be shared under
the given name across multiple sessions that share the same devices
(e.g. when using a remote server).
output_classes: (Optional.) A nested structure of Python `type` objects
corresponding to each component of an element of this iterator. If
omitted, each component is assumed to be of type `tf.Tensor`.
Returns:
An `Iterator`.
Raises:
TypeError: If the structures of `output_shapes` and `output_types` are
not the same.
"""
output_types = nest.map_structure(dtypes.as_dtype, output_types)
if output_shapes is None:
output_shapes = nest.map_structure(
lambda _: tensor_shape.TensorShape(None), output_types)
else:
output_shapes = nest.map_structure_up_to(
output_types, tensor_shape.as_shape, output_shapes)
if output_classes is None:
output_classes = nest.map_structure(lambda _: ops.Tensor, output_types)
nest.assert_same_structure(output_types, output_shapes)
output_structure = structure_lib.convert_legacy_structure(
output_types, output_shapes, output_classes)
if shared_name is None:
shared_name = ""
# pylint: disable=protected-access
if compat.forward_compatible(2018, 8, 3):
if _device_stack_is_empty():
with ops.device("/cpu:0"):
iterator_resource = gen_dataset_ops.iterator_v2(
container="",
shared_name=shared_name,
output_types=output_structure._flat_types,
output_shapes=output_structure._flat_shapes)
else:
iterator_resource = gen_dataset_ops.iterator_v2(
container="",
shared_name=shared_name,
output_types=output_structure._flat_types,
output_shapes=output_structure._flat_shapes)
else:
iterator_resource = gen_dataset_ops.iterator(
container="",
shared_name=shared_name,
output_types=output_structure._flat_types,
output_shapes=output_structure._flat_shapes)
# pylint: enable=protected-access
return Iterator(iterator_resource, None, output_types, output_shapes,
output_classes)
@staticmethod
def from_string_handle(string_handle,
output_types,
output_shapes=None,
output_classes=None):
"""Creates a new, uninitialized `Iterator` based on the given handle.
This method allows you to define a "feedable" iterator where you can choose
between concrete iterators by feeding a value in a `tf.Session.run` call.
In that case, `string_handle` would be a `tf.compat.v1.placeholder`, and you
would feed it with the value of `tf.data.Iterator.string_handle` in each step.
For example, if you had two iterators that marked the current position in
a training dataset and a test dataset, you could choose which to use in
each step as follows:
```python
train_iterator = tf.data.Dataset(...).make_one_shot_iterator()
train_iterator_handle = sess.run(train_iterator.string_handle())
test_iterator = tf.data.Dataset(...).make_one_shot_iterator()
test_iterator_handle = sess.run(test_iterator.string_handle())
handle = tf.compat.v1.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(
handle, train_iterator.output_types)
next_element = iterator.get_next()
loss = f(next_element)
train_loss = sess.run(loss, feed_dict={handle: train_iterator_handle})
test_loss = sess.run(loss, feed_dict={handle: test_iterator_handle})
```
Args:
string_handle: A scalar `tf.Tensor` of type `tf.string` that evaluates to
a handle produced by the `Iterator.string_handle()` method.
output_types: A nested structure of `tf.DType` objects corresponding to
each component of an element of this dataset.
output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects
corresponding to each component of an element of this dataset. If
omitted, each component will have an unconstrained shape.
output_classes: (Optional.) A nested structure of Python `type` objects
corresponding to each component of an element of this iterator. If
omitted, each component is assumed to be of type `tf.Tensor`.
Returns:
An `Iterator`.
"""
output_types = nest.map_structure(dtypes.as_dtype, output_types)
if output_shapes is None:
output_shapes = nest.map_structure(
lambda _: tensor_shape.TensorShape(None), output_types)
else:
output_shapes = nest.map_structure_up_to(
output_types, tensor_shape.as_shape, output_shapes)
if output_classes is None:
output_classes = nest.map_structure(lambda _: ops.Tensor, output_types)
nest.assert_same_structure(output_types, output_shapes)
output_structure = structure_lib.convert_legacy_structure(
output_types, output_shapes, output_classes)
string_handle = ops.convert_to_tensor(string_handle, dtype=dtypes.string)
# pylint: disable=protected-access
if compat.forward_compatible(2018, 8, 3):
if _device_stack_is_empty():
with ops.device("/cpu:0"):
iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2(
string_handle,
output_types=output_structure._flat_types,
output_shapes=output_structure._flat_shapes)
else:
iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2(
string_handle,
output_types=output_structure._flat_types,
output_shapes=output_structure._flat_shapes)
else:
iterator_resource = gen_dataset_ops.iterator_from_string_handle(
string_handle,
output_types=output_structure._flat_types,
output_shapes=output_structure._flat_shapes)
# pylint: enable=protected-access
return Iterator(iterator_resource, None, output_types, output_shapes,
output_classes)
@property
def initializer(self):
"""A `tf.Operation` that should be run to initialize this iterator.
Returns:
A `tf.Operation` that should be run to initialize this iterator.
Raises:
ValueError: If this iterator initializes itself automatically.
"""
if self._initializer is not None:
return self._initializer
else:
# TODO(mrry): Consider whether one-shot iterators should have
# initializers that simply reset their state to the beginning.
raise ValueError("Iterator does not have an initializer.")
def make_initializer(self, dataset, name=None):
"""Returns a `tf.Operation` that initializes this iterator on `dataset`.
Args:
dataset: A `Dataset` with compatible structure to this iterator.
name: (Optional.) A name for the created operation.
Returns:
A `tf.Operation` that can be run to initialize this iterator on the given
`dataset`.
Raises:
TypeError: If `dataset` and this iterator do not have a compatible
element structure.
"""
with ops.name_scope(name, "make_initializer") as name:
# pylint: disable=protected-access
# NOTE(mrry): Cannot depend on `dataset_ops.get_legacy_output*()` here,
# because that would create a circular dependency.
dataset_output_types = (
dataset._element_structure._to_legacy_output_types())
dataset_output_shapes = (
dataset._element_structure._to_legacy_output_shapes())
dataset_output_classes = (
dataset._element_structure._to_legacy_output_classes())
# pylint: enable=protected-access
nest.assert_same_structure(self.output_types, dataset_output_types)
nest.assert_same_structure(self.output_shapes, dataset_output_shapes)
for iterator_class, dataset_class in zip(
nest.flatten(self.output_classes),
nest.flatten(dataset_output_classes)):
if iterator_class is not dataset_class:
raise TypeError(
"Expected output classes %r but got dataset with output class %r."
% (self.output_classes, dataset_output_classes))
for iterator_dtype, dataset_dtype in zip(
nest.flatten(self.output_types), nest.flatten(dataset_output_types)):
if iterator_dtype != dataset_dtype:
raise TypeError(
"Expected output types %r but got dataset with output types %r." %
(self.output_types, dataset_output_types))
for iterator_shape, dataset_shape in zip(
nest.flatten(self.output_shapes), nest.flatten(
dataset_output_shapes)):
if not iterator_shape.is_compatible_with(dataset_shape):
raise TypeError("Expected output shapes compatible with %r but got "
"dataset with output shapes %r." %
(self.output_shapes, dataset_output_shapes))
with ops.colocate_with(self._iterator_resource):
return gen_dataset_ops.make_iterator(
dataset._variant_tensor, self._iterator_resource, name=name) # pylint: disable=protected-access
def get_next(self, name=None):
"""Returns a nested structure of `tf.Tensor`s representing the next element.
In graph mode, you should typically call this method *once* and use its
result as the input to another computation. A typical loop will then call
`tf.Session.run` on the result of that computation. The loop will terminate
when the `Iterator.get_next()` operation raises
`tf.errors.OutOfRangeError`. The following skeleton shows how to use
this method when building a training loop:
```python
dataset = ... # A `tf.data.Dataset` object.
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
# Build a TensorFlow graph that does something with each element.
loss = model_function(next_element)
optimizer = ... # A `tf.compat.v1.train.Optimizer` object.
train_op = optimizer.minimize(loss)
with tf.compat.v1.Session() as sess:
try:
while True:
sess.run(train_op)
except tf.errors.OutOfRangeError:
pass
```
NOTE: It is legitimate to call `Iterator.get_next()` multiple times, e.g.
when you are distributing different elements to multiple devices in a single
step. However, a common pitfall arises when users call `Iterator.get_next()`
in each iteration of their training loop. `Iterator.get_next()` adds ops to
the graph, and executing each op allocates resources (including threads); as
a consequence, invoking it in every iteration of a training loop causes
slowdown and eventual resource exhaustion. To guard against this outcome, we
log a warning when the number of uses crosses a fixed threshold of
suspicion.
Args:
name: (Optional.) A name for the created operation.
Returns:
A nested structure of `tf.Tensor` objects.
"""
self._get_next_call_count += 1
if self._get_next_call_count > GET_NEXT_CALL_WARNING_THRESHOLD:
warnings.warn(GET_NEXT_CALL_WARNING_MESSAGE)
# pylint: disable=protected-access
flat_ret = gen_dataset_ops.iterator_get_next(
self._iterator_resource,
output_types=self._structure._flat_types,
output_shapes=self._structure._flat_shapes, name=name)
return self._structure._from_tensor_list(flat_ret)
def string_handle(self, name=None):
"""Returns a string-valued `tf.Tensor` that represents this iterator.
Args:
name: (Optional.) A name for the created operation.
Returns:
A scalar `tf.Tensor` of type `tf.string`.
"""
if name is None:
return self._string_handle
else:
return gen_dataset_ops.iterator_to_string_handle(
self._iterator_resource, name=name)
@property
@deprecation.deprecated(
None, "Use `tf.compat.v1.data.get_output_classes(iterator)`.")
def output_classes(self):
"""Returns the class of each component of an element of this iterator.
The expected values are `tf.Tensor` and `tf.SparseTensor`.
Returns:
A nested structure of Python `type` objects corresponding to each
component of an element of this dataset.
"""
return self._structure._to_legacy_output_classes() # pylint: disable=protected-access
@property
@deprecation.deprecated(
None, "Use `tf.compat.v1.data.get_output_shapes(iterator)`.")
def output_shapes(self):
"""Returns the shape of each component of an element of this iterator.
Returns:
A nested structure of `tf.TensorShape` objects corresponding to each
component of an element of this dataset.
"""
return self._structure._to_legacy_output_shapes() # pylint: disable=protected-access
@property
@deprecation.deprecated(
None, "Use `tf.compat.v1.data.get_output_types(iterator)`.")
def output_types(self):
"""Returns the type of each component of an element of this iterator.
Returns:
A nested structure of `tf.DType` objects corresponding to each component
of an element of this dataset.
"""
return self._structure._to_legacy_output_types() # pylint: disable=protected-access
@property
def _element_structure(self):
"""The structure of an element of this iterator.
Returns:
A `Structure` object representing the structure of the components of this
iterator.
"""
return self._structure
def _gather_saveables_for_checkpoint(self):
def _saveable_factory(name):
return _IteratorSaveable(self._iterator_resource, name)
return {"ITERATOR": _saveable_factory}
_uid_counter = 0
_uid_lock = threading.Lock()
def _generate_shared_name(prefix):
with _uid_lock:
global _uid_counter
uid = _uid_counter
_uid_counter += 1
return "{}{}".format(prefix, uid)
class IteratorResourceDeleter(object):
"""An object which cleans up an iterator resource handle.
An alternative to defining a `__del__` method on an object. Even if the parent
object is part of a reference cycle, the cycle will be collectable.
"""
def __init__(self, handle, device, deleter):
self._deleter = deleter
self._handle = handle
self._device = device
self._eager_mode = context.executing_eagerly()
def __del__(self):
with ops.device(self._device):
# Make sure the resource is deleted in the same mode as it was created in.
if self._eager_mode:
with context.eager_mode():
gen_dataset_ops.delete_iterator(
handle=self._handle, deleter=self._deleter)
else:
with context.graph_mode():
gen_dataset_ops.delete_iterator(
handle=self._handle, deleter=self._deleter)
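# Design note (illustrative sketch, an assumption beyond the original text):
# the owning object keeps a reference to the deleter instead of defining
# `__del__` itself, so nothing on the owner pins it inside a reference cycle.
class _ExampleResourceOwner(object):
  """Hypothetical owner showing how `IteratorResourceDeleter` is attached."""

  def __init__(self, handle, device, deleter):
    # The deleter holds no reference back to `self`, so `self` remains
    # garbage-collectable even when it participates in a reference cycle.
    self._resource_deleter = IteratorResourceDeleter(
        handle=handle, device=device, deleter=deleter)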
class IteratorV2(trackable.Trackable):
"""An iterator producing tf.Tensor objects from a tf.data.Dataset."""
def __init__(self, dataset):
"""Creates a new iterator over the given dataset.
For example:
```python
dataset = tf.data.Dataset.range(4)
for x in Iterator(dataset):
print(x)
```
Tensors produced will be placed on the device on which this iterator object
was created.
Args:
dataset: A `tf.data.Dataset` object.
Raises:
RuntimeError: When invoked without eager execution enabled.
"""
self._device = context.context().device_name
with ops.device("/cpu:0"):
# pylint: disable=protected-access
dataset = dataset._apply_options()
ds_variant = dataset._variant_tensor
self._structure = dataset._element_structure
self._flat_output_types = self._structure._flat_types
self._flat_output_shapes = self._structure._flat_shapes
with ops.colocate_with(ds_variant):
self._iterator_resource, self._deleter = (
gen_dataset_ops.anonymous_iterator_v2(
output_types=self._flat_output_types,
output_shapes=self._flat_output_shapes))
gen_dataset_ops.make_iterator(ds_variant, self._iterator_resource)
# Delete the resource when this object is deleted
self._resource_deleter = IteratorResourceDeleter(
handle=self._iterator_resource,
device=self._device,
deleter=self._deleter)
# pylint: enable=protected-access
def __iter__(self):
return self
def __next__(self): # For Python 3 compatibility
return self.next()
def _next_internal(self):
"""Returns a nested structure of `tf.Tensor`s containing the next element.
"""
if not context.executing_eagerly():
with ops.device(self._device):
ret = gen_dataset_ops.iterator_get_next(
self._iterator_resource,
output_types=self._flat_output_types,
output_shapes=self._flat_output_shapes)
return self._structure._from_compatible_tensor_list(ret) # pylint: disable=protected-access
# This runs in sync mode as iterators use an error status to communicate
# that there is no more data to iterate over.
# TODO(b/77291417): Fix
with context.execution_mode(context.SYNC):
with ops.device(self._device):
# TODO(ashankar): Consider removing this ops.device() contextmanager
# and instead mimic ops placement in graphs: Operations on resource
# handles execute on the same device as where the resource is placed.
# NOTE(mrry): Here we use the "_sync" variant of `iterator_get_next`
# because in eager mode this code will run synchronously on the calling
# thread. Therefore we do not need to make a defensive context switch
# to a background thread, and can achieve a small constant performance
# boost by invoking the iterator synchronously.
ret = gen_dataset_ops.iterator_get_next_sync(
self._iterator_resource,
output_types=self._flat_output_types,
output_shapes=self._flat_output_shapes)
return self._structure._from_compatible_tensor_list(ret) # pylint: disable=protected-access
def next(self):
"""Returns a nested structure of `tf.Tensor`s containing the next element.
"""
try:
return self._next_internal()
except errors.OutOfRangeError:
raise StopIteration
@property
@deprecation.deprecated(
None, "Use `tf.compat.v1.data.get_output_classes(iterator)`.")
def output_classes(self):
"""Returns the class of each component of an element of this iterator.
The expected values are `tf.Tensor` and `tf.SparseTensor`.
Returns:
A nested structure of Python `type` objects corresponding to each
component of an element of this dataset.
"""
return self._structure._to_legacy_output_classes() # pylint: disable=protected-access
@property
@deprecation.deprecated(
None, "Use `tf.compat.v1.data.get_output_shapes(iterator)`.")
def output_shapes(self):
"""Returns the shape of each component of an element of this iterator.
Returns:
A nested structure of `tf.TensorShape` objects corresponding to each
component of an element of this dataset.
"""
return self._structure._to_legacy_output_shapes() # pylint: disable=protected-access
@property
@deprecation.deprecated(
None, "Use `tf.compat.v1.data.get_output_types(iterator)`.")
def output_types(self):
"""Returns the type of each component of an element of this iterator.
Returns:
A nested structure of `tf.DType` objects corresponding to each component
of an element of this dataset.
"""
return self._structure._to_legacy_output_types() # pylint: disable=protected-access
@property
def _element_structure(self):
"""The structure of an element of this iterator.
Returns:
A `Structure` object representing the structure of the components of this
iterator.
"""
return self._structure
def get_next(self, name=None):
"""Returns a nested structure of `tf.Tensor`s containing the next element.
Args:
name: (Optional.) A name for the created operation. Currently unused.
Returns:
A nested structure of `tf.Tensor` objects.
Raises:
`tf.errors.OutOfRangeError`: If the end of the dataset has been reached.
"""
del name
return self._next_internal()
def _gather_saveables_for_checkpoint(self):
def _saveable_factory(name):
return _IteratorSaveable(self._iterator_resource, name)
return {"ITERATOR": _saveable_factory}
# TODO(b/71645805): Expose trackable stateful objects from dataset
# attributes(potential).
class _IteratorSaveable(BaseSaverBuilder.SaveableObject):
"""SaveableObject for saving/restoring iterator state."""
def __init__(self, iterator_resource, name):
serialized_iterator = gen_dataset_ops.serialize_iterator(iterator_resource)
specs = [
BaseSaverBuilder.SaveSpec(serialized_iterator, "", name + "_STATE")
]
super(_IteratorSaveable, self).__init__(iterator_resource, specs, name)
def restore(self, restored_tensors, restored_shapes):
with ops.colocate_with(self.op):
return gen_dataset_ops.deserialize_iterator(self.op, restored_tensors[0])
@tf_export("data.experimental.get_next_as_optional")
def get_next_as_optional(iterator):
"""Returns an `Optional` that contains the next value from the iterator.
If `iterator` has reached the end of the sequence, the returned `Optional`
will have no value.
Args:
iterator: A `tf.compat.v1.data.Iterator` object.
Returns:
An `Optional` object representing the next value from the iterator (if it
has one) or no value.
"""
# pylint: disable=protected-access
return optional_ops._OptionalImpl(
gen_dataset_ops.iterator_get_next_as_optional(
iterator._iterator_resource,
output_types=iterator._element_structure._flat_types,
output_shapes=iterator._element_structure._flat_shapes),
iterator._element_structure)
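# --- Editor's note: a minimal, hedged usage sketch of `get_next_as_optional`,
# not part of the original file. It assumes TF 1.x graph mode and the public
# aliases of the ops defined above; the helper name is hypothetical.
def _example_get_next_as_optional():
  import tensorflow as tf
  dataset = tf.data.Dataset.range(3)
  iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
  optional = tf.data.experimental.get_next_as_optional(iterator)
  # `has_value()` is a tf.bool tensor, so guard `get_value()` with tf.cond to
  # avoid evaluating the value branch once the iterator is exhausted.
  value = tf.cond(optional.has_value(),
                  lambda: optional.get_value(),
                  lambda: tf.constant(-1, dtype=tf.int64))
  with tf.compat.v1.Session() as sess:
    print(sess.run(value))  # 0 on the first run of `value`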
|
tensorflow-master
|
tensorflow/python/data/ops/iterator_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for prefetching_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import resource_variable_ops
class _PerDeviceGenerator(dataset_ops.DatasetV2):
"""A `dummy` generator dataset."""
def __init__(self, shard_num, multi_device_iterator_resource, incarnation_id,
source_device, element_structure):
self._structure = element_structure
multi_device_iterator_string_handle = (
gen_dataset_ops.multi_device_iterator_to_string_handle(
multi_device_iterator_resource))
# TODO(b/124254153): Enable autograph once the overhead is low enough.
@function.defun(autograph=False) # Pure graph code.
def _init_func():
return multi_device_iterator_string_handle
init_func_concrete = _init_func._get_concrete_function_internal() # pylint: disable=protected-access
# TODO(b/124254153): Enable autograph once the overhead is low enough.
@function.defun(autograph=False) # Pure graph code.
def _remote_init_func():
return functional_ops.remote_call(
target=source_device,
args=init_func_concrete.captured_inputs,
Tout=[dtypes.string],
f=init_func_concrete)
self._init_func = _remote_init_func._get_concrete_function_internal() # pylint: disable=protected-access
self._init_captured_args = self._init_func.captured_inputs
# TODO(b/124254153): Enable autograph once the overhead is low enough.
@function.defun(
input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
autograph=False) # Pure graph code.
def _next_func(string_handle):
# pylint: disable=protected-access
multi_device_iterator = (
gen_dataset_ops.multi_device_iterator_from_string_handle(
string_handle=string_handle,
output_types=self._structure._flat_types,
output_shapes=self._structure._flat_shapes))
return gen_dataset_ops.multi_device_iterator_get_next_from_shard(
multi_device_iterator=multi_device_iterator,
shard_num=shard_num,
incarnation_id=incarnation_id,
output_types=self._structure._flat_types,
output_shapes=self._structure._flat_shapes)
next_func_concrete = _next_func._get_concrete_function_internal() # pylint: disable=protected-access
# TODO(b/124254153): Enable autograph once the overhead is low enough.
@function.defun_with_attributes(
input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
attributes={"experimental_ints_on_device": True},
autograph=False) # Pure graph code.
def _remote_next_func(string_handle):
return functional_ops.remote_call(
target=source_device,
args=[string_handle] + next_func_concrete.captured_inputs,
Tout=self._structure._flat_types, # pylint: disable=protected-access
f=next_func_concrete)
self._next_func = _remote_next_func._get_concrete_function_internal() # pylint: disable=protected-access
self._next_captured_args = self._next_func.captured_inputs
self._incarnation_id_index = -1
for i, arg in enumerate(self._next_captured_args):
if arg == incarnation_id:
self._incarnation_id_index = i
# TODO(b/124254153): Enable autograph once the overhead is low enough.
@function.defun(
input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
autograph=False) # Pure graph code.
def _finalize_func(unused_string_handle):
return array_ops.constant(0, dtypes.int64)
finalize_func_concrete = _finalize_func._get_concrete_function_internal() # pylint: disable=protected-access
# TODO(b/124254153): Enable autograph once the overhead is low enough.
@function.defun(
input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
autograph=False) # Pure graph code.
def _remote_finalize_func(string_handle):
return functional_ops.remote_call(
target=source_device,
args=[string_handle] + finalize_func_concrete.captured_inputs,
Tout=[dtypes.int64],
f=finalize_func_concrete)
self._finalize_func = _remote_finalize_func._get_concrete_function_internal( # pylint: disable=protected-access
)
self._finalize_captured_args = self._finalize_func.captured_inputs
variant_tensor = gen_dataset_ops.generator_dataset(
self._init_captured_args,
self._next_captured_args,
self._finalize_captured_args,
init_func=self._init_func,
next_func=self._next_func,
finalize_func=self._finalize_func,
**dataset_ops.flat_structure(self))
super(_PerDeviceGenerator, self).__init__(variant_tensor)
def _inputs(self):
# TODO(b/116506223): Determine which datasets should be used as inputs here.
return []
@property
def _element_structure(self):
return self._structure
class _ReincarnatedPerDeviceGenerator(dataset_ops.DatasetV2):
"""Creates a _PerDeviceGenerator-like dataset with a new incarnation_id.
Re-uses the functions from the provided per_device_dataset and just switches
out the function argument corresponding to the incarnation_id.
"""
def __init__(self, per_device_dataset, incarnation_id):
# pylint: disable=protected-access
self._structure = per_device_dataset._structure
self._init_func = per_device_dataset._init_func
self._init_captured_args = self._init_func.captured_inputs
self._next_func = per_device_dataset._next_func
self._next_captured_args = per_device_dataset._next_captured_args
# The captured arguments to the next_func are string_handle, incarnation_id.
# We update the incarnation id to the new one.
self._next_captured_args[
per_device_dataset._incarnation_id_index] = incarnation_id
self._finalize_func = per_device_dataset._finalize_func
self._finalize_captured_args = per_device_dataset._finalize_captured_args
variant_tensor = gen_dataset_ops.generator_dataset(
self._init_captured_args,
self._next_captured_args,
self._finalize_captured_args,
init_func=self._init_func,
next_func=self._next_func,
finalize_func=self._finalize_func,
**dataset_ops.flat_structure(self))
super(_ReincarnatedPerDeviceGenerator, self).__init__(variant_tensor)
def _inputs(self):
# TODO(b/116506223): Determine which datasets should be used as inputs here.
return []
@property
def _element_structure(self):
return self._structure
class MultiDeviceIterator(object):
"""An iterator over multiple devices."""
def __init__(self,
dataset,
devices,
max_buffer_size=1,
prefetch_buffer_size=1,
source_device="/cpu:0"):
"""Constructs a MultiDeviceIterator.
Args:
dataset: The input dataset to be iterated over.
devices: The list of devices to fetch data to.
max_buffer_size: Maximum size of the host side per device buffer to keep.
prefetch_buffer_size: If > 0, then we set up a buffer on each device to
  prefetch into.
source_device: The host device to place the `dataset` on. In order to
  prevent deadlocks, if `prefetch_buffer_size` is greater than
  `max_buffer_size`, we set `max_buffer_size` to `prefetch_buffer_size`.
"""
options = dataset_ops.Options()
options.experimental_distribute.num_devices = len(devices)
dataset = dataset.with_options(options)
self._dataset = dataset._apply_options() # pylint: disable=protected-access
self._experimental_slack = dataset.options().experimental_slack
self._devices = devices
self._source_device = source_device
self._source_device_tensor = ops.convert_to_tensor(source_device)
self._max_buffer_size = max_buffer_size
self._prefetch_buffer_size = prefetch_buffer_size
if self._prefetch_buffer_size > self._max_buffer_size:
self._max_buffer_size = self._prefetch_buffer_size
# Create the MultiDeviceIterator.
with ops.device(self._source_device):
# TODO(b/121378567): Get rid of this shared_name hack.
shared_name = ""
if context.executing_eagerly():
shared_name = context.shared_name()
self._multi_device_iterator_resource = (
gen_dataset_ops.multi_device_iterator(
devices=self._devices,
shared_name=shared_name,
container="",
**dataset_ops.flat_structure(self._dataset)))
if context.executing_eagerly():
# Delete the resource when this object is deleted
self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
handle=self._multi_device_iterator_resource,
handle_device=self._source_device)
# The incarnation ID is used to ensure consistency between the per-device
# iterators and the multi-device iterator.
self._incarnation_id = gen_dataset_ops.multi_device_iterator_init(
self._dataset._variant_tensor, # pylint: disable=protected-access
self._multi_device_iterator_resource,
max_buffer_size=self._max_buffer_size)
self._prototype_device_datasets = []
for i, device in enumerate(self._devices):
with ops.device(device):
ds = _PerDeviceGenerator(
i, self._multi_device_iterator_resource, self._incarnation_id,
self._source_device_tensor, self._dataset._element_structure) # pylint: disable=protected-access
self._prototype_device_datasets.append(ds)
# TODO(rohanj): Explore the possibility of the MultiDeviceIterator to
# initialize the device side of the pipeline. This would allow the
# MultiDeviceIterator to choose, for example, to move some transformations
# into the device side from its input. It might be useful in rewriting.
# Create the per device iterators.
self._device_iterators = []
for i, device in enumerate(self._devices):
with ops.device(device):
ds = self._create_device_dataset(i)
if context.executing_eagerly():
self._device_iterators.append(dataset_ops.make_one_shot_iterator(ds))
else:
self._device_iterators.append(
dataset_ops.make_initializable_iterator(ds))
if not context.executing_eagerly():
device_iterator_initializers = [
iterator.initializer for iterator in self._device_iterators
]
self._initializer = control_flow_ops.group(*device_iterator_initializers)
def _create_device_dataset(self, i):
"""Uses _prototype_device_datasets[i] to build a dataset for the device."""
ds = self._prototype_device_datasets[i]
ds = _ReincarnatedPerDeviceGenerator(ds, self._incarnation_id)
if self._prefetch_buffer_size > 0:
if self._experimental_slack:
ds = dataset_ops.PrefetchDataset(
ds, self._prefetch_buffer_size, slack_period=1)
else:
ds = ds.prefetch(self._prefetch_buffer_size)
# TODO(jsimsa): Enable auto-tuning and optimizations when supported for
# non-CPU devices.
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.autotune = False
ds = ds.with_options(options)
return ds
def get_next(self, device=None):
"""Returns the next element given a `device`, else returns all in a list."""
if device is not None:
index = self._devices.index(device)
return self._device_iterators[index].get_next()
result = []
for i, device in enumerate(self._devices):
with ops.device(device):
result.append(self._device_iterators[i].get_next())
return result
def get_next_as_optional(self):
result = []
for i, device in enumerate(self._devices):
with ops.device(device):
result.append(iterator_ops.get_next_as_optional(
self._device_iterators[i]))
return result
@property
def initializer(self):
if context.executing_eagerly():
return control_flow_ops.no_op()
return self._initializer
def _eager_reset(self):
"""Resets the MultiDeviceIterator in eager mode."""
if not context.executing_eagerly():
raise ValueError("Eager reset is only supported in eager mode.")
# pylint: disable=protected-access
self._incarnation_id = gen_dataset_ops.multi_device_iterator_init(
self._dataset._variant_tensor,
self._multi_device_iterator_resource,
max_buffer_size=self._max_buffer_size)
for i, device in enumerate(self._devices):
with ops.device(device):
ds = self._create_device_dataset(i)
# Reset the device iterator resources with the new dataset.
ds_variant = ds._variant_tensor
gen_dataset_ops.make_iterator(
ds_variant, self._device_iterators[i]._iterator_resource)
@property
def _element_structure(self):
return dataset_ops.get_structure(self._dataset)
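# --- Editor's note: a short, hedged sketch of the public wrapper
# `tf.data.experimental.MultiDeviceIterator`, not part of the original file.
# Eager execution and the listed devices (one CPU, one GPU) are assumptions;
# in graph mode you would additionally run `mdi.initializer`.
def _example_multi_device_iterator():
  import tensorflow as tf
  dataset = tf.data.Dataset.range(8).batch(2)
  mdi = tf.data.experimental.MultiDeviceIterator(
      dataset, devices=["/cpu:0", "/gpu:0"], prefetch_buffer_size=2)
  # Without a `device` argument, `get_next()` returns one element per device.
  first, second = mdi.get_next()
  print(first.numpy(), second.numpy())  # [0 1] [2 3]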
|
tensorflow-master
|
tensorflow/python/data/ops/multi_device_iterator_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An Optional type for representing potentially missing values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.Optional")
@six.add_metaclass(abc.ABCMeta)
class Optional(composite_tensor.CompositeTensor):
"""Wraps a nested structure of tensors that may/may not be present at runtime.
An `Optional` can represent the result of an operation that may fail as a
value, rather than raising an exception and halting execution. For example,
`tf.data.experimental.get_next_as_optional` returns an `Optional` that either
contains the next value from a `tf.compat.v1.data.Iterator` if one exists, or
a "none"
value that indicates the end of the sequence has been reached.
"""
@abc.abstractmethod
def has_value(self, name=None):
"""Returns a tensor that evaluates to `True` if this optional has a value.
Args:
name: (Optional.) A name for the created operation.
Returns:
A scalar `tf.Tensor` of type `tf.bool`.
"""
raise NotImplementedError("Optional.has_value()")
@abc.abstractmethod
def get_value(self, name=None):
"""Returns a nested structure of values wrapped by this optional.
If this optional does not have a value (i.e. `self.has_value()` evaluates
to `False`), this operation will raise `tf.errors.InvalidArgumentError`
at runtime.
Args:
name: (Optional.) A name for the created operation.
Returns:
A nested structure of `tf.Tensor` and/or `tf.SparseTensor` objects.
"""
raise NotImplementedError("Optional.get_value()")
@abc.abstractproperty
def value_structure(self):
"""The structure of the components of this optional.
Returns:
A `Structure` object representing the structure of the components of this
optional.
"""
raise NotImplementedError("Optional.value_structure")
@staticmethod
def from_value(value):
"""Returns an `Optional` that wraps the given value.
Args:
value: A nested structure of `tf.Tensor` and/or `tf.SparseTensor` objects.
Returns:
An `Optional` that wraps `value`.
"""
with ops.name_scope("optional") as scope:
with ops.name_scope("value"):
value_structure = type_spec.type_spec_from_value(value)
encoded_value = value_structure._to_tensor_list(value) # pylint: disable=protected-access
return _OptionalImpl(
gen_dataset_ops.optional_from_value(encoded_value, name=scope),
value_structure)
@staticmethod
def none_from_structure(value_structure):
"""Returns an `Optional` that has no value.
NOTE: This method takes an argument that defines the structure of the value
that would be contained in the returned `Optional` if it had a value.
Args:
value_structure: A `Structure` object representing the structure of the
components of this optional.
Returns:
An `Optional` that has no value.
"""
return _OptionalImpl(gen_dataset_ops.optional_none(), value_structure)
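# --- Editor's note: a brief, hedged example of the `Optional` API above, not
# part of the original file. Eager execution is assumed so the printed tensors
# show their values; the helper name is hypothetical.
def _example_optional():
  import tensorflow as tf
  opt = tf.data.experimental.Optional.from_value(tf.constant(37.0))
  print(opt.has_value())   # tf.Tensor(True, ...)
  print(opt.get_value())   # tf.Tensor(37.0, ...)
  # An empty optional with the same value structure as `opt`.
  none = tf.data.experimental.Optional.none_from_structure(opt.value_structure)
  print(none.has_value())  # tf.Tensor(False, ...)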
class _OptionalImpl(Optional):
"""Concrete implementation of `tf.data.experimental.Optional`.
NOTE(mrry): This implementation is kept private, to avoid defining
`Optional.__init__()` in the public API.
"""
def __init__(self, variant_tensor, value_structure):
self._variant_tensor = variant_tensor
self._value_structure = value_structure
def has_value(self, name=None):
return gen_dataset_ops.optional_has_value(self._variant_tensor, name=name)
def get_value(self, name=None):
# TODO(b/110122868): Consolidate the restructuring logic with similar logic
# in `Iterator.get_next()` and `StructuredFunctionWrapper`.
with ops.name_scope(name, "OptionalGetValue",
[self._variant_tensor]) as scope:
# pylint: disable=protected-access
return self._value_structure._from_tensor_list(
gen_dataset_ops.optional_get_value(
self._variant_tensor,
name=scope,
output_types=self._value_structure._flat_types,
output_shapes=self._value_structure._flat_shapes))
@property
def value_structure(self):
return self._value_structure
@property
def _type_spec(self):
return OptionalStructure.from_value(self)
# TODO(b/133606651) Rename this class to OptionalSpec
@tf_export("OptionalSpec", "data.experimental.OptionalStructure")
class OptionalStructure(type_spec.TypeSpec):
"""Represents an optional potentially containing a structured value."""
__slots__ = ["_value_structure"]
def __init__(self, value_structure):
self._value_structure = value_structure
@property
def value_type(self):
return _OptionalImpl
def _serialize(self):
return (self._value_structure,)
@property
def _component_specs(self):
return [tensor_spec.TensorSpec((), dtypes.variant)]
def _to_components(self, value):
return [value._variant_tensor] # pylint: disable=protected-access
def _from_components(self, flat_value):
# pylint: disable=protected-access
return _OptionalImpl(flat_value[0], self._value_structure)
@staticmethod
def from_value(value):
return OptionalStructure(value.value_structure)
def _to_legacy_output_types(self):
return self
def _to_legacy_output_shapes(self):
return self
def _to_legacy_output_classes(self):
return self
|
tensorflow-master
|
tensorflow/python/data/ops/optional_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for Datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import functools
import threading
import warnings
import weakref
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from tensorflow.core.framework import graph_pb2
from tensorflow.python.compat import compat
from tensorflow.python.data.experimental.ops import distribute_options
from tensorflow.python.data.experimental.ops import optimization_options
from tensorflow.python.data.experimental.ops import stats_options
from tensorflow.python.data.experimental.ops import threading_options
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import options as options_lib
from tensorflow.python.data.util import random_seed
from tensorflow.python.data.util import sparse
from tensorflow.python.data.util import structure as structure_lib
from tensorflow.python.data.util import traverse
from tensorflow.python.eager import context
from tensorflow.python.eager import function as eager_function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed as core_random_seed
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base as tracking_base
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import lazy_loader
from tensorflow.python.util import nest as tf_nest
from tensorflow.python.util.tf_export import tf_export
# Loaded lazily due to a circular dependency (roughly
# tf.function->wrap_function->dataset->autograph->tf.function).
# TODO(b/133251390): Use a regular import.
wrap_function = lazy_loader.LazyLoader(
"wrap_function", globals(),
"tensorflow.python.eager.wrap_function")
# TODO(mdan): Create a public API for this.
autograph_ctx = lazy_loader.LazyLoader(
"autograph_ctx", globals(),
"tensorflow.python.autograph.core.ag_ctx")
autograph = lazy_loader.LazyLoader(
"autograph", globals(),
"tensorflow.python.autograph")
ops.NotDifferentiable("ReduceDataset")
# A constant that can be used to enable auto-tuning.
AUTOTUNE = -1
tf_export("data.experimental.AUTOTUNE").export_constant(__name__, "AUTOTUNE")
@tf_export("data.Dataset", v1=[])
@six.add_metaclass(abc.ABCMeta)
class DatasetV2(tracking_base.Trackable, composite_tensor.CompositeTensor):
"""Represents a potentially large set of elements.
A `Dataset` can be used to represent an input pipeline as a
collection of elements (nested structures of tensors) and a "logical
plan" of transformations that act on those elements.
"""
def __init__(self, variant_tensor):
"""Creates a DatasetV2 object.
This is a difference between DatasetV1 and DatasetV2: DatasetV1 does not
take anything in its constructor, whereas DatasetV2 expects subclasses to
create a variant_tensor and pass it to the super() call.
Args:
variant_tensor: A DT_VARIANT tensor that represents the dataset.
"""
self._variant_tensor_attr = variant_tensor
weak_self = weakref.proxy(self)
self._variant_tracker = self._track_trackable(
_VariantTracker(
self._variant_tensor,
# _trace_variant_creation only works when executing eagerly, so we
# don't want to run it immediately. We also want the _VariantTracker
# to have a weak reference to the Dataset to avoid creating
# reference cycles and making work for the garbage collector.
lambda: weak_self._trace_variant_creation()()), # pylint: disable=unnecessary-lambda,protected-access
name="_variant_tracker")
self._graph_attr = ops.get_default_graph()
@property
def _variant_tensor(self):
return self._variant_tensor_attr
@_variant_tensor.setter
def _variant_tensor(self, _):
raise ValueError("The _variant_tensor property is read-only")
def _as_serialized_graph(self):
"""Produces serialized graph representation of the dataset.
Returns:
A scalar `tf.Tensor` of `tf.string` type, representing this dataset as a
serialized graph.
"""
return gen_dataset_ops.dataset_to_graph(self._variant_tensor)
def _trace_variant_creation(self):
"""Traces a function which outputs a variant `tf.Tensor` for this dataset.
Note that creating this function involves evaluating an op, and is currently
only supported when executing eagerly.
Returns:
A zero-argument `ConcreteFunction` which outputs a variant `tf.Tensor`.
"""
variant = self._variant_tensor
if not isinstance(variant, ops.EagerTensor):
raise NotImplementedError(
"Can only export Datasets which were created executing eagerly. "
"Please file a feature request if this is important to you.")
with context.eager_mode(), ops.device("CPU"):
graph_def = graph_pb2.GraphDef().FromString(
self._as_serialized_graph().numpy()) # pylint: disable=protected-access
output_node_name = None
for node in graph_def.node:
if node.op == "_Retval":
if output_node_name is not None:
raise AssertionError(
"Found multiple return values from the dataset's graph, expected "
"only one.")
output_node_name, = node.input
if output_node_name is None:
raise AssertionError("Could not find the dataset's output node.")
# Add functions used in this Dataset to the function's graph, since they
# need to follow it around (and for example be added to a SavedModel which
# references the dataset).
variant_function = wrap_function.function_from_graph_def(
graph_def, inputs=[], outputs=output_node_name + ":0")
for used_function in self._functions():
used_function.function.add_to_graph(variant_function.graph)
return variant_function
@abc.abstractmethod
def _inputs(self):
"""Returns a list of the input datasets of the dataset."""
raise NotImplementedError("Dataset._inputs")
@property
def _graph(self):
return self._graph_attr
@_graph.setter
def _graph(self, _):
raise ValueError("The _graph property is read-only")
def _has_captured_ref(self):
"""Whether this dataset uses a function that captures ref variables.
Returns:
A boolean, which if true indicates that the dataset or one of its inputs
uses a function that captures ref variables.
"""
if context.executing_eagerly():
# RefVariables are not supported in eager mode
return False
def is_tensor_or_parent_ref(tensor):
if tensor.dtype._is_ref_dtype: # pylint: disable=protected-access
return True
return any([is_tensor_or_parent_ref(x) for x in tensor.op.inputs])
for fn in self._functions():
if any([is_tensor_or_parent_ref(t) for t in fn.function.captured_inputs]):
return True
return any(
[input_dataset._has_captured_ref() for input_dataset in self._inputs()]) # pylint: disable=protected-access
# TODO(jsimsa): Change this to be the transitive closure of functions used
# by this dataset and its inputs.
def _functions(self):
"""Returns a list of functions associated with this dataset.
Returns:
A list of `StructuredFunctionWrapper` objects.
"""
return []
def options(self):
"""Returns the options for this dataset and its inputs.
Returns:
A `tf.data.Options` object representing the dataset options.
"""
options = Options()
for input_dataset in self._inputs():
input_options = input_dataset.options()
if input_options is not None:
options = options.merge(input_options)
return options
def _apply_options(self):
"""Apply options, such as optimization configuration, to the dataset."""
dataset = self
options = self.options()
if options.experimental_threading is not None:
t_options = options.experimental_threading
if t_options.max_intra_op_parallelism is not None:
dataset = _MaxIntraOpParallelismDataset(
dataset, t_options.max_intra_op_parallelism)
if t_options.private_threadpool_size is not None:
dataset = _PrivateThreadPoolDataset(dataset,
t_options.private_threadpool_size)
# pylint: disable=protected-access
static_optimizations = options._static_optimizations()
static_optimization_configs = options._static_optimization_configs()
# pylint: enable=protected-access
if static_optimizations:
if self._has_captured_ref():
warnings.warn(
"tf.data static optimizations are not compatible with tf.Variable. "
"The following optimizations will be disabled: %s. To enable "
"optimizations, use resource variables instead by calling "
"`tf.enable_resource_variables()` at the start of the program." %
", ".join(static_optimizations))
else:
dataset = _OptimizeDataset(dataset, static_optimizations,
static_optimization_configs)
autotune = True
cpu_budget = 0 # Indicates that all CPU cores should be used.
if options.experimental_optimization is not None:
if options.experimental_optimization.autotune is False: # pylint: disable=g-bool-id-comparison
autotune = False
if options.experimental_optimization.autotune_cpu_budget is not None:
cpu_budget = options.experimental_optimization.autotune_cpu_budget
if autotune:
dataset = _ModelDataset(dataset, cpu_budget)
if options.experimental_stats and options.experimental_stats.aggregator: # pylint: disable=line-too-long
dataset = _SetStatsAggregatorDataset( # pylint: disable=protected-access
dataset, options.experimental_stats.aggregator,
options.experimental_stats.prefix,
options.experimental_stats.counter_prefix)
return dataset
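# Editor's note (hedged sketch, not part of the original file): from user code
# the options consumed by `_apply_options` are configured with `tf.data.Options`
# and `Dataset.with_options`, for example:
#
#   options = tf.data.Options()
#   options.experimental_optimization.apply_default_optimizations = False
#   options.experimental_threading.private_threadpool_size = 4
#   dataset = dataset.with_options(options)
#
# The settings are then folded into the pipeline when an iterator is created.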
def __iter__(self):
"""Creates an `Iterator` for enumerating the elements of this dataset.
The returned iterator implements the Python iterator protocol and therefore
can only be used in eager mode.
Returns:
An `Iterator` over the elements of this dataset.
Raises:
RuntimeError: If not inside of tf.function and not executing eagerly.
"""
if (context.executing_eagerly()
or ops.get_default_graph()._building_function): # pylint: disable=protected-access
return iterator_ops.IteratorV2(self)
else:
raise RuntimeError("__iter__() is only supported inside of tf.function "
"or when eager execution is enabled.")
@abc.abstractproperty
def _element_structure(self):
"""The structure of an element of this dataset.
Returns:
A `Structure` object representing the structure of an element of this
dataset.
"""
raise NotImplementedError("Dataset._element_structure")
def __repr__(self):
output_shapes = nest.map_structure(str, get_legacy_output_shapes(self))
output_shapes = str(output_shapes).replace("'", "")
output_types = nest.map_structure(repr, get_legacy_output_types(self))
output_types = str(output_types).replace("'", "")
return ("<%s shapes: %s, types: %s>" % (type(self).__name__, output_shapes,
output_types))
@property
def _type_spec(self):
return DatasetStructure(self._element_structure)
@staticmethod
def from_tensors(tensors):
"""Creates a `Dataset` with a single element, comprising the given tensors.
Note that if `tensors` contains a NumPy array, and eager execution is not
enabled, the values will be embedded in the graph as one or more
`tf.constant` operations. For large datasets (> 1 GB), this can waste
memory and run into byte limits of graph serialization. If `tensors`
contains one or more large NumPy arrays, consider the alternative described
in [this
guide](https://tensorflow.org/guide/datasets#consuming_numpy_arrays).
Args:
tensors: A nested structure of tensors.
Returns:
Dataset: A `Dataset`.
"""
return TensorDataset(tensors)
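# Editor's note (hedged example, not part of the original file): `from_tensors`
# wraps its input as a single element, so the dataset below contains exactly
# one element, the whole (2, 3) matrix:
#
#   ds = tf.data.Dataset.from_tensors(tf.zeros([2, 3]))  # 1 element of shape (2, 3)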
@staticmethod
def from_tensor_slices(tensors):
"""Creates a `Dataset` whose elements are slices of the given tensors.
Note that if `tensors` contains a NumPy array, and eager execution is not
enabled, the values will be embedded in the graph as one or more
`tf.constant` operations. For large datasets (> 1 GB), this can waste
memory and run into byte limits of graph serialization. If `tensors`
contains one or more large NumPy arrays, consider the alternative described
in [this guide](
https://tensorflow.org/guide/datasets#consuming_numpy_arrays).
Args:
tensors: A nested structure of tensors, each having the same size in the
0th dimension.
Returns:
Dataset: A `Dataset`.
"""
return TensorSliceDataset(tensors)
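# Editor's note (hedged example, not part of the original file): unlike
# `from_tensors`, `from_tensor_slices` splits the input along its 0th
# dimension:
#
#   ds = tf.data.Dataset.from_tensor_slices(tf.zeros([2, 3]))  # 2 elements of shape (3,)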
class _GeneratorState(object):
"""Stores outstanding iterators created from a Python generator.
This class keeps track of potentially multiple iterators that may have
been created from a generator, e.g. in the case that the dataset is
repeated, or nested within a parallel computation.
"""
def __init__(self, generator):
self._generator = generator
self._lock = threading.Lock()
self._next_id = 0 # GUARDED_BY(self._lock)
self._args = {}
self._iterators = {}
def get_next_id(self, *args):
with self._lock:
ret = self._next_id
self._next_id += 1
self._args[ret] = args
# NOTE(mrry): Explicitly create an array of `np.int64` because implicit
# casting in `py_func()` will create an array of `np.int32` on Windows,
# leading to a runtime error.
return np.array(ret, dtype=np.int64)
def get_iterator(self, iterator_id):
try:
return self._iterators[iterator_id]
except KeyError:
iterator = iter(self._generator(*self._args.pop(iterator_id)))
self._iterators[iterator_id] = iterator
return iterator
def iterator_completed(self, iterator_id):
del self._iterators[iterator_id]
@staticmethod
def from_generator(generator, output_types, output_shapes=None, args=None):
"""Creates a `Dataset` whose elements are generated by `generator`.
The `generator` argument must be a callable object that returns
an object that supports the `iter()` protocol (e.g. a generator function).
The elements generated by `generator` must be compatible with the given
`output_types` and (optional) `output_shapes` arguments.
For example:
```python
import itertools
tf.compat.v1.enable_eager_execution()
def gen():
for i in itertools.count(1):
yield (i, [1] * i)
ds = tf.data.Dataset.from_generator(
gen, (tf.int64, tf.int64), (tf.TensorShape([]), tf.TensorShape([None])))
for value in ds.take(2):
print(value)
# (1, array([1]))
# (2, array([1, 1]))
```
NOTE: The current implementation of `Dataset.from_generator()` uses
`tf.numpy_function` and inherits the same constraints. In particular, it
requires the `Dataset`- and `Iterator`-related operations to be placed
on a device in the same process as the Python program that called
`Dataset.from_generator()`. The body of `generator` will not be
serialized in a `GraphDef`, and you should not use this method if you
need to serialize your model and restore it in a different environment.
NOTE: If `generator` depends on mutable global variables or other external
state, be aware that the runtime may invoke `generator` multiple times
(in order to support repeating the `Dataset`) and at any time
between the call to `Dataset.from_generator()` and the production of the
first element from the generator. Mutating global variables or external
state can cause undefined behavior, and we recommend that you explicitly
cache any external state in `generator` before calling
`Dataset.from_generator()`.
Args:
generator: A callable object that returns an object that supports the
`iter()` protocol. If `args` is not specified, `generator` must take no
arguments; otherwise it must take as many arguments as there are values
in `args`.
output_types: A nested structure of `tf.DType` objects corresponding to
each component of an element yielded by `generator`.
output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects
corresponding to each component of an element yielded by `generator`.
args: (Optional.) A tuple of `tf.Tensor` objects that will be evaluated
and passed to `generator` as NumPy-array arguments.
Returns:
Dataset: A `Dataset`.
"""
if not callable(generator):
raise TypeError("`generator` must be callable.")
if output_shapes is None:
output_shapes = nest.map_structure(
lambda _: tensor_shape.TensorShape(None), output_types)
else:
output_shapes = nest.map_structure_up_to(
output_types, tensor_shape.as_shape, output_shapes)
if args is None:
args = ()
else:
args = tuple(ops.convert_n_to_tensor(args, name="args"))
flattened_types = [dtypes.as_dtype(dt) for dt in nest.flatten(output_types)]
flattened_shapes = nest.flatten(output_shapes)
generator_state = DatasetV2._GeneratorState(generator)
def get_iterator_id_fn(unused_dummy):
"""Creates a unique `iterator_id` for each pass over the dataset.
The returned `iterator_id` disambiguates between multiple concurrently
existing iterators.
Args:
unused_dummy: Ignored value.
Returns:
A `tf.int64` tensor whose value uniquely identifies an iterator in
`generator_state`.
"""
return script_ops.numpy_function(generator_state.get_next_id, args,
dtypes.int64)
def generator_next_fn(iterator_id_t):
"""Generates the next element from iterator with ID `iterator_id_t`.
We map this function across an infinite repetition of the
`iterator_id_t`, and raise `StopIteration` to terminate the iteration.
Args:
iterator_id_t: A `tf.int64` tensor whose value uniquely identifies the
iterator in `generator_state` from which to generate an element.
Returns:
A nested structure of tensors representing an element from the iterator.
"""
def generator_py_func(iterator_id):
"""A `py_func` that will be called to invoke the iterator."""
# `next()` raises `StopIteration` when there are no more
# elements remaining to be generated.
values = next(generator_state.get_iterator(iterator_id))
# Use the same _convert function from the py_func() implementation to
# convert the returned values to arrays early, so that we can inspect
# their values.
try:
flattened_values = nest.flatten_up_to(output_types, values)
except (TypeError, ValueError):
raise TypeError(
"`generator` yielded an element that did not match the expected "
"structure. The expected structure was %s, but the yielded "
"element was %s." % (output_types, values))
ret_arrays = []
for ret, dtype in zip(flattened_values, flattened_types):
try:
ret_arrays.append(script_ops.FuncRegistry._convert( # pylint: disable=protected-access
ret, dtype=dtype.as_numpy_dtype))
except (TypeError, ValueError):
raise TypeError(
"`generator` yielded an element that could not be converted to "
"the expected type. The expected type was %s, but the yielded "
"element was %s." % (dtype.name, ret))
# Additional type and shape checking to ensure that the components
# of the generated element match the `output_types` and `output_shapes`
# arguments.
for (ret_array, expected_dtype, expected_shape) in zip(
ret_arrays, flattened_types, flattened_shapes):
if ret_array.dtype != expected_dtype.as_numpy_dtype:
raise TypeError(
"`generator` yielded an element of type %s where an element "
"of type %s was expected." % (ret_array.dtype,
expected_dtype.as_numpy_dtype))
if not expected_shape.is_compatible_with(ret_array.shape):
raise ValueError(
"`generator` yielded an element of shape %s where an element "
"of shape %s was expected." % (ret_array.shape, expected_shape))
return ret_arrays
flat_values = script_ops.numpy_function(generator_py_func,
[iterator_id_t], flattened_types)
# The `py_func()` op drops the inferred shapes, so we add them back in
# here.
if output_shapes is not None:
for ret_t, shape in zip(flat_values, flattened_shapes):
ret_t.set_shape(shape)
return nest.pack_sequence_as(output_types, flat_values)
def finalize_fn(iterator_id_t):
"""Releases host-side state for the iterator with ID `iterator_id_t`."""
def finalize_py_func(iterator_id):
generator_state.iterator_completed(iterator_id)
# We return a dummy value so that the `finalize_fn` has a valid
# signature.
# NOTE(mrry): Explicitly create an array of `np.int64` because implicit
# casting in `py_func()` will create an array of `np.int32` on Windows,
# leading to a runtime error.
return np.array(0, dtype=np.int64)
return script_ops.numpy_function(finalize_py_func, [iterator_id_t],
dtypes.int64)
# This function associates each traversal of `generator` with a unique
# iterator ID.
def flat_map_fn(dummy_arg):
# The `get_iterator_id_fn` gets a unique ID for the current instance of
# the generator.
# The `generator_next_fn` gets the next element from the iterator with the
# given ID, and raises StopIteration when that iterator contains no
# more elements.
return _GeneratorDataset(dummy_arg, get_iterator_id_fn, generator_next_fn,
finalize_fn)
# A single-element dataset that, each time it is evaluated, contains a
# freshly-generated and unique (for the returned dataset) int64
# ID that will be used to identify the appropriate Python state, which
# is encapsulated in `generator_state`, and captured in
# `get_iterator_id_map_fn`.
dummy = 0
id_dataset = Dataset.from_tensors(dummy)
# A dataset that contains all of the elements generated by a
# single iterator created from `generator`, identified by the
# iterator ID contained in `id_dataset`. Lifting the iteration
# into a flat_map here enables multiple repetitions and/or nested
# versions of the returned dataset to be created, because it forces
# the generation of a new ID for each version.
return id_dataset.flat_map(flat_map_fn)
@staticmethod
def range(*args):
"""Creates a `Dataset` of a step-separated range of values.
For example:
```python
Dataset.range(5) == [0, 1, 2, 3, 4]
Dataset.range(2, 5) == [2, 3, 4]
Dataset.range(1, 5, 2) == [1, 3]
Dataset.range(1, 5, -2) == []
Dataset.range(5, 1) == []
Dataset.range(5, 1, -2) == [5, 3]
```
Args:
*args: follows the same semantics as Python's built-in `range`.
len(args) == 1 -> start = 0, stop = args[0], step = 1
len(args) == 2 -> start = args[0], stop = args[1], step = 1
len(args) == 3 -> start = args[0], stop = args[1], step = args[2]
Returns:
Dataset: A `RangeDataset`.
Raises:
ValueError: if len(args) == 0.
"""
return RangeDataset(*args)
@staticmethod
def zip(datasets):
"""Creates a `Dataset` by zipping together the given datasets.
This method has similar semantics to the built-in `zip()` function
in Python, with the main difference being that the `datasets`
argument can be an arbitrary nested structure of `Dataset` objects.
For example:
```python
a = Dataset.range(1, 4) # ==> [ 1, 2, 3 ]
b = Dataset.range(4, 7) # ==> [ 4, 5, 6 ]
c = Dataset.range(7, 13).batch(2) # ==> [ [7, 8], [9, 10], [11, 12] ]
d = Dataset.range(13, 15) # ==> [ 13, 14 ]
# The nested structure of the `datasets` argument determines the
# structure of elements in the resulting dataset.
Dataset.zip((a, b)) # ==> [ (1, 4), (2, 5), (3, 6) ]
Dataset.zip((b, a)) # ==> [ (4, 1), (5, 2), (6, 3) ]
# The `datasets` argument may contain an arbitrary number of
# datasets.
Dataset.zip((a, b, c)) # ==> [ (1, 4, [7, 8]),
# (2, 5, [9, 10]),
# (3, 6, [11, 12]) ]
# The number of elements in the resulting dataset is the same as
# the size of the smallest dataset in `datasets`.
Dataset.zip((a, d)) # ==> [ (1, 13), (2, 14) ]
```
Args:
datasets: A nested structure of datasets.
Returns:
Dataset: A `Dataset`.
"""
return ZipDataset(datasets)
def concatenate(self, dataset):
"""Creates a `Dataset` by concatenating given dataset with this dataset.
```python
a = Dataset.range(1, 4) # ==> [ 1, 2, 3 ]
b = Dataset.range(4, 8) # ==> [ 4, 5, 6, 7 ]
# Input dataset and dataset to be concatenated should have same
# nested structures and output types.
# c = Dataset.range(8, 14).batch(2) # ==> [ [8, 9], [10, 11], [12, 13] ]
# d = Dataset.from_tensor_slices([14.0, 15.0, 16.0])
# a.concatenate(c) and a.concatenate(d) would result in error.
a.concatenate(b) # ==> [ 1, 2, 3, 4, 5, 6, 7 ]
```
Args:
dataset: `Dataset` to be concatenated.
Returns:
Dataset: A `Dataset`.
"""
return ConcatenateDataset(self, dataset)
def prefetch(self, buffer_size):
"""Creates a `Dataset` that prefetches elements from this dataset.
Note: Like other `Dataset` methods, prefetch operates on the
elements of the input dataset. It has no concept of examples vs. batches.
`examples.prefetch(2)` will prefetch two elements (2 examples),
while `examples.batch(20).prefetch(2)` will prefetch 2 elements
(2 batches, of 20 examples each).
Args:
buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the maximum
number of elements that will be buffered when prefetching.
Returns:
Dataset: A `Dataset`.
"""
return PrefetchDataset(self, buffer_size)
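# Editor's note (hedged example, not part of the original file): because
# `prefetch` counts dataset elements, its position relative to `batch` matters:
#
#   tf.data.Dataset.range(100).prefetch(2)            # buffers 2 examples
#   tf.data.Dataset.range(100).batch(20).prefetch(2)  # buffers 2 batches (40 examples)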
@staticmethod
def list_files(file_pattern, shuffle=None, seed=None):
"""A dataset of all files matching one or more glob patterns.
NOTE: The default behavior of this method is to return filenames in
a non-deterministic random shuffled order. Pass a `seed` or `shuffle=False`
to get results in a deterministic order.
Example:
If we had the following files on our filesystem:
- /path/to/dir/a.txt
- /path/to/dir/b.py
- /path/to/dir/c.py
If we pass "/path/to/dir/*.py" as the `file_pattern`, the dataset
would produce:
- /path/to/dir/b.py
- /path/to/dir/c.py
Args:
file_pattern: A string, a list of strings, or a `tf.Tensor` of string type
(scalar or vector), representing the filename glob (i.e. shell wildcard)
pattern(s) that will be matched.
shuffle: (Optional.) If `True`, the file names will be shuffled randomly.
Defaults to `True`.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
seed that will be used to create the distribution. See
`tf.compat.v1.set_random_seed` for behavior.
Returns:
Dataset: A `Dataset` of strings corresponding to file names.
"""
with ops.name_scope("list_files"):
if shuffle is None:
shuffle = True
file_pattern = ops.convert_to_tensor(
file_pattern, dtype=dtypes.string, name="file_pattern")
matching_files = gen_io_ops.matching_files(file_pattern)
# Raise an exception if `file_pattern` does not match any files.
condition = math_ops.greater(array_ops.shape(matching_files)[0], 0,
name="match_not_empty")
message = math_ops.add(
"No files matched pattern: ",
string_ops.reduce_join(file_pattern, separator=", "), name="message")
assert_not_empty = control_flow_ops.Assert(
condition, [message], summarize=1, name="assert_not_empty")
with ops.control_dependencies([assert_not_empty]):
matching_files = array_ops.identity(matching_files)
dataset = Dataset.from_tensor_slices(matching_files)
if shuffle:
# NOTE(mrry): The shuffle buffer size must be greater than zero, but the
# list of files might be empty.
buffer_size = math_ops.maximum(
array_ops.shape(matching_files, out_type=dtypes.int64)[0], 1)
dataset = dataset.shuffle(buffer_size, seed=seed)
return dataset
def repeat(self, count=None):
"""Repeats this dataset `count` times.
NOTE: If this dataset is a function of global state (e.g. a random number
generator), then different repetitions may produce different elements.
Args:
count: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
number of times the dataset should be repeated. The default behavior (if
`count` is `None` or `-1`) is for the dataset be repeated indefinitely.
Returns:
Dataset: A `Dataset`.
"""
return RepeatDataset(self, count)
def enumerate(self, start=0):
"""Enumerates the elements of this dataset.
It is similar to python's `enumerate`.
For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { 1, 2, 3 }
b = { (7, 8), (9, 10) }
# The nested structure of the `datasets` argument determines the
# structure of elements in the resulting dataset.
a.enumerate(start=5) == { (5, 1), (6, 2), (7, 3) }
b.enumerate() == { (0, (7, 8)), (1, (9, 10)) }
```
Args:
start: A `tf.int64` scalar `tf.Tensor`, representing the start value for
enumeration.
Returns:
Dataset: A `Dataset`.
"""
max_value = np.iinfo(dtypes.int64.as_numpy_dtype).max
return Dataset.zip((Dataset.range(start, max_value), self))
def shuffle(self, buffer_size, seed=None, reshuffle_each_iteration=None):
"""Randomly shuffles the elements of this dataset.
This dataset fills a buffer with `buffer_size` elements, then randomly
samples elements from this buffer, replacing the selected elements with new
elements. For perfect shuffling, a buffer size greater than or equal to the
full size of the dataset is required.
For instance, if your dataset contains 10,000 elements but `buffer_size` is
set to 1,000, then `shuffle` will initially select a random element from
only the first 1,000 elements in the buffer. Once an element is selected,
its space in the buffer is replaced by the next (i.e. 1,001-st) element,
maintaining the 1,000 element buffer.
Args:
buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements from this dataset from which the new dataset will sample.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
seed that will be used to create the distribution. See
`tf.compat.v1.set_random_seed` for behavior.
reshuffle_each_iteration: (Optional.) A boolean, which if true indicates
that the dataset should be pseudorandomly reshuffled each time it is
iterated over. (Defaults to `True`.)
Returns:
Dataset: A `Dataset`.
"""
return ShuffleDataset(self, buffer_size, seed, reshuffle_each_iteration)
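# Editor's note (hedged example, not part of the original file): a buffer that
# is smaller than the dataset only mixes nearby elements:
#
#   ds = tf.data.Dataset.range(10000).shuffle(buffer_size=1000, seed=42)
#   # The first element is drawn uniformly from elements 0..999 only; use
#   # buffer_size >= 10000 here for a perfect shuffle.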
def cache(self, filename=""):
"""Caches the elements in this dataset.
Args:
filename: A `tf.string` scalar `tf.Tensor`, representing the name of a
directory on the filesystem to use for caching tensors in this Dataset.
If a filename is not provided, the dataset will be cached in memory.
Returns:
Dataset: A `Dataset`.
"""
return CacheDataset(self, filename)
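# Editor's note (hedged example, not part of the original file; the path below
# is illustrative only):
#
#   ds = expensive_ds.cache()                 # cache in memory after the first pass
#   ds = expensive_ds.cache("/tmp/my_cache")  # cache on the filesystem instead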
def take(self, count):
"""Creates a `Dataset` with at most `count` elements from this dataset.
Args:
count: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements of this dataset that should be taken to form the new dataset.
If `count` is -1, or if `count` is greater than the size of this
dataset, the new dataset will contain all elements of this dataset.
Returns:
Dataset: A `Dataset`.
"""
return TakeDataset(self, count)
def skip(self, count):
"""Creates a `Dataset` that skips `count` elements from this dataset.
Args:
count: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements of this dataset that should be skipped to form the new dataset.
If `count` is greater than the size of this dataset, the new dataset
will contain no elements. If `count` is -1, skips the entire dataset.
Returns:
Dataset: A `Dataset`.
"""
return SkipDataset(self, count)
def shard(self, num_shards, index):
"""Creates a `Dataset` that includes only 1/`num_shards` of this dataset.
This dataset operator is very useful when running distributed training, as
it allows each worker to read a unique subset.
When reading a single input file, you can skip elements as follows:
```python
d = tf.data.TFRecordDataset(input_file)
d = d.shard(num_workers, worker_index)
d = d.repeat(num_epochs)
d = d.shuffle(shuffle_buffer_size)
d = d.map(parser_fn, num_parallel_calls=num_map_threads)
```
Important caveats:
- Be sure to shard before you use any randomizing operator (such as
shuffle).
- Generally it is best if the shard operator is used early in the dataset
pipeline. For example, when reading from a set of TFRecord files, shard
before converting the dataset to input samples. This avoids reading every
file on every worker. The following is an example of an efficient
sharding strategy within a complete pipeline:
```python
d = Dataset.list_files(pattern)
d = d.shard(num_workers, worker_index)
d = d.repeat(num_epochs)
d = d.shuffle(shuffle_buffer_size)
d = d.interleave(tf.data.TFRecordDataset,
cycle_length=num_readers, block_length=1)
d = d.map(parser_fn, num_parallel_calls=num_map_threads)
```
Args:
num_shards: A `tf.int64` scalar `tf.Tensor`, representing the number of
shards operating in parallel.
index: A `tf.int64` scalar `tf.Tensor`, representing the worker index.
Returns:
Dataset: A `Dataset`.
Raises:
InvalidArgumentError: if `num_shards` or `index` are illegal values.
Note: error checking is done on a best-effort basis, and errors aren't
guaranteed to be caught upon dataset creation. (e.g. providing a
placeholder tensor bypasses the early checking, and will instead result
in an error during a session.run call.)
"""
return ShardDataset(self, num_shards, index)
def batch(self, batch_size, drop_remainder=False):
"""Combines consecutive elements of this dataset into batches.
The tensors in the resulting element will have an additional outer
dimension, which will be `batch_size` (or `N % batch_size` for the last
element if `batch_size` does not divide the number of input elements `N`
evenly and `drop_remainder` is `False`). If your program depends on the
batches having the same outer dimension, you should set the `drop_remainder`
argument to `True` to prevent the smaller batch from being produced.
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in the case it has fewer than
`batch_size` elements; the default behavior is not to drop the smaller
batch.
Returns:
Dataset: A `Dataset`.
"""
return BatchDataset(self, batch_size, drop_remainder)
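# Editor's note (hedged example, not part of the original file): the effect of
# `drop_remainder` when `batch_size` does not evenly divide the element count:
#
#   tf.data.Dataset.range(10).batch(3)                       # [0-2], [3-5], [6-8], [9]
#   tf.data.Dataset.range(10).batch(3, drop_remainder=True)  # [0-2], [3-5], [6-8]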
def padded_batch(self,
batch_size,
padded_shapes,
padding_values=None,
drop_remainder=False):
"""Combines consecutive elements of this dataset into padded batches.
This transformation combines multiple consecutive elements of the input
dataset into a single element.
Like `tf.data.Dataset.batch`, the tensors in the resulting element will
have an additional outer dimension, which will be `batch_size` (or
`N % batch_size` for the last element if `batch_size` does not divide the
number of input elements `N` evenly and `drop_remainder` is `False`). If
your program depends on the batches having the same outer dimension, you
should set the `drop_remainder` argument to `True` to prevent the smaller
batch from being produced.
Unlike `tf.data.Dataset.batch`, the input elements to be batched may have
different shapes, and this transformation will pad each component to the
respective shape in `padded_shapes`. The `padded_shapes` argument
determines the resulting shape for each dimension of each component in an
output element:
* If the dimension is a constant (e.g. `tf.compat.v1.Dimension(37)`), the
  component will be padded out to that length in that dimension.
* If the dimension is unknown (e.g. `tf.compat.v1.Dimension(None)`), the
  component will be padded out to the maximum length of all elements in that
  dimension.
See also `tf.data.experimental.dense_to_sparse_batch`, which combines
elements that may have different shapes into a `tf.SparseTensor`.
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
padded_shapes: A nested structure of `tf.TensorShape` or `tf.int64` vector
tensor-like objects representing the shape to which the respective
component of each input element should be padded prior to batching. Any
unknown dimensions (e.g. `tf.compat.v1.Dimension(None)` in a
`tf.TensorShape` or `-1` in a tensor-like object) will be padded to the
maximum size of that dimension in each batch.
padding_values: (Optional.) A nested structure of scalar-shaped
`tf.Tensor`, representing the padding values to use for the respective
components. Defaults are `0` for numeric types and the empty string for
string types.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in the case it has fewer than
`batch_size` elements; the default behavior is not to drop the smaller
batch.
Returns:
Dataset: A `Dataset`.
"""
return PaddedBatchDataset(self, batch_size, padded_shapes, padding_values,
drop_remainder)
def map(self, map_func, num_parallel_calls=None):
"""Maps `map_func` across the elements of this dataset.
This transformation applies `map_func` to each element of this dataset, and
returns a new dataset containing the transformed elements, in the same
order as they appeared in the input.
For example:
```python
a = Dataset.range(1, 6) # ==> [ 1, 2, 3, 4, 5 ]
a.map(lambda x: x + 1) # ==> [ 2, 3, 4, 5, 6 ]
```
The input signature of `map_func` is determined by the structure of each
element in this dataset. For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
# Each element is a `tf.Tensor` object.
a = { 1, 2, 3, 4, 5 }
# `map_func` takes a single argument of type `tf.Tensor` with the same
# shape and dtype.
result = a.map(lambda x: ...)
# Each element is a tuple containing two `tf.Tensor` objects.
b = { (1, "foo"), (2, "bar"), (3, "baz") }
# `map_func` takes two arguments of type `tf.Tensor`.
result = b.map(lambda x_int, y_str: ...)
# Each element is a dictionary mapping strings to `tf.Tensor` objects.
c = { {"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}, {"a": 3, "b": "baz"} }
# `map_func` takes a single argument of type `dict` with the same keys as
# the elements.
result = c.map(lambda d: ...)
```
The value or values returned by `map_func` determine the structure of each
element in the returned dataset.
```python
# `map_func` returns a scalar `tf.Tensor` of type `tf.float32`.
def f(...):
return tf.constant(37.0)
result = dataset.map(f)
result.output_classes == tf.Tensor
result.output_types == tf.float32
result.output_shapes == [] # scalar
# `map_func` returns two `tf.Tensor` objects.
def g(...):
return tf.constant(37.0), tf.constant(["Foo", "Bar", "Baz"])
result = dataset.map(g)
result.output_classes == (tf.Tensor, tf.Tensor)
result.output_types == (tf.float32, tf.string)
result.output_shapes == ([], [3])
# Python primitives, lists, and NumPy arrays are implicitly converted to
# `tf.Tensor`.
def h(...):
  return 37.0, ["Foo", "Bar", "Baz"], np.array([1.0, 2.0], dtype=np.float64)
result = dataset.map(h)
result.output_classes == (tf.Tensor, tf.Tensor, tf.Tensor)
result.output_types == (tf.float32, tf.string, tf.float64)
result.output_shapes == ([], [3], [2])
# `map_func` can return nested structures.
def i(...):
  return {"a": 37.0, "b": [42, 16]}, "foo"
result = dataset.map(i)
result.output_classes == ({"a": tf.Tensor, "b": tf.Tensor}, tf.Tensor)
result.output_types == ({"a": tf.float32, "b": tf.int32}, tf.string)
result.output_shapes == ({"a": [], "b": [2]}, [])
```
In addition to `tf.Tensor` objects, `map_func` can accept as arguments and
return `tf.SparseTensor` objects.
Note that irrespective of the context in which `map_func` is defined (eager
vs. graph), tf.data traces the function and executes it as a graph. To use
Python code inside of the function you have two options:
1) Rely on AutoGraph to convert Python code into an equivalent graph
computation. The downside of this approach is that AutoGraph can convert
some but not all Python code.
2) Use `tf.py_function`, which allows you to write arbitrary Python code but
will generally result in worse performance than 1).
Args:
map_func: A function mapping a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to
another nested structure of tensors.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number of elements to process asynchronously in parallel.
If not specified, elements will be processed sequentially. If the value
`tf.data.experimental.AUTOTUNE` is used, then the number of parallel
calls is set dynamically based on available CPU.
Returns:
Dataset: A `Dataset`.
"""
if num_parallel_calls is None:
return MapDataset(self, map_func, preserve_cardinality=True)
else:
return ParallelMapDataset(
self, map_func, num_parallel_calls, preserve_cardinality=True)
def flat_map(self, map_func):
"""Maps `map_func` across this dataset and flattens the result.
Use `flat_map` if you want to make sure that the order of your dataset
stays the same. For example, to flatten a dataset of batches into a
dataset of their elements:
```python
a = Dataset.from_tensor_slices([ [1, 2, 3], [4, 5, 6], [7, 8, 9] ])
a.flat_map(lambda x: Dataset.from_tensor_slices(x + 1)) # ==>
# [ 2, 3, 4, 5, 6, 7, 8, 9, 10 ]
```
`tf.data.Dataset.interleave()` is a generalization of `flat_map`, since
`flat_map` produces the same output as
`tf.data.Dataset.interleave(cycle_length=1)`.
Args:
map_func: A function mapping a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to a
`Dataset`.
Returns:
Dataset: A `Dataset`.
"""
return FlatMapDataset(self, map_func)
def interleave(self,
map_func,
cycle_length=AUTOTUNE,
block_length=1,
num_parallel_calls=None):
"""Maps `map_func` across this dataset, and interleaves the results.
For example, you can use `Dataset.interleave()` to process many input files
concurrently:
```python
# Preprocess 4 files concurrently, and interleave blocks of 16 records from
# each file.
filenames = ["/var/data/file1.txt", "/var/data/file2.txt", ...]
dataset = (Dataset.from_tensor_slices(filenames)
.interleave(lambda x:
TextLineDataset(x).map(parse_fn, num_parallel_calls=1),
cycle_length=4, block_length=16))
```
The `cycle_length` and `block_length` arguments control the order in which
elements are produced. `cycle_length` controls the number of input elements
that are processed concurrently. If you set `cycle_length` to 1, this
transformation will handle one input element at a time, and will produce
identical results to `tf.data.Dataset.flat_map`. In general,
this transformation will apply `map_func` to `cycle_length` input elements,
open iterators on the returned `Dataset` objects, and cycle through them
producing `block_length` consecutive elements from each iterator, and
consuming the next input element each time it reaches the end of an
iterator.
For example:
```python
a = Dataset.range(1, 6) # ==> [ 1, 2, 3, 4, 5 ]
# NOTE: New lines indicate "block" boundaries.
a.interleave(lambda x: Dataset.from_tensors(x).repeat(6),
cycle_length=2, block_length=4) # ==> [1, 1, 1, 1,
# 2, 2, 2, 2,
# 1, 1,
# 2, 2,
# 3, 3, 3, 3,
# 4, 4, 4, 4,
# 3, 3,
# 4, 4,
# 5, 5, 5, 5,
# 5, 5]
```
NOTE: The order of elements yielded by this transformation is
deterministic, as long as `map_func` is a pure function. If
`map_func` contains any stateful operations, the order in which
that state is accessed is undefined.
Args:
map_func: A function mapping a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to a
`Dataset`.
cycle_length: (Optional.) The number of input elements that will be
processed concurrently. If not specified, the value will be derived from
the number of available CPU cores. If the `num_parallel_calls` argument
is set to `tf.data.experimental.AUTOTUNE`, the `cycle_length` argument
also identifies the maximum degree of parallelism.
block_length: (Optional.) The number of consecutive elements to produce
from each input element before cycling to another input element.
num_parallel_calls: (Optional.) If specified, the implementation creates a
threadpool, which is used to fetch inputs from cycle elements
asynchronously and in parallel. The default behavior is to fetch inputs
from cycle elements synchronously with no parallelism. If the value
`tf.data.experimental.AUTOTUNE` is used, then the number of parallel
calls is set dynamically based on available CPU.
Returns:
Dataset: A `Dataset`.
"""
if num_parallel_calls is None:
return InterleaveDataset(self, map_func, cycle_length, block_length)
else:
return ParallelInterleaveDataset(self, map_func, cycle_length,
block_length, num_parallel_calls)
def filter(self, predicate):
"""Filters this dataset according to `predicate`.
```python
d = tf.data.Dataset.from_tensor_slices([1, 2, 3])
d = d.filter(lambda x: x < 3) # ==> [1, 2]
# `tf.math.equal(x, y)` is required for equality comparison
def filter_fn(x):
return tf.math.equal(x, 1)
d = d.filter(filter_fn) # ==> [1]
```
Args:
predicate: A function mapping a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to a
scalar `tf.bool` tensor.
Returns:
Dataset: The `Dataset` containing the elements of this dataset for which
`predicate` is `True`.
"""
return FilterDataset(self, predicate)
def apply(self, transformation_func):
"""Applies a transformation function to this dataset.
`apply` enables chaining of custom `Dataset` transformations, which are
represented as functions that take one `Dataset` argument and return a
transformed `Dataset`.
For example:
```
dataset = (dataset.map(lambda x: x ** 2)
.apply(group_by_window(key_func, reduce_func, window_size))
.map(lambda x: x ** 3))
```
Args:
transformation_func: A function that takes one `Dataset` argument and
returns a `Dataset`.
Returns:
Dataset: The `Dataset` returned by applying `transformation_func` to this
dataset.
"""
dataset = transformation_func(self)
if not isinstance(dataset, DatasetV2):
raise TypeError(
"`transformation_func` must return a Dataset. Got {}.".format(
dataset))
dataset._input_datasets = [self] # pylint: disable=protected-access
return dataset
def window(self, size, shift=None, stride=1, drop_remainder=False):
"""Combines (nests of) input elements into a dataset of (nests of) windows.
A "window" is a finite dataset of flat elements of size `size` (or possibly
fewer if there are not enough input elements to fill the window and
`drop_remainder` evaluates to false).
The `stride` argument determines the stride of the input elements, and the
`shift` argument determines the shift of the window.
For example, letting `{...}` represent a Dataset:
- `tf.data.Dataset.range(7).window(2)` produces
`{{0, 1}, {2, 3}, {4, 5}, {6}}`
- `tf.data.Dataset.range(7).window(3, 2, 1, True)` produces
`{{0, 1, 2}, {2, 3, 4}, {4, 5, 6}}`
- `tf.data.Dataset.range(7).window(3, 1, 2, True)` produces
`{{0, 2, 4}, {1, 3, 5}, {2, 4, 6}}`
Note that when the `window` transformation is applied to a dataset of
nested elements, it produces a dataset of nested windows.
For example:
- `tf.data.Dataset.from_tensor_slices((range(4), range(4))).window(2)`
produces `{({0, 1}, {0, 1}), ({2, 3}, {2, 3})}`
- `tf.data.Dataset.from_tensor_slices({"a": range(4)}).window(2)`
produces `{{"a": {0, 1}}, {"a": {2, 3}}}`
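Since each window is itself a `Dataset`, a common follow-up is to flatten
the windows back into batched tensors, for example (an illustrative sketch):
```python
ds = tf.data.Dataset.range(7).window(3, 2, 1, True)
ds = ds.flat_map(lambda window: window.batch(3))
# ==> [0, 1, 2], [2, 3, 4], [4, 5, 6]
```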
Args:
size: A `tf.int64` scalar `tf.Tensor`, representing the number of elements
of the input dataset to combine into a window.
shift: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
forward shift of the sliding window in each iteration. Defaults to
`size`.
stride: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
stride of the input elements in the sliding window.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether a window should be dropped in case its size is smaller than
`size`.
Returns:
Dataset: A `Dataset` of (nests of) windows -- finite datasets of flat
elements created from the (nests of) input elements.
"""
if shift is None:
shift = size
return WindowDataset(self, size, shift, stride, drop_remainder)
def reduce(self, initial_state, reduce_func):
"""Reduces the input dataset to a single element.
The transformation calls `reduce_func` successively on every element of
the input dataset until the dataset is exhausted, aggregating information in
its internal state. The `initial_state` argument is used for the initial
state and the final state is returned as the result.
For example:
- `tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, _: x + 1)`
produces `5`
- `tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, y: x + y)`
produces `10`
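Expressed as code (an illustrative sketch; assumes NumPy is imported as `np`):
```python
# Count the elements of a dataset.
tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, _: x + 1)  # ==> 5
# Sum the elements of a dataset.
tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, y: x + y)  # ==> 10
```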
Args:
initial_state: A nested structure of tensors, representing the initial
state of the transformation.
reduce_func: A function that maps `(old_state, input_element)` to
`new_state`. It must take two arguments and return a nested structure of
tensors. The structure of `new_state` must match the structure of
`initial_state`.
Returns:
A nested structure of `tf.Tensor` objects, corresponding to the final
state of the transformation.
"""
with ops.name_scope("initial_state"):
initial_state = structure_lib.normalize_tensors(initial_state)
state_structure = type_spec.type_spec_from_value(initial_state)
# Iteratively rerun the reduce function until reaching a fixed point on
# `state_structure`.
need_to_rerun = True
while need_to_rerun:
wrapped_func = StructuredFunctionWrapper(
reduce_func,
"reduce()",
input_structure=structure_lib.NestedStructure(
(state_structure, self._element_structure)),
add_to_graph=False)
# Extract and validate class information from the returned values.
output_classes = wrapped_func.output_classes
state_classes = state_structure._to_legacy_output_classes() # pylint: disable=protected-access
for new_state_class, state_class in zip(
nest.flatten(output_classes), nest.flatten(state_classes)):
if not issubclass(new_state_class, state_class):
raise TypeError(
"The element classes for the new state must match the initial "
"state. Expected %s; got %s." % (state_classes,
wrapped_func.output_classes))
# Extract and validate type information from the returned values.
output_types = wrapped_func.output_types
state_types = state_structure._to_legacy_output_types() # pylint: disable=protected-access
for new_state_type, state_type in zip(
nest.flatten(output_types), nest.flatten(state_types)):
if new_state_type != state_type:
raise TypeError(
"The element types for the new state must match the initial "
"state. Expected %s; got %s." % (state_types,
wrapped_func.output_types))
# Extract shape information from the returned values.
output_shapes = wrapped_func.output_shapes
state_shapes = state_structure._to_legacy_output_shapes() # pylint: disable=protected-access
flat_state_shapes = nest.flatten(state_shapes)
flat_new_state_shapes = nest.flatten(output_shapes)
weakened_state_shapes = [
original.most_specific_compatible_shape(new)
for original, new in zip(flat_state_shapes, flat_new_state_shapes)
]
need_to_rerun = False
for original_shape, weakened_shape in zip(flat_state_shapes,
weakened_state_shapes):
if original_shape.ndims is not None and (
weakened_shape.ndims is None or
original_shape.as_list() != weakened_shape.as_list()):
need_to_rerun = True
break
if need_to_rerun:
# TODO(b/110122868): Support a "most specific compatible structure"
# method for combining structures, to avoid using legacy structures
# here.
state_structure = structure_lib.convert_legacy_structure(
state_types,
nest.pack_sequence_as(state_shapes, weakened_state_shapes),
state_classes)
reduce_func = wrapped_func.function
reduce_func.add_to_graph(ops.get_default_graph())
# pylint: disable=protected-access
return state_structure._from_compatible_tensor_list(
gen_dataset_ops.reduce_dataset(
self._variant_tensor,
state_structure._to_tensor_list(initial_state),
reduce_func.captured_inputs,
f=reduce_func,
output_shapes=state_structure._flat_shapes,
output_types=state_structure._flat_types))
def unbatch(self):
"""Splits elements of a dataset into multiple elements.
For example, if elements of the dataset are shaped `[B, a0, a1, ...]`,
where `B` may vary for each input element, then for each element in the
dataset, the unbatched dataset will contain `B` consecutive elements
of shape `[a0, a1, ...]`.
```python
# NOTE: The following example uses `{ ... }` to represent the contents
# of a dataset.
ds = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] }
ds.unbatch() == {'a', 'b', 'c', 'a', 'b', 'a', 'b', 'c', 'd'}
```
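A runnable variant of the example above (an illustrative sketch; building
the ragged rows via `Dataset.from_generator` is an assumption for
demonstration):
```python
elements = [['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd']]
ds = tf.data.Dataset.from_generator(lambda: elements, tf.string)
ds = ds.unbatch()  # ==> 'a', 'b', 'c', 'a', 'b', 'a', 'b', 'c', 'd'
```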
Returns:
Dataset: A `Dataset`.
"""
# NOTE(mrry): We must ensure that any non-tensor components in `dataset`
# are normalized to their dense tensor representation, so that the
# non-tensor oblivious unbatching logic will slice them appropriately.
# This leads to a somewhat inefficient re-encoding step for all non-tensor
# components.
#
# TODO(mrry): Consider optimizing this if it turns out to be a bottleneck.
def normalize(arg, *rest):
# pylint: disable=protected-access
if rest:
return self._element_structure._to_batched_tensor_list((arg,) + rest)
else:
return self._element_structure._to_batched_tensor_list(arg)
normalized_dataset = self.map(normalize)
# NOTE(mrry): Our `map()` has lost information about the structure of
# non-tensor components, so re-apply the structure of the original dataset.
restructured_dataset = _RestructuredDataset(normalized_dataset,
self._element_structure)
return _UnbatchDataset(restructured_dataset)
def with_options(self, options):
"""Returns a new `tf.data.Dataset` with the given options set.
The options are "global" in the sense that they apply to the entire dataset.
If options are set multiple times, they are merged so long as they do not
set the same option to different non-default values.
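For example (an illustrative sketch):
```python
dataset = ...
options = tf.data.Options()
options.experimental_deterministic = False
dataset = dataset.with_options(options)
```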
Args:
options: A `tf.data.Options` that identifies the options to use.
Returns:
Dataset: A `Dataset` with the given options.
Raises:
ValueError: when an option is set more than once to a non-default value
"""
return _OptionsDataset(self, options)
@tf_export(v1=["data.Dataset"])
class DatasetV1(DatasetV2):
"""Represents a potentially large set of elements.
A `Dataset` can be used to represent an input pipeline as a
collection of elements (nested structures of tensors) and a "logical
plan" of transformations that act on those elements.
"""
def __init__(self):
try:
variant_tensor = self._as_variant_tensor()
except AttributeError as e:
if "_as_variant_tensor" in str(e):
raise AttributeError("Please use _variant_tensor instead of "
"_as_variant_tensor() to obtain the variant "
"associated with a dataset")
raise AttributeError("A likely cause of this error is that the super "
"call for this dataset is not the last line of the "
"__init__ method. The base class causes the "
"_as_variant_tensor call in its constructor and "
"if that uses attributes defined in the __init__ "
"method, those attrs need to be defined before the "
"super call.")
super(DatasetV1, self).__init__(variant_tensor)
@abc.abstractmethod
def _as_variant_tensor(self):
"""Creates a scalar `tf.Tensor` of `tf.variant` representing this dataset.
Returns:
A scalar `tf.Tensor` of `tf.variant` type, which represents this dataset.
"""
raise NotImplementedError("Dataset._as_variant_tensor")
@deprecation.deprecated(
None, "Use `for ... in dataset:` to iterate over a dataset. If using "
"`tf.estimator`, return the `Dataset` object directly from your input "
"function. As a last resort, you can use "
"`tf.compat.v1.data.make_one_shot_iterator(dataset)`.")
def make_one_shot_iterator(self):
"""Creates an `Iterator` for enumerating the elements of this dataset.
Note: The returned iterator will be initialized automatically.
A "one-shot" iterator does not currently support re-initialization.
Returns:
An `Iterator` over the elements of this dataset.
"""
return self._make_one_shot_iterator()
def _make_one_shot_iterator(self): # pylint: disable=missing-docstring
if context.executing_eagerly():
return iterator_ops.IteratorV2(self)
_ensure_same_dataset_graph(self)
# Now that we create datasets at python object creation time, the capture
# by value _make_dataset() function would try to capture these variant
# tensor dataset inputs, which are marked as stateful ops and would throw
# an error if we try and capture them. We therefore traverse the graph
# to find all these ops and whitelist them so that the capturing
# logic instead of throwing an error recreates these ops which is what was
# happening before.
all_ds_ops = traverse.obtain_all_variant_tensor_ops(self)
graph_level_seed, op_level_seed = core_random_seed.get_seed(None)
# NOTE(mrry): We capture by value here to ensure that `_make_dataset()` is
# a 0-argument function.
@function.Defun(capture_by_value=True, whitelisted_stateful_ops=all_ds_ops)
def _make_dataset():
"""Factory function for a dataset."""
# NOTE(mrry): `Defun` does not capture the graph-level seed from the
# enclosing graph, so if a graph-level seed is present we set the local
# graph seed based on a combination of the graph- and op-level seeds.
if graph_level_seed is not None:
assert op_level_seed is not None
core_random_seed.set_random_seed(
(graph_level_seed + 87654321 * op_level_seed) % (2 ** 63 - 1))
dataset = self._apply_options()
return dataset._variant_tensor # pylint: disable=protected-access
try:
_make_dataset.add_to_graph(ops.get_default_graph())
except ValueError as err:
if "Cannot capture a stateful node" in str(err):
raise ValueError(
"Failed to create a one-shot iterator for a dataset. "
"`Dataset.make_one_shot_iterator()` does not support datasets that "
"capture stateful objects, such as a `Variable` or `LookupTable`. "
"In these cases, use `Dataset.make_initializable_iterator()`. "
"(Original error: %s)" % err)
else:
six.reraise(ValueError, err)
# pylint: disable=protected-access
return iterator_ops.Iterator(
gen_dataset_ops.one_shot_iterator(
dataset_factory=_make_dataset, **flat_structure(self)),
None, get_legacy_output_types(self), get_legacy_output_shapes(self),
get_legacy_output_classes(self))
@deprecation.deprecated(
None, "Use `for ... in dataset:` to iterate over a dataset. If using "
"`tf.estimator`, return the `Dataset` object directly from your input "
"function. As a last resort, you can use "
"`tf.compat.v1.data.make_initializable_iterator(dataset)`.")
def make_initializable_iterator(self, shared_name=None):
"""Creates an `Iterator` for enumerating the elements of this dataset.
Note: The returned iterator will be in an uninitialized state,
and you must run the `iterator.initializer` operation before using it:
```python
dataset = ...
iterator = dataset.make_initializable_iterator()
# ...
sess.run(iterator.initializer)
```
Args:
shared_name: (Optional.) If non-empty, the returned iterator will be
shared under the given name across multiple sessions that share the same
devices (e.g. when using a remote server).
Returns:
An `Iterator` over the elements of this dataset.
Raises:
RuntimeError: If eager execution is enabled.
"""
return self._make_initializable_iterator(shared_name)
def _make_initializable_iterator(self, shared_name=None): # pylint: disable=missing-docstring
if context.executing_eagerly():
raise RuntimeError(
"dataset.make_initializable_iterator is not supported when eager "
"execution is enabled.")
_ensure_same_dataset_graph(self)
dataset = self._apply_options()
if shared_name is None:
shared_name = ""
if compat.forward_compatible(2018, 8, 3):
iterator_resource = gen_dataset_ops.iterator_v2(
container="", shared_name=shared_name, **flat_structure(self))
else:
iterator_resource = gen_dataset_ops.iterator(
container="", shared_name=shared_name, **flat_structure(self))
with ops.colocate_with(iterator_resource):
initializer = gen_dataset_ops.make_iterator(
dataset._variant_tensor, # pylint: disable=protected-access
iterator_resource)
# pylint: disable=protected-access
return iterator_ops.Iterator(
iterator_resource, initializer, get_legacy_output_types(dataset),
get_legacy_output_shapes(dataset), get_legacy_output_classes(dataset))
@property
@deprecation.deprecated(
None, "Use `tf.compat.v1.data.get_output_classes(dataset)`.")
def output_classes(self):
"""Returns the class of each component of an element of this dataset.
The expected values are `tf.Tensor` and `tf.SparseTensor`.
Returns:
A nested structure of Python `type` objects corresponding to each
component of an element of this dataset.
"""
return self._element_structure._to_legacy_output_classes() # pylint: disable=protected-access
@property
@deprecation.deprecated(
None, "Use `tf.compat.v1.data.get_output_shapes(dataset)`.")
def output_shapes(self):
"""Returns the shape of each component of an element of this dataset.
Returns:
A nested structure of `tf.TensorShape` objects corresponding to each
component of an element of this dataset.
"""
return self._element_structure._to_legacy_output_shapes() # pylint: disable=protected-access
@property
@deprecation.deprecated(
None, "Use `tf.compat.v1.data.get_output_types(dataset)`.")
def output_types(self):
"""Returns the type of each component of an element of this dataset.
Returns:
A nested structure of `tf.DType` objects corresponding to each component
of an element of this dataset.
"""
return self._element_structure._to_legacy_output_types() # pylint: disable=protected-access
@property
def _element_structure(self):
# TODO(b/110122868): Remove this override once all `Dataset` instances
# implement `element_structure`.
return structure_lib.convert_legacy_structure(
self.output_types, self.output_shapes, self.output_classes)
@staticmethod
@functools.wraps(DatasetV2.from_tensors)
def from_tensors(tensors):
return DatasetV1Adapter(DatasetV2.from_tensors(tensors))
@staticmethod
@functools.wraps(DatasetV2.from_tensor_slices)
def from_tensor_slices(tensors):
return DatasetV1Adapter(DatasetV2.from_tensor_slices(tensors))
@staticmethod
@deprecation.deprecated(None, "Use `tf.data.Dataset.from_tensor_slices()`.")
def from_sparse_tensor_slices(sparse_tensor):
"""Splits each rank-N `tf.SparseTensor` in this dataset row-wise.
Args:
sparse_tensor: A `tf.SparseTensor`.
Returns:
Dataset: A `Dataset` of rank-(N-1) sparse tensors.
"""
return DatasetV1Adapter(SparseTensorSliceDataset(sparse_tensor))
@staticmethod
@functools.wraps(DatasetV2.from_generator)
def from_generator(generator, output_types, output_shapes=None, args=None):
return DatasetV1Adapter(DatasetV2.from_generator(
generator, output_types, output_shapes, args))
@staticmethod
@functools.wraps(DatasetV2.range)
def range(*args):
return DatasetV1Adapter(DatasetV2.range(*args))
@staticmethod
@functools.wraps(DatasetV2.zip)
def zip(datasets):
return DatasetV1Adapter(DatasetV2.zip(datasets))
@functools.wraps(DatasetV2.concatenate)
def concatenate(self, dataset):
return DatasetV1Adapter(super(DatasetV1, self).concatenate(dataset))
@functools.wraps(DatasetV2.prefetch)
def prefetch(self, buffer_size):
return DatasetV1Adapter(super(DatasetV1, self).prefetch(buffer_size))
@staticmethod
@functools.wraps(DatasetV2.list_files)
def list_files(file_pattern, shuffle=None, seed=None):
return DatasetV1Adapter(DatasetV2.list_files(file_pattern, shuffle, seed))
@functools.wraps(DatasetV2.repeat)
def repeat(self, count=None):
return DatasetV1Adapter(super(DatasetV1, self).repeat(count))
@functools.wraps(DatasetV2.shuffle)
def shuffle(self, buffer_size, seed=None, reshuffle_each_iteration=None):
return DatasetV1Adapter(super(DatasetV1, self).shuffle(
buffer_size, seed, reshuffle_each_iteration))
@functools.wraps(DatasetV2.cache)
def cache(self, filename=""):
return DatasetV1Adapter(super(DatasetV1, self).cache(filename))
@functools.wraps(DatasetV2.take)
def take(self, count):
return DatasetV1Adapter(super(DatasetV1, self).take(count))
@functools.wraps(DatasetV2.skip)
def skip(self, count):
return DatasetV1Adapter(super(DatasetV1, self).skip(count))
@functools.wraps(DatasetV2.shard)
def shard(self, num_shards, index):
return DatasetV1Adapter(super(DatasetV1, self).shard(num_shards, index))
@functools.wraps(DatasetV2.batch)
def batch(self, batch_size, drop_remainder=False):
return DatasetV1Adapter(super(DatasetV1, self).batch(
batch_size, drop_remainder))
@functools.wraps(DatasetV2.padded_batch)
def padded_batch(self,
batch_size,
padded_shapes,
padding_values=None,
drop_remainder=False):
return DatasetV1Adapter(super(DatasetV1, self).padded_batch(
batch_size, padded_shapes, padding_values, drop_remainder))
@functools.wraps(DatasetV2.map)
def map(self, map_func, num_parallel_calls=None):
if num_parallel_calls is None:
return DatasetV1Adapter(
MapDataset(self, map_func, preserve_cardinality=False))
else:
return DatasetV1Adapter(
ParallelMapDataset(
self, map_func, num_parallel_calls, preserve_cardinality=False))
@deprecation.deprecated(None, "Use `tf.data.Dataset.map()`.")
def map_with_legacy_function(self, map_func, num_parallel_calls=None):
"""Maps `map_func` across the elements of this dataset.
NOTE: This is an escape hatch for existing uses of `map` that do not work
with V2 functions. New uses are strongly discouraged and existing uses
should migrate to `map` as this method will be removed in V2.
Args:
map_func: A function mapping a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to
another nested structure of tensors.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number of elements to process asynchronously in parallel.
If not specified, elements will be processed sequentially. If the value
`tf.data.experimental.AUTOTUNE` is used, then the number of parallel
calls is set dynamically based on available CPU.
Returns:
Dataset: A `Dataset`.
"""
if num_parallel_calls is None:
return DatasetV1Adapter(
MapDataset(
self,
map_func,
preserve_cardinality=False,
use_legacy_function=True))
else:
return DatasetV1Adapter(
ParallelMapDataset(
self,
map_func,
num_parallel_calls,
preserve_cardinality=False,
use_legacy_function=True))
@functools.wraps(DatasetV2.flat_map)
def flat_map(self, map_func):
return DatasetV1Adapter(super(DatasetV1, self).flat_map(map_func))
@functools.wraps(DatasetV2.interleave)
def interleave(self,
map_func,
cycle_length=AUTOTUNE,
block_length=1,
num_parallel_calls=None):
return DatasetV1Adapter(super(DatasetV1, self).interleave(
map_func, cycle_length, block_length, num_parallel_calls))
@functools.wraps(DatasetV2.filter)
def filter(self, predicate):
return DatasetV1Adapter(super(DatasetV1, self).filter(predicate))
@deprecation.deprecated(None, "Use `tf.data.Dataset.filter()`.")
def filter_with_legacy_function(self, predicate):
"""Filters this dataset according to `predicate`.
NOTE: This is an escape hatch for existing uses of `filter` that do not work
with V2 functions. New uses are strongly discouraged and existing uses
should migrate to `filter` as this method will be removed in V2.
Args:
predicate: A function mapping a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to a
scalar `tf.bool` tensor.
Returns:
Dataset: The `Dataset` containing the elements of this dataset for which
`predicate` is `True`.
"""
return FilterDataset(self, predicate, use_legacy_function=True)
@functools.wraps(DatasetV2.apply)
def apply(self, transformation_func):
return DatasetV1Adapter(super(DatasetV1, self).apply(transformation_func))
@functools.wraps(DatasetV2.window)
def window(self, size, shift=None, stride=1, drop_remainder=False):
return DatasetV1Adapter(super(DatasetV1, self).window(
size, shift, stride, drop_remainder))
@functools.wraps(DatasetV2.with_options)
def with_options(self, options):
return DatasetV1Adapter(super(DatasetV1, self).with_options(options))
# TODO(b/119044825): Until all `tf.data` unit tests are converted to V2, keep
# this alias in place.
Dataset = DatasetV1
class DatasetV1Adapter(DatasetV1):
"""Wraps a V2 `Dataset` object in the `tf.compat.v1.data.Dataset` API."""
def __init__(self, dataset):
self._dataset = dataset
super(DatasetV1Adapter, self).__init__()
def _as_variant_tensor(self):
return self._dataset._variant_tensor # pylint: disable=protected-access
def _has_captured_ref(self):
return self._dataset._has_captured_ref() # pylint: disable=protected-access
def _inputs(self):
return self._dataset._inputs() # pylint: disable=protected-access
def _functions(self):
return self._dataset._functions() # pylint: disable=protected-access
def options(self):
return self._dataset.options()
@property
def _element_structure(self):
return self._dataset._element_structure # pylint: disable=protected-access
def __iter__(self):
return iter(self._dataset)
def _ensure_same_dataset_graph(dataset):
"""Walks the dataset graph to ensure all datasets come from the same graph."""
current_graph = ops.get_default_graph()
bfs_q = Queue.Queue()
bfs_q.put(dataset) # pylint: disable=protected-access
visited = []
while not bfs_q.empty():
ds = bfs_q.get()
visited.append(ds)
ds_graph = ds._graph # pylint: disable=protected-access
if current_graph != ds_graph:
logging.warning("The graph (" + str(current_graph) + ") of the iterator "
"is different from the graph (" + str(ds_graph) + ") "
"the dataset: " + str(ds._variant_tensor) + " was " # pylint: disable=protected-access
"created in. If you are using the Estimator API, "
"make sure that no part of the dataset returned by the "
"`input_fn` function is defined outside the `input_fn` "
"function. Please ensure that all datasets in the "
"pipeline are created in the same graph as the iterator. "
"NOTE: This warning will become an error in future "
"versions of TensorFlow.")
for input_ds in ds._inputs(): # pylint: disable=protected-access
if input_ds not in visited:
bfs_q.put(input_ds)
@tf_export(v1=["data.make_one_shot_iterator"])
def make_one_shot_iterator(dataset):
"""Creates a `tf.compat.v1.data.Iterator` for enumerating the elements of a dataset.
Note: The returned iterator will be initialized automatically.
A "one-shot" iterator does not support re-initialization.
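For example (an illustrative sketch of graph-mode usage; `sess` is an
assumed `tf.compat.v1.Session`):
```python
dataset = ...
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
# ...
value = sess.run(next_element)
```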
Args:
dataset: A `tf.data.Dataset`.
Returns:
A `tf.compat.v1.data.Iterator` over the elements of this dataset.
"""
try:
# Call the defined `_make_one_shot_iterator()` if there is one, because some
# datasets (e.g. for prefetching) override its behavior.
return dataset._make_one_shot_iterator() # pylint: disable=protected-access
except AttributeError:
return DatasetV1Adapter(dataset)._make_one_shot_iterator() # pylint: disable=protected-access
@tf_export(v1=["data.make_initializable_iterator"])
def make_initializable_iterator(dataset, shared_name=None):
"""Creates a `tf.compat.v1.data.Iterator` for enumerating the elements of a dataset.
Note: The returned iterator will be in an uninitialized state,
and you must run the `iterator.initializer` operation before using it:
```python
dataset = ...
iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
# ...
sess.run(iterator.initializer)
```
Args:
dataset: A `tf.data.Dataset`.
shared_name: (Optional.) If non-empty, the returned iterator will be shared
under the given name across multiple sessions that share the same devices
(e.g. when using a remote server).
Returns:
A `tf.compat.v1.data.Iterator` over the elements of `dataset`.
Raises:
RuntimeError: If eager execution is enabled.
"""
try:
# Call the defined `_make_initializable_iterator()` if there is one, because
# some datasets (e.g. for prefetching) override its behavior.
return dataset._make_initializable_iterator(shared_name) # pylint: disable=protected-access
except AttributeError:
return DatasetV1Adapter(dataset)._make_initializable_iterator(shared_name) # pylint: disable=protected-access
@tf_export("data.experimental.get_structure")
def get_structure(dataset_or_iterator):
"""Returns the `tf.data.experimental.Structure` of a `Dataset` or `Iterator`.
Args:
dataset_or_iterator: A `tf.data.Dataset`, `tf.compat.v1.data.Iterator`, or
`IteratorV2`.
Returns:
A `tf.data.experimental.Structure` representing the structure of the
elements of `dataset_or_iterator`.
Raises:
TypeError: If `dataset_or_iterator` is not a dataset or iterator object.
"""
try:
ret = dataset_or_iterator._element_structure # pylint: disable=protected-access
if isinstance(ret, structure_lib.Structure):
return ret
except AttributeError:
pass
raise TypeError("`dataset_or_iterator` must be a Dataset or Iterator object, "
"but got %s." % type(dataset_or_iterator))
@tf_export(v1=["data.get_output_shapes"])
def get_legacy_output_shapes(dataset_or_iterator):
"""Returns the output shapes of a `Dataset` or `Iterator`.
This utility method replaces the deprecated-in-V2
`tf.compat.v1.Dataset.output_shapes` property.
Args:
dataset_or_iterator: A `tf.data.Dataset`, `tf.compat.v1.data.Iterator`, or
`IteratorV2`.
Returns:
A nested structure of `tf.TensorShape` objects corresponding to each
component of an element of the given dataset or iterator.
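For example (an illustrative sketch):
```python
dataset = tf.data.Dataset.from_tensor_slices(([1, 2, 3], [4.0, 5.0, 6.0]))
tf.compat.v1.data.get_output_shapes(dataset)
# ==> (TensorShape([]), TensorShape([]))
```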
"""
return get_structure(dataset_or_iterator)._to_legacy_output_shapes() # pylint: disable=protected-access
@tf_export(v1=["data.get_output_types"])
def get_legacy_output_types(dataset_or_iterator):
  """Returns the output types of a `Dataset` or `Iterator`.
This utility method replaces the deprecated-in-V2
`tf.compat.v1.Dataset.output_types` property.
Args:
dataset_or_iterator: A `tf.data.Dataset`, `tf.compat.v1.data.Iterator`, or
`IteratorV2`.
Returns:
A nested structure of `tf.DType` objects corresponding to each component
of an element of this dataset.
"""
return get_structure(dataset_or_iterator)._to_legacy_output_types() # pylint: disable=protected-access
@tf_export(v1=["data.get_output_classes"])
def get_legacy_output_classes(dataset_or_iterator):
"""Returns the output classes of a `Dataset` or `Iterator`.
This utility method replaces the deprecated-in-V2
`tf.compat.v1.Dataset.output_classes` property.
Args:
dataset_or_iterator: A `tf.data.Dataset`, `tf.compat.v1.data.Iterator`, or
`IteratorV2`.
Returns:
A nested structure of Python `type` or `tf.data.experimental.Structure`
objects corresponding to each component of an element of this dataset.
"""
return get_structure(dataset_or_iterator)._to_legacy_output_classes() # pylint: disable=protected-access
@tf_export("data.Options")
class Options(options_lib.OptionsBase):
"""Represents options for tf.data.Dataset.
An `Options` object can be, for instance, used to control which static
optimizations to apply or whether to use performance modeling to dynamically
tune the parallelism of operations such as `tf.data.Dataset.map` or
`tf.data.Dataset.interleave`.
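For example (an illustrative sketch using only the options defined below):
```python
options = tf.data.Options()
options.experimental_deterministic = False
options.experimental_slack = True
dataset = tf.data.Dataset.range(10).with_options(options)
```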
"""
experimental_deterministic = options_lib.create_option(
name="experimental_deterministic",
ty=bool,
docstring=
"Whether the outputs need to be produced in deterministic order. If None,"
" defaults to True.")
experimental_distribute = options_lib.create_option(
name="experimental_distribute",
ty=distribute_options.DistributeOptions,
docstring=
"The distribution options associated with the dataset. See "
"`tf.data.experimental.DistributeOptions` for more details.",
default_factory=distribute_options.DistributeOptions)
experimental_optimization = options_lib.create_option(
name="experimental_optimization",
ty=optimization_options.OptimizationOptions,
docstring=
"The optimization options associated with the dataset. See "
"`tf.data.experimental.OptimizationOptions` for more details.",
default_factory=optimization_options.OptimizationOptions)
experimental_slack = options_lib.create_option(
name="experimental_slack",
ty=bool,
docstring="Whether to introduce 'slack' in the last `prefetch` of the "
"input pipeline, if it exists. This may reduce CPU contention with "
"accelerator host-side activity at the start of a step. The slack "
"frequency is determined by the number of devices attached to this "
"input pipeline. If None, defaults to False.")
experimental_stats = options_lib.create_option(
name="experimental_stats",
ty=stats_options.StatsOptions,
docstring=
"The statistics options associated with the dataset. See "
"`tf.data.experimental.StatsOptions` for more details.",
default_factory=stats_options.StatsOptions)
experimental_threading = options_lib.create_option(
name="experimental_threading",
ty=threading_options.ThreadingOptions,
docstring=
"The threading options associated with the dataset. See "
"`tf.data.experimental.ThreadingOptions` for more details.",
default_factory=threading_options.ThreadingOptions)
def _static_optimizations(self):
"""Produces the list of enabled static optimizations."""
result = []
result.extend(self.experimental_optimization._static_optimizations()) # pylint: disable=protected-access
if self.experimental_deterministic is False:
result.append("make_sloppy")
exp_stats_options = self.experimental_stats
if exp_stats_options and exp_stats_options.latency_all_edges:
result.append("latency_all_edges")
if self.experimental_slack:
result.append("slack")
return result
def _static_optimization_configs(self):
"""Produces the list of configurations for enabled static optimizations."""
result = []
if self.experimental_optimization:
result.extend(
self.experimental_optimization._static_optimization_configs()) # pylint: disable=protected-access
if self.experimental_slack:
num_devices = self.experimental_distribute.num_devices
if num_devices is None:
num_devices = 1
result.append("slack:slack_period:%d" % num_devices)
return result
def merge(self, options):
"""Merges itself with the given `tf.data.Options`.
The given `tf.data.Options` can be merged as long as there does not exist an
attribute that is set to different values in `self` and `options`.
Args:
options: a `tf.data.Options` to merge with
Raises:
ValueError: if the given `tf.data.Options` cannot be merged
Returns:
New `tf.data.Options()` object which is the result of merging self with
the input `tf.data.Options`.
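For example (an illustrative sketch):
```python
options1 = tf.data.Options()
options1.experimental_deterministic = False
options2 = tf.data.Options()
options2.experimental_slack = True
merged = options1.merge(options2)
```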
"""
return options_lib.merge_options(self, options)
class DatasetSource(DatasetV2):
"""Abstract class representing a dataset with no inputs."""
def _inputs(self):
return []
class UnaryDataset(DatasetV2):
"""Abstract class representing a dataset with one input."""
def __init__(self, input_dataset, variant_tensor):
self._input_dataset = input_dataset
super(UnaryDataset, self).__init__(variant_tensor)
def _inputs(self):
return [self._input_dataset]
class UnaryUnchangedStructureDataset(UnaryDataset):
"""Represents a unary dataset with the same input and output structure."""
def __init__(self, input_dataset, variant_tensor):
self._input_dataset = input_dataset
super(UnaryUnchangedStructureDataset, self).__init__(
input_dataset, variant_tensor)
@property
def _element_structure(self):
return self._input_dataset._element_structure # pylint: disable=protected-access
class TensorDataset(DatasetSource):
"""A `Dataset` with a single element, viz. a nested structure of tensors."""
def __init__(self, tensors):
"""See `Dataset.from_tensors()` for details."""
tensors = structure_lib.normalize_tensors(tensors)
self._structure = type_spec.type_spec_from_value(tensors)
self._tensors = self._structure._to_tensor_list(tensors) # pylint: disable=protected-access
variant_tensor = gen_dataset_ops.tensor_dataset(
self._tensors, output_shapes=self._structure._flat_shapes) # pylint: disable=protected-access
super(TensorDataset, self).__init__(variant_tensor)
@property
def _element_structure(self):
return self._structure
class TensorSliceDataset(DatasetSource):
"""A `Dataset` of slices from a nested structure of tensors."""
def __init__(self, tensors):
"""See `Dataset.from_tensor_slices()` for details."""
tensors = structure_lib.normalize_tensors(tensors)
batched_structure = type_spec.type_spec_from_value(tensors)
# pylint: disable=protected-access
self._tensors = batched_structure._to_batched_tensor_list(tensors)
self._structure = batched_structure._unbatch()
# pylint: enable=protected-access
batch_dim = tensor_shape.Dimension(tensor_shape.dimension_value(
self._tensors[0].get_shape()[0]))
for t in self._tensors[1:]:
batch_dim.assert_is_compatible_with(tensor_shape.Dimension(
tensor_shape.dimension_value(t.get_shape()[0])))
variant_tensor = gen_dataset_ops.tensor_slice_dataset(
self._tensors, output_shapes=self._structure._flat_shapes) # pylint: disable=protected-access
super(TensorSliceDataset, self).__init__(variant_tensor)
@property
def _element_structure(self):
return self._structure
class SparseTensorSliceDataset(DatasetSource):
"""A `Dataset` that splits a rank-N `tf.SparseTensor` into its rows."""
def __init__(self, sparse_tensor):
"""See `Dataset.from_sparse_tensor_slices()` for details."""
if not isinstance(sparse_tensor, sparse_tensor_lib.SparseTensor):
raise TypeError(
"`sparse_tensor` must be a `tf.SparseTensor` object. Was {}.".format(
sparse_tensor))
self._sparse_tensor = sparse_tensor
indices_shape = self._sparse_tensor.indices.get_shape()
shape_shape = self._sparse_tensor.dense_shape.get_shape()
rank = (indices_shape.dims[1] - 1).merge_with(shape_shape.dims[0] - 1)
self._structure = structure_lib.NestedStructure(
(structure_lib.TensorStructure(dtypes.int64, [None, rank]),
structure_lib.TensorStructure(self._sparse_tensor.dtype, [None]),
structure_lib.TensorStructure(dtypes.int64, [rank])))
variant_tensor = gen_dataset_ops.sparse_tensor_slice_dataset(
self._sparse_tensor.indices, self._sparse_tensor.values,
self._sparse_tensor.dense_shape)
super(SparseTensorSliceDataset, self).__init__(variant_tensor)
@property
def _element_structure(self):
return self._structure
class _VariantDataset(DatasetV2):
"""A Dataset wrapper around a `tf.variant`-typed function argument."""
def __init__(self, dataset_variant, structure):
self._structure = structure
super(_VariantDataset, self).__init__(dataset_variant)
def _inputs(self):
return []
@property
def _element_structure(self):
return self._structure
class _NestedVariant(composite_tensor.CompositeTensor):
def __init__(self, variant_tensor, element_structure, dataset_shape):
self._variant_tensor = variant_tensor
self._element_structure = element_structure
self._dataset_shape = dataset_shape
@property
def _type_spec(self):
return DatasetStructure(self._element_structure, self._dataset_shape)
@tf_export("data.experimental.from_variant")
def from_variant(variant, structure):
"""Constructs a dataset from the given variant and structure.
Args:
variant: A scalar `tf.variant` tensor representing a dataset.
structure: A `tf.data.experimental.Structure` object representing the
structure of each element in the dataset.
Returns:
A `tf.data.Dataset` instance.
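For example (an illustrative round trip; pairs with
`tf.data.experimental.to_variant` and `tf.data.experimental.get_structure`):
```python
dataset = tf.data.Dataset.range(10)
variant = tf.data.experimental.to_variant(dataset)
structure = tf.data.experimental.get_structure(dataset)
rebuilt = tf.data.experimental.from_variant(variant, structure)
```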
"""
return _VariantDataset(variant, structure) # pylint: disable=protected-access
@tf_export("data.experimental.to_variant")
def to_variant(dataset):
"""Returns a variant representing the given dataset.
Args:
dataset: A `tf.data.Dataset`.
Returns:
A scalar `tf.variant` tensor representing the given dataset.
"""
return dataset._variant_tensor # pylint: disable=protected-access
# TODO(b/133606651) Rename this class to DatasetSpec
@tf_export("data.DatasetSpec", "data.experimental.DatasetStructure")
class DatasetStructure(type_spec.BatchableTypeSpec):
"""Type specification for `tf.data.Dataset`."""
__slots__ = ["_element_structure", "_dataset_shape"]
def __init__(self, element_spec, dataset_shape=None):
self._element_structure = element_spec
if dataset_shape:
self._dataset_shape = dataset_shape
else:
self._dataset_shape = tensor_shape.TensorShape([])
@property
def value_type(self):
return _VariantDataset
def _serialize(self):
return (self._element_structure, self._dataset_shape)
@property
def _component_specs(self):
return tensor_spec.TensorSpec(self._dataset_shape, dtypes.variant)
def _to_components(self, value):
return value._variant_tensor # pylint: disable=protected-access
def _from_components(self, components):
# pylint: disable=protected-access
if self._dataset_shape.ndims == 0:
return _VariantDataset(components, self._element_structure)
else:
return _NestedVariant(components, self._element_structure,
self._dataset_shape)
def _to_tensor_list(self, value):
return [
ops.convert_to_tensor(
tf_nest.map_structure(lambda x: x._variant_tensor, value)) # pylint: disable=protected-access
]
@staticmethod
def from_value(value):
return DatasetStructure(value._element_structure) # pylint: disable=protected-access
def _batch(self, batch_size):
return DatasetStructure(
self._element_structure,
tensor_shape.TensorShape([batch_size]).concatenate(self._dataset_shape))
def _unbatch(self):
if self._dataset_shape.ndims == 0:
raise ValueError("Unbatching a dataset is only supported for rank >= 1")
return DatasetStructure(self._element_structure, self._dataset_shape[1:])
def _to_batched_tensor_list(self, value):
if self._dataset_shape.ndims == 0:
raise ValueError("Unbatching a dataset is only supported for rank >= 1")
return self._to_tensor_list(value)
def _to_legacy_output_types(self):
return self
def _to_legacy_output_shapes(self):
return self
def _to_legacy_output_classes(self):
return self
class StructuredFunctionWrapper(object):
"""A function wrapper that supports structured arguments and return values."""
# pylint: disable=protected-access
def __init__(self,
func,
transformation_name,
dataset=None,
input_classes=None,
input_shapes=None,
input_types=None,
input_structure=None,
add_to_graph=True,
use_legacy_function=False,
defun_kwargs=None):
"""Creates a new `StructuredFunctionWrapper` for the given function.
Args:
func: A function from a nested structure to another nested structure.
transformation_name: Human-readable name of the transformation in which
this function is being instantiated, for error messages.
dataset: (Optional.) A `tf.data.Dataset`. If given, the structure of this
dataset will be assumed as the structure for `func` arguments; otherwise
`input_classes`, `input_shapes`, and `input_types` must be defined.
input_classes: (Optional.) A nested structure of `type`. If given, this
argument defines the Python types for `func` arguments.
input_shapes: (Optional.) A nested structure of `tf.TensorShape`. If
given, this argument defines the shapes and structure for `func`
arguments.
input_types: (Optional.) A nested structure of `tf.DType`. If given, this
argument defines the element types and structure for `func` arguments.
input_structure: (Optional.) A `Structure` object. If given, this argument
defines the element types and structure for `func` arguments.
add_to_graph: (Optional.) If `True`, the function will be added to the
default graph.
use_legacy_function: (Optional.) A boolean that determines whether the
function be created using `tensorflow.python.eager.function.defun`
(default behavior) or `tensorflow.python.framework.function.Defun`
(legacy behavior).
defun_kwargs: (Optional.) A dictionary mapping string argument names to
values. If supplied, will be passed to `function` as keyword arguments.
Raises:
ValueError: If an invalid combination of `dataset`, `input_classes`,
`input_shapes`, and `input_types` is passed.
"""
if input_structure is None:
if dataset is None:
if input_classes is None or input_shapes is None or input_types is None:
raise ValueError("Either `dataset`, `input_structure` or all of "
"`input_classes`, `input_shapes`, and `input_types` "
"must be specified.")
self._input_structure = structure_lib.convert_legacy_structure(
input_types, input_shapes, input_classes)
else:
if not (input_classes is None and input_shapes is None and
input_types is None):
raise ValueError("Either `dataset`, `input_structure` or all of "
"`input_classes`, `input_shapes`, and `input_types` "
"must be specified.")
self._input_structure = dataset._element_structure
else:
if not (dataset is None and input_classes is None and input_shapes is None
and input_types is None):
raise ValueError("Either `dataset`, `input_structure`, or all of "
"`input_classes`, `input_shapes`, and `input_types` "
"must be specified.")
self._input_structure = input_structure
if defun_kwargs is None:
defun_kwargs = {}
readable_transformation_name = transformation_name.replace(
".", "_")[:-2] if len(transformation_name) > 2 else ""
func_name = "_".join(
[readable_transformation_name,
function_utils.get_func_name(func)])
ctx = autograph_ctx.control_status_ctx()
if ctx.status in (
autograph_ctx.Status.ENABLED, autograph_ctx.Status.UNSPECIFIED):
apply_autograph = True
else:
apply_autograph = False
def _warn_if_collections(transformation_name):
"""Prints a warning if the given graph uses common graph collections.
NOTE(mrry): Currently a warning is only generated for resources. Any
variables created will be automatically hoisted out to the outermost scope
using `init_scope()`. Some collections (such as for control-flow contexts)
are benign and should not generate a warning.
Args:
transformation_name: A human-readable name for the transformation.
"""
warnings.warn("Creating resources inside a function passed to %s "
"is not supported. Create each resource outside the "
"function, and capture it inside the function to use it." %
transformation_name, stacklevel=5)
def _wrapper_helper(*args):
"""Wrapper for passing nested structures to and from tf.data functions."""
nested_args = self._input_structure._from_compatible_tensor_list(args)
if not _should_unpack_args(nested_args):
nested_args = (nested_args,)
if apply_autograph:
try:
ret = autograph.converted_call(
func, None,
autograph.ConversionOptions(
recursive=True,
# TODO(mdan): Grab features from context.
optional_features=None,
force_conversion=False,
), nested_args, {})
except Exception as e: # pylint:disable=broad-except
if hasattr(e, "ag_error_metadata"):
raise e.ag_error_metadata.to_exception(type(e))
else:
raise
else:
ret = func(*nested_args)
# If `func` returns a list of tensors, `nest.flatten()` and
# `ops.convert_to_tensor()` would conspire to attempt to stack
# those tensors into a single tensor, because the customized
# version of `nest.flatten()` does not recurse into lists. Since
# it is more likely that the list arose from returning the
# result of an operation (such as `tf.numpy_function()`) that returns a
# list of not-necessarily-stackable tensors, we treat the
returned value as a `tuple` instead. A user wishing to pack
# the return value into a single tensor can use an explicit
# `tf.stack()` before returning.
if isinstance(ret, list):
ret = tuple(ret)
try:
self._output_structure = type_spec.type_spec_from_value(ret)
except (ValueError, TypeError):
raise TypeError("Unsupported return value from function passed to "
"%s: %s." % (transformation_name, ret))
return ret
if use_legacy_function:
func_name = func_name + "_" + str(ops.uid())
@function.Defun(
*self._input_structure._flat_types,
func_name=func_name,
**defun_kwargs)
def wrapper_fn(*args):
ret = _wrapper_helper(*args)
# _warn_if_collections(transformation_name, ops.get_default_graph(), 0)
return self._output_structure._to_tensor_list(ret)
self._function = wrapper_fn
resource_tracker = tracking.ResourceTracker()
with tracking.resource_tracker_scope(resource_tracker):
if add_to_graph:
self._function.add_to_graph(ops.get_default_graph())
else:
# Use the private method that will execute `wrapper_fn` but delay
# adding it to the graph in case (e.g.) we need to rerun the function.
self._function._create_definition_if_needed()
if resource_tracker.resources:
_warn_if_collections(transformation_name)
else:
defun_kwargs.update({"func_name": func_name})
# Note: _wrapper_helper will apply autograph based on context.
@eager_function.defun_with_attributes(
input_signature=self._input_structure._flat_tensor_specs,
autograph=False,
attributes=defun_kwargs)
def wrapper_fn(*args): # pylint: disable=missing-docstring
ret = _wrapper_helper(*args)
ret = self._output_structure._to_tensor_list(ret)
return [ops.convert_to_tensor(t) for t in ret]
resource_tracker = tracking.ResourceTracker()
with tracking.resource_tracker_scope(resource_tracker):
self._function = wrapper_fn._get_concrete_function_internal()
if add_to_graph:
self._function.add_to_graph(ops.get_default_graph())
if resource_tracker.resources:
_warn_if_collections(transformation_name)
outer_graph_seed = ops.get_default_graph().seed
if outer_graph_seed and self._function.graph.seed == outer_graph_seed:
if self._function.graph._seed_used:
warnings.warn(
"Seed %s from outer graph might be getting used by function %s, "
"if the random op has not been provided any seed. Explicitly set "
"the seed in the function if this is not the intended behavior."
%(outer_graph_seed, func_name), stacklevel=4)
# pylint: enable=protected-access
@property
def output_structure(self):
return self._output_structure
@property
def output_classes(self):
return self._output_structure._to_legacy_output_classes() # pylint: disable=protected-access
@property
def output_shapes(self):
return self._output_structure._to_legacy_output_shapes() # pylint: disable=protected-access
@property
def output_types(self):
return self._output_structure._to_legacy_output_types() # pylint: disable=protected-access
@property
def function(self):
return self._function
def flat_structure(dataset):
"""Helper for setting `output_shapes` and `output_types` attrs of Dataset ops.
Most Dataset op constructors expect `output_shapes` and `output_types`
arguments that represent the flattened structure of an element. This helper
function generates these attrs as a keyword argument dictionary, allowing
`Dataset._variant_tensor` implementations to pass
`**flat_structure(self)` to the op constructor.
Args:
dataset: A `tf.data.Dataset`.
Returns:
A dictionary of keyword arguments that can be passed to many Dataset op
constructors.
"""
# pylint: disable=protected-access
structure = dataset._element_structure
return {
"output_shapes": structure._flat_shapes,
"output_types": structure._flat_types,
}
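# Illustrative sketch (not part of the original TensorFlow source): for a
# dataset whose elements are scalar `tf.int64` values, `flat_structure()`
# returns roughly
#   {"output_shapes": [TensorShape([])], "output_types": [tf.int64]}
# and Dataset op constructors receive it via `**flat_structure(self)`.
def _example_flat_structure_usage(dataset):  # pragma: no cover - illustrative
  attrs = flat_structure(dataset)
  return attrs["output_types"], attrs["output_shapes"]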
class _GeneratorDataset(DatasetSource):
"""A `Dataset` that generates elements by invoking a function."""
def __init__(self, init_args, init_func, next_func, finalize_func):
"""Constructs a `_GeneratorDataset`.
Args:
init_args: A nested structure representing the arguments to `init_func`.
init_func: A TensorFlow function that will be called on `init_args` each
time a C++ iterator over this dataset is constructed. Returns a nested
structure representing the "state" of the dataset.
next_func: A TensorFlow function that will be called on the result of
`init_func` to produce each element, and that raises `OutOfRangeError`
to terminate iteration.
finalize_func: A TensorFlow function that will be called on the result of
`init_func` immediately before a C++ iterator over this dataset is
destroyed. The return value is ignored.
"""
self._init_args = init_args
self._init_structure = type_spec.type_spec_from_value(init_args)
self._init_func = StructuredFunctionWrapper(
init_func,
self._transformation_name(),
input_structure=self._init_structure)
self._next_func = StructuredFunctionWrapper(
next_func,
self._transformation_name(),
input_structure=self._init_func.output_structure)
self._finalize_func = StructuredFunctionWrapper(
finalize_func,
self._transformation_name(),
input_structure=self._init_func.output_structure)
variant_tensor = gen_dataset_ops.generator_dataset(
self._init_structure._to_tensor_list(self._init_args) # pylint: disable=protected-access
+ self._init_func.function.captured_inputs,
self._next_func.function.captured_inputs,
self._finalize_func.function.captured_inputs,
init_func=self._init_func.function,
next_func=self._next_func.function,
finalize_func=self._finalize_func.function,
**flat_structure(self))
super(_GeneratorDataset, self).__init__(variant_tensor)
@property
def _element_structure(self):
return self._next_func.output_structure
def _transformation_name(self):
return "Dataset.from_generator()"
class ZipDataset(DatasetV2):
"""A `Dataset` that zips its inputs together."""
def __init__(self, datasets):
"""See `Dataset.zip()` for details."""
for ds in nest.flatten(datasets):
if not isinstance(ds, DatasetV2):
if isinstance(ds, list):
message = ("The argument to `Dataset.zip()` must be a nested "
"structure of `Dataset` objects. Nested structures do not "
"support Python lists; please use a tuple instead.")
else:
message = ("The argument to `Dataset.zip()` must be a nested "
"structure of `Dataset` objects.")
raise TypeError(message)
self._datasets = datasets
self._structure = structure_lib.NestedStructure(
nest.pack_sequence_as(
self._datasets,
[ds._element_structure for ds in nest.flatten(self._datasets)])) # pylint: disable=protected-access
# pylint: disable=protected-access
variant_tensor = gen_dataset_ops.zip_dataset(
[ds._variant_tensor for ds in nest.flatten(self._datasets)],
**flat_structure(self))
# pylint: enable=protected-access
super(ZipDataset, self).__init__(variant_tensor)
def _inputs(self):
return nest.flatten(self._datasets)
@property
def _element_structure(self):
return self._structure
class ConcatenateDataset(DatasetV2):
"""A `Dataset` that concatenates its input with given dataset."""
def __init__(self, input_dataset, dataset_to_concatenate):
"""See `Dataset.concatenate()` for details."""
self._input_dataset = input_dataset
self._dataset_to_concatenate = dataset_to_concatenate
output_types = get_legacy_output_types(input_dataset)
if output_types != get_legacy_output_types(dataset_to_concatenate):
raise TypeError(
"Two datasets to concatenate have different types %s and %s" %
(output_types, get_legacy_output_types(dataset_to_concatenate)))
output_classes = get_legacy_output_classes(input_dataset)
if output_classes != get_legacy_output_classes(dataset_to_concatenate):
raise TypeError(
"Two datasets to concatenate have different classes %s and %s" %
(output_classes, get_legacy_output_classes(dataset_to_concatenate)))
input_shapes = get_legacy_output_shapes(self._input_dataset)
output_shapes = nest.pack_sequence_as(input_shapes, [
ts1.most_specific_compatible_shape(ts2)
for (ts1, ts2) in zip(
nest.flatten(input_shapes),
nest.flatten(get_legacy_output_shapes(
self._dataset_to_concatenate)))
])
self._structure = structure_lib.convert_legacy_structure(
output_types, output_shapes, output_classes)
self._input_datasets = [input_dataset, dataset_to_concatenate]
# pylint: disable=protected-access
variant_tensor = gen_dataset_ops.concatenate_dataset(
input_dataset._variant_tensor, dataset_to_concatenate._variant_tensor,
**flat_structure(self))
# pylint: enable=protected-access
super(ConcatenateDataset, self).__init__(variant_tensor)
def _inputs(self):
return self._input_datasets
@property
def _element_structure(self):
return self._structure
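# Illustrative sketch (not part of the original TensorFlow source): when
# concatenating, each component shape is relaxed to the most specific shape
# compatible with both inputs, so known dimensions that disagree become `None`.
def _example_concatenate_shape_merging():  # pragma: no cover - illustrative
  a = tensor_shape.TensorShape([4, 10])
  b = tensor_shape.TensorShape([None, 10])
  c = tensor_shape.TensorShape([4, 5])
  # (4, 10) with (None, 10) -> (None, 10); (4, 10) with (4, 5) -> (4, None).
  return (a.most_specific_compatible_shape(b),
          a.most_specific_compatible_shape(c))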
class RepeatDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that repeats its input several times."""
def __init__(self, input_dataset, count):
"""See `Dataset.repeat()` for details."""
self._input_dataset = input_dataset
if count is None:
self._count = constant_op.constant(-1, dtype=dtypes.int64, name="count")
else:
self._count = ops.convert_to_tensor(
count, dtype=dtypes.int64, name="count")
variant_tensor = gen_dataset_ops.repeat_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
count=self._count,
**flat_structure(self))
super(RepeatDataset, self).__init__(input_dataset, variant_tensor)
class RangeDataset(DatasetSource):
"""A `Dataset` of a step separated range of values."""
def __init__(self, *args):
"""See `Dataset.range()` for details."""
self._parse_args(*args)
self._structure = structure_lib.TensorStructure(dtypes.int64, [])
variant_tensor = gen_dataset_ops.range_dataset(
start=self._start,
stop=self._stop,
step=self._step,
**flat_structure(self))
super(RangeDataset, self).__init__(variant_tensor)
def _parse_args(self, *args):
"""Parse arguments according to the same rules as the `range()` builtin."""
if len(args) == 1:
self._start = self._build_tensor(0, "start")
self._stop = self._build_tensor(args[0], "stop")
self._step = self._build_tensor(1, "step")
elif len(args) == 2:
self._start = self._build_tensor(args[0], "start")
self._stop = self._build_tensor(args[1], "stop")
self._step = self._build_tensor(1, "step")
elif len(args) == 3:
self._start = self._build_tensor(args[0], "start")
self._stop = self._build_tensor(args[1], "stop")
self._step = self._build_tensor(args[2], "step")
else:
raise ValueError("Invalid arguments to RangeDataset: %s" % str(args))
def _build_tensor(self, int64_value, name):
return ops.convert_to_tensor(int64_value, dtype=dtypes.int64, name=name)
@property
def _element_structure(self):
return self._structure
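# Illustrative sketch (not part of the original TensorFlow source): the three
# argument forms mirror the Python `range()` builtin.
def _example_range_dataset_forms():  # pragma: no cover - illustrative
  # RangeDataset(5)        -> start=0, stop=5,  step=1 -> 0, 1, 2, 3, 4
  # RangeDataset(2, 5)     -> start=2, stop=5,  step=1 -> 2, 3, 4
  # RangeDataset(2, 10, 3) -> start=2, stop=10, step=3 -> 2, 5, 8
  return RangeDataset(2, 10, 3)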
class CacheDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that caches elements of its input."""
def __init__(self, input_dataset, filename):
"""See `Dataset.cache()` for details."""
self._input_dataset = input_dataset
self._filename = ops.convert_to_tensor(
filename, dtype=dtypes.string, name="filename")
variant_tensor = gen_dataset_ops.cache_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
filename=self._filename,
**flat_structure(self))
super(CacheDataset, self).__init__(input_dataset, variant_tensor)
class ShuffleDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that randomly shuffles the elements of its input."""
def __init__(self,
input_dataset,
buffer_size,
seed=None,
reshuffle_each_iteration=None):
"""Randomly shuffles the elements of this dataset.
Args:
input_dataset: The input dataset.
buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements from this dataset from which the new dataset will sample.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
seed that will be used to create the distribution. See
`tf.compat.v1.set_random_seed` for behavior.
reshuffle_each_iteration: (Optional.) A boolean, which if true indicates
that the dataset should be pseudorandomly reshuffled each time it is
iterated over. (Defaults to `True`.)
Returns:
A `Dataset`.
Raises:
ValueError: if invalid arguments are provided.
"""
self._input_dataset = input_dataset
self._buffer_size = ops.convert_to_tensor(
buffer_size, dtype=dtypes.int64, name="buffer_size")
self._seed, self._seed2 = random_seed.get_seed(seed)
if reshuffle_each_iteration is None:
self._reshuffle_each_iteration = True
else:
self._reshuffle_each_iteration = reshuffle_each_iteration
variant_tensor = gen_dataset_ops.shuffle_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
buffer_size=self._buffer_size,
seed=self._seed,
seed2=self._seed2,
reshuffle_each_iteration=self._reshuffle_each_iteration,
**flat_structure(self))
super(ShuffleDataset, self).__init__(input_dataset, variant_tensor)
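# Illustrative sketch (not part of the original TensorFlow source): elements
# are drawn uniformly from a buffer of `buffer_size` elements, and by default
# a new permutation is used on every pass over the input; the sizes and seed
# below are arbitrary.
def _example_shuffle(input_dataset):  # pragma: no cover - illustrative
  return ShuffleDataset(
      input_dataset, buffer_size=1024, seed=42, reshuffle_each_iteration=False)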
class TakeDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` containing the first `count` elements from its input."""
def __init__(self, input_dataset, count):
"""See `Dataset.take()` for details."""
self._input_dataset = input_dataset
self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name="count")
variant_tensor = gen_dataset_ops.take_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
count=self._count,
**flat_structure(self))
super(TakeDataset, self).__init__(input_dataset, variant_tensor)
class SkipDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` skipping the first `count` elements from its input."""
def __init__(self, input_dataset, count):
"""See `Dataset.skip()` for details."""
self._input_dataset = input_dataset
self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name="count")
variant_tensor = gen_dataset_ops.skip_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
count=self._count,
**flat_structure(self))
super(SkipDataset, self).__init__(input_dataset, variant_tensor)
class ShardDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` for sharding its input."""
def __init__(self, input_dataset, num_shards, index):
"""See `Dataset.shard()` for details."""
self._input_dataset = input_dataset
self._num_shards = ops.convert_to_tensor(
num_shards, dtype=dtypes.int64, name="num_shards")
self._index = ops.convert_to_tensor(index, dtype=dtypes.int64, name="index")
variant_tensor = gen_dataset_ops.shard_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
num_shards=self._num_shards,
index=self._index,
**flat_structure(self))
super(ShardDataset, self).__init__(input_dataset, variant_tensor)
class BatchDataset(UnaryDataset):
"""A `Dataset` that batches contiguous elements from its input."""
def __init__(self, input_dataset, batch_size, drop_remainder):
"""See `Dataset.batch()` for details."""
self._input_dataset = input_dataset
self._batch_size = ops.convert_to_tensor(
batch_size, dtype=dtypes.int64, name="batch_size")
self._drop_remainder = ops.convert_to_tensor(
drop_remainder, dtype=dtypes.bool, name="drop_remainder")
constant_drop_remainder = tensor_util.constant_value(self._drop_remainder)
# pylint: disable=protected-access
if constant_drop_remainder:
# NOTE(mrry): `constant_drop_remainder` may be `None` (unknown statically)
# or `False` (explicitly retaining the remainder).
self._structure = input_dataset._element_structure._batch(
tensor_util.constant_value(self._batch_size))
else:
self._structure = input_dataset._element_structure._batch(None)
variant_tensor = gen_dataset_ops.batch_dataset_v2(
input_dataset._variant_tensor, # pylint: disable=protected-access
batch_size=self._batch_size,
drop_remainder=self._drop_remainder,
**flat_structure(self))
super(BatchDataset, self).__init__(input_dataset, variant_tensor)
@property
def _element_structure(self):
return self._structure
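# Illustrative sketch (not part of the original TensorFlow source): the leading
# (batch) dimension of the element structure is only statically known when
# `drop_remainder` is statically `True`.
def _example_batch_static_shapes(input_dataset):  # pragma: no cover
  fixed = BatchDataset(input_dataset, batch_size=4, drop_remainder=True)
  ragged = BatchDataset(input_dataset, batch_size=4, drop_remainder=False)
  # For scalar input elements the legacy shapes are roughly (4,) and (None,).
  return (get_legacy_output_shapes(fixed), get_legacy_output_shapes(ragged))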
class _VariantTracker(tracking.CapturableResource):
"""Allows export of functions capturing a Dataset in SavedModels.
When saving a SavedModel, `tf.saved_model.save` traverses the object
graph. Since Datasets reference _VariantTracker objects, that traversal will
find a _VariantTracker for each Dataset and so know how to save and restore
functions which reference the Dataset's variant Tensor.
"""
def __init__(self, variant_tensor, resource_creator):
"""Record that `variant_tensor` is associated with `resource_creator`.
Args:
variant_tensor: The variant-dtype Tensor associated with the Dataset. This
Tensor will be a captured input to functions which use the Dataset, and
is used by saving code to identify the corresponding _VariantTracker.
resource_creator: A zero-argument function which creates a new
variant-dtype Tensor. This function will be included in SavedModels and
run to re-create the Dataset's variant Tensor on restore.
"""
super(_VariantTracker, self).__init__(device="CPU")
self._resource_handle = variant_tensor
self._create_resource = resource_creator
def _is_padded_shape_compatible_with(padded_shape, input_component_shape):
"""Returns `True` if `input_component_shape` can be padded to `padded_shape`.
Args:
padded_shape: A `tf.TensorShape`.
input_component_shape: A `tf.TensorShape`.
Returns:
`True` if `input_component_shape` can be padded to `padded_shape`, otherwise
`False`.
"""
if padded_shape.dims is None or input_component_shape.dims is None:
return True
if len(padded_shape.dims) != len(input_component_shape.dims):
return False
for padded_dim, input_dim in zip(
padded_shape.dims, input_component_shape.dims):
if (padded_dim.value is not None and input_dim.value is not None
and padded_dim.value < input_dim.value):
return False
return True
def _padded_shape_to_tensor(padded_shape, input_component_shape):
"""Converts `padded_shape` to a `tf.Tensor` representing that shape.
Args:
padded_shape: A shape-like object, which may be a `tf.TensorShape`, a Python
sequence, or a 1-D `tf.Tensor` of `tf.int64` elements.
input_component_shape: A `tf.TensorShape`, with which `padded_shape` must
be compatible.
Returns:
A 1-D `tf.Tensor` of `tf.int64` elements, representing `padded_shape`.
Raises:
ValueError: If `padded_shape` is not a shape or not compatible with
`input_component_shape`.
TypeError: If `padded_shape` is not convertible to a `tf.int64` tensor.
"""
try:
# Try to convert the `padded_shape` to a `tf.TensorShape`
padded_shape_as_shape = tensor_shape.as_shape(padded_shape)
# We will return the "canonical" tensor representation, which uses
# `-1` in place of `None`.
ret = ops.convert_to_tensor(
[dim if dim is not None else -1
for dim in padded_shape_as_shape.as_list()], dtype=dtypes.int64)
except (TypeError, ValueError):
# The argument was not trivially convertible to a
# `tf.TensorShape`, so fall back on the conversion to tensor
# machinery.
ret = ops.convert_to_tensor(padded_shape, preferred_dtype=dtypes.int64)
if ret.shape.dims is not None and len(ret.shape.dims) != 1:
raise ValueError(
"Padded shape %s must be a 1-D tensor of tf.int64 values, but its "
"shape was %s." % (padded_shape, ret.shape))
if ret.dtype != dtypes.int64:
raise TypeError(
"Padded shape %s must be a 1-D tensor of tf.int64 values, but its "
"element type was %s." % (padded_shape, ret.dtype.name))
padded_shape_as_shape = tensor_util.constant_value_as_shape(ret)
if not _is_padded_shape_compatible_with(padded_shape_as_shape,
input_component_shape):
raise ValueError("The padded shape %s is not compatible with the "
"corresponding input component shape %s."
% (padded_shape_as_shape, input_component_shape))
return ret
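# Illustrative sketch (not part of the original TensorFlow source): a padded
# shape is compatible when every known padded dimension is at least as large
# as the corresponding known input dimension; unknown dimensions always match.
def _example_padded_shape_checks():  # pragma: no cover - illustrative
  ok = _is_padded_shape_compatible_with(
      tensor_shape.TensorShape([None, 10]), tensor_shape.TensorShape([3, 7]))
  too_small = _is_padded_shape_compatible_with(
      tensor_shape.TensorShape([5]), tensor_shape.TensorShape([8]))
  # `_padded_shape_to_tensor` canonicalizes unknown dimensions to -1, so
  # TensorShape([None, 10]) becomes the int64 tensor [-1, 10].
  padded = _padded_shape_to_tensor(
      tensor_shape.TensorShape([None, 10]), tensor_shape.TensorShape([3, 7]))
  return ok, too_small, padded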
def _padding_value_to_tensor(value, output_type):
"""Converts the padding value to a tensor.
Args:
value: The padding value.
output_type: Its expected dtype.
Returns:
A scalar `Tensor`.
Raises:
ValueError: if the padding value is not a scalar.
TypeError: if the padding value's type does not match `output_type`.
"""
value = ops.convert_to_tensor(value, name="padding_value")
if not value.shape.is_compatible_with(tensor_shape.scalar()):
raise ValueError("Padding value should be a scalar, but is not: %s" % value)
if value.dtype != output_type:
raise TypeError("Padding value tensor (%s) does not match output type: %s" %
(value, output_type))
return value
def _default_padding(input_dataset):
"""Returns default padding tensors in a structure matching `input_dataset`."""
def make_zero(t):
if t.base_dtype == dtypes.string:
return ""
elif t.base_dtype == dtypes.variant:
error_msg = ("Unable to create padding for field of type 'variant' "
"because t.base_type == dtypes.variant == "
"{}.".format(
t.base_dtype))
raise TypeError(error_msg)
else:
return np.zeros_like(t.as_numpy_dtype())
return nest.map_structure(
make_zero, get_legacy_output_types(input_dataset))
class PaddedBatchDataset(UnaryDataset):
"""A `Dataset` that batches and pads contiguous elements from its input."""
def __init__(self, input_dataset, batch_size, padded_shapes, padding_values,
drop_remainder):
"""See `Dataset.batch()` for details."""
self._input_dataset = input_dataset
if sparse.any_sparse(get_legacy_output_classes(input_dataset)):
# TODO(b/63669786): support batching of sparse tensors
raise TypeError(
"Batching of padded sparse tensors is not currently supported")
self._input_dataset = input_dataset
self._batch_size = ops.convert_to_tensor(
batch_size, dtype=dtypes.int64, name="batch_size")
padding_values = (
padding_values
if padding_values is not None else _default_padding(input_dataset))
input_shapes = get_legacy_output_shapes(input_dataset)
flat_padded_shapes = nest.flatten_up_to(input_shapes, padded_shapes)
flat_padded_shapes_as_tensors = []
for input_component_shape, padded_shape in zip(
nest.flatten(input_shapes), flat_padded_shapes):
flat_padded_shapes_as_tensors.append(
_padded_shape_to_tensor(padded_shape, input_component_shape))
self._padded_shapes = nest.pack_sequence_as(input_shapes,
flat_padded_shapes_as_tensors)
self._padding_values = nest.map_structure_up_to(
input_shapes, _padding_value_to_tensor, padding_values,
get_legacy_output_types(input_dataset))
self._drop_remainder = ops.convert_to_tensor(
drop_remainder, dtype=dtypes.bool, name="drop_remainder")
def _padded_shape_to_batch_shape(s):
return tensor_shape.vector(
tensor_util.constant_value(self._batch_size) if smart_cond.
smart_constant_value(self._drop_remainder) else None).concatenate(
tensor_util.constant_value_as_shape(s))
output_shapes = nest.map_structure(
_padded_shape_to_batch_shape, self._padded_shapes)
self._structure = structure_lib.convert_legacy_structure(
get_legacy_output_types(self._input_dataset), output_shapes,
get_legacy_output_classes(self._input_dataset))
# pylint: disable=protected-access
# TODO(jsimsa): Switch to using v2 only any time after 6/30/2018.
if smart_cond.smart_constant_value(self._drop_remainder) is False:
variant_tensor = gen_dataset_ops.padded_batch_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
batch_size=self._batch_size,
padded_shapes=[
ops.convert_to_tensor(s, dtype=dtypes.int64)
for s in nest.flatten(self._padded_shapes)
],
padding_values=nest.flatten(self._padding_values),
output_shapes=self._structure._flat_shapes)
else:
variant_tensor = gen_dataset_ops.padded_batch_dataset_v2(
input_dataset._variant_tensor, # pylint: disable=protected-access
batch_size=self._batch_size,
padded_shapes=[
ops.convert_to_tensor(s, dtype=dtypes.int64)
for s in nest.flatten(self._padded_shapes)
],
padding_values=nest.flatten(self._padding_values),
drop_remainder=self._drop_remainder,
output_shapes=self._structure._flat_shapes)
super(PaddedBatchDataset, self).__init__(input_dataset, variant_tensor)
@property
def _element_structure(self):
return self._structure
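# Illustrative sketch (not part of the original TensorFlow source), assuming a
# single-component dataset of variable-length `tf.int64` vectors; the fixed
# length 100, batch size 32, and zero padding value are arbitrary. Passing
# `None` (or -1) in `padded_shapes` would instead pad to the longest element
# in each batch.
def _example_padded_batch(input_dataset):  # pragma: no cover - illustrative
  return PaddedBatchDataset(
      input_dataset,
      batch_size=32,
      padded_shapes=tensor_shape.TensorShape([100]),
      padding_values=np.int64(0),
      drop_remainder=False)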
def _should_unpack_args(args):
"""Returns `True` if `args` should be `*args` when passed to a callable."""
return type(args) is tuple # pylint: disable=unidiomatic-typecheck
class MapDataset(UnaryDataset):
"""A `Dataset` that maps a function over elements in its input."""
def __init__(self,
input_dataset,
map_func,
use_inter_op_parallelism=True,
preserve_cardinality=False,
use_legacy_function=False):
"""See `Dataset.map()` for details."""
self._input_dataset = input_dataset
self._use_inter_op_parallelism = use_inter_op_parallelism
self._preserve_cardinality = preserve_cardinality
self._map_func = StructuredFunctionWrapper(
map_func,
self._transformation_name(),
dataset=input_dataset,
use_legacy_function=use_legacy_function)
variant_tensor = gen_dataset_ops.map_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs,
f=self._map_func.function,
use_inter_op_parallelism=self._use_inter_op_parallelism,
preserve_cardinality=self._preserve_cardinality,
**flat_structure(self))
super(MapDataset, self).__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._map_func]
@property
def _element_structure(self):
return self._map_func.output_structure
def _transformation_name(self):
return "Dataset.map()"
class ParallelMapDataset(UnaryDataset):
"""A `Dataset` that maps a function over elements in its input in parallel."""
def __init__(self,
input_dataset,
map_func,
num_parallel_calls,
use_inter_op_parallelism=True,
preserve_cardinality=False,
use_legacy_function=False):
"""See `Dataset.map()` for details."""
self._input_dataset = input_dataset
self._use_inter_op_parallelism = use_inter_op_parallelism
self._map_func = StructuredFunctionWrapper(
map_func,
self._transformation_name(),
dataset=input_dataset,
use_legacy_function=use_legacy_function)
self._num_parallel_calls = ops.convert_to_tensor(
num_parallel_calls, dtype=dtypes.int32, name="num_parallel_calls")
self._preserve_cardinality = preserve_cardinality
variant_tensor = gen_dataset_ops.parallel_map_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs,
f=self._map_func.function,
num_parallel_calls=self._num_parallel_calls,
use_inter_op_parallelism=self._use_inter_op_parallelism,
preserve_cardinality=self._preserve_cardinality,
**flat_structure(self))
super(ParallelMapDataset, self).__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._map_func]
@property
def _element_structure(self):
return self._map_func.output_structure
def _transformation_name(self):
return "Dataset.map()"
class FlatMapDataset(UnaryDataset):
"""A `Dataset` that maps a function over its input and flattens the result."""
def __init__(self, input_dataset, map_func):
"""See `Dataset.flat_map()` for details."""
self._input_dataset = input_dataset
self._map_func = StructuredFunctionWrapper(
map_func, self._transformation_name(), dataset=input_dataset)
if not isinstance(self._map_func.output_structure, DatasetStructure):
raise TypeError(
"`map_func` must return a `Dataset` object. Got {}".format(
type(self._map_func.output_structure)))
self._structure = self._map_func.output_structure._element_structure # pylint: disable=protected-access
variant_tensor = gen_dataset_ops.flat_map_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs,
f=self._map_func.function,
**flat_structure(self))
super(FlatMapDataset, self).__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._map_func]
@property
def _element_structure(self):
return self._structure
def _transformation_name(self):
return "Dataset.flat_map()"
class InterleaveDataset(UnaryDataset):
"""A `Dataset` that maps a function over its input and interleaves the result.
"""
def __init__(self, input_dataset, map_func, cycle_length, block_length):
"""See `Dataset.interleave()` for details."""
self._input_dataset = input_dataset
self._map_func = StructuredFunctionWrapper(
map_func, self._transformation_name(), dataset=input_dataset)
if not isinstance(self._map_func.output_structure, DatasetStructure):
raise TypeError(
"`map_func` must return a `Dataset` object. Got {}".format(
type(self._map_func.output_structure)))
self._structure = self._map_func.output_structure._element_structure # pylint: disable=protected-access
self._cycle_length = ops.convert_to_tensor(
cycle_length, dtype=dtypes.int64, name="cycle_length")
self._block_length = ops.convert_to_tensor(
block_length, dtype=dtypes.int64, name="block_length")
variant_tensor = gen_dataset_ops.interleave_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs, # pylint: disable=protected-access
self._cycle_length,
self._block_length,
f=self._map_func.function,
**flat_structure(self))
super(InterleaveDataset, self).__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._map_func]
@property
def _element_structure(self):
return self._structure
def _transformation_name(self):
return "Dataset.interleave()"
class ParallelInterleaveDataset(UnaryDataset):
"""A `Dataset` that maps a function over its input and interleaves the result."""
def __init__(self, input_dataset, map_func, cycle_length, block_length,
num_parallel_calls):
"""See `Dataset.interleave()` for details."""
self._input_dataset = input_dataset
self._map_func = StructuredFunctionWrapper(
map_func, self._transformation_name(), dataset=input_dataset)
if not isinstance(self._map_func.output_structure, DatasetStructure):
raise TypeError(
"`map_func` must return a `Dataset` object. Got {}".format(
type(self._map_func.output_structure)))
self._structure = self._map_func.output_structure._element_structure # pylint: disable=protected-access
self._cycle_length = ops.convert_to_tensor(
cycle_length, dtype=dtypes.int64, name="cycle_length")
self._block_length = ops.convert_to_tensor(
block_length, dtype=dtypes.int64, name="block_length")
self._num_parallel_calls = ops.convert_to_tensor(
num_parallel_calls, dtype=dtypes.int64, name="num_parallel_calls")
variant_tensor = gen_dataset_ops.parallel_interleave_dataset_v2(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs, # pylint: disable=protected-access
self._cycle_length,
self._block_length,
self._num_parallel_calls,
f=self._map_func.function,
**flat_structure(self))
super(ParallelInterleaveDataset, self).__init__(input_dataset,
variant_tensor)
def _functions(self):
return [self._map_func]
@property
def _element_structure(self):
return self._structure
def _transformation_name(self):
return "Dataset.interleave()"
class FilterDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that filters its input according to a predicate function."""
def __init__(self, input_dataset, predicate, use_legacy_function=False):
"""See `Dataset.filter()` for details."""
self._input_dataset = input_dataset
wrapped_func = StructuredFunctionWrapper(
predicate,
self._transformation_name(),
dataset=input_dataset,
use_legacy_function=use_legacy_function)
if not wrapped_func.output_structure.is_compatible_with(
structure_lib.TensorStructure(dtypes.bool, [])):
error_msg = ("`predicate` return type must be convertible to a scalar "
"boolean tensor. Was {}.").format(
wrapped_func.output_structure)
raise ValueError(error_msg)
self._predicate = wrapped_func
variant_tensor = gen_dataset_ops.filter_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
other_arguments=self._predicate.function.captured_inputs,
predicate=self._predicate.function,
**flat_structure(self))
super(FilterDataset, self).__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._predicate]
def _transformation_name(self):
return "Dataset.filter()"
class PrefetchDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that asynchronously prefetches its input."""
def __init__(self, input_dataset, buffer_size, slack_period=None):
"""See `Dataset.prefetch()` for details.
Args:
input_dataset: The input dataset.
buffer_size: See `Dataset.prefetch()` for details.
slack_period: (Optional.) An integer. If non-zero, determines the number
of GetNext calls before injecting slack into the execution. This may
        reduce CPU contention at the start of a step. Note that a TensorFlow
        user should not have to set this manually; this behavior is enabled
        automatically via `tf.data.Options.experimental_slack` instead. Defaults
to None.
"""
self._input_dataset = input_dataset
if buffer_size is None:
buffer_size = -1 # This is the sentinel for auto-tuning.
self._buffer_size = ops.convert_to_tensor(
buffer_size, dtype=dtypes.int64, name="buffer_size")
variant_tensor = gen_dataset_ops.prefetch_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
buffer_size=self._buffer_size,
slack_period=slack_period,
**flat_structure(self))
super(PrefetchDataset, self).__init__(input_dataset, variant_tensor)
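# Illustrative sketch (not part of the original TensorFlow source): passing
# `buffer_size=None` selects the autotuning sentinel (-1); a fixed buffer can
# be requested explicitly.
def _example_prefetch(input_dataset):  # pragma: no cover - illustrative
  autotuned = PrefetchDataset(input_dataset, buffer_size=None)
  fixed = PrefetchDataset(input_dataset, buffer_size=2)
  return autotuned, fixed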
class WindowDataset(UnaryDataset):
"""A dataset that creates window datasets from the input elements."""
def __init__(self, input_dataset, size, shift, stride, drop_remainder):
"""See `window_dataset()` for more details."""
self._input_dataset = input_dataset
self._size = ops.convert_to_tensor(size, dtype=dtypes.int64, name="size")
self._shift = ops.convert_to_tensor(shift, dtype=dtypes.int64, name="shift")
self._stride = ops.convert_to_tensor(
stride, dtype=dtypes.int64, name="stride")
self._drop_remainder = ops.convert_to_tensor(
drop_remainder, dtype=dtypes.bool, name="drop_remainder")
nest_of_structures = nest.pack_sequence_as(
get_legacy_output_classes(input_dataset),
[
DatasetStructure(structure_lib.convert_legacy_structure(
output_type, output_shape, output_class))
for output_class, output_shape, output_type in zip(
nest.flatten(get_legacy_output_classes(input_dataset)),
nest.flatten(get_legacy_output_shapes(input_dataset)),
nest.flatten(get_legacy_output_types(input_dataset)))
])
self._structure = structure_lib.NestedStructure(nest_of_structures)
variant_tensor = gen_dataset_ops.window_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._size,
self._shift,
self._stride,
self._drop_remainder,
**flat_structure(self))
super(WindowDataset, self).__init__(input_dataset, variant_tensor)
@property
def _element_structure(self):
return self._structure
class _OptionsDataset(UnaryUnchangedStructureDataset):
"""An identity `Dataset` that stores options."""
def __init__(self, input_dataset, options):
self._input_dataset = input_dataset
self._options = input_dataset.options()
if self._options:
self._options = self._options.merge(options)
else:
self._options = options
variant_tensor = input_dataset._variant_tensor # pylint: disable=protected-access
super(_OptionsDataset, self).__init__(input_dataset, variant_tensor)
def options(self):
return self._options
class _ModelDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that acts as an identity, and models performance."""
def __init__(self, input_dataset, cpu_budget):
self._input_dataset = input_dataset
variant_tensor = gen_dataset_ops.model_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
cpu_budget=cpu_budget,
**flat_structure(self))
super(_ModelDataset, self).__init__(input_dataset, variant_tensor)
class _OptimizeDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that acts as an identity, and applies optimizations."""
def __init__(self, input_dataset, optimizations, optimization_configs=None):
self._input_dataset = input_dataset
if optimizations is None:
optimizations = []
if optimization_configs is None:
optimization_configs = []
self._optimizations = ops.convert_to_tensor(
optimizations, dtype=dtypes.string, name="optimizations")
variant_tensor = gen_dataset_ops.optimize_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._optimizations,
optimization_configs=optimization_configs,
**flat_structure(self))
super(_OptimizeDataset, self).__init__(input_dataset, variant_tensor)
class _SetStatsAggregatorDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that acts as an identity, and sets a stats aggregator."""
def __init__(self, input_dataset, aggregator, prefix, counter_prefix):
self._input_dataset = input_dataset
self._stats_aggregator = aggregator
self._prefix = prefix
self._counter_prefix = counter_prefix
variant_tensor = ged_ops.experimental_set_stats_aggregator_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._stats_aggregator._resource, # pylint: disable=protected-access
self._prefix,
self._counter_prefix,
**flat_structure(self))
super(_SetStatsAggregatorDataset, self).__init__(input_dataset,
variant_tensor)
class _MaxIntraOpParallelismDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that acts as an identity, overriding intra-op parallelism."""
def __init__(self, input_dataset, max_intra_op_parallelism):
self._input_dataset = input_dataset
self._max_intra_op_parallelism = ops.convert_to_tensor(
max_intra_op_parallelism,
dtype=dtypes.int64,
name="max_intra_op_parallelism")
variant_tensor = ged_ops.experimental_max_intra_op_parallelism_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._max_intra_op_parallelism,
**flat_structure(self))
super(_MaxIntraOpParallelismDataset, self).__init__(input_dataset,
variant_tensor)
class _PrivateThreadPoolDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that acts as an identity, setting a private threadpool."""
def __init__(self, input_dataset, num_threads):
self._input_dataset = input_dataset
self._num_threads = ops.convert_to_tensor(
num_threads, dtype=dtypes.int64, name="num_threads")
variant_tensor = ged_ops.experimental_private_thread_pool_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._num_threads,
**flat_structure(self))
super(_PrivateThreadPoolDataset, self).__init__(input_dataset,
variant_tensor)
class _RestructuredDataset(UnaryDataset):
"""An internal helper for changing the structure and shape of a dataset."""
def __init__(self, dataset, structure):
self._input_dataset = dataset
self._structure = structure
variant_tensor = self._input_dataset._variant_tensor # pylint: disable=protected-access
super(_RestructuredDataset, self).__init__(dataset, variant_tensor)
@property
def _element_structure(self):
return self._structure
class _UnbatchDataset(UnaryDataset):
"""A dataset that splits the elements of its input into multiple elements."""
def __init__(self, input_dataset):
"""See `unbatch()` for more details."""
flat_shapes = get_structure(input_dataset)._flat_shapes # pylint: disable=protected-access
if any(s.ndims == 0 for s in flat_shapes):
raise ValueError("Cannot unbatch an input with scalar components.")
known_batch_dim = tensor_shape.Dimension(None)
for s in flat_shapes:
try:
known_batch_dim = known_batch_dim.merge_with(s[0])
except ValueError:
raise ValueError("Cannot unbatch an input whose components have "
"different batch sizes.")
self._input_dataset = input_dataset
self._structure = get_structure(input_dataset)._unbatch() # pylint: disable=protected-access
variant_tensor = ged_ops.experimental_unbatch_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
**flat_structure(self))
super(_UnbatchDataset, self).__init__(input_dataset, variant_tensor)
@property
def _element_structure(self):
return self._structure
|
tensorflow-master
|
tensorflow/python/data/ops/dataset_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Python interface for creating TensorFlow servers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import device_attributes_pb2
from tensorflow.python import pywrap_tensorflow
def list_local_devices(session_config=None):
"""List the available devices available in the local process.
Args:
session_config: a session config proto or None to use the default config.
Returns:
A list of `DeviceAttribute` protocol buffers.
"""
def _convert(pb_str):
m = device_attributes_pb2.DeviceAttributes()
m.ParseFromString(pb_str)
return m
return [
_convert(s)
for s in pywrap_tensorflow.list_devices(session_config=session_config)
]
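# Illustrative sketch (not part of the original module): a common use of
# `list_local_devices()` is filtering by device type; each returned
# `DeviceAttributes` proto carries fields such as `name`, `device_type`, and
# `memory_limit`.
def _example_gpu_device_names():  # pragma: no cover - illustrative
  return [d.name for d in list_local_devices() if d.device_type == 'GPU']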
|
tensorflow-master
|
tensorflow/python/client/device_lib.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Timeline visualization for TensorFlow using Chrome Trace Format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import re
# The timeline target is usually imported as part of BUILD target
# "platform_test", which includes also includes the "platform"
# dependency. This is why the logging import here is okay.
from tensorflow.python.platform import tf_logging as logging
class AllocationMaximum(collections.namedtuple(
'AllocationMaximum', ('timestamp', 'num_bytes', 'tensors'))):
"""Stores the maximum allocation for a given allocator within the timelne.
Parameters:
timestamp: `tensorflow::Env::NowMicros()` when this maximum was reached.
num_bytes: the total memory used at this time.
tensors: the set of tensors allocated at this time.
"""
pass
class StepStatsAnalysis(collections.namedtuple(
'StepStatsAnalysis', ('chrome_trace', 'allocator_maximums'))):
"""Stores the step stats analysis output.
Parameters:
chrome_trace: A dict containing the chrome trace analysis.
allocator_maximums: A dict mapping allocator names to AllocationMaximum.
"""
pass
class _ChromeTraceFormatter(object):
"""A helper class for generating traces in Chrome Trace Format."""
def __init__(self, show_memory=False):
"""Constructs a new Chrome Trace formatter."""
self._show_memory = show_memory
self._events = []
self._metadata = []
def _create_event(self, ph, category, name, pid, tid, timestamp):
"""Creates a new Chrome Trace event.
For details of the file format, see:
https://github.com/catapult-project/catapult/blob/master/tracing/README.md
Args:
ph: The type of event - usually a single character.
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
Returns:
A JSON compatible event object.
"""
event = {}
event['ph'] = ph
event['cat'] = category
event['name'] = name
event['pid'] = pid
event['tid'] = tid
event['ts'] = timestamp
return event
def emit_pid(self, name, pid):
"""Adds a process metadata event to the trace.
Args:
name: The process name as a string.
pid: Identifier of the process as an integer.
"""
event = {}
event['name'] = 'process_name'
event['ph'] = 'M'
event['pid'] = pid
event['args'] = {'name': name}
self._metadata.append(event)
def emit_tid(self, name, pid, tid):
"""Adds a thread metadata event to the trace.
Args:
name: The thread name as a string.
pid: Identifier of the process as an integer.
tid: Identifier of the thread as an integer.
"""
event = {}
event['name'] = 'thread_name'
event['ph'] = 'M'
event['pid'] = pid
event['tid'] = tid
event['args'] = {'name': name}
self._metadata.append(event)
def emit_region(self, timestamp, duration, pid, tid, category, name, args):
"""Adds a region event to the trace.
Args:
timestamp: The start timestamp of this region as a long integer.
duration: The duration of this region as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
category: The event category as a string.
name: The event name as a string.
args: A JSON-compatible dictionary of event arguments.
"""
event = self._create_event('X', category, name, pid, tid, timestamp)
event['dur'] = duration
event['args'] = args
self._events.append(event)
def emit_obj_create(self, category, name, timestamp, pid, tid, object_id):
"""Adds an object creation event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
"""
event = self._create_event('N', category, name, pid, tid, timestamp)
event['id'] = object_id
self._events.append(event)
def emit_obj_delete(self, category, name, timestamp, pid, tid, object_id):
"""Adds an object deletion event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
"""
event = self._create_event('D', category, name, pid, tid, timestamp)
event['id'] = object_id
self._events.append(event)
def emit_obj_snapshot(self, category, name, timestamp, pid, tid, object_id,
snapshot):
"""Adds an object snapshot event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
snapshot: A JSON-compatible representation of the object.
"""
event = self._create_event('O', category, name, pid, tid, timestamp)
event['id'] = object_id
event['args'] = {'snapshot': snapshot}
self._events.append(event)
def emit_flow_start(self, name, timestamp, pid, tid, flow_id):
"""Adds a flow start event to the trace.
When matched with a flow end event (with the same 'flow_id') this will
cause the trace viewer to draw an arrow between the start and end events.
Args:
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
flow_id: Identifier of the flow as an integer.
"""
event = self._create_event('s', 'DataFlow', name, pid, tid, timestamp)
event['id'] = flow_id
self._events.append(event)
def emit_flow_end(self, name, timestamp, pid, tid, flow_id):
"""Adds a flow end event to the trace.
When matched with a flow start event (with the same 'flow_id') this will
cause the trace viewer to draw an arrow between the start and end events.
Args:
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
flow_id: Identifier of the flow as an integer.
"""
event = self._create_event('t', 'DataFlow', name, pid, tid, timestamp)
event['id'] = flow_id
self._events.append(event)
def emit_counter(self, category, name, pid, timestamp, counter, value):
"""Emits a record for a single counter.
Args:
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
counter: Name of the counter as a string.
value: Value of the counter as an integer.
"""
event = self._create_event('C', category, name, pid, 0, timestamp)
event['args'] = {counter: value}
self._events.append(event)
def emit_counters(self, category, name, pid, timestamp, counters):
"""Emits a counter record for the dictionary 'counters'.
Args:
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
counters: Dictionary of counter values.
"""
event = self._create_event('C', category, name, pid, 0, timestamp)
event['args'] = counters.copy()
self._events.append(event)
def format_to_string(self, pretty=False):
"""Formats the chrome trace to a string.
Args:
pretty: (Optional.) If True, produce human-readable JSON output.
Returns:
A JSON-formatted string in Chrome Trace format.
"""
trace = {}
trace['traceEvents'] = self._metadata + self._events
if pretty:
return json.dumps(trace, indent=4, separators=(',', ': '))
else:
return json.dumps(trace, separators=(',', ':'))
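# Illustrative sketch (not part of the original module): building a minimal
# trace with one process, one thread, and a single 100-microsecond op region,
# then serializing it to Chrome Trace JSON.
def _example_chrome_trace():  # pragma: no cover - illustrative
  formatter = _ChromeTraceFormatter()
  formatter.emit_pid('example device', 0)
  formatter.emit_tid('example lane', 0, 0)
  formatter.emit_region(0, 100, 0, 0, 'Op', 'MatMul',
                        {'name': 'MatMul_1', 'op': 'MatMul'})
  return formatter.format_to_string(pretty=True)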
class _TensorTracker(object):
"""An internal class to track the lifetime of a Tensor."""
def __init__(self, name, object_id, timestamp, pid, allocator, num_bytes):
"""Creates an object to track tensor references.
This class is not thread safe and is intended only for internal use by
the 'Timeline' class in this file.
Args:
name: The name of the Tensor as a string.
object_id: Chrome Trace object identifier assigned for this Tensor.
timestamp: The creation timestamp of this event as a long integer.
pid: Process identifier of the associated device, as an integer.
allocator: Name of the allocator used to create the Tensor.
num_bytes: Number of bytes allocated (long integer).
Returns:
A 'TensorTracker' object.
"""
self._name = name
self._pid = pid
self._object_id = object_id
self._create_time = timestamp
self._allocator = allocator
self._num_bytes = num_bytes
self._ref_times = []
self._unref_times = []
@property
def name(self):
"""Name of this tensor."""
return self._name
@property
def pid(self):
"""ID of the process which created this tensor (an integer)."""
return self._pid
@property
def create_time(self):
"""Timestamp when this tensor was created (long integer)."""
return self._create_time
@property
def object_id(self):
"""Returns the object identifier of this tensor (integer)."""
return self._object_id
@property
def num_bytes(self):
"""Size of this tensor in bytes (long integer)."""
return self._num_bytes
@property
def allocator(self):
"""Name of the allocator used to create this tensor (string)."""
return self._allocator
@property
def last_unref(self):
"""Last unreference timestamp of this tensor (long integer)."""
return max(self._unref_times)
def add_ref(self, timestamp):
"""Adds a reference to this tensor with the specified timestamp.
Args:
timestamp: Timestamp of object reference as an integer.
"""
self._ref_times.append(timestamp)
def add_unref(self, timestamp):
"""Adds an unref to this tensor with the specified timestamp.
Args:
timestamp: Timestamp of object unreference as an integer.
"""
self._unref_times.append(timestamp)
class Timeline(object):
"""A class for visualizing execution timelines of TensorFlow steps."""
def __init__(self, step_stats, graph=None):
"""Constructs a new Timeline.
A 'Timeline' is used for visualizing the execution of a TensorFlow
computation. It shows the timings and concurrency of execution at
the granularity of TensorFlow Ops.
This class is not thread safe.
Args:
step_stats: The 'StepStats' proto recording execution times.
graph: (Optional) The 'Graph' that was executed.
"""
self._step_stats = step_stats
self._graph = graph
self._chrome_trace = _ChromeTraceFormatter()
self._next_pid = 0
self._device_pids = {} # device name -> pid for compute activity.
self._tensor_pids = {} # device name -> pid for tensors.
self._tensors = {} # tensor_name -> TensorTracker
self._next_flow_id = 0
self._flow_starts = {} # tensor_name -> (timestamp, pid, tid)
self._alloc_times = {} # tensor_name -> ( time, allocator, size )
self._allocator_maximums = {} # allocator name => maximum bytes long
def _alloc_pid(self):
"""Allocate a process Id."""
pid = self._next_pid
self._next_pid += 1
return pid
def _alloc_flow_id(self):
"""Allocate a flow Id."""
flow_id = self._next_flow_id
self._next_flow_id += 1
return flow_id
def _parse_op_label(self, label):
"""Parses the fields in a node timeline label."""
# Expects labels of the form: name = op(arg, arg, ...).
match = re.match(r'(.*) = (.*)\((.*)\)', label)
if match is None:
return 'unknown', 'unknown', []
nn, op, inputs = match.groups()
if not inputs:
inputs = []
else:
inputs = inputs.split(', ')
return nn, op, inputs
def _assign_lanes(self):
"""Assigns non-overlapping lanes for the activities on each device."""
for device_stats in self._step_stats.dev_stats:
# TODO(pbar): Genuine thread IDs in NodeExecStats might be helpful.
lanes = [0]
for ns in device_stats.node_stats:
l = -1
for (i, lts) in enumerate(lanes):
if ns.all_start_micros > lts:
l = i
lanes[l] = ns.all_start_micros + ns.all_end_rel_micros
break
if l < 0:
l = len(lanes)
lanes.append(ns.all_start_micros + ns.all_end_rel_micros)
ns.thread_id = l
def _emit_op(self, nodestats, pid, is_gputrace):
"""Generates a Chrome Trace event to show Op execution.
Args:
nodestats: The 'NodeExecStats' proto recording op execution.
pid: The pid assigned for the device where this op ran.
is_gputrace: If True then this op came from the GPUTracer.
"""
node_name = nodestats.node_name
start = nodestats.all_start_micros
duration = nodestats.all_end_rel_micros
tid = nodestats.thread_id
inputs = []
if is_gputrace:
# Node names should always have the form 'name:op'.
fields = node_name.split(':') + ['unknown']
node_name, op = fields[:2]
elif node_name == 'RecvTensor':
# RPC tracing does not use the standard timeline_label format.
op = 'RecvTensor'
else:
_, op, inputs = self._parse_op_label(nodestats.timeline_label)
args = {'name': node_name, 'op': op}
for i, iname in enumerate(inputs):
args['input%d' % i] = iname
self._chrome_trace.emit_region(start, duration, pid, tid, 'Op', op, args)
def _emit_tensor_snapshot(self, tensor, timestamp, pid, tid, value):
"""Generate Chrome Trace snapshot event for a computed Tensor.
Args:
tensor: A 'TensorTracker' object.
timestamp: The timestamp of this snapshot as a long integer.
pid: The pid assigned for showing the device where this op ran.
tid: The tid of the thread computing the tensor snapshot.
value: A JSON-compliant snapshot of the object.
"""
desc = str(value.tensor_description).replace('"', '')
snapshot = {'tensor_description': desc}
self._chrome_trace.emit_obj_snapshot('Tensor', tensor.name, timestamp, pid,
tid, tensor.object_id, snapshot)
def _produce_tensor(self, name, timestamp, tensors_pid, allocator, num_bytes):
object_id = len(self._tensors)
tensor = _TensorTracker(name, object_id, timestamp, tensors_pid, allocator,
num_bytes)
self._tensors[name] = tensor
return tensor
def _is_gputrace_device(self, device_name):
"""Returns true if this device is part of the GPUTracer logging."""
return '/stream:' in device_name or '/memcpy' in device_name
def _allocate_pids(self):
"""Allocate fake process ids for each device in the StepStats."""
self._allocators_pid = self._alloc_pid()
self._chrome_trace.emit_pid('Allocators', self._allocators_pid)
# Add processes in the Chrome trace to show compute and data activity.
for dev_stats in self._step_stats.dev_stats:
device_pid = self._alloc_pid()
self._device_pids[dev_stats.device] = device_pid
tensors_pid = self._alloc_pid()
self._tensor_pids[dev_stats.device] = tensors_pid
self._chrome_trace.emit_pid(dev_stats.device + ' Compute', device_pid)
self._chrome_trace.emit_pid(dev_stats.device + ' Tensors', tensors_pid)
def _analyze_tensors(self, show_memory):
"""Analyze tensor references to track dataflow."""
for dev_stats in self._step_stats.dev_stats:
device_pid = self._device_pids[dev_stats.device]
tensors_pid = self._tensor_pids[dev_stats.device]
for node_stats in dev_stats.node_stats:
tid = node_stats.thread_id
node_name = node_stats.node_name
start_time = node_stats.all_start_micros
end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros
for index, output in enumerate(node_stats.output):
if index:
output_name = '%s:%d' % (node_name, index)
else:
output_name = node_name
allocation = output.tensor_description.allocation_description
num_bytes = allocation.requested_bytes
allocator_name = allocation.allocator_name
tensor = self._produce_tensor(output_name, start_time, tensors_pid,
allocator_name, num_bytes)
tensor.add_ref(start_time)
tensor.add_unref(end_time)
self._flow_starts[output_name] = (end_time, device_pid, tid)
if show_memory:
self._chrome_trace.emit_obj_create('Tensor', output_name,
start_time, tensors_pid, tid,
tensor.object_id)
self._emit_tensor_snapshot(tensor, end_time - 1, tensors_pid, tid,
output)
def _show_compute(self, show_dataflow):
"""Visualize the computation activity."""
for dev_stats in self._step_stats.dev_stats:
device_name = dev_stats.device
device_pid = self._device_pids[device_name]
is_gputrace = self._is_gputrace_device(device_name)
for node_stats in dev_stats.node_stats:
tid = node_stats.thread_id
start_time = node_stats.all_start_micros
end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros
self._emit_op(node_stats, device_pid, is_gputrace)
if is_gputrace or node_stats.node_name == 'RecvTensor':
continue
_, _, inputs = self._parse_op_label(node_stats.timeline_label)
for input_name in inputs:
if input_name not in self._tensors:
# This can happen when partitioning has inserted a Send/Recv.
# We remove the numeric suffix so that the dataflow appears to
# come from the original node. Ideally, the StepStats would
# contain logging for the Send and Recv nodes.
index = input_name.rfind('/_')
if index > 0:
input_name = input_name[:index]
if input_name in self._tensors:
tensor = self._tensors[input_name]
tensor.add_ref(start_time)
tensor.add_unref(end_time - 1)
if show_dataflow:
# We use a different flow ID for every graph edge.
create_time, create_pid, create_tid = self._flow_starts[
input_name]
# Don't add flows when producer and consumer ops are on the same
# pid/tid since the horizontal arrows clutter the visualization.
if create_pid != device_pid or create_tid != tid:
flow_id = self._alloc_flow_id()
self._chrome_trace.emit_flow_start(input_name, create_time,
create_pid, create_tid,
flow_id)
self._chrome_trace.emit_flow_end(input_name, start_time,
device_pid, tid, flow_id)
else:
logging.vlog(1, 'Can\'t find tensor %s - removed by CSE?',
input_name)
def _show_memory_counters(self):
"""Produce a counter series for each memory allocator."""
# Iterate over all tensor trackers to build a list of allocations and
# frees for each allocator. Then sort the lists and emit a cumulative
# counter series for each allocator.
allocations = {}
for name in self._tensors:
tensor = self._tensors[name]
self._chrome_trace.emit_obj_delete('Tensor', name, tensor.last_unref,
tensor.pid, 0, tensor.object_id)
allocator = tensor.allocator
if allocator not in allocations:
allocations[allocator] = []
num_bytes = tensor.num_bytes
allocations[allocator].append((tensor.create_time, num_bytes, name))
allocations[allocator].append((tensor.last_unref, -num_bytes, name))
alloc_maxes = {}
# Generate a counter series showing total allocations for each allocator.
for allocator in allocations:
alloc_list = allocations[allocator]
alloc_list.sort()
total_bytes = 0
alloc_tensor_set = set()
alloc_maxes[allocator] = AllocationMaximum(
timestamp=0, num_bytes=0, tensors=set())
for time, num_bytes, name in sorted(
alloc_list, key=lambda allocation: allocation[0]):
total_bytes += num_bytes
if num_bytes < 0:
alloc_tensor_set.discard(name)
else:
alloc_tensor_set.add(name)
if total_bytes > alloc_maxes[allocator].num_bytes:
alloc_maxes[allocator] = AllocationMaximum(
timestamp=time,
num_bytes=total_bytes,
tensors=copy.deepcopy(alloc_tensor_set))
self._chrome_trace.emit_counter('Memory', allocator,
self._allocators_pid, time, allocator,
total_bytes)
self._allocator_maximums = alloc_maxes
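  # A minimal standalone sketch of the counter logic above (made-up byte
  # counts, not TensorFlow API): every tensor contributes a positive delta
  # at creation and a negative delta at its last unref; sorting the deltas
  # by time and accumulating yields the counter series and its peak.
  #
  #   events = [(0, 4), (1, 8), (5, -4), (9, -8)]  # (time, delta_bytes)
  #   total = peak = 0
  #   for _, delta in sorted(events):
  #     total += delta
  #     peak = max(peak, total)
  #   # total sweeps 4 -> 12 -> 8 -> 0; peak == 12, reached at time 1,
  #   # which is what AllocationMaximum records per allocator.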

  def analyze_step_stats(self, show_dataflow=True, show_memory=True):
    """Runs the full analysis and returns a `StepStatsAnalysis`."""
self._allocate_pids()
self._assign_lanes()
self._analyze_tensors(show_memory)
self._show_compute(show_dataflow)
if show_memory:
self._show_memory_counters()
return StepStatsAnalysis(
chrome_trace=self._chrome_trace,
allocator_maximums=self._allocator_maximums)

  def generate_chrome_trace_format(self, show_dataflow=True, show_memory=False):
    """Produces a trace in Chrome Trace Format.

    Args:
      show_dataflow: (Optional.) If True, add flow events to the trace
        connecting producers and consumers of tensors.
      show_memory: (Optional.) If True, add object snapshot events to the trace
        showing the sizes and lifetimes of tensors.

    Returns:
      A JSON formatted string in Chrome Trace format.
    """
step_stats_analysis = self.analyze_step_stats(
show_dataflow=show_dataflow, show_memory=show_memory)
return step_stats_analysis.chrome_trace.format_to_string(pretty=True)
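

# A minimal usage sketch (assuming a TF 1.x-style `tf.Session` named `sess`
# and some `fetches` to run; both are placeholders, not defined here). Run
# metadata must be collected with trace_level=FULL_TRACE so that
# `run_metadata.step_stats` is populated before handing it to `Timeline`:
#
#   run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
#   run_metadata = tf.RunMetadata()
#   sess.run(fetches, options=run_options, run_metadata=run_metadata)
#
#   tl = Timeline(run_metadata.step_stats)
#   with open('timeline.json', 'w') as f:
#     f.write(tl.generate_chrome_trace_format(show_memory=True))
#
# The resulting JSON can be loaded in chrome://tracing for inspection.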
|
tensorflow-master
|
tensorflow/python/client/timeline.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the SWIG-wrapped events writer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
from tensorflow.core.framework import summary_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import tf_record
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat


class PywrapeventsWriterTest(test_util.TensorFlowTestCase):

  def testWriteEvents(self):
file_prefix = os.path.join(self.get_temp_dir(), "events")
writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(file_prefix))
filename = compat.as_text(writer.FileName())
event_written = event_pb2.Event(
wall_time=123.45,
step=67,
summary=summary_pb2.Summary(
value=[summary_pb2.Summary.Value(
tag="foo", simple_value=89.0)]))
writer.WriteEvent(event_written)
writer.Flush()
writer.Close()
with self.assertRaises(errors.NotFoundError):
for r in tf_record.tf_record_iterator(filename + "DOES_NOT_EXIST"):
self.assertTrue(False)
reader = tf_record.tf_record_iterator(filename)
event_read = event_pb2.Event()
event_read.ParseFromString(next(reader))
self.assertTrue(event_read.HasField("file_version"))
    # Second event
    event_read.ParseFromString(next(reader))
self.assertProtoEquals("""
wall_time: 123.45 step: 67
summary { value { tag: 'foo' simple_value: 89.0 } }
""", event_read)
with self.assertRaises(StopIteration):
next(reader)

  def testWriteEventInvalidType(self):

    class _Invalid(object):

      def __str__(self):
        return "Invalid"

    with self.assertRaisesRegexp(TypeError, "Invalid"):
      pywrap_tensorflow.EventsWriter(b"foo").WriteEvent(_Invalid())


if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/client/events_writer_test.py
|