| python_code | repo_name | file_path |
|---|---|---|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a model definition for AlexNet.
This work was first described in:
ImageNet Classification with Deep Convolutional Neural Networks
Alex Krizhevsky, Ilya Sutskever and Geoffrey E. Hinton
and later refined in:
One weird trick for parallelizing convolutional neural networks
Alex Krizhevsky, 2014
Here we provide the implementation proposed in "One weird trick" rather than
the one in "ImageNet Classification"; as per the paper, the LRN layers have been removed.
Usage:
with slim.arg_scope(alexnet.alexnet_v2_arg_scope()):
outputs, end_points = alexnet.alexnet_v2(inputs)
@@alexnet_v2
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)
def alexnet_v2_arg_scope(weight_decay=0.0005):
with arg_scope(
[layers.conv2d, layers_lib.fully_connected],
activation_fn=nn_ops.relu,
biases_initializer=init_ops.constant_initializer(0.1),
weights_regularizer=regularizers.l2_regularizer(weight_decay)):
with arg_scope([layers.conv2d], padding='SAME'):
with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:
return arg_sc
def alexnet_v2(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='alexnet_v2'):
"""AlexNet version 2.
Described in: http://arxiv.org/pdf/1404.5997v2.pdf
Parameters from:
github.com/akrizhevsky/cuda-convnet2/blob/master/layers/
layers-imagenet-1gpu.cfg
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 224x224. To use in fully
convolutional mode, set spatial_squeeze to false.
The LRN layers have been removed and the initializers changed from
random_normal_initializer to xavier_initializer.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
spatial_squeeze: whether or not the spatial dimensions of the outputs should
be squeezed. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
Returns:
the last op containing the log predictions and end_points dict.
"""
with variable_scope.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with arg_scope(
[layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
outputs_collections=[end_points_collection]):
net = layers.conv2d(
inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool1')
net = layers.conv2d(net, 192, [5, 5], scope='conv2')
net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool2')
net = layers.conv2d(net, 384, [3, 3], scope='conv3')
net = layers.conv2d(net, 384, [3, 3], scope='conv4')
net = layers.conv2d(net, 256, [3, 3], scope='conv5')
net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool5')
# Use conv2d instead of fully_connected layers.
with arg_scope(
[layers.conv2d],
weights_initializer=trunc_normal(0.005),
biases_initializer=init_ops.constant_initializer(0.1)):
net = layers.conv2d(net, 4096, [5, 5], padding='VALID', scope='fc6')
net = layers_lib.dropout(
net, dropout_keep_prob, is_training=is_training, scope='dropout6')
net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
net = layers_lib.dropout(
net, dropout_keep_prob, is_training=is_training, scope='dropout7')
net = layers.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=init_ops.zeros_initializer(),
scope='fc8')
# Convert end_points_collection into an end_points dict.
end_points = utils.convert_collection_to_dict(end_points_collection)
if spatial_squeeze:
net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
end_points[sc.name + '/fc8'] = net
return net, end_points
alexnet_v2.default_image_size = 224
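# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of the two modes described in the alexnet_v2
# docstring above: classification with 224x224 inputs (spatial squeeze), and
# fully convolutional mode on a larger input. The placeholder shapes and the
# 'alexnet_v2_fcn' scope name are assumptions made here for illustration only.
def _alexnet_v2_usage_sketch():
  import tensorflow as tf
  # Classification mode: 224x224 inputs; the spatial dimensions are squeezed.
  images = tf.placeholder(tf.float32, [None, 224, 224, 3])
  with arg_scope(alexnet_v2_arg_scope()):
    logits, end_points = alexnet_v2(images, num_classes=1000)
  # Fully convolutional mode: larger inputs, keep the spatial dimensions.
  large_images = tf.placeholder(tf.float32, [None, 448, 448, 3])
  with arg_scope(alexnet_v2_arg_scope()):
    fcn_logits, _ = alexnet_v2(large_images, num_classes=1000,
                               spatial_squeeze=False, scope='alexnet_v2_fcn')
  return logits, fcn_logits, end_points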
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/slim/python/slim/nets/alexnet.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the preactivation form of Residual Networks.
Residual networks (ResNets) were originally proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
The full preactivation 'v2' ResNet variant implemented in this module was
introduced by:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv:1603.05027
The key difference of the full preactivation 'v2' variant compared to the
'v1' variant in [1] is the use of batch normalization before every weight layer.
Typical use:
from tensorflow.contrib.slim.python.slim.nets import resnet_v2
ResNet-101 for image classification into 1000 classes:
# inputs has shape [batch, 224, 224, 3]
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
net, end_points = resnet_v2.resnet_v2_101(inputs, 1000, is_training=False)
ResNet-101 for semantic segmentation into 21 classes:
# inputs has shape [batch, 513, 513, 3]
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
net, end_points = resnet_v2.resnet_v2_101(inputs,
21,
is_training=False,
global_pool=False,
output_stride=16)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
resnet_arg_scope = resnet_utils.resnet_arg_scope
@add_arg_scope
def bottleneck(inputs,
depth,
depth_bottleneck,
stride,
rate=1,
outputs_collections=None,
scope=None):
"""Bottleneck residual unit variant with BN before convolutions.
This is the full preactivation residual unit variant proposed in [2]. See
Fig. 1(b) of [2] for its definition. Note that we use the bottleneck variant
here, which has an extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
depth_bottleneck: The depth of the bottleneck layers.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the unit's output compared to its input.
rate: An integer, rate for atrous convolution.
outputs_collections: Collection to add the ResNet unit output.
scope: Optional variable_scope.
Returns:
The ResNet unit's output.
"""
with variable_scope.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
depth_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
preact = layers.batch_norm(
inputs, activation_fn=nn_ops.relu, scope='preact')
if depth == depth_in:
shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
else:
shortcut = layers_lib.conv2d(
preact,
depth, [1, 1],
stride=stride,
normalizer_fn=None,
activation_fn=None,
scope='shortcut')
residual = layers_lib.conv2d(
preact, depth_bottleneck, [1, 1], stride=1, scope='conv1')
residual = resnet_utils.conv2d_same(
residual, depth_bottleneck, 3, stride, rate=rate, scope='conv2')
residual = layers_lib.conv2d(
residual,
depth, [1, 1],
stride=1,
normalizer_fn=None,
activation_fn=None,
scope='conv3')
output = shortcut + residual
return utils.collect_named_outputs(outputs_collections, sc.name, output)
def resnet_v2(inputs,
blocks,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
reuse=None,
scope=None):
"""Generator for v2 (preactivation) ResNet models.
This function generates a family of ResNet v2 models. See the resnet_v2_*()
methods for specific model instantiations, obtained by selecting different
block instantiations that produce ResNets of various depths.
Training for image classification on Imagenet is usually done with [224, 224]
inputs, resulting in [7, 7] feature maps at the output of the last ResNet
block for the ResNets defined in [1] that have nominal stride equal to 32.
However, for dense prediction tasks we advise that one uses inputs with
spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
this case the feature maps at the ResNet output will have spatial shape
[(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
and corners exactly aligned with the input image corners, which greatly
facilitates alignment of the features to the image. Using as input [225, 225]
images results in [8, 8] feature maps at the output of the last ResNet block.
For dense prediction tasks, the ResNet needs to run in fully-convolutional
(FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
have nominal stride equal to 32 and a good choice in FCN mode is to use
output_stride=16 in order to increase the density of the computed features at
small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
blocks: A list of length equal to the number of ResNet blocks. Each element
is a resnet_utils.Block object describing the units in the block.
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
is_training: whether batch_norm layers are in training mode.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
include_root_block: If True, include the initial convolution followed by
max-pooling; if False, excludes it. If excluded, `inputs` should be the
results of an activation-less convolution.
reuse: whether or not the network and its variables should be reused. To be
able to reuse, 'scope' must be given.
scope: Optional variable_scope.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is not None, net contains the pre-softmax
activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: If the target output_stride is not valid.
"""
with variable_scope.variable_scope(
scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
with arg_scope(
[layers_lib.conv2d, bottleneck, resnet_utils.stack_blocks_dense],
outputs_collections=end_points_collection):
with arg_scope([layers.batch_norm], is_training=is_training):
net = inputs
if include_root_block:
if output_stride is not None:
if output_stride % 4 != 0:
raise ValueError('The output_stride needs to be a multiple of 4.')
output_stride /= 4
# We do not include batch normalization or activation functions in
# conv1 because the first ResNet unit will perform these. Cf.
# Appendix of [2].
with arg_scope(
[layers_lib.conv2d], activation_fn=None, normalizer_fn=None):
net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
net = layers.max_pool2d(net, [3, 3], stride=2, scope='pool1')
net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
# This is needed because the pre-activation variant does not have batch
# normalization or activation functions in the residual unit output. See
# Appendix of [2].
net = layers.batch_norm(
net, activation_fn=nn_ops.relu, scope='postnorm')
if global_pool:
# Global average pooling.
net = math_ops.reduce_mean(net, [1, 2], name='pool5', keepdims=True)
if num_classes is not None:
net = layers_lib.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='logits')
# Convert end_points_collection into a dictionary of end_points.
end_points = utils.convert_collection_to_dict(end_points_collection)
if num_classes is not None:
end_points['predictions'] = layers.softmax(net, scope='predictions')
return net, end_points
resnet_v2.default_image_size = 224
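# --- Illustrative sketch (not part of the original module) ---
# The resnet_v2 docstring above states that for inputs whose spatial size is a
# multiple of 32 plus 1 (e.g. 321x321), the output feature map has shape
# [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]. The
# helper below simply evaluates that formula for hypothetical input sizes.
def _dense_prediction_output_size(height, width, output_stride=16):
  """Expected feature map size for 'multiple of 32 plus 1' inputs."""
  return ((height - 1) // output_stride + 1, (width - 1) // output_stride + 1)
# For example: _dense_prediction_output_size(321, 321) == (21, 21)
#              _dense_prediction_output_size(513, 513) == (33, 33)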
def resnet_v2_block(scope, base_depth, num_units, stride):
"""Helper function for creating a resnet_v2 bottleneck block.
Args:
scope: The scope of the block.
base_depth: The depth of the bottleneck layer for each unit.
num_units: The number of units in the block.
stride: The stride of the block, implemented as a stride in the last unit.
All other units have stride=1.
Returns:
A resnet_v2 bottleneck block.
"""
return resnet_utils.Block(scope, bottleneck, [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': 1
}] * (num_units - 1) + [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': stride
}])
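# --- Illustrative note (not part of the original module) ---
# For example, resnet_v2_block('block1', base_depth=64, num_units=3, stride=2)
# builds a Block whose unit list is:
#   [{'depth': 256, 'depth_bottleneck': 64, 'stride': 1},
#    {'depth': 256, 'depth_bottleneck': 64, 'stride': 1},
#    {'depth': 256, 'depth_bottleneck': 64, 'stride': 2}]
# i.e. the subsampling happens in the last unit of the block, matching the
# advice in the bottleneck() docstring above.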
def resnet_v2_50(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v2_50'):
"""ResNet-50 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v2_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v2_block('block3', base_depth=256, num_units=6, stride=2),
resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v2(
inputs,
blocks,
num_classes,
is_training,
global_pool,
output_stride,
include_root_block=True,
reuse=reuse,
scope=scope)
def resnet_v2_101(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v2_101'):
"""ResNet-101 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v2_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v2_block('block3', base_depth=256, num_units=23, stride=2),
resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v2(
inputs,
blocks,
num_classes,
is_training,
global_pool,
output_stride,
include_root_block=True,
reuse=reuse,
scope=scope)
def resnet_v2_152(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v2_152'):
"""ResNet-152 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v2_block('block2', base_depth=128, num_units=8, stride=2),
resnet_v2_block('block3', base_depth=256, num_units=36, stride=2),
resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v2(
inputs,
blocks,
num_classes,
is_training,
global_pool,
output_stride,
include_root_block=True,
reuse=reuse,
scope=scope)
def resnet_v2_200(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v2_200'):
"""ResNet-200 model of [2]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v2_block('block2', base_depth=128, num_units=24, stride=2),
resnet_v2_block('block3', base_depth=256, num_units=36, stride=2),
resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v2(
inputs,
blocks,
num_classes,
is_training,
global_pool,
output_stride,
include_root_block=True,
reuse=reuse,
scope=scope)
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/slim/python/slim/nets/resnet_v2.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements a parallel data reader with queues and optional shuffling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import gfile
from tensorflow.python.summary import summary
from tensorflow.python.training import input as tf_input
from tensorflow.python.training import queue_runner
class ParallelReader(io_ops.ReaderBase):
"""Reader class that uses multiple readers in parallel to improve speed.
See ReaderBase for supported methods.
"""
def __init__(self,
reader_class,
common_queue,
num_readers=4,
reader_kwargs=None):
"""ParallelReader creates num_readers instances of the reader_class.
Each instance is created by calling the `reader_class` function, passing
the arguments specified in `reader_kwargs` as in:
reader_class(**reader_kwargs)
When you read from a ParallelReader, with its `read()` method,
you just dequeue examples from the `common_queue`.
The readers will read different files in parallel, asynchronously enqueueing
their output into `common_queue`. The `common_queue.dtypes` must be
[tf.string, tf.string]
Because each reader can read from a different file, the examples in the
`common_queue` could be from different files. Due to the asynchronous
reading there is no guarantee that all the readers will read the same
number of examples.
If the `common_queue` is a shuffling queue, then the examples are shuffled.
Usage:
common_queue = tf.queue.RandomShuffleQueue(
capacity=256,
min_after_dequeue=128,
dtypes=[tf.string, tf.string])
p_reader = ParallelReader(tf.compat.v1.TFRecordReader, common_queue)
common_queue = tf.queue.FIFOQueue(
capacity=256,
dtypes=[tf.string, tf.string])
p_reader = ParallelReader(tf.compat.v1.TFRecordReader, common_queue, num_readers=2)
Args:
reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader
common_queue: a Queue to hold (key, value pairs) with `dtypes` equal to
[tf.string, tf.string]. Must be one of the data_flow_ops.Queues
instances, ex. `tf.queue.FIFOQueue()`, `tf.queue.RandomShuffleQueue()`,
...
num_readers: an integer, the number of instances of reader_class to create.
reader_kwargs: an optional dict of kwargs to create the readers.
Raises:
TypeError: if `common_queue.dtypes` is not [tf.string, tf.string].
"""
if len(common_queue.dtypes) != 2:
raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')
for dtype in common_queue.dtypes:
if not dtype.is_compatible_with(tf_dtypes.string):
raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')
reader_kwargs = reader_kwargs or {}
self._readers = [reader_class(**reader_kwargs) for _ in range(num_readers)]
self._common_queue = common_queue
@property
def num_readers(self):
return len(self._readers)
@property
def common_queue(self):
return self._common_queue
def read(self, queue, name=None):
"""Returns the next record (key, value pair) produced by the reader.
The multiple reader instances are all configured to `read()` from the
filenames listed in `queue` and enqueue their output into the `common_queue`
passed to the constructor, and this method returns the next record dequeued
from that `common_queue`.
Readers dequeue a work unit from `queue` if necessary (e.g. when a
reader needs to start reading from a new file since it has finished with
the previous file).
A queue runner for enqueuing in the `common_queue` is automatically added
to the TF QueueRunners collection.
Args:
queue: A Queue or a mutable string Tensor representing a handle to a
Queue, with string work items.
name: A name for the operation (optional).
Returns:
The next record (i.e. (key, value pair)) from the common_queue.
"""
self._configure_readers_by(queue)
return self._common_queue.dequeue(name=name)
def read_up_to(self, queue, num_records, name=None):
"""Returns up to num_records (key, value pairs) produced by a reader.
Will dequeue a work unit from queue if necessary (e.g., when the
Reader needs to start reading from a new file since it has
finished with the previous file).
It may return fewer than num_records even before the last batch.
**Note** This operation is not supported by all types of `common_queue`s.
If a `common_queue` does not support `dequeue_up_to()`, then a
`tf.errors.UnimplementedError` is raised.
Args:
queue: A Queue or a mutable string Tensor representing a handle to a
Queue, with string work items.
num_records: Number of records to read.
name: A name for the operation (optional).
Returns:
A tuple of Tensors (keys, values) from common_queue.
keys: A 1-D string Tensor.
values: A 1-D string Tensor.
"""
self._configure_readers_by(queue)
return self._common_queue.dequeue_up_to(num_records, name)
def _configure_readers_by(self, queue):
enqueue_ops = []
for reader in self._readers:
enqueue_ops.append(self._common_queue.enqueue(reader.read(queue)))
queue_runner.add_queue_runner(
queue_runner.QueueRunner(self._common_queue, enqueue_ops))
def num_records_produced(self, name=None):
"""Returns the number of records this reader has produced.
Args:
name: A name for the operation (optional).
Returns:
An int64 Tensor.
"""
num_records = [r.num_records_produced() for r in self._readers]
return math_ops.add_n(num_records, name=name)
def num_work_units_completed(self, name=None):
"""Returns the number of work units this reader has finished processing.
Args:
name: A name for the operation (optional).
Returns:
An int64 Tensor.
"""
num_work_units = [r.num_work_units_completed() for r in self._readers]
return math_ops.add_n(num_work_units, name=name)
def parallel_read(data_sources,
reader_class,
num_epochs=None,
num_readers=4,
reader_kwargs=None,
shuffle=True,
dtypes=None,
capacity=256,
min_after_dequeue=128,
seed=None,
scope=None):
"""Reads multiple records in parallel from data_sources using n readers.
It uses a ParallelReader to read from multiple files in parallel using
multiple readers created using `reader_class` with `reader_kwargs`.
If shuffle is True, the common_queue will be a RandomShuffleQueue; otherwise
it will be a FIFOQueue.
Usage:
data_sources = ['path_to/train*']
key, value = parallel_read(data_sources, tf.TFRecordReader, num_readers=4)
Args:
data_sources: a list/tuple of files or the location of the data, i.e.
/path/to/train@128, /path/to/train* or /tmp/.../train*
reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader
num_epochs: The number of times each data source is read. If left as None,
the data will be cycled through indefinitely.
num_readers: an integer, the number of Readers to create.
reader_kwargs: an optional dict, of kwargs for the reader.
shuffle: boolean, whether the files and the records should be shuffled by
using a RandomShuffleQueue as the common_queue.
dtypes: A list of types. The length of dtypes must equal the number of
elements in each record. If it is None it will default to [tf.string,
tf.string] for (key, value).
capacity: integer, capacity of the common_queue.
min_after_dequeue: integer, minimum number of records in the common_queue
after dequeue. Needed for a good shuffle.
seed: A seed for RandomShuffleQueue.
scope: Optional name scope for the ops.
Returns:
key, value: a tuple of keys and values from the data_source.
"""
data_files = get_data_files(data_sources)
with ops.name_scope(scope, 'parallel_read'):
filename_queue = tf_input.string_input_producer(
data_files,
num_epochs=num_epochs,
shuffle=shuffle,
seed=seed,
name='filenames')
dtypes = dtypes or [tf_dtypes.string, tf_dtypes.string]
if shuffle:
common_queue = data_flow_ops.RandomShuffleQueue(
capacity=capacity,
min_after_dequeue=min_after_dequeue,
dtypes=dtypes,
seed=seed,
name='common_queue')
else:
common_queue = data_flow_ops.FIFOQueue(
capacity=capacity, dtypes=dtypes, name='common_queue')
summary.scalar(
'fraction_of_%d_full' % capacity,
math_ops.cast(common_queue.size(), tf_dtypes.float32) * (1. / capacity))
return ParallelReader(
reader_class,
common_queue,
num_readers=num_readers,
reader_kwargs=reader_kwargs).read(filename_queue)
def single_pass_read(data_sources, reader_class, reader_kwargs=None,
scope=None):
"""Reads sequentially the data_sources using the reader, doing a single pass.
Args:
data_sources: a list/tuple of files or the location of the data, i.e.
/path/to/train@128, /path/to/train* or /tmp/.../train*
reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader.
reader_kwargs: an optional dict, of kwargs for the reader.
scope: Optional name scope for the ops.
Returns:
key, value: a tuple of keys and values from the data_source.
"""
data_files = get_data_files(data_sources)
with ops.name_scope(scope, 'single_pass_read'):
filename_queue = tf_input.string_input_producer(
data_files, num_epochs=1, shuffle=False, capacity=1, name='filenames')
reader_kwargs = reader_kwargs or {}
return reader_class(**reader_kwargs).read(filename_queue)
def get_data_files(data_sources):
"""Get data_files from data_sources.
Args:
data_sources: a list/tuple of files or the location of the data, i.e.
/path/to/train@128, /path/to/train* or /tmp/.../train*
Returns:
a list of data_files.
Raises:
ValueError: if data files are not found
"""
if isinstance(data_sources, (list, tuple)):
data_files = []
for source in data_sources:
data_files += get_data_files(source)
else:
if '*' in data_sources or '?' in data_sources or '[' in data_sources:
data_files = gfile.Glob(data_sources)
else:
data_files = [data_sources]
if not data_files:
raise ValueError('No data files found in %s' % (data_sources,))
return data_files
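# --- Illustrative note (not part of the original module) ---
# A minimal, hedged example of get_data_files(); the paths below are
# hypothetical. Patterns containing '*', '?' or '[' are expanded with
# gfile.Glob, while plain paths (or lists/tuples of them) pass through:
#   get_data_files('/tmp/data/train-*-of-00003')
#     -> list of all matching shard files
#   get_data_files(['/tmp/data/train-00000-of-00003',
#                   '/tmp/data/train-00001-of-00003'])
#     -> the same two paths, returned as a list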
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/slim/python/slim/data/parallel_reader.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains test utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import image_ops
def _encoded_int64_feature(ndarray):
return feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=ndarray.flatten().tolist()))
def _encoded_bytes_feature(tf_encoded):
encoded = tf_encoded.eval()
def string_to_bytes(value):
return feature_pb2.BytesList(value=[value])
return feature_pb2.Feature(bytes_list=string_to_bytes(encoded))
def _string_feature(value):
value = value.encode('utf-8')
return feature_pb2.Feature(bytes_list=feature_pb2.BytesList(value=[value]))
def _encoder(image, image_format):
assert image_format in ['jpeg', 'png']
if image_format == 'jpeg':
tf_image = constant_op.constant(image, dtype=dtypes.uint8)
return image_ops.encode_jpeg(tf_image)
if image_format == 'png':
tf_image = constant_op.constant(image, dtype=dtypes.uint8)
return image_ops.encode_png(tf_image)
def generate_image(image_shape, image_format='jpeg', label=0):
"""Generates an image and an example containing the encoded image.
generate_image must be called within an active session.
Args:
image_shape: the shape of the image to generate.
image_format: the encoding format of the image.
label: the int64 labels for the image.
Returns:
image: the generated image.
example: a TF-example with a feature key 'image/encoded' set to the
serialized image and a feature key 'image/format' set to the image
encoding format ['jpeg', 'png'].
"""
image = np.random.random_integers(0, 255, size=image_shape)
tf_encoded = _encoder(image, image_format)
example = example_pb2.Example(features=feature_pb2.Features(feature={
'image/encoded': _encoded_bytes_feature(tf_encoded),
'image/format': _string_feature(image_format),
'image/class/label': _encoded_int64_feature(np.array(label)),
}))
return image, example.SerializeToString()
def create_tfrecord_files(output_dir, num_files=3, num_records_per_file=10):
"""Creates TFRecords files.
The method must be called within an active session.
Args:
output_dir: The directory where the files are stored.
num_files: The number of files to create.
num_records_per_file: The number of records per file.
Returns:
A list of the paths to the TFRecord files.
"""
tfrecord_paths = []
for i in range(num_files):
path = os.path.join(output_dir,
'flowers.tfrecord-%d-of-%s' % (i, num_files))
tfrecord_paths.append(path)
writer = tf_record.TFRecordWriter(path)
for _ in range(num_records_per_file):
_, example = generate_image(image_shape=(10, 10, 3))
writer.write(example)
writer.close()
return tfrecord_paths
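# --- Illustrative sketch (not part of the original module) ---
# Both helpers above evaluate TensorFlow image-encoding ops, so they must be
# called with an active (default) session. A minimal, hedged usage sketch;
# `output_dir` is a hypothetical, existing directory.
def _write_test_records_sketch(output_dir):
  import tensorflow as tf
  with tf.Session():
    return create_tfrecord_files(output_dir, num_files=2,
                                 num_records_per_file=5)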
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/slim/python/slim/data/test_utils.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.data.prefetch_queue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.slim.python.slim.data import prefetch_queue
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
class PrefetchQueueTest(test.TestCase):
def testOneThread(self):
with self.cached_session() as sess:
batch_size = 10
image_size = 32
num_batches = 5
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
image = random_ops.random_normal(
[image_size, image_size, 3], dtype=dtypes.float32, name='images')
label = random_ops.random_uniform(
[1], 0, 10, dtype=dtypes.int32, name='labels')
batches = input_lib.batch(
[counter, image, label], batch_size=batch_size, num_threads=1)
batches = prefetch_queue.prefetch_queue(batches).dequeue()
variables.global_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
for i in range(num_batches):
results = sess.run(batches)
self.assertAllEqual(results[0],
np.arange(i * batch_size, (i + 1) * batch_size))
self.assertEquals(results[1].shape,
(batch_size, image_size, image_size, 3))
self.assertEquals(results[2].shape, (batch_size, 1))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batches)
for thread in threads:
thread.join()
def testMultiThread(self):
with self.cached_session() as sess:
batch_size = 10
image_size = 32
num_batches = 5
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
image = random_ops.random_normal(
[image_size, image_size, 3], dtype=dtypes.float32, name='images')
label = random_ops.random_uniform(
[1], 0, 10, dtype=dtypes.int32, name='labels')
batches = input_lib.batch(
[counter, image, label], batch_size=batch_size, num_threads=4)
batches = prefetch_queue.prefetch_queue(batches).dequeue()
variables.global_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
value_counter = []
for _ in range(num_batches):
results = sess.run(batches)
value_counter.append(results[0])
self.assertEqual(results[1].shape,
(batch_size, image_size, image_size, 3))
self.assertEqual(results[2].shape, (batch_size, 1))
self.assertAllEqual(
np.sort(np.concatenate(value_counter)),
np.arange(0, num_batches * batch_size))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batches)
for thread in threads:
thread.join()
def testMultipleDequeue(self):
with self.cached_session() as sess:
batch_size = 10
image_size = 32
num_batches = 4
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
image = random_ops.random_normal(
[image_size, image_size, 3], dtype=dtypes.float32, name='images')
label = random_ops.random_uniform(
[1], 0, 10, dtype=dtypes.int32, name='labels')
batches = input_lib.batch(
[counter, image, label], batch_size=batch_size, num_threads=4)
batcher = prefetch_queue.prefetch_queue(batches)
batches_list = [batcher.dequeue() for _ in range(2)]
variables.global_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
value_counter = []
for _ in range(int(num_batches / 2)):
for batches in batches_list:
results = sess.run(batches)
value_counter.append(results[0])
self.assertEquals(results[1].shape,
(batch_size, image_size, image_size, 3))
self.assertEquals(results[2].shape, (batch_size, 1))
self.assertAllEqual(
np.sort(np.concatenate(value_counter)),
np.arange(0, num_batches * batch_size))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batches)
for thread in threads:
thread.join()
def testDynamicPad_failure(self):
with ops.Graph().as_default():
variable_tensor = array_ops.placeholder(dtypes.int32, shape=[None, 3])
with self.assertRaisesRegexp(ValueError, 'shapes must be fully defined'):
prefetch_queue.prefetch_queue([variable_tensor])
def testDynamicPad(self):
with self.cached_session() as sess:
# Create 3 tensors of variable but compatible shapes.
var_shape = [None, 2]
p1 = constant_op.constant([[1, 2], [3, 4]])
p1.set_shape(var_shape)
p2 = constant_op.constant([[5, 6], [7, 8], [9, 10]])
p2.set_shape(var_shape)
p3 = constant_op.constant([[11, 12]])
p3.set_shape(var_shape)
batch = [p1, p2, p3]
batch_size = len(batch)
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(batch_size)
# Create a PaddingFIFOQueue to enqueue these tensors.
q = data_flow_ops.PaddingFIFOQueue(
capacity=10, dtypes=[dtypes.int32], shapes=[var_shape])
for tensor in [p1, p2, p3]:
q.enqueue([tensor]).run()
# Dequeue from the queue and batch them using batch().
batches = input_lib.batch([q.dequeue(), counter], batch_size=batch_size,
num_threads=1, dynamic_pad=True)
self.assertEqual([batch_size, None, 2], batches[0].shape.as_list())
# Finally, assemble them into prefetch_queue with dynamic_pad.
batcher = prefetch_queue.prefetch_queue(batches, dynamic_pad=True)
batches = batcher.dequeue()
self.assertEqual([batch_size, None, 2], batches[0].shape.as_list())
variables.global_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
values, _ = sess.run(batches)
# We enqueued 3 tensors of [None, 2] shapes, so using dynamic_pad
# they should be padded to the fixed size [3, 3, 2], where 3
# is the maximum length of the batch.
self.assertTrue(np.array_equal(
np.array([[[1, 2], [3, 4], [0, 0]],
[[5, 6], [7, 8], [9, 10]],
[[11, 12], [0, 0], [0, 0]]]),
values))
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batches)
for thread in threads:
thread.join()
def testDictConstruction(self):
with ops.Graph().as_default():
batches = {
'first': constant_op.constant([1]),
'second': constant_op.constant([2.0, 2.1])
}
prefetcher = prefetch_queue.prefetch_queue(batches)
dequeued = prefetcher.dequeue()
self.assertTrue(isinstance(dequeued, dict))
self.assertEqual(2, len(dequeued))
self.assertEqual(dtypes.int32, dequeued['first'].dtype)
self.assertEqual(dtypes.float32, dequeued['second'].dtype)
if __name__ == '__main__':
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/slim/python/slim/data/prefetch_queue_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.data.dataset_data_provider."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from tensorflow.contrib.slim.python.slim import queues
from tensorflow.contrib.slim.python.slim.data import dataset
from tensorflow.contrib.slim.python.slim.data import dataset_data_provider
from tensorflow.contrib.slim.python.slim.data import test_utils
from tensorflow.contrib.slim.python.slim.data import tfexample_decoder
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
def _resize_image(image, height, width):
image = array_ops.expand_dims(image, 0)
image = image_ops.resize_bilinear(image, [height, width])
return array_ops.squeeze(image, [0])
def _create_tfrecord_dataset(tmpdir):
if not gfile.Exists(tmpdir):
gfile.MakeDirs(tmpdir)
data_sources = test_utils.create_tfrecord_files(tmpdir, num_files=1)
keys_to_features = {
'image/encoded':
parsing_ops.FixedLenFeature(
shape=(), dtype=dtypes.string, default_value=''),
'image/format':
parsing_ops.FixedLenFeature(
shape=(), dtype=dtypes.string, default_value='jpeg'),
'image/class/label':
parsing_ops.FixedLenFeature(
shape=[1],
dtype=dtypes.int64,
default_value=array_ops.zeros(
[1], dtype=dtypes.int64))
}
items_to_handlers = {
'image': tfexample_decoder.Image(),
'label': tfexample_decoder.Tensor('image/class/label'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
return dataset.Dataset(
data_sources=data_sources,
reader=io_ops.TFRecordReader,
decoder=decoder,
num_samples=100,
items_to_descriptions=None)
class DatasetDataProviderTest(test.TestCase):
def testTFRecordDataset(self):
dataset_dir = tempfile.mkdtemp(prefix=os.path.join(self.get_temp_dir(),
'tfrecord_dataset'))
height = 300
width = 280
with self.cached_session():
test_dataset = _create_tfrecord_dataset(dataset_dir)
provider = dataset_data_provider.DatasetDataProvider(test_dataset)
key, image, label = provider.get(['record_key', 'image', 'label'])
image = _resize_image(image, height, width)
with session.Session('') as sess:
with queues.QueueRunners(sess):
key, image, label = sess.run([key, image, label])
split_key = key.decode('utf-8').split(':')
self.assertEqual(2, len(split_key))
self.assertEqual(test_dataset.data_sources[0], split_key[0])
self.assertTrue(split_key[1].isdigit())
self.assertListEqual([height, width, 3], list(image.shape))
self.assertListEqual([1], list(label.shape))
def testTFRecordSeparateGetDataset(self):
dataset_dir = tempfile.mkdtemp(prefix=os.path.join(self.get_temp_dir(),
'tfrecord_separate_get'))
height = 300
width = 280
with self.cached_session():
provider = dataset_data_provider.DatasetDataProvider(
_create_tfrecord_dataset(dataset_dir))
[image] = provider.get(['image'])
[label] = provider.get(['label'])
image = _resize_image(image, height, width)
with session.Session('') as sess:
with queues.QueueRunners(sess):
image, label = sess.run([image, label])
self.assertListEqual([height, width, 3], list(image.shape))
self.assertListEqual([1], list(label.shape))
def testConflictingRecordKeyItem(self):
dataset_dir = tempfile.mkdtemp(prefix=os.path.join(self.get_temp_dir(),
'tfrecord_dataset'))
with self.cached_session():
with self.assertRaises(ValueError):
dataset_data_provider.DatasetDataProvider(
_create_tfrecord_dataset(dataset_dir), record_key='image')
if __name__ == '__main__':
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/slim/python/slim/data/dataset_data_provider_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains helper functions and classes necessary for decoding data.
While data providers read data from disk, sstables or other formats, data
decoders decode the data (if necessary). A data decoder is provided with a
serialized or encoded piece of data as well as a list of items and
returns a set of tensors, each of which corresponds to one of the requested
items extracted from the data:
def Decode(self, data, items):
...
For example, if data is a compressed map, the implementation might be:
def Decode(self, data, items):
decompressed_map = _Decompress(data)
outputs = []
for item in items:
outputs.append(decompressed_map[item])
return outputs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class DataDecoder(object):
"""An abstract class which is used to decode data for a provider."""
@abc.abstractmethod
def decode(self, data, items):
"""Decodes the data to returns the tensors specified by the list of items.
Args:
data: A possibly encoded data format.
items: A list of strings, each of which indicates a particular data type.
Returns:
A list of `Tensors`, whose length matches the length of `items`, where
each `Tensor` corresponds to each item.
Raises:
ValueError: If any of the items cannot be satisfied.
"""
pass
@abc.abstractmethod
def list_items(self):
"""Lists the names of the items that the decoder can decode.
Returns:
A list of string names.
"""
pass
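# --- Illustrative sketch (not part of the original module) ---
# A minimal, hedged concrete decoder following the pattern sketched in the
# module docstring above; it assumes `data` is already a plain dict mapping
# item names to decoded tensors, so "decoding" is just a lookup.
class _DictDecoderSketch(DataDecoder):
  """Toy decoder that treats `data` as a dict of already-decoded tensors."""

  def __init__(self, item_names):
    self._item_names = list(item_names)

  def decode(self, data, items):
    return [data[item] for item in items]

  def list_items(self):
    return self._item_names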
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/slim/python/slim/data/data_decoder.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.data.parallel_reader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.slim.python.slim import queues
from tensorflow.contrib.slim.python.slim.data import parallel_reader
from tensorflow.contrib.slim.python.slim.data import test_utils
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import supervisor
class ParallelReaderTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def _verify_all_data_sources_read(self, shared_queue):
with self.cached_session():
tfrecord_paths = test_utils.create_tfrecord_files(
self.get_temp_dir(), num_files=3)
num_readers = len(tfrecord_paths)
p_reader = parallel_reader.ParallelReader(
io_ops.TFRecordReader, shared_queue, num_readers=num_readers)
data_files = parallel_reader.get_data_files(tfrecord_paths)
filename_queue = input_lib.string_input_producer(data_files)
key, value = p_reader.read(filename_queue)
count0 = 0
count1 = 0
count2 = 0
num_reads = 50
sv = supervisor.Supervisor(logdir=self.get_temp_dir())
with sv.prepare_or_wait_for_session() as sess:
sv.start_queue_runners(sess)
for _ in range(num_reads):
current_key, _ = sess.run([key, value])
if '0-of-3' in str(current_key):
count0 += 1
if '1-of-3' in str(current_key):
count1 += 1
if '2-of-3' in str(current_key):
count2 += 1
self.assertGreater(count0, 0)
self.assertGreater(count1, 0)
self.assertGreater(count2, 0)
self.assertEquals(count0 + count1 + count2, num_reads)
def _verify_read_up_to_out(self, shared_queue):
with self.cached_session():
num_files = 3
num_records_per_file = 7
tfrecord_paths = test_utils.create_tfrecord_files(
self.get_temp_dir(),
num_files=num_files,
num_records_per_file=num_records_per_file)
p_reader = parallel_reader.ParallelReader(
io_ops.TFRecordReader, shared_queue, num_readers=5)
data_files = parallel_reader.get_data_files(tfrecord_paths)
filename_queue = input_lib.string_input_producer(data_files, num_epochs=1)
key, value = p_reader.read_up_to(filename_queue, 4)
count0 = 0
count1 = 0
count2 = 0
all_keys_count = 0
all_values_count = 0
sv = supervisor.Supervisor(logdir=self.get_temp_dir())
with sv.prepare_or_wait_for_session() as sess:
sv.start_queue_runners(sess)
while True:
try:
current_keys, current_values = sess.run([key, value])
self.assertEquals(len(current_keys), len(current_values))
all_keys_count += len(current_keys)
all_values_count += len(current_values)
for current_key in current_keys:
if '0-of-3' in str(current_key):
count0 += 1
if '1-of-3' in str(current_key):
count1 += 1
if '2-of-3' in str(current_key):
count2 += 1
except errors_impl.OutOfRangeError:
break
self.assertEquals(count0, num_records_per_file)
self.assertEquals(count1, num_records_per_file)
self.assertEquals(count2, num_records_per_file)
self.assertEquals(
all_keys_count,
num_files * num_records_per_file)
self.assertEquals(all_values_count, all_keys_count)
self.assertEquals(
count0 + count1 + count2,
all_keys_count)
def testRandomShuffleQueue(self):
shared_queue = data_flow_ops.RandomShuffleQueue(
capacity=256,
min_after_dequeue=128,
dtypes=[dtypes_lib.string, dtypes_lib.string])
self._verify_all_data_sources_read(shared_queue)
def testFIFOSharedQueue(self):
shared_queue = data_flow_ops.FIFOQueue(
capacity=256, dtypes=[dtypes_lib.string, dtypes_lib.string])
self._verify_all_data_sources_read(shared_queue)
def testReadUpToFromRandomShuffleQueue(self):
shared_queue = data_flow_ops.RandomShuffleQueue(
capacity=55,
min_after_dequeue=28,
dtypes=[dtypes_lib.string, dtypes_lib.string],
shapes=[tensor_shape.TensorShape([]),
tensor_shape.TensorShape([])])
self._verify_read_up_to_out(shared_queue)
def testReadUpToFromFIFOQueue(self):
shared_queue = data_flow_ops.FIFOQueue(
capacity=99,
dtypes=[dtypes_lib.string, dtypes_lib.string],
shapes=[tensor_shape.TensorShape([]),
tensor_shape.TensorShape([])])
self._verify_read_up_to_out(shared_queue)
class ParallelReadTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testTFRecordReader(self):
with self.cached_session():
self._tfrecord_paths = test_utils.create_tfrecord_files(
self.get_temp_dir(), num_files=3)
key, value = parallel_reader.parallel_read(
self._tfrecord_paths, reader_class=io_ops.TFRecordReader, num_readers=3)
sv = supervisor.Supervisor(logdir=self.get_temp_dir())
with sv.prepare_or_wait_for_session() as sess:
sv.start_queue_runners(sess)
flowers = 0
num_reads = 100
for _ in range(num_reads):
current_key, _ = sess.run([key, value])
if 'flowers' in str(current_key):
flowers += 1
self.assertGreater(flowers, 0)
self.assertEquals(flowers, num_reads)
class SinglePassReadTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testOutOfRangeError(self):
with self.cached_session():
[tfrecord_path] = test_utils.create_tfrecord_files(
self.get_temp_dir(), num_files=1)
key, value = parallel_reader.single_pass_read(
tfrecord_path, reader_class=io_ops.TFRecordReader)
init_op = variables.local_variables_initializer()
with self.cached_session() as sess:
sess.run(init_op)
with queues.QueueRunners(sess):
num_reads = 11
with self.assertRaises(errors_impl.OutOfRangeError):
for _ in range(num_reads):
sess.run([key, value])
def testTFRecordReader(self):
with self.cached_session():
[tfrecord_path] = test_utils.create_tfrecord_files(
self.get_temp_dir(), num_files=1)
key, value = parallel_reader.single_pass_read(
tfrecord_path, reader_class=io_ops.TFRecordReader)
init_op = variables.local_variables_initializer()
with self.cached_session() as sess:
sess.run(init_op)
with queues.QueueRunners(sess):
flowers = 0
num_reads = 9
for _ in range(num_reads):
current_key, _ = sess.run([key, value])
if 'flowers' in str(current_key):
flowers += 1
self.assertGreater(flowers, 0)
self.assertEquals(flowers, num_reads)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/slim/python/slim/data/parallel_reader_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition of a Dataset.
A Dataset is a collection of several components: (1) a list of data sources,
(2) a Reader class that can read those sources and return possibly encoded
samples of data, (3) a decoder that decodes each sample of data provided by the
reader, (4) the total number of samples, and (5) an optional dictionary mapping
the list of items returned to a description of those items.
Data can be loaded from a dataset specification using a dataset_data_provider:
dataset = CreateMyDataset(...)
provider = dataset_data_provider.DatasetDataProvider(
dataset, shuffle=False)
image, label = provider.get(['image', 'label'])
See slim.data.dataset_data_provider for additional examples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Dataset(object):
"""Represents a Dataset specification."""
def __init__(self, data_sources, reader, decoder, num_samples,
items_to_descriptions, **kwargs):
"""Initializes the dataset.
Args:
data_sources: A list of files that make up the dataset.
reader: The reader class, a subclass of BaseReader such as TextLineReader
or TFRecordReader.
decoder: An instance of a data_decoder.
num_samples: The number of samples in the dataset.
items_to_descriptions: A map from the items that the dataset provides to
the descriptions of those items.
**kwargs: Any remaining dataset-specific fields.
"""
kwargs['data_sources'] = data_sources
kwargs['reader'] = reader
kwargs['decoder'] = decoder
kwargs['num_samples'] = num_samples
kwargs['items_to_descriptions'] = items_to_descriptions
self.__dict__.update(kwargs)
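# --- Illustrative note (not part of the original module) ---
# A minimal, hedged example of building a Dataset specification; the reader,
# decoder and file paths below are hypothetical stand-ins.
#   from tensorflow.python.ops import io_ops
#   my_dataset = Dataset(
#       data_sources=['/tmp/train-00000-of-00001.tfrecord'],
#       reader=io_ops.TFRecordReader,
#       decoder=my_decoder,  # a data_decoder.DataDecoder instance
#       num_samples=100,
#       items_to_descriptions={'image': 'A color image.',
#                              'label': 'An integer class label.'},
#       split_name='train')
#   # Extra keyword arguments become attributes, e.g. my_dataset.split_name.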
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/slim/python/slim/data/dataset.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.data.tfexample_decoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sys
from tensorflow.contrib.slim.python.slim.data import tfexample_decoder
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
class TFExampleDecoderTest(test.TestCase):
def _EncodedFloatFeature(self, ndarray):
return feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=ndarray.flatten().tolist()))
def _EncodedInt64Feature(self, ndarray):
return feature_pb2.Feature(
int64_list=feature_pb2.Int64List(value=ndarray.flatten().tolist()))
def _EncodedBytesFeature(self, tf_encoded):
with self.cached_session():
encoded = tf_encoded.eval()
def BytesList(value):
return feature_pb2.BytesList(value=[value])
return feature_pb2.Feature(bytes_list=BytesList(encoded))
def _BytesFeature(self, ndarray):
values = ndarray.flatten().tolist()
for i in range(len(values)):
values[i] = values[i].encode('utf-8')
return feature_pb2.Feature(bytes_list=feature_pb2.BytesList(value=values))
def _StringFeature(self, value):
value = value.encode('utf-8')
return feature_pb2.Feature(bytes_list=feature_pb2.BytesList(value=[value]))
def _Encoder(self, image, image_format):
assert image_format in ['jpeg', 'JPEG', 'png', 'PNG', 'raw', 'RAW']
if image_format in ['jpeg', 'JPEG']:
tf_image = constant_op.constant(image, dtype=dtypes.uint8)
return image_ops.encode_jpeg(tf_image)
if image_format in ['png', 'PNG']:
tf_image = constant_op.constant(image, dtype=dtypes.uint8)
return image_ops.encode_png(tf_image)
if image_format in ['raw', 'RAW']:
      # If the machine is big endian, convert float32 data to little-endian
      # byte order so that it is interpreted correctly when decoded.
if image.dtype == np.float32 and sys.byteorder == 'big':
image = image.astype('<f4')
return constant_op.constant(image.tostring(), dtype=dtypes.string)
def GenerateImage(self, image_format, image_shape, image_dtype=np.uint8):
"""Generates an image and an example containing the encoded image.
Args:
image_format: the encoding format of the image.
image_shape: the shape of the image to generate.
      image_dtype: the dtype of values in the image. Only 'raw' images can
        have a dtype other than uint8.
Returns:
image: the generated image.
example: a TF-example with a feature key 'image/encoded' set to the
serialized image and a feature key 'image/format' set to the image
encoding format ['jpeg', 'JPEG', 'png', 'PNG', 'raw'].
"""
assert image_format in ['raw', 'RAW'] or image_dtype == np.uint8
num_pixels = image_shape[0] * image_shape[1] * image_shape[2]
image = np.linspace(
0, num_pixels - 1,
num=num_pixels).reshape(image_shape).astype(image_dtype)
tf_encoded = self._Encoder(image, image_format)
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'image/encoded': self._EncodedBytesFeature(tf_encoded),
'image/format': self._StringFeature(image_format)
}))
return image, example.SerializeToString()
def DecodeExample(self, serialized_example, item_handler, image_format):
"""Decodes the given serialized example with the specified item handler.
Args:
serialized_example: a serialized TF example string.
item_handler: the item handler used to decode the image.
image_format: the image format being decoded.
Returns:
the decoded image found in the serialized Example.
"""
serialized_example = array_ops.reshape(serialized_example, shape=[])
decoder = tfexample_decoder.TFExampleDecoder(
keys_to_features={
'image/encoded':
parsing_ops.FixedLenFeature(
(), dtypes.string, default_value=''),
'image/format':
parsing_ops.FixedLenFeature(
(), dtypes.string, default_value=image_format),
},
items_to_handlers={'image': item_handler})
[tf_image] = decoder.decode(serialized_example, ['image'])
return tf_image
def RunDecodeExample(self, serialized_example, item_handler, image_format):
tf_image = self.DecodeExample(serialized_example, item_handler,
image_format)
with self.cached_session():
decoded_image = tf_image.eval()
# We need to recast them here to avoid some issues with uint8.
return decoded_image.astype(np.float32)
def testDecodeExampleWithJpegEncoding(self):
image_shape = (2, 3, 3)
image, serialized_example = self.GenerateImage(
image_format='jpeg', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example, tfexample_decoder.Image(), image_format='jpeg')
# Need to use a tolerance of 1 because of noise in the jpeg encode/decode
self.assertAllClose(image, decoded_image, atol=1.001)
def testDecodeExampleWithJPEGEncoding(self):
test_image_channels = [1, 3]
for channels in test_image_channels:
image_shape = (2, 3, channels)
image, serialized_example = self.GenerateImage(
image_format='JPEG', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(channels=channels),
image_format='JPEG')
# Need to use a tolerance of 1 because of noise in the jpeg encode/decode
self.assertAllClose(image, decoded_image, atol=1.001)
def testDecodeExampleWithNoShapeInfo(self):
test_image_channels = [1, 3]
for channels in test_image_channels:
image_shape = (2, 3, channels)
_, serialized_example = self.GenerateImage(
image_format='jpeg', image_shape=image_shape)
tf_decoded_image = self.DecodeExample(
serialized_example,
tfexample_decoder.Image(shape=None, channels=channels),
image_format='jpeg')
self.assertEqual(tf_decoded_image.get_shape().ndims, 3)
def testDecodeExampleWithPngEncoding(self):
test_image_channels = [1, 3, 4]
for channels in test_image_channels:
image_shape = (2, 3, channels)
image, serialized_example = self.GenerateImage(
image_format='png', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(channels=channels),
image_format='png')
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithPNGEncoding(self):
test_image_channels = [1, 3, 4]
for channels in test_image_channels:
image_shape = (2, 3, channels)
image, serialized_example = self.GenerateImage(
image_format='PNG', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(channels=channels),
image_format='PNG')
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithRawEncoding(self):
image_shape = (2, 3, 3)
image, serialized_example = self.GenerateImage(
image_format='raw', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(shape=image_shape),
image_format='raw')
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithRAWEncoding(self):
image_shape = (2, 3, 3)
image, serialized_example = self.GenerateImage(
image_format='RAW', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(shape=image_shape),
image_format='RAW')
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithRawEncodingFloatDtype(self):
image_shape = (2, 3, 3)
image, serialized_example = self.GenerateImage(
image_format='raw', image_shape=image_shape, image_dtype=np.float32)
decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(shape=image_shape, dtype=dtypes.float32),
image_format='raw')
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithJpegEncodingAt16BitDoesNotCauseError(self):
image_shape = (2, 3, 3)
# Image has type uint8 but decoding at uint16 should not cause problems.
image, serialized_example = self.GenerateImage(
image_format='jpeg', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(dtype=dtypes.uint16),
image_format='jpeg')
self.assertAllClose(image, decoded_image, atol=1.001)
def testDecodeExampleWithStringTensor(self):
tensor_shape = (2, 3, 1)
np_array = np.array([[['ab'], ['cd'], ['ef']],
[['ghi'], ['jkl'], ['mnop']]])
example = example_pb2.Example(
features=feature_pb2.Features(feature={
'labels': self._BytesFeature(np_array),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'labels':
parsing_ops.FixedLenFeature(
tensor_shape,
dtypes.string,
default_value=constant_op.constant(
'', shape=tensor_shape, dtype=dtypes.string))
}
items_to_handlers = {
'labels': tfexample_decoder.Tensor('labels'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
labels = labels.astype(np_array.dtype)
self.assertTrue(np.array_equal(np_array, labels))
def testDecodeExampleWithFloatTensor(self):
np_array = np.random.rand(2, 3, 1).astype('f')
example = example_pb2.Example(
features=feature_pb2.Features(feature={
'array': self._EncodedFloatFeature(np_array),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'array': parsing_ops.FixedLenFeature(np_array.shape, dtypes.float32)
}
items_to_handlers = {
'array': tfexample_decoder.Tensor('array'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_array] = decoder.decode(serialized_example, ['array'])
self.assertAllEqual(tf_array.eval(), np_array)
def testDecodeExampleWithInt64Tensor(self):
np_array = np.random.randint(1, 10, size=(2, 3, 1))
example = example_pb2.Example(
features=feature_pb2.Features(feature={
'array': self._EncodedInt64Feature(np_array),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'array': parsing_ops.FixedLenFeature(np_array.shape, dtypes.int64)
}
items_to_handlers = {
'array': tfexample_decoder.Tensor('array'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_array] = decoder.decode(serialized_example, ['array'])
self.assertAllEqual(tf_array.eval(), np_array)
def testDecodeExampleWithVarLenTensor(self):
np_array = np.array([[[1], [2], [3]], [[4], [5], [6]]])
example = example_pb2.Example(
features=feature_pb2.Features(feature={
'labels': self._EncodedInt64Feature(np_array),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'labels': parsing_ops.VarLenFeature(dtype=dtypes.int64),
}
items_to_handlers = {
'labels': tfexample_decoder.Tensor('labels'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels, np_array.flatten())
def testDecodeExampleWithFixLenTensorWithShape(self):
np_array = np.array([[1, 2, 3], [4, 5, 6]])
example = example_pb2.Example(
features=feature_pb2.Features(feature={
'labels': self._EncodedInt64Feature(np_array),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'labels':
parsing_ops.FixedLenFeature(np_array.shape, dtype=dtypes.int64),
}
items_to_handlers = {
'labels': tfexample_decoder.Tensor('labels', shape=np_array.shape),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels, np_array)
def testDecodeExampleWithVarLenTensorToDense(self):
np_array = np.array([[1, 2, 3], [4, 5, 6]])
example = example_pb2.Example(
features=feature_pb2.Features(feature={
'labels': self._EncodedInt64Feature(np_array),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'labels': parsing_ops.VarLenFeature(dtype=dtypes.int64),
}
items_to_handlers = {
'labels': tfexample_decoder.Tensor('labels', shape=np_array.shape),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels, np_array)
def testDecodeExampleShapeKeyTensor(self):
np_image = np.random.rand(2, 3, 1).astype('f')
np_labels = np.array([[[1], [2], [3]], [[4], [5], [6]]])
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'image':
self._EncodedFloatFeature(np_image),
'image/shape':
self._EncodedInt64Feature(np.array(np_image.shape)),
'labels':
self._EncodedInt64Feature(np_labels),
'labels/shape':
self._EncodedInt64Feature(np.array(np_labels.shape)),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'image': parsing_ops.VarLenFeature(dtype=dtypes.float32),
'image/shape': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'labels': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'labels/shape': parsing_ops.VarLenFeature(dtype=dtypes.int64),
}
items_to_handlers = {
'image':
tfexample_decoder.Tensor('image', shape_keys='image/shape'),
'labels':
tfexample_decoder.Tensor('labels', shape_keys='labels/shape'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_image, tf_labels] = decoder.decode(serialized_example,
['image', 'labels'])
self.assertAllEqual(tf_image.eval(), np_image)
self.assertAllEqual(tf_labels.eval(), np_labels)
def testDecodeExampleMultiShapeKeyTensor(self):
np_image = np.random.rand(2, 3, 1).astype('f')
np_labels = np.array([[[1], [2], [3]], [[4], [5], [6]]])
height, width, depth = np_labels.shape
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'image':
self._EncodedFloatFeature(np_image),
'image/shape':
self._EncodedInt64Feature(np.array(np_image.shape)),
'labels':
self._EncodedInt64Feature(np_labels),
'labels/height':
self._EncodedInt64Feature(np.array([height])),
'labels/width':
self._EncodedInt64Feature(np.array([width])),
'labels/depth':
self._EncodedInt64Feature(np.array([depth])),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'image': parsing_ops.VarLenFeature(dtype=dtypes.float32),
'image/shape': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'labels': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'labels/height': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'labels/width': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'labels/depth': parsing_ops.VarLenFeature(dtype=dtypes.int64),
}
items_to_handlers = {
'image':
tfexample_decoder.Tensor('image', shape_keys='image/shape'),
'labels':
tfexample_decoder.Tensor(
'labels',
shape_keys=['labels/height', 'labels/width', 'labels/depth']),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_image, tf_labels] = decoder.decode(serialized_example,
['image', 'labels'])
self.assertAllEqual(tf_image.eval(), np_image)
self.assertAllEqual(tf_labels.eval(), np_labels)
def testDecodeExampleWithSparseTensor(self):
np_indices = np.array([[1], [2], [5]])
np_values = np.array([0.1, 0.2, 0.6]).astype('f')
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'indices': self._EncodedInt64Feature(np_indices),
'values': self._EncodedFloatFeature(np_values),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'indices': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'values': parsing_ops.VarLenFeature(dtype=dtypes.float32),
}
items_to_handlers = {
'labels': tfexample_decoder.SparseTensor(),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels.indices, np_indices)
self.assertAllEqual(labels.values, np_values)
self.assertAllEqual(labels.dense_shape, np_values.shape)
def testDecodeExampleWithSparseTensorWithKeyShape(self):
np_indices = np.array([[1], [2], [5]])
np_values = np.array([0.1, 0.2, 0.6]).astype('f')
np_shape = np.array([6])
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'indices': self._EncodedInt64Feature(np_indices),
'values': self._EncodedFloatFeature(np_values),
'shape': self._EncodedInt64Feature(np_shape),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'indices': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'values': parsing_ops.VarLenFeature(dtype=dtypes.float32),
'shape': parsing_ops.VarLenFeature(dtype=dtypes.int64),
}
items_to_handlers = {
'labels': tfexample_decoder.SparseTensor(shape_key='shape'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels.indices, np_indices)
self.assertAllEqual(labels.values, np_values)
self.assertAllEqual(labels.dense_shape, np_shape)
def testDecodeExampleWithSparseTensorWithGivenShape(self):
np_indices = np.array([[1], [2], [5]])
np_values = np.array([0.1, 0.2, 0.6]).astype('f')
np_shape = np.array([6])
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'indices': self._EncodedInt64Feature(np_indices),
'values': self._EncodedFloatFeature(np_values),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'indices': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'values': parsing_ops.VarLenFeature(dtype=dtypes.float32),
}
items_to_handlers = {
'labels': tfexample_decoder.SparseTensor(shape=np_shape),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels.indices, np_indices)
self.assertAllEqual(labels.values, np_values)
self.assertAllEqual(labels.dense_shape, np_shape)
def testDecodeExampleWithSparseTensorToDense(self):
np_indices = np.array([1, 2, 5])
np_values = np.array([0.1, 0.2, 0.6]).astype('f')
np_shape = np.array([6])
np_dense = np.array([0.0, 0.1, 0.2, 0.0, 0.0, 0.6]).astype('f')
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'indices': self._EncodedInt64Feature(np_indices),
'values': self._EncodedFloatFeature(np_values),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'indices': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'values': parsing_ops.VarLenFeature(dtype=dtypes.float32),
}
items_to_handlers = {
'labels':
tfexample_decoder.SparseTensor(shape=np_shape, densify=True),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllClose(labels, np_dense)
def testDecodeExampleWithTensor(self):
tensor_shape = (2, 3, 1)
np_array = np.random.rand(2, 3, 1)
example = example_pb2.Example(
features=feature_pb2.Features(feature={
'image/depth_map': self._EncodedFloatFeature(np_array),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'image/depth_map':
parsing_ops.FixedLenFeature(
tensor_shape,
dtypes.float32,
default_value=array_ops.zeros(tensor_shape))
}
items_to_handlers = {'depth': tfexample_decoder.Tensor('image/depth_map')}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_depth] = decoder.decode(serialized_example, ['depth'])
depth = tf_depth.eval()
self.assertAllClose(np_array, depth)
def testDecodeExampleWithItemHandlerCallback(self):
np.random.seed(0)
tensor_shape = (2, 3, 1)
np_array = np.random.rand(2, 3, 1)
example = example_pb2.Example(
features=feature_pb2.Features(feature={
'image/depth_map': self._EncodedFloatFeature(np_array),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'image/depth_map':
parsing_ops.FixedLenFeature(
tensor_shape,
dtypes.float32,
default_value=array_ops.zeros(tensor_shape))
}
def HandleDepth(keys_to_tensors):
depth = list(keys_to_tensors.values())[0]
depth += 1
return depth
items_to_handlers = {
'depth':
tfexample_decoder.ItemHandlerCallback('image/depth_map',
HandleDepth)
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_depth] = decoder.decode(serialized_example, ['depth'])
depth = tf_depth.eval()
self.assertAllClose(np_array, depth - 1)
def testDecodeImageWithItemHandlerCallback(self):
image_shape = (2, 3, 3)
for image_encoding in ['jpeg', 'png']:
image, serialized_example = self.GenerateImage(
image_format=image_encoding, image_shape=image_shape)
with self.cached_session():
def ConditionalDecoding(keys_to_tensors):
"""See base class."""
image_buffer = keys_to_tensors['image/encoded']
image_format = keys_to_tensors['image/format']
def DecodePng():
return image_ops.decode_png(image_buffer, 3)
def DecodeJpg():
return image_ops.decode_jpeg(image_buffer, 3)
image = control_flow_ops.case(
{
math_ops.equal(image_format, 'png'): DecodePng,
},
default=DecodeJpg,
exclusive=True)
image = array_ops.reshape(image, image_shape)
return image
keys_to_features = {
'image/encoded':
parsing_ops.FixedLenFeature(
(), dtypes.string, default_value=''),
'image/format':
parsing_ops.FixedLenFeature(
(), dtypes.string, default_value='jpeg')
}
items_to_handlers = {
'image':
tfexample_decoder.ItemHandlerCallback(
['image/encoded', 'image/format'], ConditionalDecoding)
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_image] = decoder.decode(serialized_example, ['image'])
decoded_image = tf_image.eval()
if image_encoding == 'jpeg':
# For jenkins:
image = image.astype(np.float32)
decoded_image = decoded_image.astype(np.float32)
self.assertAllClose(image, decoded_image, rtol=.5, atol=1.001)
else:
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithBoundingBoxSparse(self):
num_bboxes = 10
np_ymin = np.random.rand(num_bboxes, 1)
np_xmin = np.random.rand(num_bboxes, 1)
np_ymax = np.random.rand(num_bboxes, 1)
np_xmax = np.random.rand(num_bboxes, 1)
np_bboxes = np.hstack([np_ymin, np_xmin, np_ymax, np_xmax])
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'image/object/bbox/ymin': self._EncodedFloatFeature(np_ymin),
'image/object/bbox/xmin': self._EncodedFloatFeature(np_xmin),
'image/object/bbox/ymax': self._EncodedFloatFeature(np_ymax),
'image/object/bbox/xmax': self._EncodedFloatFeature(np_xmax),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'image/object/bbox/ymin': parsing_ops.VarLenFeature(dtypes.float32),
'image/object/bbox/xmin': parsing_ops.VarLenFeature(dtypes.float32),
'image/object/bbox/ymax': parsing_ops.VarLenFeature(dtypes.float32),
'image/object/bbox/xmax': parsing_ops.VarLenFeature(dtypes.float32),
}
items_to_handlers = {
'object/bbox':
tfexample_decoder.BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'],
'image/object/bbox/'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_bboxes] = decoder.decode(serialized_example, ['object/bbox'])
bboxes = tf_bboxes.eval()
self.assertAllClose(np_bboxes, bboxes)
def testDecodeExampleWithBoundingBoxDense(self):
num_bboxes = 10
np_ymin = np.random.rand(num_bboxes, 1)
np_xmin = np.random.rand(num_bboxes, 1)
np_ymax = np.random.rand(num_bboxes, 1)
np_xmax = np.random.rand(num_bboxes, 1)
np_bboxes = np.hstack([np_ymin, np_xmin, np_ymax, np_xmax])
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'image/object/bbox/ymin': self._EncodedFloatFeature(np_ymin),
'image/object/bbox/xmin': self._EncodedFloatFeature(np_xmin),
'image/object/bbox/ymax': self._EncodedFloatFeature(np_ymax),
'image/object/bbox/xmax': self._EncodedFloatFeature(np_xmax),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'image/object/bbox/ymin':
parsing_ops.FixedLenSequenceFeature(
[], dtypes.float32, allow_missing=True),
'image/object/bbox/xmin':
parsing_ops.FixedLenSequenceFeature(
[], dtypes.float32, allow_missing=True),
'image/object/bbox/ymax':
parsing_ops.FixedLenSequenceFeature(
[], dtypes.float32, allow_missing=True),
'image/object/bbox/xmax':
parsing_ops.FixedLenSequenceFeature(
[], dtypes.float32, allow_missing=True),
}
items_to_handlers = {
'object/bbox':
tfexample_decoder.BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'],
'image/object/bbox/'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_bboxes] = decoder.decode(serialized_example, ['object/bbox'])
bboxes = tf_bboxes.eval()
self.assertAllClose(np_bboxes, bboxes)
def testDecodeExampleWithRepeatedImages(self):
image_shape = (2, 3, 3)
image_format = 'png'
image, _ = self.GenerateImage(
image_format=image_format, image_shape=image_shape)
tf_encoded = self._Encoder(image, image_format)
with self.cached_session():
tf_string = tf_encoded.eval()
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'image/encoded':
feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[tf_string, tf_string])),
'image/format':
self._StringFeature(image_format),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
decoder = tfexample_decoder.TFExampleDecoder(
keys_to_features={
'image/encoded':
parsing_ops.FixedLenFeature((2,), dtypes.string),
'image/format':
parsing_ops.FixedLenFeature(
(), dtypes.string, default_value=image_format),
},
items_to_handlers={'image': tfexample_decoder.Image(repeated=True)})
[tf_image] = decoder.decode(serialized_example, ['image'])
output_image = tf_image.eval()
self.assertEqual(output_image.shape, (2, 2, 3, 3))
self.assertAllEqual(np.squeeze(output_image[0, :, :, :]), image)
self.assertAllEqual(np.squeeze(output_image[1, :, :, :]), image)
def testDecodeExampleWithLookup(self):
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'image/object/class/text':
self._BytesFeature(np.array(['cat', 'dog', 'guinea pig'])),
}))
serialized_example = example.SerializeToString()
# 'dog' -> 0, 'guinea pig' -> 1, 'cat' -> 2
table = lookup_ops.index_table_from_tensor(
constant_op.constant(['dog', 'guinea pig', 'cat']))
with self.cached_session() as sess:
sess.run(lookup_ops.tables_initializer())
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'image/object/class/text': parsing_ops.VarLenFeature(dtypes.string),
}
items_to_handlers = {
'labels':
tfexample_decoder.LookupTensor('image/object/class/text', table),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
obtained_class_ids = decoder.decode(serialized_example)[0].eval()
self.assertAllClose([2, 0, 1], obtained_class_ids)
def testDecodeExampleWithBackupHandlerLookup(self):
example1 = example_pb2.Example(
features=feature_pb2.Features(
feature={
'image/object/class/text':
self._BytesFeature(np.array(['cat', 'dog', 'guinea pig'])),
'image/object/class/label':
self._EncodedInt64Feature(np.array([42, 10, 900]))
}))
example2 = example_pb2.Example(
features=feature_pb2.Features(
feature={
'image/object/class/text':
self._BytesFeature(np.array(['cat', 'dog', 'guinea pig'])),
}))
example3 = example_pb2.Example(
features=feature_pb2.Features(
feature={
'image/object/class/label':
self._EncodedInt64Feature(np.array([42, 10, 901]))
}))
# 'dog' -> 0, 'guinea pig' -> 1, 'cat' -> 2
table = lookup_ops.index_table_from_tensor(
constant_op.constant(['dog', 'guinea pig', 'cat']))
keys_to_features = {
'image/object/class/text': parsing_ops.VarLenFeature(dtypes.string),
'image/object/class/label': parsing_ops.VarLenFeature(dtypes.int64),
}
backup_handler = tfexample_decoder.BackupHandler(
handler=tfexample_decoder.Tensor('image/object/class/label'),
backup=tfexample_decoder.LookupTensor('image/object/class/text', table))
items_to_handlers = {
'labels': backup_handler,
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
obtained_class_ids_each_example = []
with self.cached_session() as sess:
sess.run(lookup_ops.tables_initializer())
for example in [example1, example2, example3]:
serialized_example = array_ops.reshape(
example.SerializeToString(), shape=[])
obtained_class_ids_each_example.append(
decoder.decode(serialized_example)[0].eval())
self.assertAllClose([42, 10, 900], obtained_class_ids_each_example[0])
self.assertAllClose([2, 0, 1], obtained_class_ids_each_example[1])
self.assertAllClose([42, 10, 901], obtained_class_ids_each_example[2])
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/slim/python/slim/data/tfexample_decoder_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A DataProvider that provides data from a Dataset.
DatasetDataProviders provide data from datasets. The provider can be configured
to use multiple readers simultaneously or read via a single reader.
Additionally, the data being read can be optionally shuffled.
For example, to read data using a single thread without shuffling:
pascal_voc_data_provider = DatasetDataProvider(
slim.datasets.pascal_voc.get_split('train'),
shuffle=False)
images, labels = pascal_voc_data_provider.get(['images', 'labels'])
To read data using multiple readers simultaneously, with shuffling:
pascal_voc_data_provider = DatasetDataProvider(
slim.datasets.pascal_voc.Dataset(),
num_readers=10,
shuffle=True)
images, labels = pascal_voc_data_provider.get(['images', 'labels'])
Equivalently, one may request different fields of the same sample separately:
[images] = pascal_voc_data_provider.get(['images'])
[labels] = pascal_voc_data_provider.get(['labels'])
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.slim.python.slim.data import data_provider
from tensorflow.contrib.slim.python.slim.data import parallel_reader
class DatasetDataProvider(data_provider.DataProvider):
def __init__(self,
dataset,
num_readers=1,
reader_kwargs=None,
shuffle=True,
num_epochs=None,
common_queue_capacity=256,
common_queue_min=128,
record_key='record_key',
seed=None,
scope=None):
"""Creates a DatasetDataProvider.
    Note: if `num_epochs` is not `None`, a local counter `epochs` will be
    created by the relevant function. Use `local_variables_initializer()` to
    initialize
local variables.
Args:
dataset: An instance of the Dataset class.
num_readers: The number of parallel readers to use.
reader_kwargs: An optional dict of kwargs for the reader.
shuffle: Whether to shuffle the data sources and common queue when
reading.
num_epochs: The number of times each data source is read. If left as None,
the data will be cycled through indefinitely.
common_queue_capacity: The capacity of the common queue.
common_queue_min: The minimum number of elements in the common queue after
a dequeue.
record_key: The item name to use for the dataset record keys in the
provided tensors.
seed: The seed to use if shuffling.
scope: Optional name scope for the ops.
Raises:
ValueError: If `record_key` matches one of the items in the dataset.
"""
key, data = parallel_reader.parallel_read(
dataset.data_sources,
reader_class=dataset.reader,
num_epochs=num_epochs,
num_readers=num_readers,
reader_kwargs=reader_kwargs,
shuffle=shuffle,
capacity=common_queue_capacity,
min_after_dequeue=common_queue_min,
seed=seed,
scope=scope)
items = dataset.decoder.list_items()
tensors = dataset.decoder.decode(data, items)
items_to_tensors = dict(zip(items, tensors))
if record_key in items_to_tensors:
      raise ValueError('The item name used for `record_key` cannot also be '
                       'used for a dataset item: %s' % record_key)
items_to_tensors[record_key] = key
super(DatasetDataProvider, self).__init__(
items_to_tensors=items_to_tensors,
num_samples=dataset.num_samples)
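# A minimal usage sketch (illustrative only): `my_dataset` stands in for a
# slim Dataset built elsewhere; the reader count and batch size are
# placeholders, and batching assumes the provided tensors have fully defined
# static shapes.
def _example_batched_inputs(my_dataset):
  """Reads shuffled image/label pairs from `my_dataset` and batches them."""
  from tensorflow.python.training import input as input_ops
  provider = DatasetDataProvider(my_dataset, num_readers=4, shuffle=True)
  image, label = provider.get(['image', 'label'])
  return input_ops.batch(
      [image, label], batch_size=32, num_threads=4, capacity=5 * 32)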
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/slim/python/slim/data/dataset_data_provider.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains code for the DataProvider.
A DataProvider is a class which provides some predefined data types from some
source (TFRecord, etc.). The most basic function of a
data provider is the `get` operation, where one requests one or more types of
data, or 'items':
provider.get(items=['image', 'sentence', 'class'])
More concretely, a data provider (a subclass of DataProvider) returns a
single tensor for each requested item (data type):
provider = MyDataProvider(...)
image, sentence, clazz = provider.get(['image', 'sentence', 'class'])
In this example, the provider `MyDataProvider` must know how to load each item.
A data provider may be written in a way that the logic necessary to map from
each item to tensor is completely encapsulated within the data_provider itself.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class DataProvider(object):
"""Maps a list of requested data items to tensors from a data source.
  All data providers must inherit from DataProvider and implement the `get`
  method, which returns arbitrary types of data. No assumption is made about
  the source of the data or the mechanism for providing it.
"""
def __init__(self, items_to_tensors, num_samples):
"""Constructs the Data Provider.
Args:
items_to_tensors: a dictionary of names to tensors.
num_samples: the number of samples in the dataset being provided.
"""
self._items_to_tensors = items_to_tensors
self._num_samples = num_samples
def get(self, items):
"""Returns a list of tensors specified by the given list of items.
    The list of items is arbitrary; different data providers satisfy different
    lists of items. For example, the Pascal VOC data provider might accept
    items 'image' and 'semantics', whereas the NYUDepthV2 data provider might
    accept items 'image', 'depths' and 'normals'.
Args:
      items: a list of strings, each of which indicates a particular data type.
Returns:
a list of tensors, whose length matches the length of `items`, where each
tensor corresponds to each item.
Raises:
ValueError: if any of the items cannot be satisfied.
"""
self._validate_items(items)
return [self._items_to_tensors[item] for item in items]
def list_items(self):
"""Returns the list of item names that can be provided by the data provider.
Returns:
      a list of item names that can be passed to get(items).
"""
return self._items_to_tensors.keys()
def num_samples(self):
"""Returns the number of data samples in the dataset.
Returns:
a positive whole number.
"""
return self._num_samples
def _validate_items(self, items):
"""Verifies that each given item is a member of the list from ListItems().
Args:
items: a list or tuple of strings.
Raises:
ValueError: if `items` is not a tuple or list or if any of the elements of
        `items` is not found in the list provided by self.list_items().
"""
if not isinstance(items, (list, tuple)):
raise ValueError('items must be a list or tuple')
valid_items = self.list_items()
for item in items:
if item not in valid_items:
raise ValueError('Item [%s] is invalid. Valid entries include: %s' %
(item, valid_items))
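# A minimal concrete subclass sketch (illustrative only): it serves two fixed
# in-memory tensors so the DataProvider plumbing can be exercised without a
# real data source.
class _ConstantDataProvider(DataProvider):
  """Provides constant tensors for the items 'x' and 'y'."""
  def __init__(self):
    from tensorflow.python.framework import constant_op
    items_to_tensors = {
        'x': constant_op.constant([1.0, 2.0, 3.0]),
        'y': constant_op.constant([0, 1, 1]),
    }
    super(_ConstantDataProvider, self).__init__(
        items_to_tensors=items_to_tensors, num_samples=3)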
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/slim/python/slim/data/data_provider.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements a simple prefetch_queue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
def _which_queue(dynamic_pad):
return (data_flow_ops.PaddingFIFOQueue
if dynamic_pad else data_flow_ops.FIFOQueue)
def prefetch_queue(tensors,
capacity=8,
num_threads=1,
dynamic_pad=False,
shared_name=None,
name=None):
"""Creates a queue to prefetch tensors from `tensors`.
A queue runner for enqueuing tensors into the prefetch_queue is automatically
added to the TF QueueRunners collection.
Example:
  This is useful, for example, to pre-assemble input batches read with
  `tf.compat.v1.train.batch()` and enqueue the pre-assembled batches. Ops that
  dequeue from the pre-assembled queue will not pay the cost of assembling the
  batch.
images, labels = tf.compat.v1.train.batch([image, label], batch_size=32,
num_threads=4)
batch_queue = prefetch_queue([images, labels])
images, labels = batch_queue.dequeue()
logits = Net(images)
loss = Loss(logits, labels)
Args:
tensors: A list or dictionary of `Tensors` to enqueue in the buffer.
capacity: An integer. The maximum number of elements in the queue.
num_threads: An integer. Number of threads running the enqueue op.
dynamic_pad: Boolean. Whether to allow variable dimensions in input shapes.
shared_name: (optional). If set, this queue will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A queue from which you can dequeue tensors with the same type and shape
as `tensors`.
"""
if isinstance(tensors, dict):
# Need to wrap the keys and values in list() since Python3 returns views.
# We sort the keys so the order is consistent across runs.
names = list(sorted(tensors.keys()))
tensor_list = list([tensors[n] for n in names])
else:
names = None
tensor_list = tensors
with ops.name_scope(name, "prefetch_queue", tensor_list) as name:
dtypes = [t.dtype for t in tensor_list]
shapes = [t.get_shape() for t in tensor_list]
queue = _which_queue(dynamic_pad)(
capacity=capacity,
dtypes=dtypes,
shapes=shapes,
names=names,
shared_name=shared_name)
enqueue_op = queue.enqueue(tensors)
queue_runner.add_queue_runner(
queue_runner.QueueRunner(queue, [enqueue_op] * num_threads))
summary.scalar(
"fraction_of_%d_full" % capacity,
math_ops.cast(queue.size(), _dtypes.float32) * (1. / capacity))
return queue
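# A minimal usage sketch (illustrative only): `images` and `labels` stand in
# for pre-assembled batch tensors (e.g. produced by tf.compat.v1.train.batch)
# whose shapes are fully defined.
def _example_prefetch(images, labels):
  """Prefetches already-batched tensors and dequeues one batch."""
  batch_queue = prefetch_queue([images, labels], capacity=4)
  return batch_queue.dequeue()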
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/slim/python/slim/data/prefetch_queue.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the TFExampleDecoder its associated helper classes.
The TFExampleDecode is a DataDecoder used to decode TensorFlow Example protos.
In order to do so each requested item must be paired with one or more Example
features that are parsed to produce the Tensor-based manifestation of the item.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.slim.python.slim.data import data_decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
@six.add_metaclass(abc.ABCMeta)
class ItemHandler(object):
"""Specifies the item-to-Features mapping for tf.parse_example.
  An ItemHandler specifies both a list of Features used for parsing an Example
  proto and a function that post-processes the results of Example
  parsing.
"""
def __init__(self, keys):
"""Constructs the handler with the name of the tf.Feature keys to use.
See third_party/tensorflow/core/example/feature.proto
Args:
keys: the name of the TensorFlow Example Feature.
"""
if not isinstance(keys, (tuple, list)):
keys = [keys]
self._keys = keys
@property
def keys(self):
return self._keys
@abc.abstractmethod
def tensors_to_item(self, keys_to_tensors):
"""Maps the given dictionary of tensors to the requested item.
Args:
keys_to_tensors: a mapping of TF-Example keys to parsed tensors.
Returns:
the final tensor representing the item being handled.
"""
pass
class ItemHandlerCallback(ItemHandler):
"""An ItemHandler that converts the parsed tensors via a given function.
Unlike other ItemHandlers, the ItemHandlerCallback resolves its item via
a callback function rather than using prespecified behavior.
"""
def __init__(self, keys, func):
"""Initializes the ItemHandler.
Args:
keys: a list of TF-Example keys.
func: a function that takes as an argument a dictionary from `keys` to
parsed Tensors.
"""
super(ItemHandlerCallback, self).__init__(keys)
self._func = func
def tensors_to_item(self, keys_to_tensors):
return self._func(keys_to_tensors)
class BoundingBox(ItemHandler):
"""An ItemHandler that concatenates a set of parsed Tensors to Bounding Boxes.
"""
def __init__(self, keys=None, prefix=''):
"""Initialize the bounding box handler.
Args:
      keys: A list of four key names representing the ymin, xmin, ymax, xmax
        coordinates, in that order.
      prefix: An optional prefix for each of the bounding box keys. If
        provided, `prefix` is prepended to each key in `keys`.
Raises:
ValueError: if keys is not `None` and also not a list of exactly 4 keys
"""
if keys is None:
keys = ['ymin', 'xmin', 'ymax', 'xmax']
elif len(keys) != 4:
raise ValueError('BoundingBox expects 4 keys but got {}'.format(
len(keys)))
self._prefix = prefix
self._keys = keys
self._full_keys = [prefix + k for k in keys]
super(BoundingBox, self).__init__(self._full_keys)
def tensors_to_item(self, keys_to_tensors):
"""Maps the given dictionary of tensors to a concatenated list of bboxes.
Args:
keys_to_tensors: a mapping of TF-Example keys to parsed tensors.
Returns:
[num_boxes, 4] tensor of bounding box coordinates,
i.e. 1 bounding box per row, in order [y_min, x_min, y_max, x_max].
"""
sides = []
for key in self._full_keys:
side = keys_to_tensors[key]
if isinstance(side, sparse_tensor.SparseTensor):
side = side.values
side = array_ops.expand_dims(side, 0)
sides.append(side)
bounding_box = array_ops.concat(sides, 0)
return array_ops.transpose(bounding_box)
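# A minimal wiring sketch (illustrative only): the 'image/object/bbox/*' keys
# follow a common naming convention but are not required by this class.
def _example_bbox_wiring():
  """Pairs a BoundingBox handler with VarLenFeature coordinate keys."""
  keys_to_features = {
      'image/object/bbox/ymin': parsing_ops.VarLenFeature(dtypes.float32),
      'image/object/bbox/xmin': parsing_ops.VarLenFeature(dtypes.float32),
      'image/object/bbox/ymax': parsing_ops.VarLenFeature(dtypes.float32),
      'image/object/bbox/xmax': parsing_ops.VarLenFeature(dtypes.float32),
  }
  handler = BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/')
  return keys_to_features, {'object/bbox': handler}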
class Tensor(ItemHandler):
"""An ItemHandler that returns a parsed Tensor."""
def __init__(self, tensor_key, shape_keys=None, shape=None, default_value=0):
"""Initializes the Tensor handler.
Tensors are, by default, returned without any reshaping. However, there are
two mechanisms which allow reshaping to occur at load time. If `shape_keys`
    is provided, both the `Tensor` corresponding to `tensor_key` and the
    `shape_keys` tensors are loaded, and the former `Tensor` is reshaped with
    the values of the latter. Alternatively, if a fixed `shape` is provided,
    the `Tensor` corresponding to `tensor_key` is loaded and reshaped
    accordingly.
If neither `shape_keys` nor `shape` are provided, the `Tensor` will be
returned without any reshaping.
Args:
tensor_key: the name of the `TFExample` feature to read the tensor from.
shape_keys: Optional name or list of names of the TF-Example feature in
which the tensor shape is stored. If a list, then each corresponds to
one dimension of the shape.
shape: Optional output shape of the `Tensor`. If provided, the `Tensor` is
reshaped accordingly.
default_value: The value used when the `tensor_key` is not found in a
particular `TFExample`.
Raises:
ValueError: if both `shape_keys` and `shape` are specified.
"""
if shape_keys and shape is not None:
raise ValueError('Cannot specify both shape_keys and shape parameters.')
if shape_keys and not isinstance(shape_keys, list):
shape_keys = [shape_keys]
self._tensor_key = tensor_key
self._shape_keys = shape_keys
self._shape = shape
self._default_value = default_value
keys = [tensor_key]
if shape_keys:
keys.extend(shape_keys)
super(Tensor, self).__init__(keys)
def tensors_to_item(self, keys_to_tensors):
tensor = keys_to_tensors[self._tensor_key]
shape = self._shape
if self._shape_keys:
shape_dims = []
for k in self._shape_keys:
shape_dim = keys_to_tensors[k]
if isinstance(shape_dim, sparse_tensor.SparseTensor):
shape_dim = sparse_ops.sparse_tensor_to_dense(shape_dim)
shape_dims.append(shape_dim)
shape = array_ops.reshape(array_ops.stack(shape_dims), [-1])
if isinstance(tensor, sparse_tensor.SparseTensor):
if shape is not None:
tensor = sparse_ops.sparse_reshape(tensor, shape)
tensor = sparse_ops.sparse_tensor_to_dense(tensor, self._default_value)
else:
if shape is not None:
tensor = array_ops.reshape(tensor, shape)
return tensor
class LookupTensor(Tensor):
"""An ItemHandler that returns a parsed Tensor, the result of a lookup."""
def __init__(self,
tensor_key,
table,
shape_keys=None,
shape=None,
default_value=''):
"""Initializes the LookupTensor handler.
See Tensor. Simply calls a vocabulary (most often, a label mapping) lookup.
Args:
tensor_key: the name of the `TFExample` feature to read the tensor from.
table: A tf.lookup table.
shape_keys: Optional name or list of names of the TF-Example feature in
which the tensor shape is stored. If a list, then each corresponds to
one dimension of the shape.
shape: Optional output shape of the `Tensor`. If provided, the `Tensor` is
reshaped accordingly.
default_value: The value used when the `tensor_key` is not found in a
particular `TFExample`.
Raises:
ValueError: if both `shape_keys` and `shape` are specified.
"""
self._table = table
super(LookupTensor, self).__init__(tensor_key, shape_keys, shape,
default_value)
def tensors_to_item(self, keys_to_tensors):
unmapped_tensor = super(LookupTensor, self).tensors_to_item(keys_to_tensors)
return self._table.lookup(unmapped_tensor)
class BackupHandler(ItemHandler):
"""An ItemHandler that tries two ItemHandlers in order."""
def __init__(self, handler, backup):
"""Initializes the BackupHandler handler.
If the first Handler's tensors_to_item returns a Tensor with no elements,
the second Handler is used.
Args:
handler: The primary ItemHandler.
backup: The backup ItemHandler.
Raises:
ValueError: if either is not an ItemHandler.
"""
if not isinstance(handler, ItemHandler):
raise ValueError('Primary handler is of type %s instead of ItemHandler'
% type(handler))
if not isinstance(backup, ItemHandler):
raise ValueError('Backup handler is of type %s instead of ItemHandler'
% type(backup))
self._handler = handler
self._backup = backup
super(BackupHandler, self).__init__(handler.keys + backup.keys)
def tensors_to_item(self, keys_to_tensors):
item = self._handler.tensors_to_item(keys_to_tensors)
return control_flow_ops.cond(
pred=math_ops.equal(math_ops.reduce_prod(array_ops.shape(item)), 0),
true_fn=lambda: self._backup.tensors_to_item(keys_to_tensors),
false_fn=lambda: item)
class SparseTensor(ItemHandler):
"""An ItemHandler for SparseTensors."""
def __init__(self,
indices_key=None,
values_key=None,
shape_key=None,
shape=None,
densify=False,
default_value=0):
"""Initializes the Tensor handler.
Args:
indices_key: the name of the TF-Example feature that contains the ids.
Defaults to 'indices'.
values_key: the name of the TF-Example feature that contains the values.
Defaults to 'values'.
      shape_key: the name of the TF-Example feature that contains the shape.
        If provided, it is used.
      shape: the output shape of the SparseTensor. If `shape_key` is not
        provided, this `shape` is used.
densify: whether to convert the SparseTensor into a dense Tensor.
default_value: Scalar value to set when making dense for indices not
specified in the `SparseTensor`.
"""
indices_key = indices_key or 'indices'
values_key = values_key or 'values'
self._indices_key = indices_key
self._values_key = values_key
self._shape_key = shape_key
self._shape = shape
self._densify = densify
self._default_value = default_value
keys = [indices_key, values_key]
if shape_key:
keys.append(shape_key)
super(SparseTensor, self).__init__(keys)
def tensors_to_item(self, keys_to_tensors):
indices = keys_to_tensors[self._indices_key]
values = keys_to_tensors[self._values_key]
if self._shape_key:
shape = keys_to_tensors[self._shape_key]
if isinstance(shape, sparse_tensor.SparseTensor):
shape = sparse_ops.sparse_tensor_to_dense(shape)
elif self._shape:
shape = self._shape
else:
shape = indices.dense_shape
indices_shape = array_ops.shape(indices.indices)
rank = indices_shape[1]
ids = math_ops.cast(indices.values, dtypes.int64)
indices_columns_to_preserve = array_ops.slice(
indices.indices, [0, 0], array_ops.stack([-1, rank - 1]))
new_indices = array_ops.concat(
[indices_columns_to_preserve, array_ops.reshape(ids, [-1, 1])], 1)
tensor = sparse_tensor.SparseTensor(new_indices, values.values, shape)
if self._densify:
tensor = sparse_ops.sparse_tensor_to_dense(tensor, self._default_value)
return tensor
class Image(ItemHandler):
"""An ItemHandler that decodes a parsed Tensor as an image."""
def __init__(self,
image_key=None,
format_key=None,
shape=None,
channels=3,
dtype=dtypes.uint8,
repeated=False,
dct_method=''):
"""Initializes the image.
Args:
image_key: the name of the TF-Example feature in which the encoded image
is stored.
format_key: the name of the TF-Example feature in which the image format
is stored.
shape: the output shape of the image as 1-D `Tensor`
[height, width, channels]. If provided, the image is reshaped
accordingly. If left as None, no reshaping is done. A shape should
be supplied only if all the stored images have the same shape.
channels: the number of channels in the image.
      dtype: images will be decoded at this bit depth. Different formats
        support different bit depths. See tf.image.decode_image and
        tf.io.decode_raw.
repeated: if False, decodes a single image. If True, decodes a
variable number of image strings from a 1D tensor of strings.
      dct_method: An optional string. Defaults to the empty string. It only
        takes effect when the image format is jpeg, and specifies a hint about
        the algorithm used for jpeg decompression. Currently valid values are
        ['INTEGER_FAST', 'INTEGER_ACCURATE']. The hint may be ignored, for
        example, if the jpeg library does not have that specific option.
"""
if not image_key:
image_key = 'image/encoded'
if not format_key:
format_key = 'image/format'
super(Image, self).__init__([image_key, format_key])
self._image_key = image_key
self._format_key = format_key
self._shape = shape
self._channels = channels
self._dtype = dtype
self._repeated = repeated
self._dct_method = dct_method
def tensors_to_item(self, keys_to_tensors):
"""See base class."""
image_buffer = keys_to_tensors[self._image_key]
image_format = keys_to_tensors[self._format_key]
if self._repeated:
return map_fn.map_fn(lambda x: self._decode(x, image_format),
image_buffer, dtype=self._dtype)
else:
return self._decode(image_buffer, image_format)
def _decode(self, image_buffer, image_format):
"""Decodes the image buffer.
Args:
      image_buffer: The tensor containing the encoded image.
image_format: The image format for the image in `image_buffer`. If image
format is `raw`, all images are expected to be in this format, otherwise
this op can decode a mix of `jpg` and `png` formats.
Returns:
      A tensor representing the decoded image with shape self._shape, or
      (?, ?, self._channels) if self._shape is not specified.
"""
def decode_image():
"""Decodes a image based on the headers."""
return math_ops.cast(
image_ops.decode_image(image_buffer, channels=self._channels),
self._dtype)
def decode_jpeg():
"""Decodes a jpeg image with specified '_dct_method'."""
return math_ops.cast(
image_ops.decode_jpeg(
image_buffer,
channels=self._channels,
dct_method=self._dct_method), self._dtype)
def check_jpeg():
"""Checks if an image is jpeg."""
# For jpeg, we directly use image_ops.decode_jpeg rather than decode_image
      # in order to feed the jpeg-specific parameter 'dct_method'.
return control_flow_ops.cond(
image_ops.is_jpeg(image_buffer),
decode_jpeg,
decode_image,
name='cond_jpeg')
def decode_raw():
"""Decodes a raw image."""
return parsing_ops.decode_raw(image_buffer, out_type=self._dtype)
pred_fn_pairs = [(math_ops.logical_or(
math_ops.equal(image_format, 'raw'),
math_ops.equal(image_format, 'RAW')), decode_raw)]
image = control_flow_ops.case(
pred_fn_pairs, default=check_jpeg, exclusive=True)
image.set_shape([None, None, self._channels])
if self._shape is not None:
image = array_ops.reshape(image, self._shape)
return image
class TFExampleDecoder(data_decoder.DataDecoder):
"""A decoder for TensorFlow Examples.
  Decoding Example proto buffers comprises two stages: (1) Example parsing
  and (2) tensor manipulation.
  In the first stage, the tf.io.parse_example function is called with a list of
  FixedLenFeature and VarLenFeature specifications. These tell TF how to parse
  the example. The output of this stage is a set of tensors.
In the second stage, the resulting tensors are manipulated to provide the
requested 'item' tensors.
To perform this decoding operation, an ExampleDecoder is given a list of
ItemHandlers. Each ItemHandler indicates the set of features for stage 1 and
  contains the instructions for post-processing its tensors for stage 2.
"""
def __init__(self, keys_to_features, items_to_handlers):
"""Constructs the decoder.
Args:
keys_to_features: a dictionary from TF-Example keys to either
tf.io.VarLenFeature or tf.io.FixedLenFeature instances. See tensorflow's
parsing_ops.py.
items_to_handlers: a dictionary from items (strings) to ItemHandler
        instances. Note that the ItemHandlers are provided the keys that they
use to return the final item Tensors.
"""
self._keys_to_features = keys_to_features
self._items_to_handlers = items_to_handlers
def list_items(self):
"""See base class."""
return list(self._items_to_handlers.keys())
def decode(self, serialized_example, items=None):
"""Decodes the given serialized TF-example.
Args:
serialized_example: a serialized TF-example tensor.
items: the list of items to decode. These must be a subset of the item
keys in self._items_to_handlers. If `items` is left as None, then all
of the items in self._items_to_handlers are decoded.
Returns:
      the decoded items, a list of tensors.
"""
example = parsing_ops.parse_single_example(serialized_example,
self._keys_to_features)
# Reshape non-sparse elements just once, adding the reshape ops in
# deterministic order.
for k in sorted(self._keys_to_features):
v = self._keys_to_features[k]
if isinstance(v, parsing_ops.FixedLenFeature):
example[k] = array_ops.reshape(example[k], v.shape)
if not items:
items = self._items_to_handlers.keys()
outputs = []
for item in items:
handler = self._items_to_handlers[item]
keys_to_tensors = {key: example[key] for key in handler.keys}
outputs.append(handler.tensors_to_item(keys_to_tensors))
return outputs
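# A minimal usage sketch, not part of the library: it assumes the `Image` and
# `Tensor` ItemHandlers defined earlier in this module and a serialized
# TF-Example that carries 'image/encoded', 'image/format' and
# 'image/class/label' features. The key names are illustrative only.
def _example_tfexample_decoder_usage(serialized_example):
  from tensorflow.python.framework import dtypes  # local import for the sketch
  keys_to_features = {
      'image/encoded':
          parsing_ops.FixedLenFeature((), dtypes.string, default_value=''),
      'image/format':
          parsing_ops.FixedLenFeature((), dtypes.string, default_value='jpeg'),
      'image/class/label':
          parsing_ops.FixedLenFeature([], dtypes.int64, default_value=0),
  }
  items_to_handlers = {
      'image': Image(image_key='image/encoded', format_key='image/format'),
      'label': Tensor('image/class/label'),
  }
  decoder = TFExampleDecoder(keys_to_features, items_to_handlers)
  # Stage 1 parses the Example; stage 2 lets each ItemHandler build its item.
  image, label = decoder.decode(serialized_example, items=['image', 'label'])
  return image, label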
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Libsvm decoder.
@@decode_libsvm
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.libsvm.python.ops.libsvm_ops import decode_libsvm
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
"decode_libsvm",
]
remove_undocumented(__name__)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/libsvm/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeLibsvm op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.libsvm.python.ops import libsvm_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class DecodeLibsvmOpTest(test.TestCase):
def testBasic(self):
with self.cached_session() as sess:
content = [
"1 1:3.4 2:0.5 4:0.231", "1 2:2.5 3:inf 5:0.503",
"2 3:2.5 2:nan 1:0.105"
]
sparse_features, labels = libsvm_ops.decode_libsvm(
content, num_features=6)
features = sparse_ops.sparse_tensor_to_dense(
sparse_features, validate_indices=False)
self.assertAllEqual(labels.get_shape().as_list(), [3])
features, labels = sess.run([features, labels])
self.assertAllEqual(labels, [1, 1, 2])
self.assertAllClose(
features, [[0, 3.4, 0.5, 0, 0.231, 0], [0, 0, 2.5, np.inf, 0, 0.503],
[0, 0.105, np.nan, 2.5, 0, 0]])
def testNDimension(self):
with self.cached_session() as sess:
content = [["1 1:3.4 2:0.5 4:0.231", "1 1:3.4 2:0.5 4:0.231"],
["1 2:2.5 3:inf 5:0.503", "1 2:2.5 3:inf 5:0.503"],
["2 3:2.5 2:nan 1:0.105", "2 3:2.5 2:nan 1:0.105"]]
sparse_features, labels = libsvm_ops.decode_libsvm(
content, num_features=6, label_dtype=dtypes.float64)
features = sparse_ops.sparse_tensor_to_dense(
sparse_features, validate_indices=False)
self.assertAllEqual(labels.get_shape().as_list(), [3, 2])
features, labels = sess.run([features, labels])
self.assertAllEqual(labels, [[1, 1], [1, 1], [2, 2]])
self.assertAllClose(
features, [[[0, 3.4, 0.5, 0, 0.231, 0], [0, 3.4, 0.5, 0, 0.231, 0]], [
[0, 0, 2.5, np.inf, 0, 0.503], [0, 0, 2.5, np.inf, 0, 0.503]
], [[0, 0.105, np.nan, 2.5, 0, 0], [0, 0.105, np.nan, 2.5, 0, 0]]])
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/libsvm/python/kernel_tests/decode_libsvm_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Libsvm decoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.libsvm.ops import gen_libsvm_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.platform import resource_loader
from tensorflow.python.util.deprecation import deprecated
_libsvm_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_libsvm_ops.so"))
@deprecated(None,
            'tf.contrib.libsvm will be removed in 2.0; support for the libsvm '
'format will continue to be provided in tensorflow-io: '
'https://github.com/tensorflow/io')
def decode_libsvm(content, num_features, dtype=None, label_dtype=None):
"""Convert Libsvm records to a tensor of label and a tensor of feature.
Args:
content: A `Tensor` of type `string`. Each string is a record/row in
the Libsvm format.
num_features: The number of features.
    dtype: The type of the output feature tensor. Defaults to tf.float32.
    label_dtype: The type of the output label tensor. Defaults to tf.int64.
Returns:
features: A `SparseTensor` of the shape `[input_shape, num_features]`.
labels: A `Tensor` of the same shape as content.
"""
labels, indices, values, shape = gen_libsvm_ops.decode_libsvm(
content, num_features, dtype=dtype, label_dtype=label_dtype)
return sparse_tensor.SparseTensor(indices, values, shape), labels
ops.NotDifferentiable("DecodeLibSVM")
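# A minimal usage sketch, not part of the library: it decodes two LibSVM
# records into a dense feature matrix and a label vector inside a session,
# mirroring the kernel tests for this op. The record strings are illustrative.
def _example_decode_libsvm():
  from tensorflow.python.client import session as session_lib
  from tensorflow.python.ops import sparse_ops
  records = ["1 1:3.4 2:0.5", "2 3:2.5"]
  sparse_features, labels = decode_libsvm(records, num_features=6)
  dense_features = sparse_ops.sparse_tensor_to_dense(
      sparse_features, validate_indices=False)
  with session_lib.Session() as sess:
    return sess.run([dense_features, labels])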
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/libsvm/python/ops/libsvm_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops and modules related to RPC.
@@rpc
@@try_rpc
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.rpc.python.ops.rpc_op import rpc
from tensorflow.contrib.rpc.python.ops.rpc_op import try_rpc
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/rpc/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Base class for RpcOp tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.contrib.rpc.python.kernel_tests import test_example_pb2
from tensorflow.contrib.rpc.python.ops import rpc_op
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import proto_ops
__all__ = ['I_WARNED_YOU', 'RpcOpTestBase']
I_WARNED_YOU = 'I warned you!'
class RpcOpTestBase(object):
# pylint: disable=missing-docstring,invalid-name
"""Base class for RpcOp tests."""
def get_method_name(self, suffix):
raise NotImplementedError
def rpc(self, *args, **kwargs):
return rpc_op.rpc(*args, protocol=self._protocol, **kwargs)
def try_rpc(self, *args, **kwargs):
return rpc_op.try_rpc(*args, protocol=self._protocol, **kwargs)
def testScalarHostPortRpc(self):
with self.cached_session() as sess:
request_tensors = (
test_example_pb2.TestCase(values=[1, 2, 3]).SerializeToString())
response_tensors = self.rpc(
method=self.get_method_name('Increment'),
address=self._address,
request=request_tensors)
self.assertEqual(response_tensors.shape, ())
response_values = sess.run(response_tensors)
response_message = test_example_pb2.TestCase()
self.assertTrue(response_message.ParseFromString(response_values))
self.assertAllEqual([2, 3, 4], response_message.values)
def testScalarHostPortTryRpc(self):
with self.cached_session() as sess:
request_tensors = (
test_example_pb2.TestCase(values=[1, 2, 3]).SerializeToString())
response_tensors, status_code, status_message = self.try_rpc(
method=self.get_method_name('Increment'),
address=self._address,
request=request_tensors)
self.assertEqual(status_code.shape, ())
self.assertEqual(status_message.shape, ())
self.assertEqual(response_tensors.shape, ())
response_values, status_code_values, status_message_values = (
sess.run((response_tensors, status_code, status_message)))
response_message = test_example_pb2.TestCase()
self.assertTrue(response_message.ParseFromString(response_values))
self.assertAllEqual([2, 3, 4], response_message.values)
# For the base Rpc op, don't expect to get error status back.
self.assertEqual(errors.OK, status_code_values)
self.assertEqual(b'', status_message_values)
def testEmptyHostPortRpc(self):
with self.cached_session() as sess:
request_tensors = []
response_tensors = self.rpc(
method=self.get_method_name('Increment'),
address=self._address,
request=request_tensors)
self.assertAllEqual(response_tensors.shape, [0])
response_values = sess.run(response_tensors)
self.assertAllEqual(response_values.shape, [0])
def testInvalidMethod(self):
for method in [
'/InvalidService.Increment',
self.get_method_name('InvalidMethodName')
]:
with self.cached_session() as sess:
with self.assertRaisesOpError(self.invalid_method_string):
sess.run(self.rpc(method=method, address=self._address, request=''))
_, status_code_value, status_message_value = sess.run(
self.try_rpc(method=method, address=self._address, request=''))
self.assertEqual(errors.UNIMPLEMENTED, status_code_value)
self.assertTrue(
self.invalid_method_string in status_message_value.decode('ascii'))
def testInvalidAddress(self):
# This covers the case of address='' and address='localhost:293874293874'
address = 'unix:/tmp/this_unix_socket_doesnt_exist_97820348!!@'
with self.cached_session() as sess:
with self.assertRaises(errors.UnavailableError):
sess.run(
self.rpc(
method=self.get_method_name('Increment'),
address=address,
request=''))
_, status_code_value, status_message_value = sess.run(
self.try_rpc(
method=self.get_method_name('Increment'),
address=address,
request=''))
self.assertEqual(errors.UNAVAILABLE, status_code_value)
def testAlwaysFailingMethod(self):
with self.cached_session() as sess:
response_tensors = self.rpc(
method=self.get_method_name('AlwaysFailWithInvalidArgument'),
address=self._address,
request='')
self.assertEqual(response_tensors.shape, ())
with self.assertRaisesOpError(I_WARNED_YOU):
sess.run(response_tensors)
response_tensors, status_code, status_message = self.try_rpc(
method=self.get_method_name('AlwaysFailWithInvalidArgument'),
address=self._address,
request='')
self.assertEqual(response_tensors.shape, ())
self.assertEqual(status_code.shape, ())
self.assertEqual(status_message.shape, ())
status_code_value, status_message_value = sess.run((status_code,
status_message))
self.assertEqual(errors.INVALID_ARGUMENT, status_code_value)
self.assertTrue(I_WARNED_YOU in status_message_value.decode('ascii'))
def testSometimesFailingMethodWithManyRequests(self):
with self.cached_session() as sess:
# Fail hard by default.
response_tensors = self.rpc(
method=self.get_method_name('SometimesFailWithInvalidArgument'),
address=self._address,
request=[''] * 20)
self.assertEqual(response_tensors.shape, (20,))
with self.assertRaisesOpError(I_WARNED_YOU):
sess.run(response_tensors)
# Don't fail hard, use TryRpc - return the failing status instead.
response_tensors, status_code, status_message = self.try_rpc(
method=self.get_method_name('SometimesFailWithInvalidArgument'),
address=self._address,
request=[''] * 20)
self.assertEqual(response_tensors.shape, (20,))
self.assertEqual(status_code.shape, (20,))
self.assertEqual(status_message.shape, (20,))
status_code_values, status_message_values = sess.run((status_code,
status_message))
      self.assertTrue(
          all(x in (errors.OK, errors.INVALID_ARGUMENT)
              for x in status_code_values))
expected_message_values = np.where(
status_code_values == errors.INVALID_ARGUMENT,
I_WARNED_YOU.encode('ascii'), b'')
for msg, expected in zip(status_message_values, expected_message_values):
self.assertTrue(expected in msg,
'"%s" did not contain "%s"' % (msg, expected))
def testVecHostPortRpc(self):
with self.cached_session() as sess:
request_tensors = [
test_example_pb2.TestCase(
values=[i, i + 1, i + 2]).SerializeToString() for i in range(20)
]
response_tensors = self.rpc(
method=self.get_method_name('Increment'),
address=self._address,
request=request_tensors)
self.assertEqual(response_tensors.shape, (20,))
response_values = sess.run(response_tensors)
self.assertEqual(response_values.shape, (20,))
for i in range(20):
response_message = test_example_pb2.TestCase()
self.assertTrue(response_message.ParseFromString(response_values[i]))
self.assertAllEqual([i + 1, i + 2, i + 3], response_message.values)
def testVecHostPortManyParallelRpcs(self):
with self.cached_session() as sess:
request_tensors = [
test_example_pb2.TestCase(
values=[i, i + 1, i + 2]).SerializeToString() for i in range(20)
]
many_response_tensors = [
self.rpc(
method=self.get_method_name('Increment'),
address=self._address,
request=request_tensors) for _ in range(10)
]
      # Launch 10 parallel calls to the RpcOp, each containing 20 rpc requests.
many_response_values = sess.run(many_response_tensors)
self.assertEqual(10, len(many_response_values))
for response_values in many_response_values:
self.assertEqual(response_values.shape, (20,))
for i in range(20):
response_message = test_example_pb2.TestCase()
self.assertTrue(response_message.ParseFromString(response_values[i]))
self.assertAllEqual([i + 1, i + 2, i + 3], response_message.values)
def testVecHostPortRpcUsingEncodeAndDecodeProto(self):
with self.cached_session() as sess:
request_tensors = proto_ops.encode_proto(
message_type='tensorflow.contrib.rpc.TestCase',
field_names=['values'],
sizes=[[3]] * 20,
values=[
[[i, i + 1, i + 2] for i in range(20)],
])
response_tensor_strings = self.rpc(
method=self.get_method_name('Increment'),
address=self._address,
request=request_tensors)
_, (response_shape,) = proto_ops.decode_proto(
bytes=response_tensor_strings,
message_type='tensorflow.contrib.rpc.TestCase',
field_names=['values'],
output_types=[dtypes.int32])
response_shape_values = sess.run(response_shape)
self.assertAllEqual([[i + 1, i + 2, i + 3]
for i in range(20)], response_shape_values)
def testVecHostPortRpcCancelsUponSessionTimeOutWhenSleepingForever(self):
with self.cached_session() as sess:
request_tensors = [''] * 25 # This will launch 25 RPC requests.
response_tensors = self.rpc(
method=self.get_method_name('SleepForever'),
address=self._address,
request=request_tensors)
for timeout_ms in [1, 500, 1000]:
options = config_pb2.RunOptions(timeout_in_ms=timeout_ms)
with self.assertRaises((errors.UnavailableError,
errors.DeadlineExceededError)):
sess.run(response_tensors, options=options)
def testVecHostPortRpcCancelsUponConfiguredTimeOutWhenSleepingForever(self):
with self.cached_session() as sess:
request_tensors = [''] * 25 # This will launch 25 RPC requests.
response_tensors = self.rpc(
method=self.get_method_name('SleepForever'),
address=self._address,
timeout_in_ms=1000,
request=request_tensors)
with self.assertRaises(errors.DeadlineExceededError):
sess.run(response_tensors)
def testTryRpcPropagatesDeadlineErrorWithSometimesTimingOutRequests(self):
with self.cached_session() as sess:
response_tensors, status_code, status_message = self.try_rpc(
method=self.get_method_name('SometimesSleepForever'),
timeout_in_ms=1000,
address=self._address,
request=[''] * 20)
self.assertEqual(response_tensors.shape, (20,))
self.assertEqual(status_code.shape, (20,))
self.assertEqual(status_message.shape, (20,))
status_code_values = sess.run(status_code)
      self.assertTrue(
          all(x in (errors.OK, errors.DEADLINE_EXCEEDED)
              for x in status_code_values))
def testTryRpcWithMultipleAddressesSingleRequest(self):
flatten = lambda x: list(itertools.chain.from_iterable(x))
with self.cached_session() as sess:
addresses = flatten([[
self._address, 'unix:/tmp/this_unix_socket_doesnt_exist_97820348!!@'
] for _ in range(10)])
request = test_example_pb2.TestCase(values=[0, 1, 2]).SerializeToString()
response_tensors, status_code, _ = self.try_rpc(
method=self.get_method_name('Increment'),
address=addresses,
request=request)
response_tensors_values, status_code_values = sess.run((response_tensors,
status_code))
self.assertAllEqual(
flatten([errors.OK, errors.UNAVAILABLE] for _ in range(10)),
status_code_values)
for i in range(10):
self.assertTrue(response_tensors_values[2 * i])
self.assertFalse(response_tensors_values[2 * i + 1])
def testTryRpcWithMultipleMethodsSingleRequest(self):
flatten = lambda x: list(itertools.chain.from_iterable(x))
with self.cached_session() as sess:
methods = flatten(
[[self.get_method_name('Increment'), 'InvalidMethodName']
for _ in range(10)])
request = test_example_pb2.TestCase(values=[0, 1, 2]).SerializeToString()
response_tensors, status_code, _ = self.try_rpc(
method=methods, address=self._address, request=request)
response_tensors_values, status_code_values = sess.run((response_tensors,
status_code))
self.assertAllEqual(
flatten([errors.OK, errors.UNIMPLEMENTED] for _ in range(10)),
status_code_values)
for i in range(10):
self.assertTrue(response_tensors_values[2 * i])
self.assertFalse(response_tensors_values[2 * i + 1])
def testTryRpcWithMultipleAddressesAndRequests(self):
flatten = lambda x: list(itertools.chain.from_iterable(x))
with self.cached_session() as sess:
addresses = flatten([[
self._address, 'unix:/tmp/this_unix_socket_doesnt_exist_97820348!!@'
] for _ in range(10)])
requests = [
test_example_pb2.TestCase(
values=[i, i + 1, i + 2]).SerializeToString() for i in range(20)
]
response_tensors, status_code, _ = self.try_rpc(
method=self.get_method_name('Increment'),
address=addresses,
request=requests)
response_tensors_values, status_code_values = sess.run((response_tensors,
status_code))
self.assertAllEqual(
flatten([errors.OK, errors.UNAVAILABLE] for _ in range(10)),
status_code_values)
for i in range(20):
if i % 2 == 1:
self.assertFalse(response_tensors_values[i])
else:
response_message = test_example_pb2.TestCase()
self.assertTrue(
response_message.ParseFromString(response_tensors_values[i]))
self.assertAllEqual([i + 1, i + 2, i + 3], response_message.values)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_base.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Test servicer for RpcOp tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import grpc
from tensorflow.contrib.rpc.python.kernel_tests import rpc_op_test_base
from tensorflow.contrib.rpc.python.kernel_tests import test_example_pb2_grpc
class RpcOpTestServicer(test_example_pb2_grpc.TestCaseServiceServicer):
"""Test servicer for RpcOp tests."""
def Increment(self, request, context):
"""Increment the entries in the `values` attribute of request.
Args:
request: input TestCase.
context: the rpc context.
Returns:
output TestCase.
"""
for i in range(len(request.values)):
request.values[i] += 1
return request
def AlwaysFailWithInvalidArgument(self, request, context):
"""Always fails with an InvalidArgument status.
Args:
request: input TestCase.
context: the rpc context.
Returns:
output TestCase.
"""
del request
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details(rpc_op_test_base.I_WARNED_YOU)
def SometimesFailWithInvalidArgument(self, request, context):
"""Sometimes fails with an InvalidArgument status.
Args:
request: input TestCase.
context: the rpc context.
Returns:
output TestCase.
"""
if random.randint(0, 1) == 1:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details(rpc_op_test_base.I_WARNED_YOU)
return request
def SleepForever(self, request, context):
"""Sleeps forever.
Args:
request: input TestCase.
context: the rpc context.
Returns:
output TestCase.
"""
# TODO(ebrevdo): Make this async wait like the stubby version.
time.sleep(5)
def SometimesSleepForever(self, request, context):
"""Sometimes sleeps forever.
Args:
request: input TestCase.
context: the rpc context.
Returns:
output TestCase.
"""
if random.randint(0, 1) == 1:
time.sleep(5)
return request
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_servicer.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for RpcOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes as ct
import os
import grpc
from grpc.framework.foundation import logging_pool
import portpicker
from tensorflow.contrib.rpc.python.kernel_tests import rpc_op_test_base
from tensorflow.contrib.rpc.python.kernel_tests import rpc_op_test_servicer
from tensorflow.contrib.rpc.python.kernel_tests import test_example_pb2_grpc
from tensorflow.python.platform import test
class RpcOpTest(test.TestCase, rpc_op_test_base.RpcOpTestBase):
_protocol = 'grpc'
invalid_method_string = 'Method not found'
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
super(RpcOpTest, self).__init__(methodName)
lib = os.path.join(os.path.dirname(__file__), 'libtestexample.so')
if os.path.isfile(lib):
ct.cdll.LoadLibrary(lib)
def get_method_name(self, suffix):
return '/tensorflow.contrib.rpc.TestCaseService/%s' % suffix
def setUp(self):
super(RpcOpTest, self).setUp()
service_port = portpicker.pick_unused_port()
server = grpc.server(logging_pool.pool(max_workers=25))
servicer = rpc_op_test_servicer.RpcOpTestServicer()
test_example_pb2_grpc.add_TestCaseServiceServicer_to_server(
servicer, server)
self._address = 'localhost:%d' % service_port
server.add_insecure_port(self._address)
server.start()
self._server = server
def tearDown(self):
self._server.stop(grace=None)
super(RpcOpTest, self).tearDown()
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=wildcard-import,unused-import
"""RPC communication."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.rpc.python.ops.gen_rpc_op import rpc
from tensorflow.contrib.rpc.python.ops.gen_rpc_op import try_rpc
from tensorflow.python.framework import ops
ops.NotDifferentiable("Rpc")
ops.NotDifferentiable("TryRpc")
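# A minimal usage sketch, not part of the library: the method and address
# below are hypothetical. `request` is a serialized protobuf string tensor and
# the returned tensor holds the serialized response, as exercised by the
# kernel tests for this op.
def _example_rpc_call(serialized_request):
  return rpc(
      method='/my.package.MyService/MyMethod',
      address='localhost:8500',
      request=serialized_request,
      protocol='grpc')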
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/rpc/python/ops/rpc_op.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstract base class for all predictors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class Predictor(object):
"""Abstract base class for all predictors."""
@property
def graph(self):
return self._graph
@property
def session(self):
return self._session
@property
def feed_tensors(self):
return self._feed_tensors
@property
def fetch_tensors(self):
return self._fetch_tensors
def __repr__(self):
    return '{} with feed tensors {} and fetch tensors {}'.format(
type(self).__name__, self._feed_tensors, self._fetch_tensors)
def __call__(self, input_dict):
"""Returns predictions based on `input_dict`.
Args:
input_dict: a `dict` mapping strings to numpy arrays. These keys
must match `self._feed_tensors.keys()`.
Returns:
A `dict` mapping strings to numpy arrays. The keys match
`self.fetch_tensors.keys()`.
Raises:
ValueError: `input_dict` does not match `feed_tensors`.
"""
# TODO(jamieas): make validation optional?
input_keys = set(input_dict.keys())
expected_keys = set(self.feed_tensors.keys())
unexpected_keys = input_keys - expected_keys
if unexpected_keys:
raise ValueError(
'Got unexpected keys in input_dict: {}\nexpected: {}'.format(
unexpected_keys, expected_keys))
feed_dict = {}
for key in self.feed_tensors.keys():
value = input_dict.get(key)
if value is not None:
feed_dict[self.feed_tensors[key]] = value
return self._session.run(fetches=self.fetch_tensors, feed_dict=feed_dict)
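# A minimal usage sketch, not part of the library: any concrete `Predictor`
# maps a dict of inputs keyed like `feed_tensors` to a dict of numpy outputs
# keyed like `fetch_tensors`. The 'x', 'y' and 'sum' keys are hypothetical.
def _example_predictor_call(concrete_predictor):
  outputs = concrete_predictor({'x': 1.0, 'y': 2.0})
  return outputs['sum']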
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/predictor/predictor.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Factory functions for `Predictor`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.predictor import contrib_estimator_predictor
from tensorflow.contrib.predictor import core_estimator_predictor
from tensorflow.contrib.predictor import saved_model_predictor
from tensorflow.contrib.learn.python.learn.estimators import estimator as contrib_estimator
from tensorflow.python.estimator import estimator as core_estimator
def from_contrib_estimator(estimator,
prediction_input_fn,
input_alternative_key=None,
output_alternative_key=None,
graph=None,
config=None):
"""Constructs a `Predictor` from a `tf.contrib.learn.Estimator`.
Args:
estimator: an instance of `tf.contrib.learn.Estimator`.
prediction_input_fn: a function that takes no arguments and returns an
instance of `InputFnOps`.
input_alternative_key: Optional. Specify the input alternative used for
prediction.
output_alternative_key: Specify the output alternative used for
prediction. Not needed for single-headed models but required for
multi-headed models.
graph: Optional. The Tensorflow `graph` in which prediction should be
done.
config: `ConfigProto` proto used to configure the session.
Returns:
An initialized `Predictor`.
Raises:
TypeError: if `estimator` is a core `Estimator` instead of a contrib
`Estimator`.
"""
if isinstance(estimator, core_estimator.Estimator):
raise TypeError('Expected estimator to be of type '
'tf.contrib.learn.Estimator, but got type '
'tf.python.estimator.Estimator. You likely want to call '
'from_estimator.')
return contrib_estimator_predictor.ContribEstimatorPredictor(
estimator,
prediction_input_fn,
input_alternative_key=input_alternative_key,
output_alternative_key=output_alternative_key,
graph=graph,
config=config)
def from_estimator(estimator,
serving_input_receiver_fn,
output_key=None,
graph=None,
config=None):
"""Constructs a `Predictor` from a `tf.python.estimator.Estimator`.
Args:
estimator: an instance of `learn.python.estimator.Estimator`.
serving_input_receiver_fn: a function that takes no arguments and returns
an instance of `ServingInputReceiver` compatible with `estimator`.
output_key: Optional string specifying the export output to use. If
`None`, then `DEFAULT_SERVING_SIGNATURE_DEF_KEY` is used.
graph: Optional. The Tensorflow `graph` in which prediction should be
done.
config: `ConfigProto` proto used to configure the session.
Returns:
An initialized `Predictor`.
Raises:
TypeError: if `estimator` is a contrib `Estimator` instead of a core
`Estimator`.
"""
if isinstance(estimator, contrib_estimator.Estimator):
raise TypeError('Expected estimator to be of type '
'tf.python.estimator.Estimator, but got type '
'tf.contrib.learn.Estimator. You likely want to call '
'from_contrib_estimator.')
return core_estimator_predictor.CoreEstimatorPredictor(
estimator,
serving_input_receiver_fn,
output_key=output_key,
graph=graph,
config=config)
def from_saved_model(export_dir,
signature_def_key=None,
signature_def=None,
input_names=None,
output_names=None,
tags=None,
graph=None,
config=None):
"""Constructs a `Predictor` from a `SavedModel` on disk.
Args:
export_dir: a path to a directory containing a `SavedModel`.
signature_def_key: Optional string specifying the signature to use. If
`None`, then `DEFAULT_SERVING_SIGNATURE_DEF_KEY` is used. Only one of
      `signature_def_key` and `signature_def` should be specified.
signature_def: A `SignatureDef` proto specifying the inputs and outputs
for prediction. Only one of `signature_def_key` and `signature_def`
should be specified.
    input_names: A dictionary mapping strings to the names of `Tensor`s in the
      `SavedModel` that represent the input. The keys can be any string of the
      user's choosing.
    output_names: A dictionary mapping strings to the names of `Tensor`s in
      the `SavedModel` that represent the output. The keys can be any string
      of the user's choosing.
tags: Optional. Tags that will be used to retrieve the correct
`SignatureDef`. Defaults to `DEFAULT_TAGS`.
graph: Optional. The Tensorflow `graph` in which prediction should be
done.
config: `ConfigProto` proto used to configure the session.
Returns:
An initialized `Predictor`.
Raises:
ValueError: More than one of `signature_def_key` and `signature_def` is
specified.
"""
return saved_model_predictor.SavedModelPredictor(
export_dir,
signature_def_key=signature_def_key,
signature_def=signature_def,
input_names=input_names,
output_names=output_names,
tags=tags,
graph=graph,
config=config)
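# A minimal usage sketch, not part of the library: `export_dir` points at a
# hypothetical SavedModel and `inputs` is a dict keyed by the signature's
# input names. The default serving signature is used.
def _example_from_saved_model(export_dir, inputs):
  predictor = from_saved_model(export_dir)
  return predictor(inputs)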
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/predictor/predictor_factories.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for predictor.contrib_estimator_predictor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
from tensorflow.contrib.predictor import contrib_estimator_predictor
from tensorflow.contrib.predictor import testing_common
from tensorflow.python.platform import test
KEYS_AND_OPS = (('sum', lambda x, y: x + y),
('product', lambda x, y: x * y,),
('difference', lambda x, y: x - y))
class ContribEstimatorPredictorTest(test.TestCase):
"""Test fixture for `ContribEstimatorPredictor`."""
def setUp(self):
model_dir = tempfile.mkdtemp()
self._estimator = testing_common.get_arithmetic_estimator(
core=False, model_dir=model_dir)
self._prediction_input_fn = testing_common.get_arithmetic_input_fn(
core=False, train=False)
def testSpecifiedSignatureKey(self):
"""Test prediction with spedicified signatures."""
np.random.seed(1234)
for key, op in KEYS_AND_OPS:
x = np.random.rand()
y = np.random.rand()
expected_output = op(x, y)
predictor = contrib_estimator_predictor.ContribEstimatorPredictor(
estimator=self._estimator,
prediction_input_fn=self._prediction_input_fn,
output_alternative_key=key)
output_tensor_name = predictor.fetch_tensors[key].name
self.assertRegexpMatches(
output_tensor_name,
key,
msg='Unexpected fetch tensor.')
output = predictor({'x': x, 'y': y})[key]
self.assertAlmostEqual(
expected_output, output, places=3,
msg='Failed for output key "{}." '
'Got output {} for x = {} and y = {}'.format(
key, output, x, y))
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/predictor/contrib_estimator_predictor_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `Predictor constructed from a `tf.contrib.learn.Estimator`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.predictor import predictor
from tensorflow.python.framework import ops
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import monitored_session
class ContribEstimatorPredictor(predictor.Predictor):
"""A `Predictor constructed from a `tf.contrib.learn.Estimator`."""
def __init__(self,
estimator,
prediction_input_fn,
input_alternative_key=None,
output_alternative_key=None,
graph=None,
config=None):
"""Initialize a `ContribEstimatorPredictor`.
Args:
estimator: an instance of `tf.contrib.learn.Estimator`.
prediction_input_fn: a function that takes no arguments and returns an
instance of `InputFnOps`.
input_alternative_key: Optional. Specify the input alternative used for
prediction.
output_alternative_key: Specify the output alternative used for
prediction. Not needed for single-headed models but required for
multi-headed models.
graph: Optional. The Tensorflow `graph` in which prediction should be
done.
config: `ConfigProto` proto used to configure the session.
"""
self._graph = graph or ops.Graph()
with self._graph.as_default():
input_fn_ops = prediction_input_fn()
# pylint: disable=protected-access
model_fn_ops = estimator._get_predict_ops(input_fn_ops.features)
# pylint: enable=protected-access
checkpoint_path = checkpoint_management.latest_checkpoint(
estimator.model_dir)
self._session = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
config=config,
checkpoint_filename_with_path=checkpoint_path))
input_alternative_key = (
input_alternative_key or
saved_model_export_utils.DEFAULT_INPUT_ALTERNATIVE_KEY)
input_alternatives, _ = saved_model_export_utils.get_input_alternatives(
input_fn_ops)
self._feed_tensors = input_alternatives[input_alternative_key]
(output_alternatives,
output_alternative_key) = saved_model_export_utils.get_output_alternatives(
model_fn_ops, output_alternative_key)
_, fetch_tensors = output_alternatives[output_alternative_key]
self._fetch_tensors = fetch_tensors
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/predictor/contrib_estimator_predictor.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `Predictor` constructed from an `learn.python.estimator.Estimator`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.predictor import predictor
from tensorflow.python.estimator import model_fn
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import monitored_session
def _get_signature_def(
serving_input_receiver, estimator, output_key=None):
"""Construct a `SignatureDef` proto."""
if output_key is None:
output_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
# pylint: disable=protected-access
estimator_spec = estimator.model_fn(
serving_input_receiver.features, None, model_fn.ModeKeys.PREDICT,
estimator.config)
# pylint: enable=protected-access
export_outputs = estimator_spec.export_outputs
export_output = export_outputs.get(output_key)
if export_output is None:
raise KeyError('output_key must be one of {}; got {}'.format(
export_outputs.keys(), output_key))
return export_output.as_signature_def(serving_input_receiver.receiver_tensors)
class CoreEstimatorPredictor(predictor.Predictor):
"""A `Predictor` constructed from an `learn.python.estimator.Estimator`."""
def __init__(self,
estimator,
serving_input_receiver_fn,
output_key=None,
graph=None,
config=None):
"""Initialize a `CoreEstimatorPredictor`.
Args:
estimator: an instance of `learn.python.estimator.Estimator`.
serving_input_receiver_fn: a function that takes no arguments and returns
an instance of `ServingInputReceiver` compatible with `estimator`.
output_key: Optional string specifying the export output to use. If
`None`, then `DEFAULT_SERVING_SIGNATURE_DEF_KEY` is used.
graph: Optional. The Tensorflow `graph` in which prediction should be
done.
config: `ConfigProto` proto used to configure the session.
"""
self._graph = graph or ops.Graph()
with self._graph.as_default():
serving_input_receiver = serving_input_receiver_fn()
signature_def = _get_signature_def(
serving_input_receiver, estimator, output_key)
checkpoint_dir = estimator.model_dir
self._session = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
config=config,
checkpoint_dir=checkpoint_dir))
feed_tensor_info = signature_def.inputs
self._feed_tensors = {k: self._graph.get_tensor_by_name(v.name)
for k, v in feed_tensor_info.items()}
fetch_tensor_info = signature_def.outputs
self._fetch_tensors = {k: self._graph.get_tensor_by_name(v.name)
for k, v in fetch_tensor_info.items()}
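# A minimal usage sketch, not part of the library: `estimator` and
# `serving_input_receiver_fn` are assumed to come from the caller (for
# instance, the arithmetic helpers in testing_common.py). The predictor
# restores the estimator's latest checkpoint from its model_dir.
def _example_core_estimator_predictor(estimator, serving_input_receiver_fn):
  predictor = CoreEstimatorPredictor(estimator, serving_input_receiver_fn)
  return sorted(predictor.fetch_tensors.keys())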
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/predictor/core_estimator_predictor.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for predictor.predictor_factories."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.predictor import predictor_factories
from tensorflow.contrib.predictor import testing_common
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.platform import test
MODEL_DIR_NAME = 'contrib/predictor/test_export_dir'
class PredictorFactoriesTest(test.TestCase):
@classmethod
def setUpClass(cls):
# Load a saved model exported from the arithmetic `Estimator`.
# See `testing_common.py`.
cls._export_dir = test.test_src_dir_path(MODEL_DIR_NAME)
def testFromSavedModel(self):
"""Test loading from_saved_model."""
predictor_factories.from_saved_model(self._export_dir)
def testFromSavedModelWithTags(self):
"""Test loading from_saved_model with tags."""
predictor_factories.from_saved_model(self._export_dir, tags='serve')
def testFromSavedModelWithSessionConfig(self):
"""Test loading from_saved_model with session config."""
predictor_factories.from_saved_model(
self._export_dir, config=config_pb2.ConfigProto())
def testFromSavedModelWithBadTags(self):
"""Test that loading fails for bad tags."""
bad_tags_regex = ('.*? could not be found in SavedModel')
with self.assertRaisesRegexp(RuntimeError, bad_tags_regex):
predictor_factories.from_saved_model(self._export_dir, tags='bad_tag')
def testFromContribEstimator(self):
estimator = testing_common.get_arithmetic_estimator(core=False)
input_fn = testing_common.get_arithmetic_input_fn(core=False)
predictor_factories.from_contrib_estimator(
estimator, input_fn, output_alternative_key='sum')
def testFromContribEstimatorWithSessionConfig(self):
estimator = testing_common.get_arithmetic_estimator(core=False)
input_fn = testing_common.get_arithmetic_input_fn(core=False)
predictor_factories.from_contrib_estimator(
estimator, input_fn, output_alternative_key='sum',
config=config_pb2.ConfigProto())
def testFromContribEstimatorWithCoreEstimatorRaises(self):
estimator = testing_common.get_arithmetic_estimator(core=True)
input_fn = testing_common.get_arithmetic_input_fn(core=True)
with self.assertRaises(TypeError):
predictor_factories.from_contrib_estimator(estimator, input_fn)
def testFromCoreEstimator(self):
estimator = testing_common.get_arithmetic_estimator(core=True)
input_fn = testing_common.get_arithmetic_input_fn(core=True)
predictor_factories.from_estimator(estimator, input_fn)
def testFromCoreEstimatorWithSessionConfig(self):
estimator = testing_common.get_arithmetic_estimator(core=True)
input_fn = testing_common.get_arithmetic_input_fn(core=True)
predictor_factories.from_estimator(
estimator, input_fn, config=config_pb2.ConfigProto())
def testFromCoreEstimatorWithContribEstimatorRaises(self):
estimator = testing_common.get_arithmetic_estimator(core=False)
input_fn = testing_common.get_arithmetic_input_fn(core=False)
with self.assertRaises(TypeError):
predictor_factories.from_estimator(estimator, input_fn)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/predictor/predictor_factories_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Modules for `Predictor`s.
@@from_contrib_estimator
@@from_estimator
@@from_saved_model
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.predictor.predictor_factories import from_contrib_estimator
from tensorflow.contrib.predictor.predictor_factories import from_estimator
from tensorflow.contrib.predictor.predictor_factories import from_saved_model
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/predictor/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `Predictor` constructed from a `SavedModel`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from tensorflow.contrib.predictor import predictor
from tensorflow.contrib.saved_model.python.saved_model import reader
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import signature_constants
DEFAULT_TAGS = 'serve'
_DEFAULT_INPUT_ALTERNATIVE_FORMAT = 'default_input_alternative:{}'
def get_meta_graph_def(saved_model_dir, tags):
"""Gets `MetaGraphDef` from a directory containing a `SavedModel`.
Returns the `MetaGraphDef` for the given tag-set and SavedModel directory.
Args:
saved_model_dir: Directory containing the SavedModel.
tags: Comma separated list of tags used to identify the correct
`MetaGraphDef`.
Raises:
ValueError: An error when the given tags cannot be found.
Returns:
A `MetaGraphDef` corresponding to the given tags.
"""
saved_model = reader.read_saved_model(saved_model_dir)
set_of_tags = set([tag.strip() for tag in tags.split(',')])
for meta_graph_def in saved_model.meta_graphs:
if set(meta_graph_def.meta_info_def.tags) == set_of_tags:
return meta_graph_def
raise ValueError('Could not find MetaGraphDef with tags {}'.format(tags))
def _get_signature_def(signature_def_key, export_dir, tags):
"""Construct a `SignatureDef` proto."""
signature_def_key = (
signature_def_key or
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)
metagraph_def = get_meta_graph_def(export_dir, tags)
try:
signature_def = metagraph_def.signature_def[signature_def_key]
except KeyError as e:
formatted_key = _DEFAULT_INPUT_ALTERNATIVE_FORMAT.format(
signature_def_key)
try:
signature_def = metagraph_def.signature_def[formatted_key]
except KeyError:
raise ValueError(
'Got signature_def_key "{}". Available signatures are {}. '
'Original error:\n{}'.format(
signature_def_key, list(metagraph_def.signature_def), e))
logging.warning('Could not find signature def "%s". '
'Using "%s" instead', signature_def_key, formatted_key)
return signature_def
def _check_signature_arguments(signature_def_key,
signature_def,
input_names,
output_names):
"""Validates signature arguments for `SavedModelPredictor`."""
signature_def_key_specified = signature_def_key is not None
signature_def_specified = signature_def is not None
input_names_specified = input_names is not None
output_names_specified = output_names is not None
if input_names_specified != output_names_specified:
raise ValueError(
'input_names and output_names must both be specified or both be '
'unspecified.'
)
if (signature_def_key_specified + signature_def_specified +
input_names_specified > 1):
raise ValueError(
        'You must specify at most one of signature_def_key OR signature_def OR '
'(input_names AND output_names).'
)
class SavedModelPredictor(predictor.Predictor):
"""A `Predictor` constructed from a `SavedModel`."""
def __init__(self,
export_dir,
signature_def_key=None,
signature_def=None,
input_names=None,
output_names=None,
tags=None,
graph=None,
config=None):
"""Initialize a `CoreEstimatorPredictor`.
Args:
export_dir: a path to a directory containing a `SavedModel`.
signature_def_key: Optional string specifying the signature to use. If
`None`, then `DEFAULT_SERVING_SIGNATURE_DEF_KEY` is used. Only one of
`signature_def_key` and `signature_def` should be specified.
signature_def: A `SignatureDef` proto specifying the inputs and outputs
for prediction. Only one of `signature_def_key` and `signature_def`
should be specified.
      input_names: A dictionary mapping strings to the names of `Tensor`s in
        the `SavedModel` that represent the input. The keys can be any string
        of the user's choosing.
      output_names: A dictionary mapping strings to the names of `Tensor`s in
        the `SavedModel` that represent the output. The keys can be any string
        of the user's choosing.
tags: Optional. Comma separated list of tags that will be used to retrieve
the correct `SignatureDef`. Defaults to `DEFAULT_TAGS`.
graph: Optional. The Tensorflow `graph` in which prediction should be
done.
config: `ConfigProto` proto used to configure the session.
Raises:
ValueError: If more than one of signature_def_key OR signature_def OR
(input_names AND output_names) is specified.
"""
_check_signature_arguments(
signature_def_key, signature_def, input_names, output_names)
tags = tags or DEFAULT_TAGS
self._graph = graph or ops.Graph()
with self._graph.as_default():
self._session = session.Session(config=config)
loader.load(self._session, tags.split(','), export_dir)
if input_names is None:
if signature_def is None:
signature_def = _get_signature_def(signature_def_key, export_dir, tags)
input_names = {k: v.name for k, v in signature_def.inputs.items()}
output_names = {k: v.name for k, v in signature_def.outputs.items()}
self._feed_tensors = {k: self._graph.get_tensor_by_name(v)
for k, v in input_names.items()}
self._fetch_tensors = {k: self._graph.get_tensor_by_name(v)
for k, v in output_names.items()}
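# A minimal usage sketch, not part of the library: when `input_names` and
# `output_names` are supplied, the signature lookup is skipped and the tensors
# are fetched by name from the loaded graph. The tensor names below are
# hypothetical.
def _example_saved_model_predictor(export_dir):
  predictor = SavedModelPredictor(
      export_dir,
      input_names={'x': 'inputs/x:0', 'y': 'inputs/y:0'},
      output_names={'sum': 'outputs/sum:0'})
  return predictor({'x': 1.0, 'y': 2.0})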
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/predictor/saved_model_predictor.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common code used for testing `Predictor`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator as contrib_estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn as contrib_model_fn
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.python.estimator import estimator as core_estimator
from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator.export import export_lib
from tensorflow.python.estimator.export import export_output
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.saved_model import signature_constants
def get_arithmetic_estimator(core=True, model_dir=None):
"""Returns an `Estimator` that performs basic arithmetic.
Args:
core: if `True`, returns a `tensorflow.python.estimator.Estimator`.
Otherwise, returns a `tensorflow.contrib.learn.Estimator`.
model_dir: directory in which to export checkpoints and saved models.
Returns:
An `Estimator` that performs arithmetic operations on its inputs.
"""
def _model_fn(features, labels, mode):
_ = labels
x = features['x']
y = features['y']
with ops.name_scope('outputs'):
predictions = {'sum': math_ops.add(x, y, name='sum'),
'product': math_ops.multiply(x, y, name='product'),
'difference': math_ops.subtract(x, y, name='difference')}
if core:
export_outputs = {k: export_output.PredictOutput({k: v})
for k, v in predictions.items()}
export_outputs[signature_constants.
DEFAULT_SERVING_SIGNATURE_DEF_KEY] = export_outputs['sum']
return model_fn.EstimatorSpec(mode=mode,
predictions=predictions,
export_outputs=export_outputs,
loss=constant_op.constant(0),
train_op=control_flow_ops.no_op())
else:
output_alternatives = {k: (constants.ProblemType.UNSPECIFIED, {k: v})
for k, v in predictions.items()}
return contrib_model_fn.ModelFnOps(
mode=mode,
predictions=predictions,
output_alternatives=output_alternatives,
loss=constant_op.constant(0),
train_op=control_flow_ops.no_op())
if core:
return core_estimator.Estimator(_model_fn)
else:
return contrib_estimator.Estimator(_model_fn, model_dir=model_dir)
def get_arithmetic_input_fn(core=True, train=False):
"""Returns a input functions or serving input receiver function."""
def _input_fn():
with ops.name_scope('inputs'):
x = array_ops.placeholder_with_default(0.0, shape=[], name='x')
y = array_ops.placeholder_with_default(0.0, shape=[], name='y')
label = constant_op.constant(0.0)
features = {'x': x, 'y': y}
if core:
if train:
return features, label
return export_lib.ServingInputReceiver(
features=features,
receiver_tensors=features)
else:
if train:
return features, label
return input_fn_utils.InputFnOps(
features=features,
labels={},
default_inputs=features)
return _input_fn
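# A minimal usage sketch appended for illustration (not part of the original
# helpers); it mirrors the fixture wiring in core_estimator_predictor_test.py,
# using the core `Estimator` path. The temporary model directory is an
# assumption made for the example.
if __name__ == '__main__':
  import tempfile
  from tensorflow.contrib.predictor import core_estimator_predictor
  example_estimator = get_arithmetic_estimator(
      core=True, model_dir=tempfile.mkdtemp())
  example_receiver_fn = get_arithmetic_input_fn(core=True, train=False)
  example_predictor = core_estimator_predictor.CoreEstimatorPredictor(
      estimator=example_estimator,
      serving_input_receiver_fn=example_receiver_fn)
  print(example_predictor({'x': 2.0, 'y': 3.0})['sum'])  # expected ~5.0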
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/predictor/testing_common.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for predictor.core_estimator_predictor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
from tensorflow.contrib.predictor import core_estimator_predictor
from tensorflow.contrib.predictor import testing_common
from tensorflow.python.platform import test
KEYS_AND_OPS = (('sum', lambda x, y: x + y),
('product', lambda x, y: x * y,),
('difference', lambda x, y: x - y))
class CoreEstimatorPredictorTest(test.TestCase):
"""Test fixture for `CoreEstimatorPredictor`."""
def setUp(self):
model_dir = tempfile.mkdtemp()
self._estimator = testing_common.get_arithmetic_estimator(
core=True, model_dir=model_dir)
self._serving_input_receiver_fn = testing_common.get_arithmetic_input_fn(
core=True, train=False)
def testDefault(self):
"""Test prediction with default signature."""
np.random.seed(1111)
x = np.random.rand()
y = np.random.rand()
predictor = core_estimator_predictor.CoreEstimatorPredictor(
estimator=self._estimator,
serving_input_receiver_fn=self._serving_input_receiver_fn)
output = predictor({'x': x, 'y': y})['sum']
self.assertAlmostEqual(output, x + y, places=3)
def testSpecifiedSignatureKey(self):
"""Test prediction with spedicified signatures."""
np.random.seed(1234)
for output_key, op in KEYS_AND_OPS:
x = np.random.rand()
y = np.random.rand()
expected_output = op(x, y)
predictor = core_estimator_predictor.CoreEstimatorPredictor(
estimator=self._estimator,
serving_input_receiver_fn=self._serving_input_receiver_fn,
output_key=output_key)
output_tensor_name = predictor.fetch_tensors[output_key].name
self.assertRegexpMatches(
output_tensor_name,
output_key,
msg='Unexpected fetch tensor.')
output = predictor({'x': x, 'y': y})[output_key]
self.assertAlmostEqual(
expected_output, output, places=3,
          msg='Failed for output key "{}". '
'Got output {} for x = {} and y = {}'.format(
output_key, output, x, y))
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/predictor/core_estimator_predictor_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for predictor.saved_model_predictor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.predictor import saved_model_predictor
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_def_utils
KEYS_AND_OPS = (('sum', lambda x, y: x + y),
('product', lambda x, y: x * y,),
('difference', lambda x, y: x - y))
MODEL_DIR_NAME = 'contrib/predictor/test_export_dir'
class SavedModelPredictorTest(test.TestCase):
@classmethod
def setUpClass(cls):
# Load a saved model exported from the arithmetic `Estimator`.
# See `testing_common.py`.
cls._export_dir = test.test_src_dir_path(MODEL_DIR_NAME)
def testDefault(self):
"""Test prediction with default signature."""
np.random.seed(1111)
x = np.random.rand()
y = np.random.rand()
predictor = saved_model_predictor.SavedModelPredictor(
export_dir=self._export_dir)
output = predictor({'x': x, 'y': y})['outputs']
self.assertAlmostEqual(output, x + y, places=3)
def testSpecifiedSignatureKey(self):
"""Test prediction with spedicified signature key."""
np.random.seed(1234)
for signature_def_key, op in KEYS_AND_OPS:
x = np.random.rand()
y = np.random.rand()
expected_output = op(x, y)
predictor = saved_model_predictor.SavedModelPredictor(
export_dir=self._export_dir,
signature_def_key=signature_def_key)
output_tensor_name = predictor.fetch_tensors['outputs'].name
self.assertRegexpMatches(
output_tensor_name,
signature_def_key,
msg='Unexpected fetch tensor.')
output = predictor({'x': x, 'y': y})['outputs']
self.assertAlmostEqual(
expected_output, output, places=3,
          msg='Failed for signature "{}". '
'Got output {} for x = {} and y = {}'.format(
signature_def_key, output, x, y))
def testSpecifiedSignature(self):
"""Test prediction with spedicified signature definition."""
np.random.seed(4444)
for key, op in KEYS_AND_OPS:
x = np.random.rand()
y = np.random.rand()
expected_output = op(x, y)
inputs = {
'x': meta_graph_pb2.TensorInfo(
name='inputs/x:0',
dtype=types_pb2.DT_FLOAT,
tensor_shape=tensor_shape_pb2.TensorShapeProto()),
'y': meta_graph_pb2.TensorInfo(
name='inputs/y:0',
dtype=types_pb2.DT_FLOAT,
tensor_shape=tensor_shape_pb2.TensorShapeProto())}
outputs = {
key: meta_graph_pb2.TensorInfo(
name='outputs/{}:0'.format(key),
dtype=types_pb2.DT_FLOAT,
tensor_shape=tensor_shape_pb2.TensorShapeProto())}
signature_def = signature_def_utils.build_signature_def(
inputs=inputs,
outputs=outputs,
method_name='tensorflow/serving/regress')
predictor = saved_model_predictor.SavedModelPredictor(
export_dir=self._export_dir,
signature_def=signature_def)
output_tensor_name = predictor.fetch_tensors[key].name
self.assertRegexpMatches(
output_tensor_name,
key,
msg='Unexpected fetch tensor.')
output = predictor({'x': x, 'y': y})[key]
self.assertAlmostEqual(
expected_output, output, places=3,
msg='Failed for signature "{}". '
'Got output {} for x = {} and y = {}'.format(key, output, x, y))
def testSpecifiedTensors(self):
"""Test prediction with spedicified `Tensor`s."""
np.random.seed(987)
for key, op in KEYS_AND_OPS:
x = np.random.rand()
y = np.random.rand()
expected_output = op(x, y)
input_names = {'x': 'inputs/x:0',
'y': 'inputs/y:0'}
output_names = {key: 'outputs/{}:0'.format(key)}
predictor = saved_model_predictor.SavedModelPredictor(
export_dir=self._export_dir,
input_names=input_names,
output_names=output_names)
output_tensor_name = predictor.fetch_tensors[key].name
self.assertRegexpMatches(
output_tensor_name,
key,
msg='Unexpected fetch tensor.')
output = predictor({'x': x, 'y': y})[key]
self.assertAlmostEqual(
expected_output, output, places=3,
msg='Failed for signature "{}". '
'Got output {} for x = {} and y = {}'.format(key, output, x, y))
def testBadTagsFail(self):
"""Test that predictor construction fails for bad tags."""
bad_tags_regex = ('.* could not be found in SavedModel')
with self.assertRaisesRegexp(RuntimeError, bad_tags_regex):
_ = saved_model_predictor.SavedModelPredictor(
export_dir=self._export_dir,
tags=('zomg, bad, tags'))
def testSpecifiedGraph(self):
"""Test that the predictor remembers a specified `Graph`."""
g = ops.Graph()
predictor = saved_model_predictor.SavedModelPredictor(
export_dir=self._export_dir,
graph=g)
self.assertEqual(predictor.graph, g)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/predictor/saved_model_predictor_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Random forest implementation in tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.tensor_forest.client import *
from tensorflow.contrib.tensor_forest.python import *
# pylint: enable=unused-import,wildcard-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extremely random forest graph builder. go/brain-tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numbers
import random
from google.protobuf import text_format
from tensorflow.contrib.decision_trees.proto import generic_tree_model_pb2 as _tree_proto
from tensorflow.contrib.framework.python.ops import variables as framework_variables
from tensorflow.contrib.tensor_forest.proto import tensor_forest_params_pb2 as _params_proto
from tensorflow.contrib.tensor_forest.python.ops import data_ops
from tensorflow.contrib.tensor_forest.python.ops import model_ops
from tensorflow.contrib.tensor_forest.python.ops import stats_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
# Stores tuples of (leaf model type, stats model type)
CLASSIFICATION_LEAF_MODEL_TYPES = {
'all_dense': (_params_proto.MODEL_DENSE_CLASSIFICATION,
_params_proto.STATS_DENSE_GINI),
'all_sparse': (_params_proto.MODEL_SPARSE_CLASSIFICATION,
_params_proto.STATS_SPARSE_GINI),
'sparse_then_dense': (_params_proto.MODEL_SPARSE_OR_DENSE_CLASSIFICATION,
_params_proto.STATS_SPARSE_THEN_DENSE_GINI),
}
REGRESSION_MODEL_TYPE = (_params_proto.MODEL_REGRESSION,
_params_proto.STATS_LEAST_SQUARES_REGRESSION,
_params_proto.COLLECTION_BASIC)
FINISH_TYPES = {
'basic': _params_proto.SPLIT_FINISH_BASIC,
'hoeffding': _params_proto.SPLIT_FINISH_DOMINATE_HOEFFDING,
'bootstrap': _params_proto.SPLIT_FINISH_DOMINATE_BOOTSTRAP
}
PRUNING_TYPES = {
'none': _params_proto.SPLIT_PRUNE_NONE,
'half': _params_proto.SPLIT_PRUNE_HALF,
'quarter': _params_proto.SPLIT_PRUNE_QUARTER,
'10_percent': _params_proto.SPLIT_PRUNE_10_PERCENT,
'hoeffding': _params_proto.SPLIT_PRUNE_HOEFFDING,
}
SPLIT_TYPES = {
'less_or_equal': _tree_proto.InequalityTest.LESS_OR_EQUAL,
'less': _tree_proto.InequalityTest.LESS_THAN
}
def parse_number_or_string_to_proto(proto, param):
if isinstance(param, numbers.Number):
proto.constant_value = param
else: # assume it's a string
if param.isdigit():
proto.constant_value = int(param)
else:
text_format.Merge(param, proto)
def build_params_proto(params):
"""Build a TensorForestParams proto out of the V4ForestHParams object."""
proto = _params_proto.TensorForestParams()
proto.num_trees = params.num_trees
proto.max_nodes = params.max_nodes
proto.is_regression = params.regression
proto.num_outputs = params.num_classes
proto.num_features = params.num_features
proto.leaf_type = params.leaf_model_type
proto.stats_type = params.stats_model_type
proto.collection_type = _params_proto.COLLECTION_BASIC
proto.pruning_type.type = params.pruning_type
proto.finish_type.type = params.finish_type
proto.inequality_test_type = params.split_type
proto.drop_final_class = False
proto.collate_examples = params.collate_examples
proto.checkpoint_stats = params.checkpoint_stats
proto.use_running_stats_method = params.use_running_stats_method
proto.initialize_average_splits = params.initialize_average_splits
proto.inference_tree_paths = params.inference_tree_paths
parse_number_or_string_to_proto(proto.pruning_type.prune_every_samples,
params.prune_every_samples)
parse_number_or_string_to_proto(proto.finish_type.check_every_steps,
params.early_finish_check_every_samples)
parse_number_or_string_to_proto(proto.split_after_samples,
params.split_after_samples)
parse_number_or_string_to_proto(proto.num_splits_to_consider,
params.num_splits_to_consider)
proto.dominate_fraction.constant_value = params.dominate_fraction
if params.param_file:
with open(params.param_file) as f:
text_format.Merge(f.read(), proto)
return proto
# A convenience class for holding random forest hyperparameters.
#
# To just get some good default parameters, use:
# hparams = ForestHParams(num_classes=2, num_features=40).fill()
#
# Note that num_classes can not be inferred and so must always be specified.
# Also, either num_splits_to_consider or num_features should be set.
#
# To override specific values, pass them to the constructor:
# hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill()
#
# TODO(thomaswc): Inherit from tf.HParams when that is publicly available.
class ForestHParams(object):
"""A base class for holding hyperparameters and calculating good defaults."""
def __init__(
self,
num_trees=100,
max_nodes=10000,
bagging_fraction=1.0,
num_splits_to_consider=0,
feature_bagging_fraction=1.0,
max_fertile_nodes=0, # deprecated, unused.
split_after_samples=250,
valid_leaf_threshold=1,
dominate_method='bootstrap',
dominate_fraction=0.99,
model_name='all_dense',
split_finish_name='basic',
split_pruning_name='none',
prune_every_samples=0,
early_finish_check_every_samples=0,
collate_examples=False,
checkpoint_stats=False,
use_running_stats_method=False,
initialize_average_splits=False,
inference_tree_paths=False,
param_file=None,
split_name='less_or_equal',
**kwargs):
self.num_trees = num_trees
self.max_nodes = max_nodes
self.bagging_fraction = bagging_fraction
self.feature_bagging_fraction = feature_bagging_fraction
self.num_splits_to_consider = num_splits_to_consider
self.max_fertile_nodes = max_fertile_nodes
self.split_after_samples = split_after_samples
self.valid_leaf_threshold = valid_leaf_threshold
self.dominate_method = dominate_method
self.dominate_fraction = dominate_fraction
self.model_name = model_name
self.split_finish_name = split_finish_name
self.split_pruning_name = split_pruning_name
self.collate_examples = collate_examples
self.checkpoint_stats = checkpoint_stats
self.use_running_stats_method = use_running_stats_method
self.initialize_average_splits = initialize_average_splits
self.inference_tree_paths = inference_tree_paths
self.param_file = param_file
self.split_name = split_name
self.early_finish_check_every_samples = early_finish_check_every_samples
self.prune_every_samples = prune_every_samples
for name, value in kwargs.items():
setattr(self, name, value)
def values(self):
return self.__dict__
def fill(self):
"""Intelligently sets any non-specific parameters."""
# Fail fast if num_classes or num_features isn't set.
_ = getattr(self, 'num_classes')
_ = getattr(self, 'num_features')
self.bagged_num_features = int(self.feature_bagging_fraction *
self.num_features)
self.bagged_features = None
if self.feature_bagging_fraction < 1.0:
self.bagged_features = [
random.sample(range(self.num_features), self.bagged_num_features)
for _ in range(self.num_trees)
]
self.regression = getattr(self, 'regression', False)
    # Num_outputs is the actual number of outputs (a single prediction for
    # classification, an N-dimensional point for regression).
self.num_outputs = self.num_classes if self.regression else 1
# Add an extra column to classes for storing counts, which is needed for
# regression and avoids having to recompute sums for classification.
self.num_output_columns = self.num_classes + 1
# Our experiments have found that num_splits_to_consider = num_features
# gives good accuracy.
self.num_splits_to_consider = self.num_splits_to_consider or min(
max(10, math.floor(math.sqrt(self.num_features))), 1000)
# If base_random_seed is 0, the current time will be used to seed the
# random number generators for each tree. If non-zero, the i-th tree
# will be seeded with base_random_seed + i.
self.base_random_seed = getattr(self, 'base_random_seed', 0)
# How to store leaf models.
self.leaf_model_type = (
REGRESSION_MODEL_TYPE[0] if self.regression else
CLASSIFICATION_LEAF_MODEL_TYPES[self.model_name][0])
# How to store stats objects.
self.stats_model_type = (
REGRESSION_MODEL_TYPE[1] if self.regression else
CLASSIFICATION_LEAF_MODEL_TYPES[self.model_name][1])
self.finish_type = (
_params_proto.SPLIT_FINISH_BASIC
if self.regression else FINISH_TYPES[self.split_finish_name])
self.pruning_type = PRUNING_TYPES[self.split_pruning_name]
if self.pruning_type == _params_proto.SPLIT_PRUNE_NONE:
self.prune_every_samples = 0
else:
      if (not self.prune_every_samples and
          not (isinstance(self.split_after_samples, numbers.Number) or
               self.split_after_samples.isdigit())):
logging.error(
'Must specify prune_every_samples if using a depth-dependent '
'split_after_samples')
# Pruning half-way through split_after_samples seems like a decent
# default, making it easy to select the number being pruned with
# pruning_type while not paying the cost of pruning too often. Note that
# this only holds if not using a depth-dependent split_after_samples.
self.prune_every_samples = (
self.prune_every_samples or int(self.split_after_samples) / 2)
if self.finish_type == _params_proto.SPLIT_FINISH_BASIC:
self.early_finish_check_every_samples = 0
else:
      if (not self.early_finish_check_every_samples and
          not (isinstance(self.split_after_samples, numbers.Number) or
               self.split_after_samples.isdigit())):
        logging.error(
            'Must specify early_finish_check_every_samples if using a '
            'depth-dependent split_after_samples')
# Checking for early finish every quarter through split_after_samples
# seems like a decent default. We don't want to incur the checking cost
# too often, but (at least for hoeffding) it's lower than the cost of
# pruning so we can do it a little more frequently.
self.early_finish_check_every_samples = (
self.early_finish_check_every_samples or
int(self.split_after_samples) / 4)
self.split_type = SPLIT_TYPES[self.split_name]
return self
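# A short illustrative sketch (added commentary, not in the original file) of
# the defaults fill() derives; the values mirror testForestHParams in
# tensor_forest_test.py:
#
#   hparams = ForestHParams(num_classes=2, num_trees=100, max_nodes=1000,
#                           split_after_samples=25, num_features=60).fill()
#   hparams.num_output_columns      # 3: num_classes plus one count column
#   hparams.num_splits_to_consider  # 10: max(10, floor(sqrt(60))), capped at 1000
#   hparams.valid_leaf_threshold    # 1: constructor default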
def get_epoch_variable():
"""Returns the epoch variable, or [0] if not defined."""
# Grab epoch variable defined in
# //third_party/tensorflow/python/training/input.py::limit_epochs
for v in tf_variables.local_variables():
if 'limit_epochs/epoch' in v.op.name:
return array_ops.reshape(v, [1])
# TODO(thomaswc): Access epoch from the data feeder.
return [0]
# A simple container to hold the training variables for a single tree.
class TreeVariables(object):
"""Stores tf.Variables for training a single random tree.
  Uses tf.compat.v1.get_variable to get tree-specific names so that this can
  be used with a tf.learn-style implementation (one that trains a model,
  saves it, then relies on restoring that model to evaluate).
"""
def __init__(self, params, tree_num, training, tree_config='', tree_stat=''):
if (not hasattr(params, 'params_proto') or
not isinstance(params.params_proto, _params_proto.TensorForestParams)):
params.params_proto = build_params_proto(params)
params.serialized_params_proto = params.params_proto.SerializeToString()
self.stats = None
if training:
# TODO(gilberth): Manually shard this to be able to fit it on
# multiple machines.
self.stats = stats_ops.fertile_stats_variable(
params, tree_stat, self.get_tree_name('stats', tree_num))
self.tree = model_ops.tree_variable(params, tree_config, self.stats,
self.get_tree_name('tree', tree_num))
def get_tree_name(self, name, num):
return '{0}-{1}'.format(name, num)
class ForestVariables(object):
"""A container for a forests training data, consisting of multiple trees.
Instantiates a TreeVariables object for each tree. We override the
__getitem__ and __setitem__ function so that usage looks like this:
forest_variables = ForestVariables(params)
    ... forest_variables[tree_num].tree ...
"""
def __init__(self,
params,
device_assigner,
training=True,
tree_variables_class=TreeVariables,
tree_configs=None,
tree_stats=None):
self.variables = []
# Set up some scalar variables to run through the device assigner, then
# we can use those to colocate everything related to a tree.
self.device_dummies = []
with ops.device(device_assigner):
for i in range(params.num_trees):
self.device_dummies.append(
variable_scope.get_variable(name='device_dummy_%d' % i, shape=0))
for i in range(params.num_trees):
with ops.device(self.device_dummies[i].device):
kwargs = {}
if tree_configs is not None:
kwargs.update(dict(tree_config=tree_configs[i]))
if tree_stats is not None:
kwargs.update(dict(tree_stat=tree_stats[i]))
self.variables.append(
tree_variables_class(params, i, training, **kwargs))
def __setitem__(self, t, val):
self.variables[t] = val
def __getitem__(self, t):
return self.variables[t]
class RandomForestGraphs(object):
"""Builds TF graphs for random forest training and inference."""
def __init__(self,
params,
tree_configs=None,
tree_stats=None,
device_assigner=None,
variables=None,
tree_variables_class=TreeVariables,
tree_graphs=None,
training=True):
self.params = params
self.device_assigner = (
device_assigner or framework_variables.VariableDeviceChooser())
logging.info('Constructing forest with params = ')
logging.info(self.params.__dict__)
self.variables = variables or ForestVariables(
self.params,
device_assigner=self.device_assigner,
training=training,
tree_variables_class=tree_variables_class,
tree_configs=tree_configs,
tree_stats=tree_stats)
tree_graph_class = tree_graphs or RandomTreeGraphs
self.trees = [
tree_graph_class(self.variables[i], self.params, i)
for i in range(self.params.num_trees)
]
def _bag_features(self, tree_num, input_data):
split_data = array_ops.split(
value=input_data, num_or_size_splits=self.params.num_features, axis=1)
return array_ops.concat(
[split_data[ind] for ind in self.params.bagged_features[tree_num]], 1)
def get_all_resource_handles(self):
return ([self.variables[i].tree for i in range(len(self.trees))] +
[self.variables[i].stats for i in range(len(self.trees))])
def training_graph(self,
input_data,
input_labels,
num_trainers=1,
trainer_id=0,
**tree_kwargs):
"""Constructs a TF graph for training a random forest.
Args:
input_data: A tensor or dict of string->Tensor for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
num_trainers: Number of parallel trainers to split trees among.
trainer_id: Which trainer this instance is.
**tree_kwargs: Keyword arguments passed to each tree's training_graph.
Returns:
The last op in the random forest training graph.
Raises:
NotImplementedError: If trying to use bagging with sparse features.
"""
processed_dense_features, processed_sparse_features, data_spec = (
data_ops.ParseDataTensorOrDict(input_data))
if input_labels is not None:
labels = data_ops.ParseLabelTensorOrDict(input_labels)
data_spec = data_spec or self.get_default_data_spec(input_data)
tree_graphs = []
trees_per_trainer = self.params.num_trees / num_trainers
tree_start = int(trainer_id * trees_per_trainer)
tree_end = int((trainer_id + 1) * trees_per_trainer)
for i in range(tree_start, tree_end):
with ops.device(self.variables.device_dummies[i].device):
seed = self.params.base_random_seed
if seed != 0:
seed += i
# If using bagging, randomly select some of the input.
tree_data = processed_dense_features
tree_labels = labels
if self.params.bagging_fraction < 1.0:
# TODO(gilberth): Support bagging for sparse features.
if processed_sparse_features is not None:
raise NotImplementedError(
'Bagging not supported with sparse features.')
# TODO(thomaswc): This does sampling without replacement. Consider
# also allowing sampling with replacement as an option.
batch_size = array_ops.strided_slice(
array_ops.shape(processed_dense_features), [0], [1])
r = random_ops.random_uniform(batch_size, seed=seed)
mask = math_ops.less(
r,
array_ops.ones_like(r) * self.params.bagging_fraction)
gather_indices = array_ops.squeeze(array_ops.where_v2(mask), axis=[1])
# TODO(thomaswc): Calculate out-of-bag data and labels, and store
# them for use in calculating statistics later.
tree_data = array_ops.gather(processed_dense_features, gather_indices)
tree_labels = array_ops.gather(labels, gather_indices)
if self.params.bagged_features:
if processed_sparse_features is not None:
raise NotImplementedError(
'Feature bagging not supported with sparse features.')
tree_data = self._bag_features(i, tree_data)
tree_graphs.append(self.trees[i].training_graph(
tree_data,
tree_labels,
seed,
data_spec=data_spec,
sparse_features=processed_sparse_features,
**tree_kwargs))
return control_flow_ops.group(*tree_graphs, name='train')
def inference_graph(self, input_data, **inference_args):
"""Constructs a TF graph for evaluating a random forest.
Args:
input_data: A tensor or dict of string->Tensor for the input data. This
input_data must generate the same spec as the
input_data used in training_graph: the dict must have the
same keys, for example, and all tensors must have the same
size in their first dimension.
**inference_args: Keyword arguments to pass through to each tree.
Returns:
A tuple of (probabilities, tree_paths, variance).
Raises:
NotImplementedError: If trying to use feature bagging with sparse
features.
"""
processed_dense_features, processed_sparse_features, data_spec = (
data_ops.ParseDataTensorOrDict(input_data))
probabilities = []
paths = []
for i in range(self.params.num_trees):
with ops.device(self.variables.device_dummies[i].device):
tree_data = processed_dense_features
if self.params.bagged_features:
if processed_sparse_features is not None:
raise NotImplementedError(
'Feature bagging not supported with sparse features.')
tree_data = self._bag_features(i, tree_data)
probs, path = self.trees[i].inference_graph(
tree_data,
data_spec,
sparse_features=processed_sparse_features,
**inference_args)
probabilities.append(probs)
paths.append(path)
with ops.device(self.variables.device_dummies[0].device):
# shape of all_predict should be [batch_size, num_trees, num_outputs]
all_predict = array_ops.stack(probabilities, axis=1)
average_values = math_ops.div(
math_ops.reduce_sum(all_predict, 1),
self.params.num_trees,
name='probabilities')
tree_paths = array_ops.stack(paths, axis=1)
expected_squares = math_ops.div(
math_ops.reduce_sum(all_predict * all_predict, 1),
self.params.num_trees)
regression_variance = math_ops.maximum(
0., expected_squares - average_values * average_values)
return average_values, tree_paths, regression_variance
def average_size(self):
"""Constructs a TF graph for evaluating the average size of a forest.
Returns:
The average number of nodes over the trees.
"""
sizes = []
for i in range(self.params.num_trees):
with ops.device(self.variables.device_dummies[i].device):
sizes.append(self.trees[i].size())
return math_ops.reduce_mean(
math_ops.cast(array_ops.stack(sizes), dtypes.float32))
# pylint: disable=unused-argument
def training_loss(self, features, labels, name='training_loss'):
return math_ops.negative(self.average_size(), name=name)
# pylint: disable=unused-argument
def validation_loss(self, features, labels):
return math_ops.negative(self.average_size())
def average_impurity(self):
"""Constructs a TF graph for evaluating the leaf impurity of a forest.
Returns:
The last op in the graph.
"""
impurities = []
for i in range(self.params.num_trees):
with ops.device(self.variables.device_dummies[i].device):
impurities.append(self.trees[i].average_impurity())
return math_ops.reduce_mean(array_ops.stack(impurities))
def feature_importances(self):
tree_counts = [
self.trees[i].feature_usage_counts()
for i in range(self.params.num_trees)
]
total_counts = math_ops.reduce_sum(array_ops.stack(tree_counts, 0), 0)
return total_counts / math_ops.reduce_sum(total_counts)
class RandomTreeGraphs(object):
"""Builds TF graphs for random tree training and inference."""
def __init__(self, variables, params, tree_num):
self.variables = variables
self.params = params
self.tree_num = tree_num
def training_graph(self,
input_data,
input_labels,
random_seed,
data_spec,
sparse_features=None,
input_weights=None):
"""Constructs a TF graph for training a random tree.
Args:
input_data: A tensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
random_seed: The random number generator seed to use for this tree. 0
means use the current time as the seed.
data_spec: A data_ops.TensorForestDataSpec object specifying the original
feature/columns of the data.
sparse_features: A tf.SparseTensor for sparse input data.
input_weights: A float tensor or placeholder holding per-input weights, or
None if all inputs are to be weighted equally.
Returns:
The last op in the random tree training graph.
"""
# TODO(gilberth): Use this.
unused_epoch = math_ops.cast(get_epoch_variable(), dtypes.int32)
if input_weights is None:
input_weights = []
sparse_indices = []
sparse_values = []
sparse_shape = []
if sparse_features is not None:
sparse_indices = sparse_features.indices
sparse_values = sparse_features.values
sparse_shape = sparse_features.dense_shape
if input_data is None:
input_data = []
leaf_ids = model_ops.traverse_tree_v4(
self.variables.tree,
input_data,
sparse_indices,
sparse_values,
sparse_shape,
input_spec=data_spec.SerializeToString(),
params=self.params.serialized_params_proto)
update_model = model_ops.update_model_v4(
self.variables.tree,
leaf_ids,
input_labels,
input_weights,
params=self.params.serialized_params_proto)
finished_nodes = stats_ops.process_input_v4(
self.variables.tree,
self.variables.stats,
input_data,
sparse_indices,
sparse_values,
sparse_shape,
input_labels,
input_weights,
leaf_ids,
input_spec=data_spec.SerializeToString(),
random_seed=random_seed,
params=self.params.serialized_params_proto)
with ops.control_dependencies([update_model]):
return stats_ops.grow_tree_v4(
self.variables.tree,
self.variables.stats,
finished_nodes,
params=self.params.serialized_params_proto)
def inference_graph(self, input_data, data_spec, sparse_features=None):
"""Constructs a TF graph for evaluating a random tree.
Args:
input_data: A tensor or placeholder for input data.
data_spec: A TensorForestDataSpec proto specifying the original input
columns.
sparse_features: A tf.SparseTensor for sparse input data.
Returns:
A tuple of (probabilities, tree_paths).
"""
sparse_indices = []
sparse_values = []
sparse_shape = []
if sparse_features is not None:
sparse_indices = sparse_features.indices
sparse_values = sparse_features.values
sparse_shape = sparse_features.dense_shape
if input_data is None:
input_data = []
return model_ops.tree_predictions_v4(
self.variables.tree,
input_data,
sparse_indices,
sparse_values,
sparse_shape,
input_spec=data_spec.SerializeToString(),
params=self.params.serialized_params_proto)
def size(self):
"""Constructs a TF graph for evaluating the current number of nodes.
Returns:
The current number of nodes in the tree.
"""
return model_ops.tree_size(self.variables.tree)
def feature_usage_counts(self):
return model_ops.feature_usage_counts(
self.variables.tree, params=self.params.serialized_params_proto)
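# A construction-only usage sketch appended for illustration (not part of the
# original module); it mirrors testTrainingConstructionClassification and
# testInferenceConstruction in tensor_forest_test.py. The tiny input batch is
# an assumption for the example; running the ops would additionally require a
# session and initialized tree/stats resources.
if __name__ == '__main__':
  example_data = [[-1., 0.], [-1., 2.], [1., 0.], [1., -2.]]
  example_labels = [0, 1, 2, 3]
  example_params = ForestHParams(
      num_classes=4,
      num_features=2,
      num_trees=10,
      max_nodes=1000,
      split_after_samples=25).fill()
  example_builder = RandomForestGraphs(example_params)
  train_op = example_builder.training_graph(example_data, example_labels)
  probabilities, tree_paths, variance = example_builder.inference_graph(
      example_data)
  logging.info('Built training op %s and inference outputs %s, %s, %s',
               train_op.name, probabilities.name, tree_paths.name,
               variance.name)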
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/python/tensor_forest.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.tensor_forest.ops.tensor_forest."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf.json_format import ParseDict
from tensorflow.contrib.decision_trees.proto import generic_tree_model_pb2 as _tree_proto
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class TensorForestTest(test_util.TensorFlowTestCase):
def testForestHParams(self):
hparams = tensor_forest.ForestHParams(
num_classes=2,
num_trees=100,
max_nodes=1000,
split_after_samples=25,
num_features=60).fill()
self.assertEquals(2, hparams.num_classes)
self.assertEquals(3, hparams.num_output_columns)
self.assertEquals(10, hparams.num_splits_to_consider)
# Default value of valid_leaf_threshold
self.assertEquals(1, hparams.valid_leaf_threshold)
self.assertEquals(0, hparams.base_random_seed)
def testForestHParamsBigTree(self):
hparams = tensor_forest.ForestHParams(
num_classes=2,
num_trees=100,
max_nodes=1000000,
split_after_samples=25,
num_features=1000).fill()
self.assertEquals(31, hparams.num_splits_to_consider)
def testForestHParamsStringParams(self):
hparams = tensor_forest.ForestHParams(
num_classes=2,
num_trees=100,
max_nodes=1000000,
split_after_samples="25",
num_splits_to_consider="1000000",
num_features=1000).fill()
self.assertEquals("1000000", hparams.num_splits_to_consider)
def testTrainingConstructionClassification(self):
input_data = [[-1., 0.], [-1., 2.], # node 1
[1., 0.], [1., -2.]] # node 2
input_labels = [0, 1, 2, 3]
params = tensor_forest.ForestHParams(
num_classes=4,
num_features=2,
num_trees=10,
max_nodes=1000,
split_after_samples=25).fill()
graph_builder = tensor_forest.RandomForestGraphs(params)
graph = graph_builder.training_graph(input_data, input_labels)
self.assertTrue(isinstance(graph, ops.Operation))
def testTrainingConstructionRegression(self):
input_data = [[-1., 0.], [-1., 2.], # node 1
[1., 0.], [1., -2.]] # node 2
input_labels = [0, 1, 2, 3]
params = tensor_forest.ForestHParams(
num_classes=4,
num_features=2,
num_trees=10,
max_nodes=1000,
split_after_samples=25,
regression=True).fill()
graph_builder = tensor_forest.RandomForestGraphs(params)
graph = graph_builder.training_graph(input_data, input_labels)
self.assertTrue(isinstance(graph, ops.Operation))
def testInferenceConstruction(self):
input_data = [[-1., 0.], [-1., 2.], # node 1
[1., 0.], [1., -2.]] # node 2
params = tensor_forest.ForestHParams(
num_classes=4,
num_features=2,
num_trees=10,
max_nodes=1000,
split_after_samples=25).fill()
graph_builder = tensor_forest.RandomForestGraphs(params)
probs, paths, var = graph_builder.inference_graph(input_data)
self.assertTrue(isinstance(probs, ops.Tensor))
self.assertTrue(isinstance(paths, ops.Tensor))
self.assertTrue(isinstance(var, ops.Tensor))
  def testInferenceFromRestoredModel(self):
input_data = [[-1., 0.], [-1., 2.], # node 1
[1., 0.], [1., -2.]] # node 2
expected_prediction = [[0.0, 1.0], [0.0, 1.0],
[0.0, 1.0], [0.0, 1.0]]
hparams = tensor_forest.ForestHParams(
num_classes=2,
num_features=2,
num_trees=1,
max_nodes=1000,
split_after_samples=25).fill()
tree_weight = {'decisionTree':
{'nodes':
[{'binaryNode':
{'rightChildId': 2,
'leftChildId': 1,
'inequalityLeftChildTest':
{'featureId': {'id': '0'},
'threshold': {'floatValue': 0}}}},
{'leaf': {'vector':
{'value': [{'floatValue': 0.0},
{'floatValue': 1.0}]}},
'nodeId': 1},
{'leaf': {'vector':
{'value': [{'floatValue': 0.0},
{'floatValue': 1.0}]}},
'nodeId': 2}]}}
restored_tree_param = ParseDict(tree_weight,
_tree_proto.Model()).SerializeToString()
graph_builder = tensor_forest.RandomForestGraphs(hparams,
[restored_tree_param])
probs, paths, var = graph_builder.inference_graph(input_data)
self.assertTrue(isinstance(probs, ops.Tensor))
self.assertTrue(isinstance(paths, ops.Tensor))
self.assertTrue(isinstance(var, ops.Tensor))
with self.cached_session():
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
self.assertEquals(probs.eval().shape, (4, 2))
self.assertEquals(probs.eval().tolist(), expected_prediction)
def testTrainingConstructionClassificationSparse(self):
input_data = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 3], [1, 0], [1, 7], [2, 1], [3, 9]],
values=[-1.0, 0.0, -1., 2., 1., -2.0],
dense_shape=[4, 10])
input_labels = [0, 1, 2, 3]
params = tensor_forest.ForestHParams(
num_classes=4,
num_features=10,
num_trees=10,
max_nodes=1000,
split_after_samples=25).fill()
graph_builder = tensor_forest.RandomForestGraphs(params)
graph = graph_builder.training_graph(input_data, input_labels)
self.assertTrue(isinstance(graph, ops.Operation))
def testInferenceConstructionSparse(self):
input_data = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 3],
[1, 0], [1, 7],
[2, 1],
[3, 9]],
values=[-1.0, 0.0,
-1., 2.,
1.,
-2.0],
dense_shape=[4, 10])
params = tensor_forest.ForestHParams(
num_classes=4,
num_features=10,
num_trees=10,
max_nodes=1000,
regression=True,
split_after_samples=25).fill()
graph_builder = tensor_forest.RandomForestGraphs(params)
probs, paths, var = graph_builder.inference_graph(input_data)
self.assertTrue(isinstance(probs, ops.Tensor))
self.assertTrue(isinstance(paths, ops.Tensor))
self.assertTrue(isinstance(var, ops.Tensor))
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/python/tensor_forest_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Random forest implementation in tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.contrib.tensor_forest.python.ops import data_ops
from tensorflow.contrib.tensor_forest.python.ops import model_ops
from tensorflow.contrib.tensor_forest.python.ops import stats_ops
from tensorflow.contrib.tensor_forest.python.ops import tensor_forest_ops
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/python/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.tensor_forest.ops.scatter_add_ndim_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.python.ops import tensor_forest_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class ScatterAddNdimTest(test_util.TensorFlowTestCase):
def test1dim(self):
input_data = variables.VariableV1(
[1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.])
indices = [[1], [10]]
updates = [100., 200.]
with self.cached_session():
variables.global_variables_initializer().run()
tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
self.assertAllEqual(
[1., 102., 3., 4., 5., 6., 7., 8., 9., 10., 211., 12.],
input_data.eval())
def test3dim(self):
input_data = variables.VariableV1([[[1., 2., 3.], [4., 5., 6.]],
[[7., 8., 9.], [10., 11., 12.]]])
indices = [[0, 0, 1], [1, 1, 2]]
updates = [100., 200.]
with self.cached_session():
variables.global_variables_initializer().run()
tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
self.assertAllEqual([[[1., 102., 3.], [4., 5., 6.]],
[[7., 8., 9.], [10., 11., 212.]]], input_data.eval())
def testNoUpdates(self):
init_val = [[[1., 2., 3.], [4., 5., 6.]], [[7., 8., 9.], [10., 11., 12.]]]
input_data = variables.VariableV1(init_val)
indices = []
updates = []
with self.cached_session():
variables.global_variables_initializer().run()
tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
self.assertAllEqual(init_val, input_data.eval())
def testBadInput(self):
init_val = [[[1., 2., 3.], [4., 5., 6.]], [[7., 8., 9.], [10., 11., 12.]]]
input_data = variables.VariableV1(init_val)
indices = [[0, 0, 1], [1, 1, 2]]
updates = [100.]
with self.cached_session():
variables.global_variables_initializer().run()
with self.assertRaisesOpError(
'Number of updates should be same as number of indices.'):
tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
self.assertAllEqual(init_val, input_data.eval())
def testIncompleteIndices(self):
input_data = variables.VariableV1([[[1., 2., 3.], [4., 5., 6.]],
[[7., 8., 9.], [10., 11., 12.]]])
indices = [[0, 0], [1, 1]]
updates = [[100., 200., 300.], [400., 500., 600.]]
with self.cached_session():
variables.global_variables_initializer().run()
tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
self.assertAllEqual([[[101., 202., 303.], [4., 5., 6.]],
[[7., 8., 9.], [410., 511., 612.]]],
input_data.eval())
if __name__ == '__main__':
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/python/kernel_tests/scatter_add_ndim_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stats ops python wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.contrib.tensor_forest.python.ops import gen_stats_ops
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.python.ops.gen_stats_ops import finalize_tree
from tensorflow.contrib.tensor_forest.python.ops.gen_stats_ops import grow_tree_v4
from tensorflow.contrib.tensor_forest.python.ops.gen_stats_ops import process_input_v4
# pylint: enable=unused-import
from tensorflow.contrib.util import loader
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import resource_loader
from tensorflow.python.training import saver
from tensorflow.python.training.tracking import tracking
_stats_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_stats_ops.so"))
ops.NotDifferentiable("FertileStatsVariable")
ops.NotDifferentiable("FertileStatsSerialize")
ops.NotDifferentiable("FertileStatsDeserialize")
ops.NotDifferentiable("GrowTreeV4")
ops.NotDifferentiable("ProcessInputV4")
ops.NotDifferentiable("FinalizeTree")
class FertileStatsVariableSavable(saver.BaseSaverBuilder.SaveableObject):
"""SaveableObject implementation for FertileStatsVariable."""
def __init__(self, params, stats_handle, create_op, name):
"""Creates a FertileStatsVariableSavable object.
Args:
params: A TensorForestParams object.
stats_handle: handle to the tree variable.
create_op: the op to initialize the variable.
name: the name to save the tree variable under.
"""
self.params = params
tensor = gen_stats_ops.fertile_stats_serialize(
stats_handle, params=params.serialized_params_proto)
# slice_spec is useful for saving a slice from a variable.
    # It's not meaningful for the stats variable, so we just pass an empty value.
slice_spec = ""
specs = [saver.BaseSaverBuilder.SaveSpec(tensor, slice_spec, name),]
super(FertileStatsVariableSavable,
self).__init__(stats_handle, specs, name)
self._stats_handle = stats_handle
self._create_op = create_op
def restore(self, restored_tensors, unused_restored_shapes):
"""Restores the associated tree from 'restored_tensors'.
Args:
restored_tensors: the tensors that were loaded from a checkpoint.
unused_restored_shapes: the shapes this object should conform to after
restore. Not meaningful for trees.
Returns:
The operation that restores the state of the tree variable.
"""
with ops.control_dependencies([self._create_op]):
return gen_stats_ops.fertile_stats_deserialize(
self._stats_handle, restored_tensors[0],
params=self.params.serialized_params_proto)
class FertileStatsVariable(tracking.TrackableResource):
"""A Fertile stats variable."""
def __init__(self, params, stats_config, name, container=None):
self._params = params
self._stats_config = stats_config
self._name = name
self._container = container
self._init_op = None
super(FertileStatsVariable, self).__init__()
self._resource_handle = self._create_resource()
def _create_resource(self):
if context.executing_eagerly():
# TODO(allenl): This will leak memory due to kernel caching by the
# shared_name attribute value (but is better than the alternative of
# sharing everything by default when executing eagerly; hopefully creating
# tables in a loop is uncommon).
shared_name = "fertile_stats_variable_%d" % (ops.uid(),)
else:
shared_name = self._name
return gen_stats_ops.fertile_stats_resource_handle_op(
self._container, shared_name=shared_name, name=self._name)
def _initialize(self):
return gen_stats_ops.create_fertile_stats_variable(
self.resource_handle,
self._stats_config,
params=self._params.serialized_params_proto)
@property
def initializer(self):
if self._init_op is None:
self._init_op = self._initialize()
return self._init_op
def is_initialized(self):
return gen_stats_ops.fertile_stats_is_initialized_op(self.resource_handle)
def _gather_saveables_for_checkpoint(self):
"""For object-based checkpointing."""
return {
"fertile_stats_variable":
functools.partial(
FertileStatsVariableSavable,
params=self._params,
stats_handle=self.resource_handle,
create_op=self.initializer)
}
def fertile_stats_variable(params, stats_config, name, container=None):
r"""Creates a stats object and returns a handle to it.
Args:
params: A TensorForestParams object.
stats_config: A `Tensor` of type `string`. Serialized proto of the stats.
name: A name for the variable.
container: An optional `string`. Defaults to `""`.
Returns:
A `Tensor` of type mutable `string`. The handle to the stats.
"""
with ops.name_scope(name, "FertileStatsVariable") as name:
fertile_stats_var = FertileStatsVariable(params, stats_config, name,
container)
resource_handle = fertile_stats_var.resource_handle
create_op = fertile_stats_var.initializer
is_initialized_op = fertile_stats_var.is_initialized()
# Adds the variable to the savable list.
saveable = (
fertile_stats_var._gather_saveables_for_checkpoint()[ # pylint: disable=protected-access
"fertile_stats_variable"](name=resource_handle.name))
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
resources.register_resource(resource_handle, create_op, is_initialized_op)
return resource_handle
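# A usage sketch appended for illustration (not part of the original module).
# `fertile_stats_variable` is normally reached through TreeVariables in
# tensor_forest.py, which builds the TensorForestParams proto first; the
# hyperparameter values below are assumptions for the example.
if __name__ == "__main__":
  from tensorflow.contrib.tensor_forest.python import tensor_forest
  example_params = tensor_forest.ForestHParams(
      num_classes=2, num_features=4).fill()
  example_params.params_proto = tensor_forest.build_params_proto(
      example_params)
  example_params.serialized_params_proto = (
      example_params.params_proto.SerializeToString())
  stats_handle = fertile_stats_variable(
      example_params, stats_config="", name="stats-0")
  print(stats_handle)  # a resource handle tensor; using it needs a session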
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/python/ops/stats_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom ops used by tensorforest."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.tensor_forest.python.ops.gen_tensor_forest_ops import *
# pylint: enable=wildcard-import
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
_tensor_forest_ops = loader.load_op_library(
resource_loader.get_path_to_datafile('_tensor_forest_ops.so'))
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/python/ops/tensor_forest_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model ops python wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.contrib.tensor_forest.python.ops import gen_model_ops
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.python.ops.gen_model_ops import feature_usage_counts
from tensorflow.contrib.tensor_forest.python.ops.gen_model_ops import traverse_tree_v4
from tensorflow.contrib.tensor_forest.python.ops.gen_model_ops import tree_predictions_v4
from tensorflow.contrib.tensor_forest.python.ops.gen_model_ops import tree_size
from tensorflow.contrib.tensor_forest.python.ops.gen_model_ops import update_model_v4
# pylint: enable=unused-import
from tensorflow.contrib.util import loader
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import resource_loader
from tensorflow.python.training import saver
from tensorflow.python.training.tracking import tracking
_model_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_model_ops.so"))
ops.NotDifferentiable("TreeVariable")
ops.NotDifferentiable("TreeSerialize")
ops.NotDifferentiable("TreeDeserialize")
ops.NotDifferentiable("TreeSize")
ops.NotDifferentiable("TreePredictionsV4")
ops.NotDifferentiable("FeatureUsageCounts")
class TreeVariableSavable(saver.BaseSaverBuilder.SaveableObject):
"""SaveableObject implementation for TreeVariable."""
def __init__(self, params, tree_handle, stats_handle, create_op, name):
"""Creates a TreeVariableSavable object.
Args:
params: A TensorForestParams object.
tree_handle: handle to the tree variable.
stats_handle: handle to the stats variable.
create_op: the op to initialize the variable.
name: the name to save the tree variable under.
"""
self.params = params
tensor = gen_model_ops.tree_serialize(tree_handle)
# slice_spec is useful for saving a slice from a variable.
    # It's not meaningful for the tree variable, so we just pass an empty value.
slice_spec = ""
specs = [saver.BaseSaverBuilder.SaveSpec(tensor, slice_spec, name),]
super(TreeVariableSavable,
self).__init__(tree_handle, specs, name)
self._tree_handle = tree_handle
self._create_op = create_op
def restore(self, restored_tensors, unused_restored_shapes):
"""Restores the associated tree from 'restored_tensors'.
Args:
restored_tensors: the tensors that were loaded from a checkpoint.
unused_restored_shapes: the shapes this object should conform to after
restore. Not meaningful for trees.
Returns:
The operation that restores the state of the tree variable.
"""
with ops.control_dependencies([self._create_op]):
return gen_model_ops.tree_deserialize(
self._tree_handle,
restored_tensors[0],
params=self.params.serialized_params_proto)
class TreeVariable(tracking.TrackableResource):
"""A tree model."""
def __init__(self, params, tree_config, stats_handle, name, container=None):
self._params = params
self._tree_config = tree_config
self._stats_handle = stats_handle
self._name = name
self._container = container
self._init_op = None
super(TreeVariable, self).__init__()
self._resource_handle = self._create_resource()
def _create_resource(self):
if context.executing_eagerly():
# TODO(allenl): This will leak memory due to kernel caching by the
# shared_name attribute value (but is better than the alternative of
# sharing everything by default when executing eagerly; hopefully creating
# tables in a loop is uncommon).
shared_name = "tree_variable_%d" % (ops.uid(),)
else:
shared_name = self._name
return gen_model_ops.decision_tree_resource_handle_op(
self._container, shared_name=shared_name, name=self._name)
def _initialize(self):
return gen_model_ops.create_tree_variable(
self.resource_handle,
self._tree_config,
params=self._params.serialized_params_proto)
@property
def initializer(self):
if self._init_op is None:
self._init_op = self._initialize()
return self._init_op
def is_initialized(self):
return gen_model_ops.tree_is_initialized_op(self.resource_handle)
def _gather_saveables_for_checkpoint(self):
"""For object-based checkpointing."""
return {
"tree_variable":
functools.partial(
TreeVariableSavable,
params=self._params,
tree_handle=self.resource_handle,
stats_handle=self._stats_handle,
create_op=self._init_op)
}
def tree_variable(params, tree_config, stats_handle, name, container=None):
r"""Creates a tree model and returns a handle to it.
Args:
params: A TensorForestParams object.
tree_config: A `Tensor` of type `string`. Serialized proto of the tree.
stats_handle: Resource handle to the stats object.
name: A name for the variable.
container: An optional `string`. Defaults to `""`.
Returns:
A `Tensor` of type mutable `string`. The handle to the tree.
"""
with ops.name_scope(name, "TreeVariable") as name:
tree_var = TreeVariable(params, tree_config, stats_handle, name, container)
resource_handle = tree_var.resource_handle
create_op = tree_var.initializer
is_initialized_op = tree_var.is_initialized()
# Adds the variable to the savable list.
saveable = tree_var._gather_saveables_for_checkpoint()["tree_variable"]( # pylint: disable=protected-access
name=resource_handle.name)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
resources.register_resource(resource_handle, create_op, is_initialized_op)
return resource_handle
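# Editor's note: a minimal usage sketch (not part of the original file). It
# assumes `params` is a filled TensorForestParams object, `tree_config` is a
# serialized decision-tree proto string, and `stats_handle` is an existing
# stats resource handle; all three names are placeholders.
def _example_tree_variable_usage(params, tree_config, stats_handle):
  """Creates a tree resource and returns its handle plus an initializer op."""
  handle = tree_variable(params, tree_config, stats_handle, name='tree-0')
  # tree_variable() registers the tree with the shared-resources collection,
  # so initializing shared resources also runs the tree's create op.
  init_op = resources.initialize_resources(resources.shared_resources())
  return handle, init_op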
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/python/ops/model_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for preprocessing data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.python.ops import tensor_forest_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import tf_logging as logging
# Data column types for indicating categorical or other non-float values.
DATA_FLOAT = 0
DATA_CATEGORICAL = 1
DTYPE_TO_FTYPE = {
dtypes.string: DATA_CATEGORICAL,
dtypes.int32: DATA_CATEGORICAL,
dtypes.int64: DATA_CATEGORICAL,
dtypes.float32: DATA_FLOAT,
dtypes.float64: DATA_FLOAT
}
def CastToFloat(tensor):
if tensor.dtype == dtypes.string:
return tensor_forest_ops.reinterpret_string_to_float(tensor)
elif tensor.dtype.is_integer:
return math_ops.cast(tensor, dtypes.float32)
else:
return tensor
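# Editor's note: a small illustration (not part of the original file) of how
# CastToFloat normalizes column dtypes before they are concatenated into the
# dense feature matrix built below.
def _example_cast_to_float():
  int_col = ops.convert_to_tensor([[1], [2], [3]], dtype=dtypes.int64)
  return CastToFloat(int_col)  # A float32 tensor with values [[1.], [2.], [3.]].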
# TODO(gilberth): If protos are ever allowed in dynamically loaded custom
# op libraries, convert this to a proto like a sane person.
class TensorForestDataSpec(object):
def __init__(self):
self.sparse = DataColumnCollection()
self.dense = DataColumnCollection()
self.dense_features_size = 0
def SerializeToString(self):
return 'dense_features_size: %d dense: [%s] sparse: [%s]' % (
self.dense_features_size, self.dense.SerializeToString(),
self.sparse.SerializeToString())
class DataColumnCollection(object):
"""Collection of DataColumns, meant to mimic a proto repeated field."""
def __init__(self):
self.cols = []
def add(self): # pylint: disable=invalid-name
self.cols.append(DataColumn())
return self.cols[-1]
def size(self): # pylint: disable=invalid-name
return len(self.cols)
def SerializeToString(self):
ret = ''
for c in self.cols:
ret += '{%s}' % c.SerializeToString()
return ret
class DataColumn(object):
def __init__(self):
self.name = ''
self.original_type = ''
self.size = 0
def SerializeToString(self):
return 'name: {0} original_type: {1} size: {2}'.format(self.name,
self.original_type,
self.size)
def GetColumnName(column_key, col_num):
if isinstance(column_key, str):
return column_key
else:
return getattr(column_key, 'column_name', str(col_num))
def ParseDataTensorOrDict(data):
"""Return a tensor to use for input data.
The incoming features can be a dict where keys are the string names of the
columns, which we turn into a single 2-D tensor.
Args:
data: `Tensor` or `dict` of `Tensor` objects.
Returns:
    A 2-D dense tensor of the float features (or None), a SparseTensor of the
    sparse features (or None), and a TensorForestDataSpec describing the
    original type and size of each column.
"""
data_spec = TensorForestDataSpec()
if isinstance(data, dict):
dense_features_size = 0
dense_features = []
sparse_features = []
for k in sorted(data.keys()):
is_sparse = isinstance(data[k], sparse_tensor.SparseTensor)
if is_sparse:
# TODO(gilberth): support sparse continuous.
if data[k].dtype == dtypes.float32:
logging.info('TensorForest does not support sparse continuous.')
continue
elif data_spec.sparse.size() == 0:
col_spec = data_spec.sparse.add()
col_spec.original_type = DATA_CATEGORICAL
col_spec.name = 'all_sparse'
col_spec.size = -1
sparse_features.append(
sparse_tensor.SparseTensor(data[
k].indices, CastToFloat(data[k].values), data[k].dense_shape))
else:
col_spec = data_spec.dense.add()
col_spec.original_type = DTYPE_TO_FTYPE[data[k].dtype]
col_spec.name = GetColumnName(k, len(dense_features))
# the second dimension of get_shape should always be known.
shape = data[k].get_shape()
if len(shape) == 1:
col_spec.size = 1
else:
col_spec.size = shape[1].value
dense_features_size += col_spec.size
dense_features.append(CastToFloat(data[k]))
processed_dense_features = None
processed_sparse_features = None
if dense_features:
processed_dense_features = array_ops.concat(dense_features, 1)
data_spec.dense_features_size = dense_features_size
if sparse_features:
processed_sparse_features = sparse_ops.sparse_concat(1, sparse_features)
logging.info(data_spec.SerializeToString())
return processed_dense_features, processed_sparse_features, data_spec
elif isinstance(data, sparse_tensor.SparseTensor):
col_spec = data_spec.sparse.add()
col_spec.name = 'sparse_features'
col_spec.original_type = DTYPE_TO_FTYPE[data.dtype]
col_spec.size = -1
data_spec.dense_features_size = 0
return None, data, data_spec
else:
data = ops.convert_to_tensor(data)
col_spec = data_spec.dense.add()
col_spec.name = 'dense_features'
col_spec.original_type = DTYPE_TO_FTYPE[data.dtype]
col_spec.size = data.get_shape()[1]
data_spec.dense_features_size = col_spec.size
return data, None, data_spec
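# Editor's note: a hedged usage sketch for ParseDataTensorOrDict (editor's
# addition); the feature names 'age' and 'country' are hypothetical.
def _example_parse_data_dict():
  features = {
      'age': ops.convert_to_tensor([[25.0], [40.0]], dtype=dtypes.float32),
      'country': ops.convert_to_tensor([['us'], ['ca']], dtype=dtypes.string),
  }
  # Dense columns are cast to float32 and concatenated into one 2-D matrix;
  # data_spec records each column's name, original type and size.
  dense, sparse, data_spec = ParseDataTensorOrDict(features)
  return dense, sparse, data_spec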
def ParseLabelTensorOrDict(labels):
"""Return a tensor to use for input labels to tensor_forest.
The incoming targets can be a dict where keys are the string names of the
columns, which we turn into a single 1-D tensor for classification or
2-D tensor for regression.
Converts sparse tensors to dense ones.
Args:
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A 2-D tensor for labels/outputs.
"""
if isinstance(labels, dict):
return math_ops.cast(
array_ops.concat(
[
sparse_ops.sparse_tensor_to_dense(
                    labels[k], default_value=-1) if isinstance(
                        labels[k], sparse_tensor.SparseTensor) else labels[k]
for k in sorted(labels.keys())
],
1),
dtypes.float32)
else:
if isinstance(labels, sparse_tensor.SparseTensor):
return math_ops.cast(
sparse_ops.sparse_tensor_to_dense(labels, default_value=-1),
dtypes.float32)
else:
return math_ops.cast(labels, dtypes.float32)
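# Editor's note: a companion sketch for ParseLabelTensorOrDict (editor's
# addition). Integer class labels are returned as a float32 tensor, which is
# the form tensor_forest's training graph expects.
def _example_parse_labels():
  labels = ops.convert_to_tensor([0, 2, 1], dtype=dtypes.int32)
  return ParseLabelTensorOrDict(labels)  # A float32 tensor [0., 2., 1.].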
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/python/ops/data_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A collection of functions to be used as evaluation metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import losses
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import nn
INFERENCE_PROB_NAME = prediction_key.PredictionKey.PROBABILITIES
INFERENCE_PRED_NAME = prediction_key.PredictionKey.CLASSES
FEATURE_IMPORTANCE_NAME = 'global_feature_importance'
def _top_k_generator(k):
def _top_k(probabilities, targets):
targets = math_ops.cast(targets, dtypes.int32)
if targets.get_shape().ndims > 1:
targets = array_ops.squeeze(targets, axis=[1])
return metrics.mean(nn.in_top_k(probabilities, targets, k))
return _top_k
def _accuracy(predictions, targets, weights=None):
return metrics.accuracy(
labels=targets, predictions=predictions, weights=weights)
def _r2(probabilities, targets, weights=None):
targets = math_ops.cast(targets, dtypes.float32)
y_mean = math_ops.reduce_mean(targets, 0)
squares_total = math_ops.reduce_sum(
math_ops.squared_difference(targets, y_mean), 0)
squares_residuals = math_ops.reduce_sum(
math_ops.squared_difference(targets, probabilities), 0)
score = 1 - math_ops.reduce_sum(squares_residuals / squares_total)
return metrics.mean(score, weights=weights)
def _squeeze_and_onehot(targets, depth):
targets = array_ops.squeeze(targets, axis=[1])
return array_ops.one_hot(math_ops.cast(targets, dtypes.int32), depth)
def _sigmoid_entropy(probabilities, targets, weights=None):
return metrics.mean(
losses.sigmoid_cross_entropy(probabilities,
_squeeze_and_onehot(
targets,
array_ops.shape(probabilities)[1])),
weights=weights)
def _softmax_entropy(probabilities, targets, weights=None):
return metrics.mean(
losses.sparse_softmax_cross_entropy(probabilities,
math_ops.cast(targets, dtypes.int32)),
weights=weights)
def _predictions(predictions, unused_targets, **unused_kwargs):
return predictions
def _class_log_loss(probabilities, targets, weights=None):
return metrics.mean(
losses.log_loss(probabilities,
_squeeze_and_onehot(targets,
array_ops.shape(probabilities)[1])),
weights=weights)
def _precision(predictions, targets, weights=None):
return metrics.precision(
labels=targets, predictions=predictions, weights=weights)
def _precision_at_thresholds(predictions, targets, weights=None):
return metrics.precision_at_thresholds(
labels=targets,
predictions=array_ops.slice(predictions, [0, 1], [-1, 1]),
thresholds=np.arange(0, 1, 0.01, dtype=np.float32),
weights=weights)
def _recall(predictions, targets, weights=None):
return metrics.recall(
labels=targets, predictions=predictions, weights=weights)
def _recall_at_thresholds(predictions, targets, weights=None):
return metrics.recall_at_thresholds(
labels=targets,
predictions=array_ops.slice(predictions, [0, 1], [-1, 1]),
thresholds=np.arange(0, 1, 0.01, dtype=np.float32),
weights=weights)
def _auc(probs, targets, weights=None):
return metrics.auc(
labels=targets,
predictions=array_ops.slice(probs, [0, 1], [-1, 1]),
weights=weights)
_EVAL_METRICS = {
'auc': _auc,
'sigmoid_entropy': _sigmoid_entropy,
'softmax_entropy': _softmax_entropy,
'accuracy': _accuracy,
'r2': _r2,
'predictions': _predictions,
'classification_log_loss': _class_log_loss,
'precision': _precision,
'precision_at_thresholds': _precision_at_thresholds,
'recall': _recall,
'recall_at_thresholds': _recall_at_thresholds,
'top_5': _top_k_generator(5)
}
_PREDICTION_KEYS = {
'auc': INFERENCE_PROB_NAME,
'sigmoid_entropy': INFERENCE_PROB_NAME,
'softmax_entropy': INFERENCE_PROB_NAME,
'accuracy': INFERENCE_PRED_NAME,
'r2': prediction_key.PredictionKey.SCORES,
'predictions': INFERENCE_PRED_NAME,
'classification_log_loss': INFERENCE_PROB_NAME,
'precision': INFERENCE_PRED_NAME,
'precision_at_thresholds': INFERENCE_PROB_NAME,
'recall': INFERENCE_PRED_NAME,
'recall_at_thresholds': INFERENCE_PROB_NAME,
'top_5': INFERENCE_PROB_NAME
}
def get_metric(metric_name):
"""Given a metric name, return the corresponding metric function."""
return _EVAL_METRICS[metric_name]
def get_prediction_key(metric_name):
return _PREDICTION_KEYS[metric_name]
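# Editor's note: a short usage sketch (editor's addition). Each function
# returned by get_metric() follows the streaming-metric contract and returns a
# (value_op, update_op) pair; get_prediction_key() names the prediction tensor
# that metric should be fed.
def _example_streaming_accuracy(predictions, targets):
  metric_fn = get_metric('accuracy')
  value_op, update_op = metric_fn(predictions, targets)
  return value_op, update_op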
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/client/eval_metrics.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Random forest implementation in tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.client import eval_metrics
from tensorflow.contrib.tensor_forest.client import random_forest
# pylint: enable=unused-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/client/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.tensor_forest.client.eval_metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.client import eval_metrics
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class EvalMetricsTest(test_util.TensorFlowTestCase):
def testTop2(self):
top_2_fn = eval_metrics._top_k_generator(2)
probabilities = constant_op.constant([[0.1, 0.2, 0.3], [0.4, 0.7, 0.5],
[0.9, 0.8, 0.2], [0.6, 0.4, 0.8]])
targets = constant_op.constant([[0], [2], [1], [1]])
in_top_2_op, update_op = top_2_fn(probabilities, targets)
with self.cached_session():
# initializes internal accuracy vars
variables.local_variables_initializer().run()
      # update_op must be run first because in_top_2_op is a streaming metric.
update_op.eval()
self.assertNear(0.5, in_top_2_op.eval(), 0.0001)
def testTop3(self):
top_3_fn = eval_metrics._top_k_generator(3)
probabilities = constant_op.constant([[0.1, 0.2, 0.6, 0.3, 0.5, 0.5],
[0.1, 0.4, 0.7, 0.3, 0.5, 0.2],
[0.1, 0.3, 0.8, 0.7, 0.4, 0.9],
[0.9, 0.8, 0.1, 0.8, 0.2, 0.7],
[0.3, 0.6, 0.9, 0.4, 0.8, 0.6]])
targets = constant_op.constant([3, 0, 2, 5, 1])
in_top_3_op, update_op = top_3_fn(probabilities, targets)
with self.cached_session():
# initializes internal accuracy vars
variables.local_variables_initializer().run()
      # update_op must be run first because in_top_3_op is a streaming metric.
update_op.eval()
self.assertNear(0.4, in_top_3_op.eval(), 0.0001)
def testAccuracy(self):
predictions = constant_op.constant([0, 1, 3, 6, 5, 2, 7, 6, 4, 9])
targets = constant_op.constant([0, 1, 4, 6, 5, 1, 7, 5, 4, 8])
accuracy_op, update_op = eval_metrics._accuracy(predictions, targets)
with self.cached_session():
variables.local_variables_initializer().run()
      # update_op must be run first because accuracy_op is a streaming metric.
update_op.eval()
self.assertNear(0.6, accuracy_op.eval(), 0.0001)
def testR2(self):
scores = constant_op.constant(
[1.2, 3.9, 2.1, 0.9, 2.2, 0.1, 6.0, 4.0, 0.9])
targets = constant_op.constant(
[1.0, 4.3, 2.6, 0.5, 1.1, 0.7, 5.1, 3.4, 1.8])
r2_op, update_op = eval_metrics._r2(scores, targets)
with self.cached_session():
# initializes internal accuracy vars
variables.local_variables_initializer().run()
      # update_op must be run first because r2_op is a streaming metric.
update_op.eval()
self.assertNear(0.813583, r2_op.eval(), 0.0001)
if __name__ == '__main__':
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/client/eval_metrics_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TensorForestTrainer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.tensor_forest.client import random_forest
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column_lib as core_feature_column
from tensorflow.python.framework import ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_utils
def _get_classification_input_fns():
iris = base.load_iris()
data = iris.data.astype(np.float32)
labels = iris.target.astype(np.int32)
train_input_fn = numpy_io.numpy_input_fn(
x=data, y=labels, batch_size=150, num_epochs=None, shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x=data[:1,], y=None, batch_size=1, num_epochs=1, shuffle=False)
return train_input_fn, predict_input_fn
def _get_regression_input_fns():
boston = base.load_boston()
data = boston.data.astype(np.float32)
labels = boston.target.astype(np.int32)
train_input_fn = numpy_io.numpy_input_fn(
x=data, y=labels, batch_size=506, num_epochs=None, shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x=data[:1,], y=None, batch_size=1, num_epochs=1, shuffle=False)
return train_input_fn, predict_input_fn
class TensorForestTrainerTests(test.TestCase):
def testClassification(self):
"""Tests multi-class classification using matrix data as input."""
hparams = tensor_forest.ForestHParams(
num_trees=3,
max_nodes=1000,
num_classes=3,
num_features=4,
split_after_samples=20,
inference_tree_paths=True)
classifier = random_forest.TensorForestEstimator(hparams.fill())
input_fn, predict_input_fn = _get_classification_input_fns()
classifier.fit(input_fn=input_fn, steps=100)
res = classifier.evaluate(input_fn=input_fn, steps=10)
self.assertEqual(1.0, res['accuracy'])
self.assertAllClose(0.55144483, res['loss'])
predictions = list(classifier.predict(input_fn=predict_input_fn))
self.assertAllClose([[0.576117, 0.211942, 0.211942]],
[pred['probabilities'] for pred in predictions])
def testRegression(self):
"""Tests regression using matrix data as input."""
hparams = tensor_forest.ForestHParams(
num_trees=5,
max_nodes=1000,
num_classes=1,
num_features=13,
regression=True,
split_after_samples=20)
regressor = random_forest.TensorForestEstimator(hparams.fill())
input_fn, predict_input_fn = _get_regression_input_fns()
regressor.fit(input_fn=input_fn, steps=100)
res = regressor.evaluate(input_fn=input_fn, steps=10)
self.assertGreaterEqual(0.1, res['loss'])
predictions = list(regressor.predict(input_fn=predict_input_fn))
self.assertAllClose([24.], [pred['scores'] for pred in predictions], atol=1)
def testAdditionalOutputs(self):
"""Tests multi-class classification using matrix data as input."""
hparams = tensor_forest.ForestHParams(
num_trees=1,
max_nodes=100,
num_classes=3,
num_features=4,
split_after_samples=20,
inference_tree_paths=True)
classifier = random_forest.TensorForestEstimator(
hparams.fill(), keys_column='keys', include_all_in_serving=True)
iris = base.load_iris()
data = iris.data.astype(np.float32)
labels = iris.target.astype(np.int32)
input_fn = numpy_io.numpy_input_fn(
x={
'x': data,
'keys': np.arange(len(iris.data)).reshape(150, 1)
},
y=labels,
batch_size=10,
num_epochs=1,
shuffle=False)
classifier.fit(input_fn=input_fn, steps=100)
predictions = list(classifier.predict(input_fn=input_fn))
# Check that there is a key column, tree paths and var.
for pred in predictions:
self.assertTrue('keys' in pred)
self.assertTrue('tree_paths' in pred)
self.assertTrue('prediction_variance' in pred)
def _assert_checkpoint(self, model_dir, global_step):
reader = checkpoint_utils.load_checkpoint(model_dir)
self.assertLessEqual(
reader.get_tensor(ops.GraphKeys.GLOBAL_STEP), global_step)
def testEarlyStopping(self):
"""Tests multi-class classification using matrix data as input."""
hparams = tensor_forest.ForestHParams(
num_trees=100,
max_nodes=10000,
num_classes=3,
num_features=4,
split_after_samples=20,
inference_tree_paths=True)
classifier = random_forest.TensorForestEstimator(
hparams.fill(),
# Set a crazy threshold - 30% loss change.
early_stopping_loss_threshold=0.3,
early_stopping_rounds=2)
input_fn, _ = _get_classification_input_fns()
classifier.fit(input_fn=input_fn, steps=100)
# We stopped early.
self._assert_checkpoint(classifier.model_dir, global_step=5)
class CoreTensorForestTests(test.TestCase):
def testTrainEvaluateInferDoesNotThrowErrorForClassifier(self):
head_fn = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3, loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
hparams = tensor_forest.ForestHParams(
num_trees=3,
max_nodes=1000,
num_classes=3,
num_features=4,
split_after_samples=20,
inference_tree_paths=True)
est = random_forest.CoreTensorForestEstimator(hparams.fill(), head=head_fn)
input_fn, predict_input_fn = _get_classification_input_fns()
est.train(input_fn=input_fn, steps=100)
res = est.evaluate(input_fn=input_fn, steps=1)
self.assertEqual(1.0, res['accuracy'])
self.assertAllClose(0.55144483, res['loss'])
predictions = list(est.predict(input_fn=predict_input_fn))
self.assertAllClose([[0.576117, 0.211942, 0.211942]],
[pred['probabilities'] for pred in predictions])
def testRegression(self):
"""Tests regression using matrix data as input."""
head_fn = head_lib._regression_head(
label_dimension=1,
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
hparams = tensor_forest.ForestHParams(
num_trees=5,
max_nodes=1000,
num_classes=1,
num_features=13,
regression=True,
split_after_samples=20)
regressor = random_forest.CoreTensorForestEstimator(
hparams.fill(), head=head_fn)
input_fn, predict_input_fn = _get_regression_input_fns()
regressor.train(input_fn=input_fn, steps=100)
res = regressor.evaluate(input_fn=input_fn, steps=10)
self.assertGreaterEqual(0.1, res['loss'])
predictions = list(regressor.predict(input_fn=predict_input_fn))
self.assertAllClose(
[[24.]], [pred['predictions'] for pred in predictions], atol=1)
def testWithFeatureColumns(self):
head_fn = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3, loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
hparams = tensor_forest.ForestHParams(
num_trees=3,
max_nodes=1000,
num_classes=3,
num_features=4,
split_after_samples=20,
inference_tree_paths=True)
est = random_forest.CoreTensorForestEstimator(
hparams.fill(),
head=head_fn,
feature_columns=[core_feature_column.numeric_column('x')])
iris = base.load_iris()
data = {'x': iris.data.astype(np.float32)}
labels = iris.target.astype(np.int32)
input_fn = numpy_io.numpy_input_fn(
x=data, y=labels, batch_size=150, num_epochs=None, shuffle=False)
est.train(input_fn=input_fn, steps=100)
res = est.evaluate(input_fn=input_fn, steps=1)
self.assertEqual(1.0, res['accuracy'])
self.assertAllClose(0.55144483, res['loss'])
def testAutofillsClassificationHead(self):
hparams = tensor_forest.ForestHParams(
num_trees=3,
max_nodes=1000,
num_classes=3,
num_features=4,
split_after_samples=20,
inference_tree_paths=True)
est = random_forest.CoreTensorForestEstimator(hparams.fill())
input_fn, _ = _get_classification_input_fns()
est.train(input_fn=input_fn, steps=100)
res = est.evaluate(input_fn=input_fn, steps=1)
self.assertEqual(1.0, res['accuracy'])
self.assertAllClose(0.55144483, res['loss'])
def testAutofillsRegressionHead(self):
hparams = tensor_forest.ForestHParams(
num_trees=5,
max_nodes=1000,
num_classes=1,
num_features=13,
regression=True,
split_after_samples=20)
regressor = random_forest.CoreTensorForestEstimator(hparams.fill())
input_fn, predict_input_fn = _get_regression_input_fns()
regressor.train(input_fn=input_fn, steps=100)
res = regressor.evaluate(input_fn=input_fn, steps=10)
self.assertGreaterEqual(0.1, res['loss'])
predictions = list(regressor.predict(input_fn=predict_input_fn))
self.assertAllClose(
[[24.]], [pred['predictions'] for pred in predictions], atol=1)
def testAdditionalOutputs(self):
"""Tests multi-class classification using matrix data as input."""
hparams = tensor_forest.ForestHParams(
num_trees=1,
max_nodes=100,
num_classes=3,
num_features=4,
split_after_samples=20,
inference_tree_paths=True)
classifier = random_forest.CoreTensorForestEstimator(
hparams.fill(), keys_column='keys', include_all_in_serving=True)
iris = base.load_iris()
data = iris.data.astype(np.float32)
labels = iris.target.astype(np.int32)
input_fn = numpy_io.numpy_input_fn(
x={
'x': data,
'keys': np.arange(len(iris.data)).reshape(150, 1)
},
y=labels,
batch_size=10,
num_epochs=1,
shuffle=False)
classifier.train(input_fn=input_fn, steps=100)
predictions = list(classifier.predict(input_fn=input_fn))
# Check that there is a key column, tree paths and var.
for pred in predictions:
self.assertTrue('keys' in pred)
self.assertTrue('tree_paths' in pred)
self.assertTrue('prediction_variance' in pred)
def _assert_checkpoint(self, model_dir, global_step):
reader = checkpoint_utils.load_checkpoint(model_dir)
self.assertLessEqual(
reader.get_tensor(ops.GraphKeys.GLOBAL_STEP), global_step)
def testEarlyStopping(self):
head_fn = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3, loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
hparams = tensor_forest.ForestHParams(
num_trees=3,
max_nodes=1000,
num_classes=3,
num_features=4,
split_after_samples=20,
inference_tree_paths=True)
est = random_forest.CoreTensorForestEstimator(
hparams.fill(),
head=head_fn,
# Set a crazy threshold - 30% loss change.
early_stopping_loss_threshold=0.3,
early_stopping_rounds=2)
input_fn, _ = _get_classification_input_fns()
est.train(input_fn=input_fn, steps=100)
# We stopped early.
self._assert_checkpoint(est.model_dir, global_step=8)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/client/random_forest_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A tf.learn implementation of online extremely random forests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.estimator.python.estimator import head as core_head_lib
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.tensor_forest.client import eval_metrics
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.estimator import estimator as core_estimator
from tensorflow.python.estimator.export.export_output import PredictOutput
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
KEYS_NAME = 'keys'
LOSS_NAME = 'rf_training_loss'
TREE_PATHS_PREDICTION_KEY = 'tree_paths'
VARIANCE_PREDICTION_KEY = 'prediction_variance'
ALL_SERVING_KEY = 'tensorforest_all'
EPSILON = 0.000001
class ModelBuilderOutputType(object):
MODEL_FN_OPS = 0
ESTIMATOR_SPEC = 1
class TensorForestRunOpAtEndHook(session_run_hook.SessionRunHook):
def __init__(self, op_dict):
"""Ops is a dict of {name: op} to run before the session is destroyed."""
self._ops = op_dict
def end(self, session):
for name in sorted(self._ops.keys()):
logging.info('{0}: {1}'.format(name, session.run(self._ops[name])))
class TensorForestLossHook(session_run_hook.SessionRunHook):
"""Monitor to request stop when loss stops decreasing."""
def __init__(self,
early_stopping_rounds,
early_stopping_loss_threshold=None,
loss_op=None):
self.early_stopping_rounds = early_stopping_rounds
self.early_stopping_loss_threshold = early_stopping_loss_threshold
self.loss_op = loss_op
self.min_loss = None
self.last_step = -1
# self.steps records the number of steps for which the loss has been
# non-decreasing
self.steps = 0
def before_run(self, run_context):
loss = (self.loss_op if self.loss_op is not None else
run_context.session.graph.get_operation_by_name(
LOSS_NAME).outputs[0])
return session_run_hook.SessionRunArgs(
{'global_step': training_util.get_global_step(),
'current_loss': loss})
def after_run(self, run_context, run_values):
current_loss = run_values.results['current_loss']
current_step = run_values.results['global_step']
self.steps += 1
# Guard against the global step going backwards, which might happen
# if we recover from something.
if self.last_step == -1 or self.last_step > current_step:
logging.info('TensorForestLossHook resetting last_step.')
self.last_step = current_step
self.steps = 0
self.min_loss = None
return
self.last_step = current_step
if (self.min_loss is None or current_loss <
(self.min_loss - self.min_loss * self.early_stopping_loss_threshold)):
self.min_loss = current_loss
self.steps = 0
if self.steps > self.early_stopping_rounds:
logging.info('TensorForestLossHook requesting stop.')
run_context.request_stop()
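# Editor's note: a minimal construction sketch (editor's addition).
# get_model_fn() below builds this hook automatically when
# early_stopping_rounds is set; creating one by hand looks like this.
def _example_loss_hook(loss_op):
  # Request a stop if the loss has not improved by at least 10% within the
  # last 50 global steps.
  return TensorForestLossHook(
      early_stopping_rounds=50,
      early_stopping_loss_threshold=0.1,
      loss_op=loss_op)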
def _get_default_head(params, weights_name, output_type, name=None):
"""Creates a default head based on a type of a problem."""
if output_type == ModelBuilderOutputType.MODEL_FN_OPS:
if params.regression:
return head_lib.regression_head(
weight_column_name=weights_name,
label_dimension=params.num_outputs,
enable_centered_bias=False,
head_name=name)
else:
return head_lib.multi_class_head(
params.num_classes,
weight_column_name=weights_name,
enable_centered_bias=False,
head_name=name)
else:
if params.regression:
return core_head_lib.regression_head(
weight_column=weights_name,
label_dimension=params.num_outputs,
name=name,
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
else:
if params.num_classes == 2:
return core_head_lib.binary_classification_head(
weight_column=weights_name,
name=name,
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
else:
return core_head_lib.multi_class_head(
n_classes=params.num_classes,
weight_column=weights_name,
name=name,
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
def get_model_fn(params,
graph_builder_class,
device_assigner,
feature_columns=None,
weights_name=None,
model_head=None,
keys_name=None,
early_stopping_rounds=100,
early_stopping_loss_threshold=0.001,
num_trainers=1,
trainer_id=0,
report_feature_importances=False,
local_eval=False,
head_scope=None,
include_all_in_serving=False,
output_type=ModelBuilderOutputType.MODEL_FN_OPS):
"""Return a model function given a way to construct a graph builder."""
if model_head is None:
model_head = _get_default_head(params, weights_name, output_type)
def _model_fn(features, labels, mode):
"""Function that returns predictions, training loss, and training op."""
if (isinstance(features, ops.Tensor) or
isinstance(features, sparse_tensor.SparseTensor)):
features = {'features': features}
if feature_columns:
features = features.copy()
if output_type == ModelBuilderOutputType.MODEL_FN_OPS:
features.update(layers.transform_features(features, feature_columns))
else:
for fc in feature_columns:
tensor = fc_core._transform_features(features, [fc])[fc] # pylint: disable=protected-access
features[fc.name] = tensor
weights = None
if weights_name and weights_name in features:
weights = features.pop(weights_name)
keys = None
if keys_name and keys_name in features:
keys = features.pop(keys_name)
# If we're doing eval, optionally ignore device_assigner.
# Also ignore device assigner if we're exporting (mode == INFER)
dev_assn = device_assigner
if (mode == model_fn_lib.ModeKeys.INFER or
(local_eval and mode == model_fn_lib.ModeKeys.EVAL)):
dev_assn = None
graph_builder = graph_builder_class(params,
device_assigner=dev_assn)
logits, tree_paths, regression_variance = graph_builder.inference_graph(
features)
summary.scalar('average_tree_size', graph_builder.average_size())
# For binary classification problems, convert probabilities to logits.
# Includes hack to get around the fact that a probability might be 0 or 1.
if not params.regression and params.num_classes == 2:
class_1_probs = array_ops.slice(logits, [0, 1], [-1, 1])
logits = math_ops.log(
math_ops.maximum(class_1_probs / math_ops.maximum(
1.0 - class_1_probs, EPSILON), EPSILON))
# labels might be None if we're doing prediction (which brings up the
# question of why we force everything to adhere to a single model_fn).
training_graph = None
training_hooks = []
if labels is not None and mode == model_fn_lib.ModeKeys.TRAIN:
with ops.control_dependencies([logits.op]):
training_graph = control_flow_ops.group(
graph_builder.training_graph(
features, labels, input_weights=weights,
num_trainers=num_trainers,
trainer_id=trainer_id),
state_ops.assign_add(training_util.get_global_step(), 1))
# Put weights back in
if weights is not None:
features[weights_name] = weights
# TensorForest's training graph isn't calculated directly from the loss
# like many other models.
def _train_fn(unused_loss):
return training_graph
    # Ops are run in lexicographical order of their keys. Run the resource
# clean-up op last.
all_handles = graph_builder.get_all_resource_handles()
ops_at_end = {
'9: clean up resources':
control_flow_ops.group(*[
resource_variable_ops.destroy_resource_op(handle)
for handle in all_handles
])
}
if report_feature_importances:
ops_at_end['1: feature_importances'] = (
graph_builder.feature_importances())
training_hooks = [TensorForestRunOpAtEndHook(ops_at_end)]
if output_type == ModelBuilderOutputType.MODEL_FN_OPS:
model_ops = model_head.create_model_fn_ops(
features=features,
labels=labels,
mode=mode,
train_op_fn=_train_fn,
logits=logits,
scope=head_scope)
if early_stopping_rounds:
training_hooks.append(
TensorForestLossHook(
early_stopping_rounds,
early_stopping_loss_threshold=early_stopping_loss_threshold,
loss_op=model_ops.loss))
model_ops.training_hooks.extend(training_hooks)
if keys is not None:
model_ops.predictions[keys_name] = keys
if params.inference_tree_paths:
model_ops.predictions[TREE_PATHS_PREDICTION_KEY] = tree_paths
model_ops.predictions[VARIANCE_PREDICTION_KEY] = regression_variance
if include_all_in_serving:
# In order to serve the variance we need to add the prediction dict
# to output_alternatives dict.
if not model_ops.output_alternatives:
model_ops.output_alternatives = {}
model_ops.output_alternatives[ALL_SERVING_KEY] = (
constants.ProblemType.UNSPECIFIED, model_ops.predictions)
return model_ops
else:
# Estimator spec
estimator_spec = model_head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_fn,
logits=logits)
if early_stopping_rounds:
training_hooks.append(
TensorForestLossHook(
early_stopping_rounds,
early_stopping_loss_threshold=early_stopping_loss_threshold,
loss_op=estimator_spec.loss))
estimator_spec = estimator_spec._replace(
training_hooks=training_hooks + list(estimator_spec.training_hooks))
if keys is not None:
estimator_spec.predictions[keys_name] = keys
if params.inference_tree_paths:
estimator_spec.predictions[TREE_PATHS_PREDICTION_KEY] = tree_paths
estimator_spec.predictions[VARIANCE_PREDICTION_KEY] = regression_variance
if include_all_in_serving:
outputs = estimator_spec.export_outputs
if not outputs:
outputs = {}
        outputs[ALL_SERVING_KEY] = PredictOutput(estimator_spec.predictions)
        # In order to serve the variance we need to add the prediction dict
        # to the export_outputs dict.
estimator_spec = estimator_spec._replace(export_outputs=outputs)
return estimator_spec
return _model_fn
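# Editor's note: a hedged sketch (editor's addition) of wiring the returned
# model function into a core Estimator directly; the estimator classes below
# do this for you. `filled_params` is a hypothetical ForestHParams that has
# already had fill() called on it.
def _example_estimator_from_model_fn(filled_params, model_dir=None):
  model_fn = get_model_fn(
      filled_params,
      tensor_forest.RandomForestGraphs,
      device_assigner=None,
      output_type=ModelBuilderOutputType.ESTIMATOR_SPEC)
  return core_estimator.Estimator(model_fn=model_fn, model_dir=model_dir)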
class TensorForestEstimator(estimator.Estimator):
"""An estimator that can train and evaluate a random forest.
Example:
```python
params = tf.contrib.tensor_forest.python.tensor_forest.ForestHParams(
num_classes=2, num_features=40, num_trees=10, max_nodes=1000)
# Estimator using the default graph builder.
estimator = TensorForestEstimator(params, model_dir=model_dir)
# Or estimator using TrainingLossForest as the graph builder.
estimator = TensorForestEstimator(
params, graph_builder_class=tensor_forest.TrainingLossForest,
model_dir=model_dir)
# Input builders
  def input_fn_train(): # returns x, y
    ...
  def input_fn_eval(): # returns x, y
...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
# Predict returns an iterable of dicts.
results = list(estimator.predict(x=x))
prob0 = results[0][eval_metrics.INFERENCE_PROB_NAME]
prediction0 = results[0][eval_metrics.INFERENCE_PRED_NAME]
```
"""
def __init__(self,
params,
device_assigner=None,
model_dir=None,
feature_columns=None,
graph_builder_class=tensor_forest.RandomForestGraphs,
config=None,
weight_column=None,
keys_column=None,
feature_engineering_fn=None,
early_stopping_rounds=100,
early_stopping_loss_threshold=0.001,
num_trainers=1,
trainer_id=0,
report_feature_importances=False,
local_eval=False,
version=None,
head=None,
include_all_in_serving=False):
"""Initializes a TensorForestEstimator instance.
Args:
params: ForestHParams object that holds random forest hyperparameters.
These parameters will be passed into `model_fn`.
device_assigner: An `object` instance that controls how trees get
assigned to devices. If `None`, will use
`tensor_forest.RandomForestDeviceAssigner`.
model_dir: Directory to save model parameters, graph, etc. To continue
training a previously saved model, load checkpoints saved to this
directory into an estimator.
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `_FeatureColumn`.
graph_builder_class: An `object` instance that defines how TF graphs for
random forest training and inference are built. By default will use
`tensor_forest.RandomForestGraphs`. Can be overridden by version
kwarg.
config: `RunConfig` object to configure the runtime settings.
weight_column: A string defining feature column name representing
weights. Will be multiplied by the loss of the example. Used to
downweight or boost examples during training.
keys_column: A string naming one of the features to strip out and
pass through into the inference/eval results dict. Useful for
associating specific examples with their prediction.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
early_stopping_rounds: Allows training to terminate early if the forest is
no longer growing. 100 by default. Set to a Falsy value to disable
the default training hook.
early_stopping_loss_threshold: Percentage (as fraction) that loss must
improve by within early_stopping_rounds steps, otherwise training will
terminate.
num_trainers: Number of training jobs, which will partition trees
among them.
trainer_id: Which trainer this instance is.
report_feature_importances: If True, print out feature importances
during evaluation.
local_eval: If True, don't use a device assigner for eval. This is to
support some common setups where eval is done on a single machine, even
though training might be distributed.
version: Unused.
head: A heads_lib.Head object that calculates losses and such. If None,
one will be automatically created based on params.
include_all_in_serving: if True, allow preparation of the complete
prediction dict including the variance to be exported for serving with
the Servo lib; and it also requires calling export_savedmodel with
default_output_alternative_key=ALL_SERVING_KEY, i.e.
estimator.export_savedmodel(export_dir_base=your_export_dir,
serving_input_fn=your_export_input_fn,
default_output_alternative_key=ALL_SERVING_KEY)
if False, resort to default behavior, i.e. export scores and
probabilities but no variances. In this case
default_output_alternative_key should be None while calling
export_savedmodel().
        Note that, for backward compatibility, include_all_in_serving cannot
        always be set to True: calling export_savedmodel() without
        default_output_alternative_key=ALL_SERVING_KEY (the legacy behavior)
        would then make saved_model_export_utils.get_output_alternatives()
        raise a ValueError.
Returns:
A `TensorForestEstimator` instance.
"""
# Override default number of trainers if config is provided.
if num_trainers == 1 and config is not None:
num_trainers = max(1, config.num_worker_replicas)
super(TensorForestEstimator, self).__init__(
model_fn=get_model_fn(
params.fill(),
graph_builder_class,
device_assigner,
feature_columns=feature_columns,
model_head=head,
weights_name=weight_column,
keys_name=keys_column,
early_stopping_rounds=early_stopping_rounds,
early_stopping_loss_threshold=early_stopping_loss_threshold,
num_trainers=num_trainers,
trainer_id=trainer_id,
report_feature_importances=report_feature_importances,
local_eval=local_eval,
include_all_in_serving=include_all_in_serving,
),
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
def get_combined_model_fn(model_fns):
"""Get a combined model function given a list of other model fns.
The model function returned will call the individual model functions and
combine them appropriately. For:
training ops: tf.group them.
loss: average them.
predictions: concat probabilities such that predictions[*][0-C1] are the
probabilities for output 1 (where C1 is the number of classes in output 1),
predictions[*][C1-(C1+C2)] are the probabilities for output 2 (where C2
is the number of classes in output 2), etc. Also stack predictions such
that predictions[i][j] is the class prediction for example i and output j.
This assumes that labels are 2-dimensional, with labels[i][j] being the
label for example i and output j, where forest j is trained using only
output j.
Args:
model_fns: A list of model functions obtained from get_model_fn.
Returns:
    A model function that combines the given model functions and returns a
    `ModelFnOps` instance.
"""
def _model_fn(features, labels, mode):
"""Function that returns predictions, training loss, and training op."""
model_fn_ops = []
for i in range(len(model_fns)):
with variable_scope.variable_scope('label_{0}'.format(i)):
sliced_labels = array_ops.slice(labels, [0, i], [-1, 1])
model_fn_ops.append(
model_fns[i](features, sliced_labels, mode))
training_hooks = []
for mops in model_fn_ops:
training_hooks += mops.training_hooks
predictions = {}
if (mode == model_fn_lib.ModeKeys.EVAL or
mode == model_fn_lib.ModeKeys.INFER):
# Flatten the probabilities into one dimension.
predictions[eval_metrics.INFERENCE_PROB_NAME] = array_ops.concat(
[mops.predictions[eval_metrics.INFERENCE_PROB_NAME]
for mops in model_fn_ops], axis=1)
predictions[eval_metrics.INFERENCE_PRED_NAME] = array_ops.stack(
[mops.predictions[eval_metrics.INFERENCE_PRED_NAME]
for mops in model_fn_ops], axis=1)
loss = None
if (mode == model_fn_lib.ModeKeys.EVAL or
mode == model_fn_lib.ModeKeys.TRAIN):
loss = math_ops.reduce_sum(
array_ops.stack(
[mops.loss for mops in model_fn_ops])) / len(model_fn_ops)
train_op = None
if mode == model_fn_lib.ModeKeys.TRAIN:
train_op = control_flow_ops.group(
*[mops.train_op for mops in model_fn_ops])
return model_fn_lib.ModelFnOps(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
training_hooks=training_hooks,
scaffold=None,
output_alternatives=None)
return _model_fn
class MultiForestMultiHeadEstimator(estimator.Estimator):
"""An estimator that can train a forest for a multi-headed problems.
This class essentially trains separate forests (each with their own
ForestHParams) for each output.
For multi-headed regression, a single-headed TensorForestEstimator can
be used to train a single model that predicts all outputs. This class can
be used to train separate forests for each output.
"""
def __init__(self,
params_list,
device_assigner=None,
model_dir=None,
feature_columns=None,
graph_builder_class=tensor_forest.RandomForestGraphs,
config=None,
weight_column=None,
keys_column=None,
feature_engineering_fn=None,
early_stopping_rounds=100,
num_trainers=1,
trainer_id=0,
report_feature_importances=False,
local_eval=False):
"""See TensorForestEstimator.__init__."""
model_fns = []
# Override default number of trainers if config is provided.
if num_trainers == 1 and config is not None:
num_trainers = max(1, config.num_worker_replicas)
for i in range(len(params_list)):
params = params_list[i].fill()
model_fns.append(
get_model_fn(
params,
graph_builder_class,
device_assigner,
model_head=_get_default_head(
params,
weight_column,
name='head{0}'.format(i),
output_type=ModelBuilderOutputType.MODEL_FN_OPS),
weights_name=weight_column,
keys_name=keys_column,
early_stopping_rounds=early_stopping_rounds,
num_trainers=num_trainers,
trainer_id=trainer_id,
report_feature_importances=report_feature_importances,
local_eval=local_eval,
head_scope='output{0}'.format(i)))
super(MultiForestMultiHeadEstimator, self).__init__(
model_fn=get_combined_model_fn(model_fns),
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
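# Editor's note: a hedged usage sketch for MultiForestMultiHeadEstimator
# (editor's addition). It assumes 2-D labels where column j holds the label
# for output j, matching the contract described in get_combined_model_fn().
def _example_multi_head_forest(model_dir=None):
  params_list = [
      tensor_forest.ForestHParams(
          num_classes=2, num_features=10, num_trees=5, max_nodes=100),
      tensor_forest.ForestHParams(
          num_classes=3, num_features=10, num_trees=5, max_nodes=100),
  ]
  return MultiForestMultiHeadEstimator(params_list, model_dir=model_dir)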
class CoreTensorForestEstimator(core_estimator.Estimator):
"""A CORE estimator that can train and evaluate a random forest.
Example:
```python
params = tf.contrib.tensor_forest.python.tensor_forest.ForestHParams(
num_classes=2, num_features=40, num_trees=10, max_nodes=1000)
# Estimator using the default graph builder.
estimator = CoreTensorForestEstimator(params, model_dir=model_dir)
# Or estimator using TrainingLossForest as the graph builder.
estimator = CoreTensorForestEstimator(
params, graph_builder_class=tensor_forest.TrainingLossForest,
model_dir=model_dir)
# Input builders
  def input_fn_train(): # returns x, y
    ...
  def input_fn_eval(): # returns x, y
...
estimator.train(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
# Predict returns an iterable of dicts.
results = list(estimator.predict(x=x))
prob0 = results[0][eval_metrics.INFERENCE_PROB_NAME]
prediction0 = results[0][eval_metrics.INFERENCE_PRED_NAME]
```
"""
def __init__(self,
params,
device_assigner=None,
model_dir=None,
feature_columns=None,
graph_builder_class=tensor_forest.RandomForestGraphs,
config=None,
weight_column=None,
keys_column=None,
feature_engineering_fn=None,
early_stopping_rounds=100,
early_stopping_loss_threshold=0.001,
num_trainers=1,
trainer_id=0,
report_feature_importances=False,
local_eval=False,
version=None,
head=None,
include_all_in_serving=False):
"""Initializes a TensorForestEstimator instance.
Args:
params: ForestHParams object that holds random forest hyperparameters.
These parameters will be passed into `model_fn`.
device_assigner: An `object` instance that controls how trees get
assigned to devices. If `None`, will use
`tensor_forest.RandomForestDeviceAssigner`.
model_dir: Directory to save model parameters, graph, etc. To continue
training a previously saved model, load checkpoints saved to this
directory into an estimator.
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `_FeatureColumn`.
graph_builder_class: An `object` instance that defines how TF graphs for
random forest training and inference are built. By default will use
`tensor_forest.RandomForestGraphs`. Can be overridden by version
kwarg.
config: `RunConfig` object to configure the runtime settings.
weight_column: A string defining feature column name representing
weights. Will be multiplied by the loss of the example. Used to
downweight or boost examples during training.
keys_column: A string naming one of the features to strip out and
pass through into the inference/eval results dict. Useful for
associating specific examples with their prediction.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
early_stopping_rounds: Allows training to terminate early if the forest is
no longer growing. 100 by default. Set to a Falsy value to disable
the default training hook.
early_stopping_loss_threshold: Percentage (as fraction) that loss must
improve by within early_stopping_rounds steps, otherwise training will
terminate.
num_trainers: Number of training jobs, which will partition trees
among them.
trainer_id: Which trainer this instance is.
report_feature_importances: If True, print out feature importances
during evaluation.
local_eval: If True, don't use a device assigner for eval. This is to
support some common setups where eval is done on a single machine, even
though training might be distributed.
version: Unused.
head: A heads_lib.Head object that calculates losses and such. If None,
one will be automatically created based on params.
include_all_in_serving: if True, allow preparation of the complete
prediction dict including the variance to be exported for serving with
the Servo lib; and it also requires calling export_savedmodel with
default_output_alternative_key=ALL_SERVING_KEY, i.e.
estimator.export_savedmodel(export_dir_base=your_export_dir,
serving_input_fn=your_export_input_fn,
default_output_alternative_key=ALL_SERVING_KEY)
if False, resort to default behavior, i.e. export scores and
probabilities but no variances. In this case
default_output_alternative_key should be None while calling
export_savedmodel().
        Note that, for backward compatibility, include_all_in_serving cannot
        always be set to True: calling export_savedmodel() without
        default_output_alternative_key=ALL_SERVING_KEY (the legacy behavior)
        would then make saved_model_export_utils.get_output_alternatives()
        raise a ValueError.
Returns:
      A `CoreTensorForestEstimator` instance.
"""
# Override default number of trainers if config is provided.
if num_trainers == 1 and config is not None:
num_trainers = max(1, config.num_worker_replicas)
if trainer_id == 0 and config is not None:
trainer_id = config.global_id_in_cluster
super(CoreTensorForestEstimator, self).__init__(
model_fn=get_model_fn(
params.fill(),
graph_builder_class,
device_assigner,
feature_columns=feature_columns,
model_head=head,
weights_name=weight_column,
keys_name=keys_column,
early_stopping_rounds=early_stopping_rounds,
early_stopping_loss_threshold=early_stopping_loss_threshold,
num_trainers=num_trainers,
trainer_id=trainer_id,
report_feature_importances=report_feature_importances,
local_eval=local_eval,
include_all_in_serving=include_all_in_serving,
output_type=ModelBuilderOutputType.ESTIMATOR_SPEC),
model_dir=model_dir,
config=config)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/client/random_forest.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Initialize tensor_forest/hybrid."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.tensor_forest.hybrid.python import *
# pylint: enable=unused-import,wildcard-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/hybrid/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines the layer abstraction for hybrid models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import variables as framework_variables
class HybridLayer(object):
"""Layers are building blocks for hybrid models."""
def _define_vars(self,
params,
**kwargs):
"""Override to define the TensorFlow variables for the layer."""
raise NotImplementedError
# pylint: disable=unused-argument
def __init__(self, params, layer_num, device_assigner, *args, **kwargs):
self.layer_num = layer_num
self.device_assigner = (
device_assigner or framework_variables.VariableDeviceChooser())
self.params = params
self._define_vars(params, **kwargs)
def inference_graph(self, data, data_spec=None):
raise NotImplementedError
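# Editor's note: a minimal concrete layer (editor's addition) showing which
# methods a HybridLayer subclass is expected to override. Real layers create
# trainable variables in _define_vars and build their op graph in
# inference_graph; this toy layer does neither.
class _ExamplePassThroughLayer(HybridLayer):
  """A do-nothing layer that returns its input unchanged."""
  def _define_vars(self, params, **kwargs):
    # No trainable variables for this example layer.
    pass
  def inference_graph(self, data, data_spec=None):
    return data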
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/hybrid/python/hybrid_layer.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the hybrid tensor forest model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_model
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class HybridLayerTest(test_util.TensorFlowTestCase):
def setUp(self):
self.params = tensor_forest.ForestHParams(
num_classes=3,
num_features=7,
layer_size=11,
num_layers=13,
num_trees=17,
connection_probability=0.1,
hybrid_tree_depth=4,
regularization_strength=0.01,
regularization="",
weight_init_mean=0.0,
weight_init_std=0.1)
self.params.num_nodes = 2**self.params.hybrid_tree_depth - 1
self.params.num_leaves = 2**(self.params.hybrid_tree_depth - 1)
def testLayerNums(self):
l1 = fully_connected.FullyConnectedLayer(self.params, 0, None)
    self.assertEqual(l1.layer_num, 0)
    l2 = fully_connected.FullyConnectedLayer(self.params, 1, None)
    self.assertEqual(l2.layer_num, 1)
    l3 = fully_connected.FullyConnectedLayer(self.params, 2, None)
    self.assertEqual(l3.layer_num, 2)
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/hybrid/python/hybrid_layer_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines the model abstraction for hybrid models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import variables as framework_variables
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import adagrad
from tensorflow.python.util.compat import collections_abc
class HybridModel(object):
"""Defines a hybrid model.
Models chain together the results of inference layers and provide training
capabilities.
"""
# pylint: disable=unused-argument
def __init__(self,
params,
device_assigner=None,
optimizer_class=adagrad.AdagradOptimizer,
**kwargs):
self.device_assigner = (
device_assigner or framework_variables.VariableDeviceChooser())
self.params = params
self.optimizer = optimizer_class(self.params.learning_rate)
self.is_regression = params.regression
self.regularizer = None
if params.regularization == "l1":
self.regularizer = layers.l1_regularizer(
self.params.regularization_strength)
elif params.regularization == "l2":
self.regularizer = layers.l2_regularizer(
self.params.regularization_strength)
def _do_layer_inference(self, layer, data):
# If this is a collection of layers, return the mean of their inference
# results.
if isinstance(layer, collections_abc.Iterable):
return math_ops.reduce_mean(
array_ops.stack([l.inference_graph(data) for l in layer]), 0)
# If this is a single layer, return its inference result.
else:
return layer.inference_graph(data)
def _base_inference(self, data, data_spec=None):
"""Returns an op that performs inference without a softmax."""
inference_result = self._do_layer_inference(self.layers[0], data)
for layer in self.layers[1:]:
inference_result = self._do_layer_inference(layer, inference_result)
output_size = 1 if self.is_regression else self.params.num_classes
output = layers.fully_connected(
inference_result, output_size, activation_fn=array_ops.identity)
return output
def inference_graph(self, data, data_spec=None):
"""Returns the op that performs inference on a batch of data."""
return nn_ops.softmax(self._base_inference(data, data_spec=data_spec))
def training_inference_graph(self, data, data_spec=None):
"""Returns an inference-without-softmax op for training purposes."""
return self._base_inference(data, data_spec=data_spec)
def predict_proba(self, data, data_spec=None):
inference_result = self.inference_graph(data, data_spec=data_spec)
probabilities = nn_ops.softmax(inference_result, name="probabilities")
return probabilities
def training_graph(self, data, labels, data_spec=None, epoch=None):
"""Returns the op that trains the hybrid model."""
return self.optimizer.minimize(self.training_loss(data, labels))
def loss(self, data, labels):
"""The loss to minimize while training."""
if self.is_regression:
diff = self.training_inference_graph(data) - math_ops.cast(
labels, dtypes.float32)
mean_squared_error = math_ops.reduce_mean(diff * diff)
root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
loss = root_mean_squared_error
else:
loss = math_ops.reduce_mean(
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=array_ops.squeeze(math_ops.cast(labels, dtypes.int32)),
logits=self.training_inference_graph(data)),
name="loss")
if self.regularizer:
loss += layers.apply_regularization(self.regularizer,
variables.trainable_variables())
return loss
def training_loss(self, data, labels):
return self.loss(data, labels)
def validation_loss(self, data, labels):
return self.loss(data, labels)
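# --- Editor's illustrative sketch; not part of the original module. ---
# A minimal usage sketch, assuming `params` looks like a populated
# tensor_forest.ForestHParams (learning_rate, regression=False,
# regularization, num_classes, layer_size, num_layers, ...). A subclass only
# has to populate self.layers; inference_graph and training_graph come from
# HybridModel. The function and class names below are hypothetical.
def _example_two_layer_model(params, data, labels):
  # Imported inside the sketch only, to leave the module imports untouched.
  from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
  class _TwoLayerNN(HybridModel):
    def __init__(self, params, **kwargs):
      super(_TwoLayerNN, self).__init__(params, **kwargs)
      # _base_inference chains these layer outputs in order.
      self.layers = [fully_connected.FullyConnectedLayer(params, 0, None),
                     fully_connected.FullyConnectedLayer(params, 1, None)]
  model = _TwoLayerNN(params)
  probabilities = model.inference_graph(data)    # softmax over num_classes
  train_op = model.training_graph(data, labels)  # minimizes model.loss
  return probabilities, train_op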
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/hybrid/python/hybrid_model.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Initialize tensor_forest/hybrid/python."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python import layers
from tensorflow.contrib.tensor_forest.hybrid.python import models
from tensorflow.contrib.tensor_forest.hybrid.python.ops import training_ops
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/hybrid/python/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Initialize tensor_forest/hybrid/python/layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/hybrid/python/layers/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Neural network components for hybrid models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_layer
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
class FullyConnectedLayer(hybrid_layer.HybridLayer):
"""A stacked, fully-connected feed-forward neural network layer."""
def _define_vars(self, params):
pass
def inference_graph(self, data):
with ops.device(self.device_assigner):
# Compute activations for the neural network.
nn_activations = layers.fully_connected(data, self.params.layer_size)
for _ in range(1, self.params.num_layers):
# pylint: disable=W0106
nn_activations = layers.fully_connected(nn_activations,
self.params.layer_size)
return nn_activations
class ManyToOneLayer(hybrid_layer.HybridLayer):
def _define_vars(self, params):
pass
def inference_graph(self, data):
with ops.device(self.device_assigner):
# Compute activations for the neural network.
nn_activations = layers.fully_connected(data, 1)
# There is always one activation per instance by definition, so squeeze
# away the extra dimension.
return array_ops.squeeze(nn_activations, axis=[1])
class FlattenedFullyConnectedLayer(hybrid_layer.HybridLayer):
"""A stacked, fully-connected flattened feed-forward neural network layer."""
def _define_vars(self, params):
pass
def inference_graph(self, data):
with ops.device(self.device_assigner):
# Compute activations for the neural network.
nn_activations = [layers.fully_connected(data, self.params.layer_size)]
for _ in range(1, self.params.num_layers):
# pylint: disable=W0106
nn_activations.append(
layers.fully_connected(
nn_activations[-1],
self.params.layer_size))
nn_activations_tensor = array_ops.concat(
nn_activations, 1, name="flattened_nn_activations")
return nn_activations_tensor
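# --- Editor's illustrative sketch; not part of the original module. ---
# A small helper summarizing how the three layer classes above differ,
# assuming `layer_size` and `num_layers` come from the same params object the
# layers read. The function name and string keys are hypothetical.
def _expected_output_width(layer_kind, layer_size, num_layers):
  """Per-example output width implied by the layer definitions above."""
  if layer_kind == "fully_connected":
    # FullyConnectedLayer returns only its final hidden layer.
    return layer_size
  if layer_kind == "flattened_fully_connected":
    # FlattenedFullyConnectedLayer concatenates every hidden layer.
    return layer_size * num_layers
  if layer_kind == "many_to_one":
    # ManyToOneLayer squeezes a single unit per example.
    return 1
  raise ValueError("unknown layer kind: %r" % layer_kind)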
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/hybrid/python/layers/fully_connected.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.hybrid.python.layers import decisions_to_data
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.framework.ops import Operation
from tensorflow.python.framework.ops import Tensor
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import googletest
class DecisionsToDataTest(test_util.TensorFlowTestCase):
def setUp(self):
self.params = tensor_forest.ForestHParams(
num_classes=2,
num_features=31,
layer_size=11,
num_layers=13,
num_trees=17,
connection_probability=0.1,
hybrid_tree_depth=4,
regularization_strength=0.01,
regularization="",
learning_rate=0.01,
weight_init_mean=0.0,
weight_init_std=0.1)
self.params.regression = False
self.params.num_nodes = 2**self.params.hybrid_tree_depth - 1
self.params.num_leaves = 2**(self.params.hybrid_tree_depth - 1)
# pylint: disable=W0612
self.input_data = constant_op.constant(
[[random.uniform(-1, 1) for i in range(self.params.num_features)]
for _ in range(100)])
def testInferenceConstruction(self):
with variable_scope.variable_scope(
"DecisionsToDataTest_testInferenceContruction"):
graph_builder = decisions_to_data.DecisionsToDataLayer(self.params, 0,
None)
unused_graph = graph_builder.inference_graph(self.input_data)
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/hybrid/python/layers/decisions_to_data_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Treats a decision tree as a representation transformation layer.
A decision tree transformer takes features as input and returns the probability
of reaching each leaf as output. The routing throughout the tree is learnable
via backpropagation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.ops import gen_training_ops
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_layer
from tensorflow.contrib.tensor_forest.hybrid.python.ops import training_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
class DecisionsToDataLayer(hybrid_layer.HybridLayer):
"""A layer that treats soft decisions as data."""
def _define_vars(self, params, **kwargs):
with ops.device(self.device_assigner):
self.tree_parameters = variable_scope.get_variable(
name='tree_parameters_%d' % self.layer_num,
shape=[params.num_nodes, params.num_features],
initializer=init_ops.truncated_normal_initializer(
mean=params.weight_init_mean, stddev=params.weight_init_std))
self.tree_thresholds = variable_scope.get_variable(
name='tree_thresholds_%d' % self.layer_num,
shape=[params.num_nodes],
initializer=init_ops.truncated_normal_initializer(
mean=params.weight_init_mean, stddev=params.weight_init_std))
def __init__(self, params, layer_num, device_assigner,
*args, **kwargs):
super(DecisionsToDataLayer, self).__init__(
params, layer_num, device_assigner, *args, **kwargs)
self._training_ops = training_ops.Load()
def inference_graph(self, data):
with ops.device(self.device_assigner):
routing_probabilities = gen_training_ops.routing_function(
data,
self.tree_parameters,
self.tree_thresholds,
max_nodes=self.params.num_nodes)
output = array_ops.slice(
routing_probabilities,
[0, self.params.num_nodes - self.params.num_leaves - 1],
[-1, self.params.num_leaves])
return output
class KFeatureDecisionsToDataLayer(hybrid_layer.HybridLayer):
"""A layer that treats soft decisions made on single features as data."""
def _define_vars(self, params, **kwargs):
with ops.device(self.device_assigner):
self.tree_parameters = variable_scope.get_variable(
name='tree_parameters_%d' % self.layer_num,
shape=[params.num_nodes, params.num_features_per_node],
initializer=init_ops.truncated_normal_initializer(
mean=params.weight_init_mean, stddev=params.weight_init_std))
self.tree_thresholds = variable_scope.get_variable(
name='tree_thresholds_%d' % self.layer_num,
shape=[params.num_nodes],
initializer=init_ops.truncated_normal_initializer(
mean=params.weight_init_mean, stddev=params.weight_init_std))
def __init__(self, params, layer_num, device_assigner,
*args, **kwargs):
super(KFeatureDecisionsToDataLayer, self).__init__(
params, layer_num, device_assigner, *args, **kwargs)
self._training_ops = training_ops.Load()
# pylint: disable=unused-argument
def inference_graph(self, data):
with ops.device(self.device_assigner):
routing_probabilities = gen_training_ops.k_feature_routing_function(
data,
self.tree_parameters,
self.tree_thresholds,
max_nodes=self.params.num_nodes,
num_features_per_node=self.params.num_features_per_node,
layer_num=0,
random_seed=self.params.base_random_seed)
output = array_ops.slice(
routing_probabilities,
[0, self.params.num_nodes - self.params.num_leaves - 1],
[-1, self.params.num_leaves])
return output
class HardDecisionsToDataLayer(DecisionsToDataLayer):
"""A layer that learns a soft decision tree but treats it as hard at test."""
def _define_vars(self, params, **kwargs):
with ops.device(self.device_assigner):
self.tree_parameters = variable_scope.get_variable(
name='hard_tree_parameters_%d' % self.layer_num,
shape=[params.num_nodes, params.num_features],
          initializer=init_ops.truncated_normal_initializer(
mean=params.weight_init_mean, stddev=params.weight_init_std))
self.tree_thresholds = variable_scope.get_variable(
name='hard_tree_thresholds_%d' % self.layer_num,
shape=[params.num_nodes],
          initializer=init_ops.truncated_normal_initializer(
mean=params.weight_init_mean, stddev=params.weight_init_std))
def soft_inference_graph(self, data):
return super(HardDecisionsToDataLayer, self).inference_graph(data)
def inference_graph(self, data):
with ops.device(self.device_assigner):
path_probability, path = gen_training_ops.hard_routing_function(
data,
self.tree_parameters,
self.tree_thresholds,
max_nodes=self.params.num_nodes,
tree_depth=self.params.hybrid_tree_depth)
output = array_ops.slice(
gen_training_ops.unpack_path(path, path_probability),
[0, self.params.num_nodes - self.params.num_leaves - 1],
[-1, self.params.num_leaves])
return output
class StochasticHardDecisionsToDataLayer(HardDecisionsToDataLayer):
"""A layer that learns a soft decision tree by sampling paths."""
def _define_vars(self, params, **kwargs):
with ops.device(self.device_assigner):
self.tree_parameters = variable_scope.get_variable(
name='stochastic_hard_tree_parameters_%d' % self.layer_num,
shape=[params.num_nodes, params.num_features],
initializer=init_ops.truncated_normal_initializer(
mean=params.weight_init_mean, stddev=params.weight_init_std))
self.tree_thresholds = variable_scope.get_variable(
name='stochastic_hard_tree_thresholds_%d' % self.layer_num,
shape=[params.num_nodes],
initializer=init_ops.truncated_normal_initializer(
mean=params.weight_init_mean, stddev=params.weight_init_std))
def soft_inference_graph(self, data):
with ops.device(self.device_assigner):
path_probability, path = (
gen_training_ops.stochastic_hard_routing_function(
data,
self.tree_parameters,
self.tree_thresholds,
tree_depth=self.params.hybrid_tree_depth,
random_seed=self.params.base_random_seed))
output = array_ops.slice(
gen_training_ops.unpack_path(path, path_probability),
[0, self.params.num_nodes - self.params.num_leaves - 1],
[-1, self.params.num_leaves])
return output
def inference_graph(self, data):
with ops.device(self.device_assigner):
path_probability, path = gen_training_ops.hard_routing_function(
data,
self.tree_parameters,
self.tree_thresholds,
max_nodes=self.params.num_nodes,
tree_depth=self.params.hybrid_tree_depth)
output = array_ops.slice(
gen_training_ops.unpack_path(path, path_probability),
[0, self.params.num_nodes - self.params.num_leaves - 1],
[-1, self.params.num_leaves])
return output
class StochasticSoftDecisionsToDataLayer(StochasticHardDecisionsToDataLayer):
"""A layer that learns a soft decision tree by sampling paths."""
def _define_vars(self, params, **kwargs):
with ops.device(self.device_assigner):
self.tree_parameters = variable_scope.get_variable(
name='stochastic_soft_tree_parameters_%d' % self.layer_num,
shape=[params.num_nodes, params.num_features],
initializer=init_ops.truncated_normal_initializer(
mean=params.weight_init_mean, stddev=params.weight_init_std))
self.tree_thresholds = variable_scope.get_variable(
name='stochastic_soft_tree_thresholds_%d' % self.layer_num,
shape=[params.num_nodes],
initializer=init_ops.truncated_normal_initializer(
mean=params.weight_init_mean, stddev=params.weight_init_std))
def inference_graph(self, data):
with ops.device(self.device_assigner):
routes = gen_training_ops.routing_function(
data,
self.tree_parameters,
self.tree_thresholds,
max_nodes=self.params.num_nodes)
leaf_routes = array_ops.slice(
routes, [0, self.params.num_nodes - self.params.num_leaves - 1],
[-1, self.params.num_leaves])
return leaf_routes
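# --- Editor's illustrative sketch; not part of the original module. ---
# A pure-numpy sketch of the soft routing idea behind routing_function,
# written to reproduce the expected values in routing_function_op_test.py:
# the root is reached with probability 1 and each internal node i splits its
# probability mass between its children with sigmoid(w_i . x - b_i). The
# real op is implemented in C++ and also fixes the node layout that the leaf
# slices above depend on, so treat this only as an illustration.
def _soft_routing_sketch(x, tree_weights, tree_thresholds):
  import numpy as np
  num_nodes = len(tree_weights)
  reach_probability = np.zeros(num_nodes)
  reach_probability[0] = 1.0
  for i in range(num_nodes):
    left, right = 2 * i + 1, 2 * i + 2
    if left >= num_nodes:
      continue  # node i is a leaf in this sketch's implicit layout
    p_left = 1.0 / (1.0 + np.exp(-(np.dot(tree_weights[i], x) -
                                   tree_thresholds[i])))
    reach_probability[left] = reach_probability[i] * p_left
    reach_probability[right] = reach_probability[i] * (1.0 - p_left)
  return reach_probability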
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/hybrid/python/layers/decisions_to_data.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the routing function op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.ops import gen_training_ops
from tensorflow.contrib.tensor_forest.hybrid.python.ops import training_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class RoutingFunctionTest(test_util.TensorFlowTestCase):
def setUp(self):
self.input_data = [[-1., 0.], [-1., 2.],
[1., 0.], [1., -2.]]
self.input_labels = [0., 1., 2., 3.]
self.tree = [[1, 0], [-1, 0], [-1, 0]]
self.tree_weights = [[1.0, 0.0], [1.0, 0.0], [1.0, 0.0]]
self.tree_thresholds = [0., 0., 0.]
self.ops = training_ops.Load()
def testRoutingFunction(self):
with self.cached_session():
route_tensor = gen_training_ops.routing_function(
self.input_data, self.tree_weights, self.tree_thresholds, max_nodes=3)
route_tensor_shape = route_tensor.get_shape()
      self.assertEqual(len(route_tensor_shape), 2)
      self.assertEqual(route_tensor_shape[0], 4)
      self.assertEqual(route_tensor_shape[1], 3)
routes = route_tensor.eval()
# Point 1
# Node 1 is a decision node => probability = 1.0
      self.assertAlmostEqual(1.0, routes[0, 0])
      # Probability left output = 1.0 / (1.0 + exp(1.0)) = 0.26894142
      self.assertAlmostEqual(0.26894142, routes[0, 1])
      # Probability right = 1 - 0.26894142 = 0.73105858
      self.assertAlmostEqual(0.73105858, routes[0, 2])
if __name__ == '__main__':
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/hybrid/python/kernel_tests/routing_function_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the routing function op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.ops import gen_training_ops
from tensorflow.contrib.tensor_forest.hybrid.python.ops import training_ops
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class KFeatureRoutingFunctionTest(test_util.TensorFlowTestCase):
def setUp(self):
self.input_data = [[-1., 0.], [-1., 2.],
[1., 0.], [1., -2.]]
self.input_labels = [0., 1., 2., 3.]
self.tree = [[1, 0], [-1, 0], [-1, 0]]
self.tree_weights = [[1.0, 0.0], [1.0, 0.0], [1.0, 0.0]]
self.tree_thresholds = [0., 0., 0.]
self.ops = training_ops.Load()
self.params = tensor_forest.ForestHParams(
num_features=2,
hybrid_tree_depth=2,
base_random_seed=10,
feature_bagging_fraction=1.0,
regularization_strength=0.01,
regularization="",
weight_init_mean=0.0,
weight_init_std=0.1)
self.params.num_nodes = 2**self.params.hybrid_tree_depth - 1
self.params.num_leaves = 2**(self.params.hybrid_tree_depth - 1)
self.params.num_features_per_node = (
self.params.feature_bagging_fraction * self.params.num_features)
self.params.regression = False
def testParams(self):
    self.assertEqual(self.params.num_nodes, 3)
    self.assertEqual(self.params.num_features, 2)
    self.assertEqual(self.params.num_features_per_node, 2)
def testRoutingFunction(self):
with self.cached_session():
route_tensor = gen_training_ops.k_feature_routing_function(
self.input_data,
self.tree_weights,
self.tree_thresholds,
max_nodes=self.params.num_nodes,
num_features_per_node=self.params.num_features_per_node,
layer_num=0,
random_seed=self.params.base_random_seed)
route_tensor_shape = route_tensor.get_shape()
      self.assertEqual(len(route_tensor_shape), 2)
      self.assertEqual(route_tensor_shape[0], 4)
      self.assertEqual(route_tensor_shape[1], 3)
routes = route_tensor.eval()
print(routes)
# Point 1
# Node 1 is a decision node => probability = 1.0
      self.assertAlmostEqual(1.0, routes[0, 0])
      # Probability left output = 1.0 / (1.0 + exp(1.0)) = 0.26894142
      self.assertAlmostEqual(0.26894142, routes[0, 1])
      # Probability right = 1 - 0.26894142 = 0.73105858
      self.assertAlmostEqual(0.73105858, routes[0, 2])
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/hybrid/python/kernel_tests/k_feature_routing_function_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A hybrid model that samples paths when training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python.layers import decisions_to_data
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.contrib.tensor_forest.hybrid.python.models import hard_decisions_to_data_then_nn
from tensorflow.python.training import adagrad
class StochasticSoftDecisionsToDataThenNN(
hard_decisions_to_data_then_nn.HardDecisionsToDataThenNN):
"""A hybrid model that samples paths when training."""
def __init__(self,
params,
device_assigner=None,
optimizer_class=adagrad.AdagradOptimizer,
**kwargs):
super(StochasticSoftDecisionsToDataThenNN, self).__init__(
params,
device_assigner=device_assigner,
optimizer_class=optimizer_class,
**kwargs)
self.layers = [decisions_to_data.StochasticSoftDecisionsToDataLayer(
params, 0, device_assigner),
fully_connected.FullyConnectedLayer(
params, 1, device_assigner=device_assigner)]
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/hybrid/python/models/stochastic_soft_decisions_to_data_then_nn.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple baseline feed-forward neural network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_model
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.python.training import adagrad
class NN(hybrid_model.HybridModel):
"""A simple baseline feed-forward neural network."""
def __init__(self,
params,
device_assigner=None,
optimizer_class=adagrad.AdagradOptimizer,
**kwargs):
super(NN, self).__init__(
params,
device_assigner=device_assigner,
optimizer_class=optimizer_class,
**kwargs)
self.layers = [fully_connected.FullyConnectedLayer(
params, 0, device_assigner=device_assigner)]
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/hybrid/python/models/nn.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A model that places a decision tree embedding before a neural net."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_model
from tensorflow.contrib.tensor_forest.hybrid.python.layers import decisions_to_data
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.python.training import adagrad
class DecisionsToDataThenNN(hybrid_model.HybridModel):
"""A model that places a decision tree embedding before a neural net."""
def __init__(self,
params,
device_assigner=None,
optimizer_class=adagrad.AdagradOptimizer,
**kwargs):
super(DecisionsToDataThenNN, self).__init__(
params,
device_assigner=device_assigner,
optimizer_class=optimizer_class,
**kwargs)
self.layers = [decisions_to_data.DecisionsToDataLayer(params,
0, device_assigner),
fully_connected.FullyConnectedLayer(
params, 1, device_assigner=device_assigner)]
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/hybrid/python/models/decisions_to_data_then_nn.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A model that places a hard decision tree embedding before a neural net."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_model
from tensorflow.contrib.tensor_forest.hybrid.python.layers import decisions_to_data
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.python.ops import nn_ops
from tensorflow.python.training import adagrad
class HardDecisionsToDataThenNN(hybrid_model.HybridModel):
"""A model that treats tree inference as hard at test."""
def __init__(self,
params,
device_assigner=None,
optimizer_class=adagrad.AdagradOptimizer,
**kwargs):
super(HardDecisionsToDataThenNN, self).__init__(
params,
device_assigner=device_assigner,
optimizer_class=optimizer_class,
**kwargs)
self.layers = [decisions_to_data.HardDecisionsToDataLayer(
params, 0, device_assigner),
fully_connected.FullyConnectedLayer(
params, 1, device_assigner=device_assigner)]
def _base_inference(self, data, data_spec=None, soft=False):
if soft:
inference_result = self.layers[0].soft_inference_graph(data)
else:
inference_result = self._do_layer_inference(self.layers[0], data)
for layer in self.layers[1:]:
inference_result = self._do_layer_inference(layer, inference_result)
output_size = 1 if self.is_regression else self.params.num_classes
output = layers.fully_connected(
inference_result, output_size, activation_fn=nn_ops.softmax)
return output
def inference_graph(self, data, data_spec=None):
"""Returns the op that performs inference on a batch of data."""
return nn_ops.softmax(
self._base_inference(
data, data_spec=data_spec, soft=True))
# pylint: disable=unused-argument
def training_inference_graph(self, data, data_spec=None):
return self._base_inference(data, data_spec=data_spec, soft=False)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/hybrid/python/models/hard_decisions_to_data_then_nn.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Initialize tensor_forest/hybrid/python/models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/hybrid/python/models/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A hybrid model that samples paths when training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python.layers import decisions_to_data
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.contrib.tensor_forest.hybrid.python.models import hard_decisions_to_data_then_nn
from tensorflow.python.training import adagrad
class StochasticHardDecisionsToDataThenNN(
hard_decisions_to_data_then_nn.HardDecisionsToDataThenNN):
"""A hybrid model that samples paths when training."""
def __init__(self,
params,
device_assigner=None,
optimizer_class=adagrad.AdagradOptimizer,
**kwargs):
super(StochasticHardDecisionsToDataThenNN, self).__init__(
params,
device_assigner=device_assigner,
optimizer_class=optimizer_class,
**kwargs)
self.layers = [decisions_to_data.StochasticHardDecisionsToDataLayer(
params, 0, device_assigner),
fully_connected.FullyConnectedLayer(
params, 1, device_assigner=device_assigner)]
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/hybrid/python/models/stochastic_hard_decisions_to_data_then_nn.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A model that places a soft decision tree embedding before a neural net."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_model
from tensorflow.contrib.tensor_forest.hybrid.python.layers import decisions_to_data
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.python.training import adagrad
class KFeatureDecisionsToDataThenNN(hybrid_model.HybridModel):
"""A model that places a soft decision tree embedding before a neural net."""
def __init__(self,
params,
device_assigner=None,
optimizer_class=adagrad.AdagradOptimizer,
**kwargs):
super(KFeatureDecisionsToDataThenNN, self).__init__(
params,
device_assigner=device_assigner,
optimizer_class=optimizer_class,
**kwargs)
self.layers = [decisions_to_data.KFeatureDecisionsToDataLayer(
params, 0, device_assigner),
fully_connected.FullyConnectedLayer(
params, 1, device_assigner=device_assigner)]
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/hybrid/python/models/k_feature_decisions_to_data_then_nn.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the hybrid tensor forest model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.hybrid.python.models import forest_to_data_then_nn
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.framework.ops import Operation
from tensorflow.python.framework.ops import Tensor
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import googletest
class ForestToDataThenNNTest(test_util.TensorFlowTestCase):
def setUp(self):
self.params = tensor_forest.ForestHParams(
num_classes=2,
num_features=31,
layer_size=11,
num_layers=13,
num_trees=3,
connection_probability=0.1,
hybrid_tree_depth=4,
regularization_strength=0.01,
regularization="",
base_random_seed=10,
feature_bagging_fraction=1.0,
learning_rate=0.01,
weight_init_mean=0.0,
weight_init_std=0.1)
self.params.regression = False
self.params.num_nodes = 2**self.params.hybrid_tree_depth - 1
self.params.num_leaves = 2**(self.params.hybrid_tree_depth - 1)
self.params.num_features_per_node = (self.params.feature_bagging_fraction *
self.params.num_features)
def testInferenceConstruction(self):
# pylint: disable=W0612
data = constant_op.constant(
[[random.uniform(-1, 1) for i in range(self.params.num_features)]
for _ in range(100)])
with variable_scope.variable_scope(
"ForestToDataThenNNTest_testInferenceContruction"):
graph_builder = forest_to_data_then_nn.ForestToDataThenNN(self.params)
graph = graph_builder.inference_graph(data, None)
self.assertTrue(isinstance(graph, Tensor))
def testTrainingConstruction(self):
# pylint: disable=W0612
data = constant_op.constant(
[[random.uniform(-1, 1) for i in range(self.params.num_features)]
for _ in range(100)])
labels = [1 for _ in range(100)]
with variable_scope.variable_scope(
"ForestToDataThenNNTest.testTrainingContruction"):
graph_builder = forest_to_data_then_nn.ForestToDataThenNN(self.params)
graph = graph_builder.training_graph(data, labels, None)
self.assertTrue(isinstance(graph, Operation))
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/hybrid/python/models/forest_to_data_then_nn_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the hybrid tensor forest model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.hybrid.python.models import k_feature_decisions_to_data_then_nn
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.framework.ops import Operation
from tensorflow.python.framework.ops import Tensor
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import googletest
class KFeatureDecisionsToDataThenNNTest(test_util.TensorFlowTestCase):
def setUp(self):
self.params = tensor_forest.ForestHParams(
num_classes=2,
num_features=31,
layer_size=11,
num_layers=13,
num_trees=17,
connection_probability=0.1,
hybrid_tree_depth=4,
regularization_strength=0.01,
regularization="",
base_random_seed=10,
hybrid_feature_bagging_fraction=1.0,
learning_rate=0.01,
weight_init_mean=0.0,
weight_init_std=0.1)
self.params.regression = False
self.params.num_nodes = 2**self.params.hybrid_tree_depth - 1
self.params.num_leaves = 2**(self.params.hybrid_tree_depth - 1)
self.params.num_features_per_node = (self.params.feature_bagging_fraction *
self.params.num_features)
def testKFeatureInferenceConstruction(self):
# pylint: disable=W0612
data = constant_op.constant(
[[random.uniform(-1, 1) for i in range(self.params.num_features)]
for _ in range(100)])
with variable_scope.variable_scope(
"KFeatureDecisionsToDataThenNNTest.testKFeatureInferenceContruction"):
graph_builder = (
k_feature_decisions_to_data_then_nn.KFeatureDecisionsToDataThenNN(
self.params))
graph = graph_builder.inference_graph(data, None)
self.assertTrue(isinstance(graph, Tensor))
def testKFeatureTrainingConstruction(self):
# pylint: disable=W0612
data = constant_op.constant(
[[random.uniform(-1, 1) for i in range(self.params.num_features)]
for _ in range(100)])
labels = [1 for _ in range(100)]
with variable_scope.variable_scope(
"KFeatureDecisionsToDataThenNNTest.testKFeatureTrainingContruction"):
graph_builder = (
k_feature_decisions_to_data_then_nn.KFeatureDecisionsToDataThenNN(
self.params))
graph = graph_builder.training_graph(data, labels, None)
self.assertTrue(isinstance(graph, Operation))
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/hybrid/python/models/k_feature_decisions_to_data_then_nn_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A model that combines a decision forest embedding with a neural net."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_model
from tensorflow.contrib.tensor_forest.hybrid.python.layers import decisions_to_data
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.python.training import adagrad
class ForestToDataThenNN(hybrid_model.HybridModel):
"""A model that combines a decision forest embedding with a neural net."""
def __init__(self,
params,
device_assigner=None,
optimizer_class=adagrad.AdagradOptimizer,
**kwargs):
super(ForestToDataThenNN, self).__init__(
params,
device_assigner=device_assigner,
optimizer_class=optimizer_class,
**kwargs)
self.layers = [[decisions_to_data.KFeatureDecisionsToDataLayer(
params, i, device_assigner)
for i in range(self.params.num_trees)],
fully_connected.FullyConnectedLayer(
params,
self.params.num_trees,
device_assigner=device_assigner)]
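# --- Editor's illustrative sketch; not part of the original module. ---
# self.layers[0] above is a Python list, so HybridModel._do_layer_inference
# treats it as a collection and averages the per-tree leaf embeddings before
# they reach the fully connected layer. A numpy sketch of that reduction,
# with a hypothetical list of [batch_size, num_leaves] arrays as input:
def _average_tree_embeddings(per_tree_outputs):
  import numpy as np
  stacked = np.stack(per_tree_outputs)  # mirrors array_ops.stack(...)
  return stacked.mean(axis=0)           # mirrors math_ops.reduce_mean(..., 0)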
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/hybrid/python/models/forest_to_data_then_nn.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the hybrid tensor forest model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.hybrid.python.models import decisions_to_data_then_nn
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.framework.ops import Operation
from tensorflow.python.framework.ops import Tensor
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import googletest
class DecisionsToDataThenNNTest(test_util.TensorFlowTestCase):
def setUp(self):
self.params = tensor_forest.ForestHParams(
num_classes=2,
num_features=31,
layer_size=11,
num_layers=13,
num_trees=17,
connection_probability=0.1,
hybrid_tree_depth=4,
regularization_strength=0.01,
learning_rate=0.01,
regularization="",
weight_init_mean=0.0,
weight_init_std=0.1)
self.params.regression = False
self.params.num_nodes = 2**self.params.hybrid_tree_depth - 1
self.params.num_leaves = 2**(self.params.hybrid_tree_depth - 1)
def testHParams(self):
    self.assertEqual(self.params.num_classes, 2)
    self.assertEqual(self.params.num_features, 31)
    self.assertEqual(self.params.layer_size, 11)
    self.assertEqual(self.params.num_layers, 13)
    self.assertEqual(self.params.num_trees, 17)
    self.assertEqual(self.params.hybrid_tree_depth, 4)
    self.assertEqual(self.params.connection_probability, 0.1)
# Building the graphs modifies the params.
with variable_scope.variable_scope("DecisionsToDataThenNNTest_testHParams"):
# pylint: disable=W0612
graph_builder = decisions_to_data_then_nn.DecisionsToDataThenNN(
self.params)
# Tree with depth 4 should have 2**0 + 2**1 + 2**2 + 2**3 = 15 nodes.
      self.assertEqual(self.params.num_nodes, 15)
def testConstructionPollution(self):
"""Ensure that graph building doesn't modify the params in a bad way."""
# pylint: disable=W0612
data = [[random.uniform(-1, 1) for i in range(self.params.num_features)]
for _ in range(100)]
self.assertTrue(isinstance(self.params, tensor_forest.ForestHParams))
self.assertFalse(
isinstance(self.params.num_trees, tensor_forest.ForestHParams))
with variable_scope.variable_scope(
"DecisionsToDataThenNNTest_testConstructionPollution"):
graph_builder = decisions_to_data_then_nn.DecisionsToDataThenNN(
self.params)
self.assertTrue(isinstance(self.params, tensor_forest.ForestHParams))
self.assertFalse(
isinstance(self.params.num_trees, tensor_forest.ForestHParams))
def testInferenceConstruction(self):
# pylint: disable=W0612
data = constant_op.constant(
[[random.uniform(-1, 1) for i in range(self.params.num_features)]
for _ in range(100)])
with variable_scope.variable_scope(
"DecisionsToDataThenNNTest_testInferenceConstruction"):
graph_builder = decisions_to_data_then_nn.DecisionsToDataThenNN(
self.params)
graph = graph_builder.inference_graph(data, None)
self.assertTrue(isinstance(graph, Tensor))
def testTrainingConstruction(self):
# pylint: disable=W0612
data = constant_op.constant(
[[random.uniform(-1, 1) for i in range(self.params.num_features)]
for _ in range(100)])
labels = [1 for _ in range(100)]
with variable_scope.variable_scope(
"DecisionsToDataThenNNTest_testTrainingConstruction"):
graph_builder = decisions_to_data_then_nn.DecisionsToDataThenNN(
self.params)
graph = graph_builder.training_graph(data, labels, None)
self.assertTrue(isinstance(graph, Operation))
if __name__ == "__main__":
googletest.main()
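# --- Editor's illustrative sketch; not part of the original test. ---
# The setUp arithmetic used throughout these tests: a full binary tree of
# depth d has 2**d - 1 nodes and 2**(d - 1) leaves, so hybrid_tree_depth=4
# gives the 15 nodes checked in testHParams (and 8 leaves).
def _tree_sizes(hybrid_tree_depth):
  num_nodes = 2**hybrid_tree_depth - 1
  num_leaves = 2**(hybrid_tree_depth - 1)
  return num_nodes, num_leaves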
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/hybrid/python/models/decisions_to_data_then_nn_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for hybrid model training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.contrib.tensor_forest.hybrid.ops import gen_training_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import tf_logging as logging
TRAINING_OPS_FILE = '_training_ops.so'
_training_ops = None
_ops_lock = threading.Lock()
# TODO(b/31222613): Some of these ops are probably differentiable, and
# there may be latent bugs here.
ops.NotDifferentiable('HardRoutingFunction')
ops.NotDifferentiable('RoutingGradient')
ops.NotDifferentiable('KFeatureDataGradient')
ops.NotDifferentiable('KFeatureRoutingGradient')
ops.NotDifferentiable('KFeatureWeightGradient')
ops.NotDifferentiable('UnpackPath')
@ops.RegisterGradient('RoutingFunction')
def _RoutingFunctionGradient(op, grad):
"""The gradient of RoutingFunction.
Args:
op: The RoutingFunction op.
grad: Gradient with respect to the output of the RoutingFunction op.
Returns:
Gradients with respect to the input of the RoutingFunction op.
"""
routing_gradient = gen_training_ops.routing_gradient
input_data_tensor = op.inputs[0]
tree_weights_tensor = op.inputs[1]
tree_thresholds_tensor = op.inputs[2]
routing_function_tensor = op.outputs[0]
# The derivatives below are each defined over one or two of three dimensions:
# (batch_size, num_nodes, num_features). We explicitly expand each derivative
# to three dimensions to ensure that they're broadcasted correctly.
# dl / du is the derivative of the loss with respect to the output of the
# routing function, which is provided by tensorflow.
#
# dl / du has dimension (batch_size, num_nodes), which we expand to
# (batch_size, num_nodes, 1).
dl_du = array_ops.expand_dims(grad, 2)
# du / df is the derivative of the output of the routing function with respect
# to the decision function at each node. It is computed by
# routing_gradient_op.cc.
#
# du / df has dimension (batch_size, num_nodes), which we expand to
# (batch_size, num_nodes, 1).
du_df = array_ops.expand_dims(
routing_gradient(
input_data_tensor,
tree_weights_tensor,
tree_thresholds_tensor,
routing_function_tensor,
max_nodes=op.get_attr('max_nodes')),
2)
# df / dx is the derivative of the decision function with respect to the input
# data. f_i(x) = (-t_i * x + b_i), so df_i / dx = -t_i.
#
# df / dx has dimension (num_nodes, num_features), which we expand to
# (1, num_nodes, num_features).
df_dx = -array_ops.expand_dims(tree_weights_tensor, 0)
# df / dt is the derivative of the decision function with respect to its
# parameters. f_i(x) = (-t_i * x + b_i), so df_i / d t_i = -x.
#
# df / dt has dimension (batch_size, num_features), which we expand to
# (batch_size, 1, num_features).
df_dt = -array_ops.expand_dims(input_data_tensor, 1)
  # df / db is the derivative of the decision function with respect to its
  # bias parameter. f_i(x) = (-t_i * x + b_i), so df_i / d b_i = 1.
#
# df / db has dimension (num_nodes), which we expand to
# (1, num_nodes, 1).
df_db = array_ops.expand_dims(
array_ops.expand_dims(array_ops.ones_like(tree_thresholds_tensor), 0), 2)
# Compute the derivatives of the loss with respect to the inputs using the
# chain rule (backpropagation).
dl_dx = math_ops.reduce_mean(dl_du * du_df * df_dx, 1)
dl_dt = math_ops.reduce_mean(dl_du * du_df * df_dt, 0)
dl_db = math_ops.reduce_mean(array_ops.squeeze(dl_du * du_df * df_db, [2]), 0)
input_gradients = [dl_dx, dl_dt, dl_db]
return input_gradients
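# --- Editor's illustrative sketch; not part of the original module. ---
# A shape-only numpy sketch of the chain-rule assembly above, using
# hypothetical sizes batch_size=4, num_nodes=3, num_features=2. It only
# demonstrates the broadcasting scheme; the actual du/df values come from
# routing_gradient_op.cc.
def _routing_gradient_shape_sketch():
  import numpy as np
  batch_size, num_nodes, num_features = 4, 3, 2
  dl_du = np.ones((batch_size, num_nodes, 1))     # loss wrt routing output
  du_df = np.ones((batch_size, num_nodes, 1))     # routing wrt decision fn
  df_dx = np.ones((1, num_nodes, num_features))   # decision fn wrt input
  df_dt = np.ones((batch_size, 1, num_features))  # decision fn wrt weights
  df_db = np.ones((1, num_nodes, 1))              # decision fn wrt bias
  dl_dx = (dl_du * du_df * df_dx).mean(axis=1)    # -> (batch_size, num_features)
  dl_dt = (dl_du * du_df * df_dt).mean(axis=0)    # -> (num_nodes, num_features)
  dl_db = np.squeeze(dl_du * du_df * df_db, axis=2).mean(axis=0)  # -> (num_nodes,)
  return dl_dx.shape, dl_dt.shape, dl_db.shape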
@ops.RegisterGradient('StochasticHardRoutingFunction')
def _StochasticHardRoutingFunctionGradient(op, routing_grad, unused_path_grad):
"""The gradient of RoutingFunction.
Args:
op: The RoutingFunction op.
routing_grad: Gradient with respect to the output of the RoutingFunction op.
Returns:
Gradients with respect to the input of the RoutingFunction op.
"""
gradient_op = gen_training_ops.stochastic_hard_routing_gradient
unpack_path_op = gen_training_ops.unpack_path
input_data_tensor = op.inputs[0]
tree_weights_tensor = op.inputs[1]
tree_thresholds_tensor = op.inputs[2]
path_probability_tensor = op.outputs[0]
path_tensor = op.outputs[1]
# The derivatives below are each defined over one or two of three dimensions:
# (batch_size, num_nodes, num_features). We explicitly expand each derivative
# to three dimensions to ensure that they're broadcasted correctly.
du_df_raw, df_dx_raw, df_dt_raw, df_db_raw = gradient_op(
input_data_tensor,
tree_weights_tensor,
tree_thresholds_tensor,
path_probability_tensor,
path_tensor,
tree_depth=op.get_attr('tree_depth'))
# dl / du is the derivative of the loss with respect to the output of the
# routing function, which is provided by tensorflow.
#
# dl / du has dimension (batch_size, num_nodes), which we expand to
# (batch_size, num_nodes, 1).
dl_du = array_ops.expand_dims(unpack_path_op(path_tensor, routing_grad), 2)
# du / df is the derivative of the output of the routing function with respect
# to the decision function at each node. It is computed by
  # stochastic_hard_routing_gradient_op.cc.
#
# du / df has dimension (batch_size, num_nodes), which we expand to
# (batch_size, num_nodes, 1).
du_df = array_ops.expand_dims(du_df_raw, 2)
# df / dx is the derivative of the decision function with respect to the input
# data. f(x) = (-t * x + b), so df / dx = -t for the selected features and
# zero elsewhere.
#
# df / dx has dimension (num_nodes, num_features), which we expand to
# (1, num_nodes, num_features).
df_dx = array_ops.expand_dims(df_dx_raw, 0)
# df / dt is the derivative of the decision function with respect to its
# parameters. f(x) = (-t * x + b), so df / dt = -x[feature].
#
# df / dt has dimension (batch_size, num_nodes, num_features).
df_dt = -df_dt_raw
  # df / db is the derivative of the decision function with respect to its
  # bias parameter. f(x) = (-t * x + b), so df / db = 1.
#
# df / db has dimension (num_nodes), which we expand to
# (1, num_nodes, 1).
df_db = array_ops.expand_dims(array_ops.expand_dims(df_db_raw, 0), 2)
# Compute the derivatives of the loss with respect to the inputs using the
# chain rule (backpropagation).
dl_dx = math_ops.reduce_mean(dl_du * du_df * df_dx, 1)
dl_dt = math_ops.reduce_mean(dl_du * du_df * df_dt, 0)
dl_db = math_ops.reduce_mean(array_ops.squeeze(dl_du * du_df * df_db, [2]), 0)
input_gradients = [dl_dx, dl_dt, dl_db]
return input_gradients
@ops.RegisterGradient('KFeatureRoutingFunction')
def _KFeatureRoutingFunctionGradient(op, grad):
"""The gradient of RoutingFunction.
Args:
op: The RoutingFunction op.
grad: Gradient with respect to the output of the RoutingFunction op.
Returns:
Gradients with respect to the input of the RoutingFunction op.
"""
gradient_op = gen_training_ops.k_feature_gradient
input_data_tensor = op.inputs[0]
tree_weights_tensor = op.inputs[1]
tree_thresholds_tensor = op.inputs[2]
routing_function_tensor = op.outputs[0]
# The derivatives below are each defined over one or two of three dimensions:
# (batch_size, num_nodes, num_features). We explicitly expand each derivative
# to three dimensions to ensure that they're broadcasted correctly.
du_df_raw, df_dx_raw, df_dt_raw = gradient_op(
input_data_tensor,
tree_weights_tensor,
tree_thresholds_tensor,
routing_function_tensor,
layer_num=op.get_attr('layer_num'),
random_seed=op.get_attr('random_seed'))
# dl / du is the derivative of the loss with respect to the output of the
# routing function, which is provided by tensorflow.
#
# dl / du has dimension (batch_size, num_nodes), which we expand to
# (batch_size, num_nodes, 1).
dl_du = array_ops.expand_dims(grad, 2)
# du / df is the derivative of the output of the routing function with respect
# to the decision function at each node. It is computed by
  # k_feature_gradient_op.cc.
#
# du / df has dimension (batch_size, num_nodes), which we expand to
# (batch_size, num_nodes, 1).
du_df = array_ops.expand_dims(du_df_raw, 2)
# df / dx is the derivative of the decision function with respect to the input
# data. f(x) = (-t * x + b), so df / dx = -t for the selected features and
# zero elsewhere.
#
# df / dx has dimension (num_nodes, num_features), which we expand to
# (1, num_nodes, num_features).
df_dx = array_ops.expand_dims(df_dx_raw, 0)
# df / dt is the derivative of the decision function with respect to its
# parameters. f(x) = (-t * x + b), so df / dt = -x[feature].
#
# df / dt has dimension (batch_size, num_nodes, num_features).
df_dt = -df_dt_raw
  # df / db is the derivative of the decision function with respect to its
  # bias parameter. f(x) = (-t * x + b), so df / db = 1.
#
# df / db has dimension (num_nodes), which we expand to
# (1, num_nodes, 1).
df_db = array_ops.expand_dims(
array_ops.expand_dims(array_ops.ones_like(tree_thresholds_tensor), 0), 2)
# Compute the derivatives of the loss with respect to the inputs using the
# chain rule (backpropagation).
dl_dx = math_ops.reduce_mean(dl_du * du_df * df_dx, 1)
dl_dt = math_ops.reduce_mean(dl_du * du_df * df_dt, 0)
dl_db = math_ops.reduce_mean(array_ops.squeeze(dl_du * du_df * df_db, [2]), 0)
input_gradients = [dl_dx, dl_dt, dl_db]
return input_gradients
# Workaround for the fact that importing tensorflow imports contrib (even if
# a user isn't using this or any other contrib op), while there is not yet
# any guarantee that the shared object exists. Without lazy loading,
# "import tensorflow" would always crash, even for users that never use
# contrib.
def Load():
"""Load training ops library and return the loaded module."""
with _ops_lock:
global _training_ops
if not _training_ops:
ops_path = resource_loader.get_path_to_datafile(TRAINING_OPS_FILE)
logging.info('data path: %s', ops_path)
_training_ops = loader.load_op_library(ops_path)
assert _training_ops, 'Could not load _training_ops.so'
return _training_ops
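# A minimal usage sketch; the helper below is hypothetical and not part of
# the original module. Because Load() caches the result under _ops_lock, the
# shared library can be resolved lazily at call time instead of failing at
# "import tensorflow" when the .so is missing.
def _training_ops_available():
  """Returns True if the training ops shared object could be loaded."""
  try:
    return Load() is not None
  except Exception:  # pylint: disable=broad-except
    return False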
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensor_forest/hybrid/python/ops/training_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for building input pipelines.
This module contains experimental `Dataset` sources and transformations that can
be used in conjunction with the `tf.data.Dataset` API. Note that the
`tf.contrib.data` API is not subject to the same backwards compatibility
guarantees as `tf.data`, but we will provide deprecation advice in advance of
removing existing functionality.
See [Importing Data](https://tensorflow.org/guide/datasets) for an overview.
@@Counter
@@CheckpointInputPipelineHook
@@CsvDataset
@@LMDBDataset
@@Optional
@@RandomDataset
@@Reducer
@@SqlDataset
@@TFRecordWriter
@@assert_element_shape
@@batch_and_drop_remainder
@@bucket_by_sequence_length
@@choose_from_datasets
@@copy_to_device
@@dense_to_sparse_batch
@@enumerate_dataset
@@get_next_as_optional
@@get_single_element
@@group_by_reducer
@@group_by_window
@@ignore_errors
@@latency_stats
@@make_batched_features_dataset
@@make_csv_dataset
@@make_saveable_from_iterator
@@map_and_batch
@@padded_batch_and_drop_remainder
@@parallel_interleave
@@parse_example_dataset
@@prefetch_to_device
@@read_batch_features
@@rejection_resample
@@reduce_dataset
@@sample_from_datasets
@@scan
@@set_stats_aggregator
@@shuffle_and_repeat
@@sliding_window_batch
@@sloppy_interleave
@@StatsAggregator
@@unbatch
@@unique
@@AUTOTUNE
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.data.python.ops.batching import assert_element_shape
from tensorflow.contrib.data.python.ops.batching import batch_and_drop_remainder
from tensorflow.contrib.data.python.ops.batching import dense_to_sparse_batch
from tensorflow.contrib.data.python.ops.batching import map_and_batch
from tensorflow.contrib.data.python.ops.batching import padded_batch_and_drop_remainder
from tensorflow.contrib.data.python.ops.batching import unbatch
from tensorflow.contrib.data.python.ops.counter import Counter
from tensorflow.contrib.data.python.ops.enumerate_ops import enumerate_dataset
from tensorflow.contrib.data.python.ops.error_ops import ignore_errors
from tensorflow.contrib.data.python.ops.get_single_element import get_single_element
from tensorflow.contrib.data.python.ops.get_single_element import reduce_dataset
from tensorflow.contrib.data.python.ops.grouping import bucket_by_sequence_length
from tensorflow.contrib.data.python.ops.grouping import group_by_reducer
from tensorflow.contrib.data.python.ops.grouping import group_by_window
from tensorflow.contrib.data.python.ops.grouping import Reducer
from tensorflow.contrib.data.python.ops.interleave_ops import choose_from_datasets
from tensorflow.contrib.data.python.ops.interleave_ops import parallel_interleave
from tensorflow.contrib.data.python.ops.interleave_ops import sample_from_datasets
from tensorflow.contrib.data.python.ops.interleave_ops import sloppy_interleave
from tensorflow.contrib.data.python.ops.iterator_ops import CheckpointInputPipelineHook
from tensorflow.contrib.data.python.ops.iterator_ops import make_saveable_from_iterator
from tensorflow.contrib.data.python.ops.parsing_ops import parse_example_dataset
from tensorflow.contrib.data.python.ops.prefetching_ops import copy_to_device
from tensorflow.contrib.data.python.ops.prefetching_ops import prefetch_to_device
from tensorflow.contrib.data.python.ops.random_ops import RandomDataset
from tensorflow.contrib.data.python.ops.readers import CsvDataset
from tensorflow.contrib.data.python.ops.readers import LMDBDataset
from tensorflow.contrib.data.python.ops.readers import make_batched_features_dataset
from tensorflow.contrib.data.python.ops.readers import make_csv_dataset
from tensorflow.contrib.data.python.ops.readers import read_batch_features
from tensorflow.contrib.data.python.ops.readers import SqlDataset
from tensorflow.contrib.data.python.ops.resampling import rejection_resample
from tensorflow.contrib.data.python.ops.scan_ops import scan
from tensorflow.contrib.data.python.ops.shuffle_ops import shuffle_and_repeat
from tensorflow.contrib.data.python.ops.sliding import sliding_window_batch
from tensorflow.contrib.data.python.ops.unique import unique
from tensorflow.contrib.data.python.ops.writers import TFRecordWriter
from tensorflow.python.data.ops.dataset_ops import AUTOTUNE
from tensorflow.python.data.ops.iterator_ops import get_next_as_optional
from tensorflow.python.data.ops.optional_ops import Optional
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
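# A minimal usage sketch; the helper below is hypothetical and not part of
# the public API. Transformations exported above are applied to a core
# `tf.data.Dataset` via `Dataset.apply()`; the map function and the sizes are
# illustrative.
def _example_pipeline_sketch():
  """Builds a small pipeline from two of the transformations exported here."""
  from tensorflow.python.data.ops import dataset_ops  # Core Dataset API.
  dataset = dataset_ops.Dataset.range(100)
  dataset = dataset.apply(shuffle_and_repeat(buffer_size=10, count=2))
  dataset = dataset.apply(map_and_batch(lambda x: x * 2, batch_size=8))
  return dataset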
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LMDBDatasetOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import sys
from tensorflow.contrib.data.python.ops import readers
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.util import compat
prefix_path = "tensorflow/core/lib"
@test_util.run_v1_only("deprecated API, no eager or V2 test coverage")
class LMDBDatasetTest(test_base.DatasetTestBase):
def setUp(self):
super(LMDBDatasetTest, self).setUp()
# Copy database out because we need the path to be writable to use locks.
# The on-disk format of an LMDB database is different on big-endian
# machines, because LMDB is a memory-mapped database.
db_file = "data.mdb" if sys.byteorder == "little" else "data_bigendian.mdb"
path = os.path.join(prefix_path, "lmdb", "testdata", db_file)
self.db_path = os.path.join(self.get_temp_dir(), "data.mdb")
shutil.copy(path, self.db_path)
def testReadFromFile(self):
filename = self.db_path
filenames = constant_op.constant([filename], dtypes.string)
num_repeats = 2
dataset = readers.LMDBDataset(filenames).repeat(num_repeats)
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(num_repeats): # Dataset is repeated.
for i in range(10): # 10 records.
k = compat.as_bytes(str(i))
v = compat.as_bytes(str(chr(ord("a") + i)))
self.assertEqual((k, v), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/python/kernel_tests/lmdb_dataset_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.data.python.ops import batching
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import test
@test_util.run_v1_only("deprecated API, no eager or V2 test coverage")
class AssertElementShapeTest(test_base.DatasetTestBase):
def test_assert_element_shape(self):
def create_dataset(_):
return (array_ops.ones(2, dtype=dtypes.float32),
array_ops.zeros((3, 4), dtype=dtypes.int32))
dataset = dataset_ops.Dataset.range(5).map(create_dataset)
expected_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 4)))
self.assertEqual(expected_shapes,
dataset_ops.get_legacy_output_shapes(dataset))
result = dataset.apply(batching.assert_element_shape(expected_shapes))
self.assertEqual(expected_shapes,
dataset_ops.get_legacy_output_shapes(result))
iterator = dataset_ops.make_initializable_iterator(result)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(5):
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def test_assert_wrong_element_shape(self):
def create_dataset(_):
return (array_ops.ones(2, dtype=dtypes.float32),
array_ops.zeros((3, 4), dtype=dtypes.int32))
dataset = dataset_ops.Dataset.range(3).map(create_dataset)
wrong_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 10)))
with self.assertRaises(ValueError):
dataset.apply(batching.assert_element_shape(wrong_shapes))
def test_assert_element_shape_on_unknown_shape_dataset(self):
def create_unknown_shape_dataset(x):
return script_ops.py_func(
lambda _: ( # pylint: disable=g-long-lambda
np.ones(2, dtype=np.float32),
np.zeros((3, 4), dtype=np.int32)),
[x],
[dtypes.float32, dtypes.int32])
dataset = dataset_ops.Dataset.range(5).map(create_unknown_shape_dataset)
unknown_shapes = (tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None))
self.assertEqual(unknown_shapes,
dataset_ops.get_legacy_output_shapes(dataset))
expected_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 4)))
result = dataset.apply(batching.assert_element_shape(expected_shapes))
self.assertEqual(expected_shapes,
dataset_ops.get_legacy_output_shapes(result))
iterator = dataset_ops.make_initializable_iterator(result)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(5):
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def test_assert_wrong_element_shape_on_unknown_shape_dataset(self):
def create_unknown_shape_dataset(x):
return script_ops.py_func(
lambda _: ( # pylint: disable=g-long-lambda
np.ones(2, dtype=np.float32),
np.zeros((3, 4), dtype=np.int32)),
[x],
[dtypes.float32, dtypes.int32])
dataset = dataset_ops.Dataset.range(3).map(create_unknown_shape_dataset)
unknown_shapes = (tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None))
self.assertEqual(unknown_shapes,
dataset_ops.get_legacy_output_shapes(dataset))
wrong_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 10)))
iterator = dataset_ops.make_initializable_iterator(
dataset.apply(batching.assert_element_shape(wrong_shapes)))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
def test_assert_partial_element_shape(self):
def create_dataset(_):
return (array_ops.ones(2, dtype=dtypes.float32),
array_ops.zeros((3, 4), dtype=dtypes.int32))
dataset = dataset_ops.Dataset.range(5).map(create_dataset)
partial_expected_shape = (
tensor_shape.TensorShape(None), # Unknown shape
tensor_shape.TensorShape((None, 4))) # Partial shape
result = dataset.apply(
batching.assert_element_shape(partial_expected_shape))
# Partial shapes are merged with actual shapes:
actual_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 4)))
self.assertEqual(actual_shapes,
dataset_ops.get_legacy_output_shapes(result))
iterator = dataset_ops.make_initializable_iterator(result)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(5):
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def test_assert_wrong_partial_element_shape(self):
def create_dataset(_):
return (array_ops.ones(2, dtype=dtypes.float32),
array_ops.zeros((3, 4), dtype=dtypes.int32))
dataset = dataset_ops.Dataset.range(3).map(create_dataset)
wrong_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((None, 10)))
with self.assertRaises(ValueError):
dataset.apply(batching.assert_element_shape(wrong_shapes))
def test_assert_partial_element_shape_on_unknown_shape_dataset(self):
def create_unknown_shape_dataset(x):
return script_ops.py_func(
lambda _: ( # pylint: disable=g-long-lambda
np.ones(2, dtype=np.float32),
np.zeros((3, 4), dtype=np.int32)),
[x],
[dtypes.float32, dtypes.int32])
dataset = dataset_ops.Dataset.range(5).map(create_unknown_shape_dataset)
unknown_shapes = (tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None))
self.assertEqual(unknown_shapes,
dataset_ops.get_legacy_output_shapes(dataset))
expected_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((None, 4)))
result = dataset.apply(batching.assert_element_shape(expected_shapes))
self.assertEqual(expected_shapes,
dataset_ops.get_legacy_output_shapes(result))
iterator = dataset_ops.make_initializable_iterator(result)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(5):
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def test_assert_wrong_partial_element_shape_on_unknown_shape_dataset(self):
def create_unknown_shape_dataset(x):
return script_ops.py_func(
lambda _: ( # pylint: disable=g-long-lambda
np.ones(2, dtype=np.float32),
np.zeros((3, 4), dtype=np.int32)),
[x],
[dtypes.float32, dtypes.int32])
dataset = dataset_ops.Dataset.range(3).map(create_unknown_shape_dataset)
unknown_shapes = (tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None))
self.assertEqual(unknown_shapes,
dataset_ops.get_legacy_output_shapes(dataset))
wrong_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((None, 10)))
iterator = dataset_ops.make_initializable_iterator(
dataset.apply(batching.assert_element_shape(wrong_shapes)))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/python/kernel_tests/assert_element_shape_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.data.python.ops import get_single_element
from tensorflow.contrib.data.python.ops import grouping
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ReduceDatasetTest(test_base.DatasetTestBase, parameterized.TestCase):
@parameterized.named_parameters(
("SumZero", 0),
("SumOne", 1),
("SumFive", 5),
("SumTen", 10),
)
def testReduceDataset(self, stop):
def init_fn(_):
return np.int64(0)
def reduce_fn(state, value):
return state + value
def finalize_fn(state):
return state
sum_reducer = grouping.Reducer(init_fn, reduce_fn, finalize_fn)
dataset = dataset_ops.Dataset.range(stop)
element = get_single_element.reduce_dataset(dataset, sum_reducer)
self.assertEqual(stop * (stop - 1) / 2, self.evaluate(element))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/python/kernel_tests/reduce_dataset_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.data.python.ops import sliding
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
@test_util.run_v1_only("deprecated API, no eager or V2 test coverage")
class SlideDatasetTest(test_base.DatasetTestBase, parameterized.TestCase):
@parameterized.named_parameters(
("1", 20, 14, 7, 1),
("2", 20, 17, 9, 1),
("3", 20, 14, 14, 1),
("4", 20, 10, 14, 1),
("5", 20, 14, 19, 1),
("6", 20, 4, 1, 2),
("7", 20, 2, 1, 6),
("8", 20, 4, 7, 2),
("9", 20, 2, 7, 6),
("10", 1, 10, 4, 1),
("11", 0, 10, 4, 1),
)
def testSlideDataset(self, count, window_size, window_shift, window_stride):
"""Tests a dataset that slides a window its input elements."""
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count_t = array_ops.placeholder(dtypes.int64, shape=[])
window_size_t = array_ops.placeholder(dtypes.int64, shape=[])
window_shift_t = array_ops.placeholder(dtypes.int64, shape=[])
window_stride_t = array_ops.placeholder(dtypes.int64, shape=[])
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(count) ->
# _SlideDataset(window_size, window_shift, window_stride).
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(count).apply(
sliding.sliding_window_batch(
window_size=window_size_t,
window_shift=window_shift_t,
window_stride=window_stride_t)))
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([[None] + list(c.shape[1:]) for c in components],
[t.shape.as_list() for t in get_next])
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
count_t: count,
window_size_t: window_size,
window_shift_t: window_shift,
window_stride_t: window_stride
})
num_batches = (count * 7 - (
(window_size - 1) * window_stride + 1)) // window_shift + 1
for i in range(num_batches):
result = sess.run(get_next)
for component, result_component in zip(components, result):
for j in range(window_size):
self.assertAllEqual(
component[(i * window_shift + j * window_stride) % 7]**2,
result_component[j])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@parameterized.named_parameters(
("1", 20, 14, 7, 1),
("2", 20, 17, 9, 1),
("3", 20, 14, 14, 1),
("4", 20, 10, 14, 1),
("5", 20, 14, 19, 1),
("6", 20, 4, 1, 2),
("7", 20, 2, 1, 6),
("8", 20, 4, 7, 2),
("9", 20, 2, 7, 6),
("10", 1, 10, 4, 1),
("11", 0, 10, 4, 1),
)
def testSlideDatasetDeprecated(self, count, window_size, stride,
window_stride):
"""Tests a dataset that slides a window its input elements."""
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count_t = array_ops.placeholder(dtypes.int64, shape=[])
window_size_t = array_ops.placeholder(dtypes.int64, shape=[])
stride_t = array_ops.placeholder(dtypes.int64, shape=[])
window_stride_t = array_ops.placeholder(dtypes.int64, shape=[])
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(count) -> _SlideDataset(window_size, stride, window_stride).
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(count).apply(
sliding.sliding_window_batch(
window_size=window_size_t,
stride=stride_t,
window_stride=window_stride_t)))
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([[None] + list(c.shape[1:]) for c in components],
[t.shape.as_list() for t in get_next])
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
count_t: count,
window_size_t: window_size,
stride_t: stride,
window_stride_t: window_stride
})
num_batches = (count * 7 - (
(window_size - 1) * window_stride + 1)) // stride + 1
for i in range(num_batches):
result = sess.run(get_next)
for component, result_component in zip(components, result):
for j in range(window_size):
self.assertAllEqual(
component[(i * stride + j * window_stride) % 7]**2,
result_component[j])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@parameterized.named_parameters(
("1", 14, 0, 3, 1),
("2", 14, 3, 0, 1),
("3", 14, 3, 3, 0),
)
def testSlideDatasetInvalid(self, count, window_size, window_shift,
window_stride):
count_t = array_ops.placeholder(dtypes.int64, shape=[])
window_size_t = array_ops.placeholder(dtypes.int64, shape=[])
window_shift_t = array_ops.placeholder(dtypes.int64, shape=[])
window_stride_t = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10).map(lambda x: x).repeat(count_t).apply(
sliding.sliding_window_batch(
window_size=window_size_t,
window_shift=window_shift_t,
window_stride=window_stride_t)))
init_op = iterator.initializer
with self.cached_session() as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(
init_op,
feed_dict={
count_t: count,
window_size_t: window_size,
window_shift_t: window_shift,
window_stride_t: window_stride
})
def testSlideDatasetValueError(self):
with self.assertRaises(ValueError):
dataset_ops.Dataset.range(10).map(lambda x: x).apply(
sliding.sliding_window_batch(
window_size=1, stride=1, window_shift=1, window_stride=1))
def testSlideSparse(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=[[0]], values=(i * [1]), dense_shape=[1])
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10).map(_sparse).apply(
sliding.sliding_window_batch(window_size=5, window_shift=3)))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
num_batches = (10 - 5) // 3 + 1
for i in range(num_batches):
actual = sess.run(get_next)
expected = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]],
values=[i * 3, i * 3 + 1, i * 3 + 2, i * 3 + 3, i * 3 + 4],
dense_shape=[5, 1])
self.assertTrue(sparse_tensor.is_sparse(actual))
self.assertValuesEqual(actual, expected)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testSlideSparseWithDifferentDenseShapes(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=array_ops.expand_dims(
math_ops.range(i, dtype=dtypes.int64), 1),
values=array_ops.fill([math_ops.cast(i, dtypes.int32)], i),
dense_shape=[i])
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10).map(_sparse).apply(
sliding.sliding_window_batch(window_size=5, window_shift=3)))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
num_batches = (10 - 5) // 3 + 1
for i in range(num_batches):
actual = sess.run(get_next)
expected_indices = []
expected_values = []
for j in range(5):
for k in range(i * 3 + j):
expected_indices.append([j, k])
expected_values.append(i * 3 + j)
expected = sparse_tensor.SparseTensorValue(
indices=expected_indices,
values=expected_values,
dense_shape=[5, i * 3 + 5 - 1])
self.assertTrue(sparse_tensor.is_sparse(actual))
self.assertValuesEqual(actual, expected)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testNestedSlideSparse(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=[[0]], values=(i * [1]), dense_shape=[1])
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10).map(_sparse).apply(
sliding.sliding_window_batch(window_size=4, window_shift=2)).apply(
sliding.sliding_window_batch(window_size=3, window_shift=1)))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
# Slide: 1st batch.
actual = sess.run(get_next)
expected = sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
[1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
[2, 2, 0], [2, 3, 0]],
values=[0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7],
dense_shape=[3, 4, 1])
self.assertTrue(sparse_tensor.is_sparse(actual))
self.assertValuesEqual(actual, expected)
# Slide: 2nd batch.
actual = sess.run(get_next)
expected = sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
[1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
[2, 2, 0], [2, 3, 0]],
values=[2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9],
dense_shape=[3, 4, 1])
self.assertTrue(sparse_tensor.is_sparse(actual))
self.assertValuesEqual(actual, expected)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testSlideShapeError(self):
def generator():
yield [1.0, 2.0, 3.0]
yield [4.0, 5.0, 6.0]
yield [7.0, 8.0, 9.0, 10.0]
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_generator(
generator, dtypes.float32, output_shapes=[None]).apply(
sliding.sliding_window_batch(window_size=3, window_shift=1)))
next_element = iterator.get_next()
with self.cached_session() as sess:
sess.run(iterator.initializer)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r"Cannot batch tensors with different shapes in component 0. "
r"First element had shape \[3\] and element 2 had shape \[4\]."):
sess.run(next_element)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/python/kernel_tests/slide_dataset_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the private `_RestructuredDataset` transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.data.python.ops import batching
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
# TODO(b/117581999): Add eager specific test.
class RestructuredDatasetTest(test_base.DatasetTestBase):
@test_util.run_deprecated_v1
def testRestructureDataset(self):
components = (array_ops.placeholder(dtypes.int32),
(array_ops.placeholder(dtypes.int32, shape=[None]),
array_ops.placeholder(dtypes.int32, shape=[20, 30])))
dataset = dataset_ops.Dataset.from_tensors(components)
i32 = dtypes.int32
test_cases = [((i32, i32, i32), None),
(((i32, i32), i32), None),
((i32, i32, i32), (None, None, None)),
((i32, i32, i32), ([17], [17], [20, 30]))]
for new_types, new_shape_lists in test_cases:
# pylint: disable=protected-access
new = batching._RestructuredDataset(dataset, new_types, new_shape_lists)
# pylint: enable=protected-access
self.assertEqual(new_types, dataset_ops.get_legacy_output_types(new))
if new_shape_lists is not None:
for expected_shape_list, shape in zip(
nest.flatten(new_shape_lists),
nest.flatten(dataset_ops.get_legacy_output_shapes(new))):
if expected_shape_list is None:
self.assertIs(None, shape.ndims)
else:
self.assertEqual(expected_shape_list, shape.as_list())
fail_cases = [((i32, dtypes.int64, i32), None),
((i32, i32, i32, i32), None),
((i32, i32, i32), ((None, None), None)),
((i32, i32, i32), (None, None, None, None)),
((i32, i32, i32), (None, [None], [21, 30]))]
for new_types, new_shape_lists in fail_cases:
with self.assertRaises(ValueError):
# pylint: disable=protected-access
new = batching._RestructuredDataset(dataset, new_types, new_shape_lists)
# pylint: enable=protected-access
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/python/kernel_tests/restructured_dataset_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sliding dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compat import compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.util import deprecation
class _SlideDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that passes a sliding window over its input."""
def __init__(self, input_dataset, window_size, window_shift, window_stride):
"""See `sliding_window_batch` for details."""
self._input_dataset = input_dataset
    self._window_size = ops.convert_to_tensor(
        window_size, dtype=dtypes.int64, name="window_size")
self._window_stride = ops.convert_to_tensor(
window_stride, dtype=dtypes.int64, name="window_stride")
self._window_shift = ops.convert_to_tensor(
window_shift, dtype=dtypes.int64, name="window_shift")
input_structure = dataset_ops.get_structure(input_dataset)
self._element_spec = nest.map_structure(
lambda component_spec: component_spec._batch(None), input_structure) # pylint: disable=protected-access
if compat.forward_compatible(2019, 8, 3):
variant_tensor = ged_ops.sliding_window_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
window_size=self._window_size,
window_shift=self._window_shift,
window_stride=self._window_stride,
**self._flat_structure)
else:
variant_tensor = ged_ops.experimental_sliding_window_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
window_size=self._window_size,
window_shift=self._window_shift,
window_stride=self._window_stride,
**self._flat_structure)
super(_SlideDataset, self).__init__(input_dataset, variant_tensor)
@property
def element_spec(self):
return self._element_spec
@deprecation.deprecated_args(
None, "stride is deprecated, use window_shift instead", "stride")
@deprecation.deprecated(
None, "Use `tf.data.Dataset.window(size=window_size, shift=window_shift, "
"stride=window_stride).flat_map(lambda x: x.batch(window_size))` "
"instead.")
def sliding_window_batch(window_size,
stride=None,
window_shift=None,
window_stride=1):
"""A sliding window over a dataset.
This transformation passes a sliding window over this dataset. The window size
is `window_size`, the stride of the input elements is `window_stride`, and the
  shift between consecutive windows is `window_shift`. If the remaining
  elements cannot fill up the sliding window, this transformation will drop
  the final, smaller window. For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { [1], [2], [3], [4], [5], [6] }
a.apply(sliding_window_batch(window_size=3)) ==
{ [[1], [2], [3]], [[2], [3], [4]], [[3], [4], [5]], [[4], [5], [6]] }
a.apply(sliding_window_batch(window_size=3, window_shift=2)) ==
{ [[1], [2], [3]], [[3], [4], [5]] }
a.apply(sliding_window_batch(window_size=3, window_stride=2)) ==
{ [[1], [3], [5]], [[2], [4], [6]] }
```
Args:
window_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements in the sliding window. It must be positive.
stride: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
forward shift of the sliding window in each iteration. The default is `1`.
It must be positive. Deprecated alias for `window_shift`.
window_shift: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
forward shift of the sliding window in each iteration. The default is `1`.
It must be positive.
window_stride: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
stride of the input elements in the sliding window. The default is `1`.
It must be positive.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: if invalid arguments are provided.
"""
if stride is None and window_shift is None:
window_shift = 1
elif stride is not None and window_shift is None:
window_shift = stride
elif stride is not None and window_shift is not None:
raise ValueError("Cannot specify both `stride` and `window_shift`")
def _apply_fn(dataset):
return _SlideDataset(dataset, window_size, window_shift, window_stride)
return _apply_fn
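# A minimal sketch of the replacement suggested by the deprecation message
# above; `dataset` is any single-component `tf.data.Dataset` and the helper
# and argument names are illustrative. It mirrors the deprecation text rather
# than adding new behaviour.
def _window_replacement_sketch(dataset, window_size, window_shift=1,
                               window_stride=1):
  """Builds the core-API pipeline named in the deprecation message."""
  return dataset.window(
      size=window_size, shift=window_shift,
      stride=window_stride).flat_map(lambda x: x.batch(window_size))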
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/python/ops/sliding.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Batching dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework import with_shape
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import deprecation
@deprecation.deprecated(
None, "Use `tf.data.experimental.dense_to_sparse_batch(...)`.")
def dense_to_sparse_batch(batch_size, row_shape):
"""A transformation that batches ragged elements into `tf.SparseTensor`s.
Like `Dataset.padded_batch()`, this transformation combines multiple
consecutive elements of the dataset, which might have different
shapes, into a single element. The resulting element has three
components (`indices`, `values`, and `dense_shape`), which
comprise a `tf.SparseTensor` that represents the same data. The
`row_shape` represents the dense shape of each row in the
resulting `tf.SparseTensor`, to which the effective batch size is
prepended. For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] }
a.apply(tf.data.experimental.dense_to_sparse_batch(batch_size=2,
row_shape=[6])) ==
{
([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1]], # indices
['a', 'b', 'c', 'a', 'b'], # values
[2, 6]), # dense_shape
([[0, 0], [0, 1], [0, 2], [0, 3]],
['a', 'b', 'c', 'd'],
[1, 6])
}
```
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
row_shape: A `tf.TensorShape` or `tf.int64` vector tensor-like object
representing the equivalent dense shape of a row in the resulting
`tf.SparseTensor`. Each element of this dataset must have the same rank as
`row_shape`, and must have size less than or equal to `row_shape` in each
dimension.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return batching.dense_to_sparse_batch(batch_size, row_shape)
@deprecation.deprecated(None, "Use `tf.data.experimental.unbatch()`.")
def unbatch():
"""Splits elements of a dataset into multiple elements on the batch dimension.
For example, if elements of the dataset are shaped `[B, a0, a1, ...]`,
where `B` may vary for each input element, then for each element in the
dataset, the unbatched dataset will contain `B` consecutive elements
of shape `[a0, a1, ...]`.
```python
# NOTE: The following example uses `{ ... }` to represent the contents
# of a dataset.
a = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] }
a.apply(tf.data.experimental.unbatch()) == {
'a', 'b', 'c', 'a', 'b', 'a', 'b', 'c', 'd'}
```
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return batching.unbatch()
@deprecation.deprecated(
None, "Use `tf.data.Dataset.batch(..., drop_remainder=True)`.")
def batch_and_drop_remainder(batch_size):
"""A batching transformation that omits the final small batch (if present).
Like `tf.data.Dataset.batch`, this transformation combines
consecutive elements of this dataset into batches. However, if the batch
size does not evenly divide the input dataset size, this transformation will
  drop the final, smaller batch.
The following example illustrates the difference between this
transformation and `Dataset.batch()`:
```python
dataset = tf.data.Dataset.range(200)
batched =
dataset.apply(tf.contrib.data.batch_and_drop_remainder(128))
print(batched.output_shapes) # ==> "(128,)" (the batch dimension is known)
```
By contrast, `dataset.batch(128)` would yield a two-element dataset with
shapes `(128,)` and `(72,)`, so the batch dimension would not be statically
known.
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
return dataset.batch(batch_size, drop_remainder=True)
return _apply_fn
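# A minimal sketch of the direct core-API equivalent described in the
# docstring above; the helper is hypothetical and `dataset` is any
# `tf.data.Dataset`.
def _batch_and_drop_remainder_sketch(dataset, batch_size=128):
  """Returns `dataset` batched with the final partial batch dropped."""
  return dataset.batch(batch_size, drop_remainder=True)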
@deprecation.deprecated(
None, "Use `tf.data.Dataset.padded_batch(..., drop_remainder=True)`.")
def padded_batch_and_drop_remainder(batch_size,
padded_shapes,
padding_values=None):
"""A batching and padding transformation that omits the final small batch.
Like `tf.data.Dataset.padded_batch`, this transformation combines
consecutive elements of this dataset into batches. However, if the batch
size does not evenly divide the input dataset size, this transformation will
  drop the final, smaller batch.
See `tf.contrib.data.batch_and_drop_remainder` for more details.
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
padded_shapes: A nested structure of `tf.TensorShape` or `tf.int64` vector
tensor-like objects. See `tf.data.Dataset.padded_batch` for details.
padding_values: (Optional.) A nested structure of scalar-shaped `tf.Tensor`.
See `tf.data.Dataset.padded_batch` for details.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
return dataset.padded_batch(
batch_size, padded_shapes=padded_shapes, padding_values=padding_values,
drop_remainder=True)
return _apply_fn
# TODO(b/116817045): Move this to `tf.data.experimental` when the `with_shape()`
# function is available in the core.
def assert_element_shape(expected_shapes):
"""Assert the shape of this `Dataset`.
```python
shapes = [tf.TensorShape([16, 256]), tf.TensorShape([None, 2])]
result = dataset.apply(tf.data.experimental.assert_element_shape(shapes))
print(result.output_shapes) # ==> "((16, 256), (<unknown>, 2))"
```
  If the dataset shapes and `expected_shapes` are fully defined, assert that
  they match. Otherwise, add an assert op that validates the shapes when the
  tensors are evaluated, and set the shapes on the tensors, respectively.
Note that unknown dimension in `expected_shapes` will be ignored.
Args:
expected_shapes: A nested structure of `tf.TensorShape` objects.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`
"""
def _merge_output_shapes(original_shapes, expected_shapes):
flat_original_shapes = nest.flatten(original_shapes)
flat_new_shapes = nest.flatten_up_to(original_shapes, expected_shapes)
flat_merged_output_shapes = [
original_shape.merge_with(new_shape)
for original_shape, new_shape in zip(flat_original_shapes,
flat_new_shapes)]
return nest.pack_sequence_as(original_shapes, flat_merged_output_shapes)
def _check_shape(*elements):
flatten_tensors = nest.flatten(elements)
flatten_shapes = nest.flatten(expected_shapes)
checked_tensors = [
with_shape(shape, tensor) if shape else tensor # Ignore unknown shape
for shape, tensor in zip(flatten_shapes, flatten_tensors)
]
return nest.pack_sequence_as(elements, checked_tensors)
def _apply_fn(dataset):
output_shapes = _merge_output_shapes(
dataset_ops.get_legacy_output_shapes(dataset), expected_shapes)
# pylint: disable=protected-access
return _RestructuredDataset(
dataset.map(_check_shape),
dataset_ops.get_legacy_output_types(dataset),
output_shapes=output_shapes,
output_classes=dataset_ops.get_legacy_output_classes(dataset))
return _apply_fn
@deprecation.deprecated(None, "Use `tf.data.experimental.map_and_batch(...)`.")
def map_and_batch(map_func,
batch_size,
num_parallel_batches=None,
drop_remainder=False,
num_parallel_calls=None):
"""Fused implementation of `map` and `batch`.
Maps `map_func` across `batch_size` consecutive elements of this dataset
and then combines them into a batch. Functionally, it is equivalent to `map`
followed by `batch`. However, by fusing the two transformations together, the
implementation can be more efficient. Surfacing this transformation in the API
is temporary. Once automatic input pipeline optimization is implemented,
the fusing of `map` and `batch` will happen automatically and this API will be
deprecated.
Args:
map_func: A function mapping a nested structure of tensors to another nested
structure of tensors.
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
num_parallel_batches: (Optional.) A `tf.int64` scalar `tf.Tensor`,
representing the number of batches to create in parallel. On one hand,
higher values can help mitigate the effect of stragglers. On the other
hand, higher values can increase contention if CPU is scarce.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in case its size is smaller than
desired; the default behavior is not to drop the smaller batch.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number of elements to process in parallel. If not
specified, `batch_size * num_parallel_batches` elements will be processed
in parallel.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: If both `num_parallel_batches` and `num_parallel_calls` are
specified.
"""
return batching.map_and_batch(map_func, batch_size, num_parallel_batches,
drop_remainder, num_parallel_calls)
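# A minimal sketch contrasting the fused transformation above with the
# unfused `map` followed by `batch` it is functionally equivalent to; the
# helper and the map function are illustrative only.
def _map_and_batch_sketch(dataset, batch_size=32):
  """Builds the fused and the unfused pipelines described in the docstring."""
  def _double(x):  # Illustrative map function.
    return x * 2
  fused = dataset.apply(map_and_batch(_double, batch_size))
  unfused = dataset.map(_double).batch(batch_size)
  return fused, unfused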
class _RestructuredDataset(dataset_ops.UnaryDataset):
"""An internal helper for changing the structure and shape of a dataset."""
def __init__(self,
dataset,
output_types,
output_shapes=None,
output_classes=None):
"""Creates a new dataset with the given output types and shapes.
The given `dataset` must have a structure that is convertible:
    * `dataset.output_types` must be the same as `output_types` modulo nesting.
* Each shape in `dataset.output_shapes` must be compatible with each shape
in `output_shapes` (if given).
Note: This helper permits "unsafe casts" for shapes, equivalent to using
`tf.Tensor.set_shape()` where domain-specific knowledge is available.
Args:
dataset: A `Dataset` object.
output_types: A nested structure of `tf.DType` objects.
output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects.
If omitted, the shapes will be inherited from `dataset`.
output_classes: (Optional.) A nested structure of class types. If omitted,
the class types will be inherited from `dataset`.
Raises:
ValueError: If either `output_types` or `output_shapes` is not compatible
with the structure of `dataset`.
"""
self._input_dataset = dataset
input_types = dataset_ops.get_legacy_output_types(dataset)
# Validate that the types are compatible.
output_types = nest.map_structure(dtypes.as_dtype, output_types)
flat_original_types = nest.flatten(input_types)
flat_new_types = nest.flatten(output_types)
if flat_original_types != flat_new_types:
raise ValueError(
"Dataset with output types %r cannot be restructured to have "
"output types %r" %
(dataset_ops.get_legacy_output_types(dataset), output_types))
input_shapes = dataset_ops.get_legacy_output_shapes(dataset)
if output_shapes is None:
# Inherit shapes from the original `dataset`.
output_shapes = nest.pack_sequence_as(
output_types, nest.flatten(input_shapes))
else:
# Validate that the shapes are compatible.
nest.assert_same_structure(output_types, output_shapes)
flat_original_shapes = nest.flatten(input_shapes)
flat_new_shapes = nest.flatten_up_to(output_types, output_shapes)
for original_shape, new_shape in zip(flat_original_shapes,
flat_new_shapes):
if not original_shape.is_compatible_with(new_shape):
raise ValueError(
"Dataset with output shapes %r cannot be restructured to have "
"incompatible output shapes %r" % (input_shapes,
output_shapes))
output_shapes = nest.map_structure_up_to(
output_types, tensor_shape.as_shape, output_shapes)
input_classes = dataset_ops.get_legacy_output_classes(dataset)
if output_classes is None:
# Inherit class types from the original `dataset`.
output_classes = nest.pack_sequence_as(
output_types, nest.flatten(input_classes))
self._element_spec = structure.convert_legacy_structure(
output_types, output_shapes, output_classes)
variant_tensor = self._input_dataset._variant_tensor # pylint: disable=protected-access
super(_RestructuredDataset, self).__init__(dataset, variant_tensor)
@property
def element_spec(self):
return self._element_spec
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/python/ops/batching.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for reader Datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compat import compat
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.util import deprecation
@deprecation.deprecated(None,
"Use `tf.data.experimental.make_csv_dataset(...)`.")
def make_csv_dataset(
file_pattern,
batch_size,
column_names=None,
column_defaults=None,
label_name=None,
select_columns=None,
field_delim=",",
use_quote_delim=True,
na_value="",
header=True,
num_epochs=None,
shuffle=True,
shuffle_buffer_size=10000,
shuffle_seed=None,
prefetch_buffer_size=None,
num_parallel_reads=None,
sloppy=False,
num_rows_for_inference=100,
compression_type=None,
):
"""Reads CSV files into a dataset.
Reads CSV files into a dataset, where each element is a (features, labels)
tuple that corresponds to a batch of CSV rows. The features dictionary
maps feature column names to `Tensor`s containing the corresponding
feature data, and labels is a `Tensor` containing the batch's label data.
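  For example, a minimal usage sketch (the file pattern and the "label" column
  name are hypothetical, and the non-deprecated `tf.data.experimental` endpoint
  is used):
  ```python
  # "/path/to/data/*.csv" and label_name="label" are placeholder values.
  dataset = tf.data.experimental.make_csv_dataset(
      "/path/to/data/*.csv", batch_size=32, label_name="label")
  features, labels = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
  ```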
Args:
file_pattern: List of files or patterns of file paths containing CSV
records. See `tf.io.gfile.glob` for pattern rules.
batch_size: An int representing the number of records to combine
in a single batch.
column_names: An optional list of strings that corresponds to the CSV
columns, in order. One per column of the input record. If this is not
provided, infers the column names from the first row of the records.
These names will be the keys of the features dict of each dataset element.
    column_defaults: An optional list of default values for the CSV fields. One
item per selected column of the input record. Each item in the list is
either a valid CSV dtype (float32, float64, int32, int64, or string), or a
`Tensor` with one of the aforementioned types. The tensor can either be
a scalar default value (if the column is optional), or an empty tensor (if
the column is required). If a dtype is provided instead of a tensor, the
column is also treated as required. If this list is not provided, tries
to infer types based on reading the first num_rows_for_inference rows of
files specified, and assumes all columns are optional, defaulting to `0`
for numeric values and `""` for string values. If both this and
`select_columns` are specified, these must have the same lengths, and
`column_defaults` is assumed to be sorted in order of increasing column
index.
    label_name: An optional string corresponding to the label column. If
provided, the data for this column is returned as a separate `Tensor` from
the features dictionary, so that the dataset complies with the format
expected by a `tf.Estimator.train` or `tf.Estimator.evaluate` input
function.
select_columns: An optional list of integer indices or string column
names, that specifies a subset of columns of CSV data to select. If
column names are provided, these must correspond to names provided in
`column_names` or inferred from the file header lines. When this argument
is specified, only a subset of CSV columns will be parsed and returned,
corresponding to the columns specified. Using this results in faster
parsing and lower memory usage. If both this and `column_defaults` are
specified, these must have the same lengths, and `column_defaults` is
assumed to be sorted in order of increasing column index.
field_delim: An optional `string`. Defaults to `","`. Char delimiter to
separate fields in a record.
use_quote_delim: An optional bool. Defaults to `True`. If false, treats
double quotation marks as regular characters inside of the string fields.
na_value: Additional string to recognize as NA/NaN.
header: A bool that indicates whether the first rows of provided CSV files
correspond to header lines with column names, and should not be included
in the data.
num_epochs: An int specifying the number of times this dataset is repeated.
If None, cycles through the dataset forever.
shuffle: A bool that indicates whether the input should be shuffled.
shuffle_buffer_size: Buffer size to use for shuffling. A large buffer size
ensures better shuffling, but increases memory usage and startup time.
shuffle_seed: Randomization seed to use for shuffling.
prefetch_buffer_size: An int specifying the number of feature
batches to prefetch for performance improvement. Recommended value is the
number of batches consumed per training step. Defaults to auto-tune.
num_parallel_reads: Number of threads used to read CSV records from files.
If >1, the results will be interleaved. Defaults to `1`.
sloppy: If `True`, reading performance will be improved at
the cost of non-deterministic ordering. If `False`, the order of elements
produced is deterministic prior to shuffling (elements are still
randomized if `shuffle=True`. Note that if the seed is set, then order
of elements after shuffling is deterministic). Defaults to `False`.
    num_rows_for_inference: Number of rows of a file to use for type inference
      if `column_defaults` is not provided. If None, reads all the rows of all
      the files. Defaults to 100.
compression_type: (Optional.) A `tf.string` scalar evaluating to one of
`""` (no compression), `"ZLIB"`, or `"GZIP"`. Defaults to no compression.
Returns:
A dataset, where each element is a (features, labels) tuple that corresponds
to a batch of `batch_size` CSV rows. The features dictionary maps feature
column names to `Tensor`s containing the corresponding column data, and
labels is a `Tensor` containing the column data for the label column
specified by `label_name`.
Raises:
ValueError: If any of the arguments is malformed.
"""
return readers.make_csv_dataset(
file_pattern, batch_size, column_names, column_defaults, label_name,
select_columns, field_delim, use_quote_delim, na_value, header,
num_epochs, shuffle, shuffle_buffer_size, shuffle_seed,
prefetch_buffer_size, num_parallel_reads, sloppy, num_rows_for_inference,
compression_type)
class CsvDataset(readers.CsvDataset):
"""A Dataset comprising lines from one or more CSV files."""
@deprecation.deprecated(None, "Use `tf.data.experimental.CsvDataset(...)`.")
def __init__(self,
filenames,
record_defaults,
compression_type=None,
buffer_size=None,
header=False,
field_delim=",",
use_quote_delim=True,
na_value="",
select_cols=None):
super(CsvDataset, self).__init__(
filenames, record_defaults, compression_type, buffer_size, header,
field_delim, use_quote_delim, na_value, select_cols)
@deprecation.deprecated(
None, "Use `tf.data.experimental.make_batched_features_dataset(...)`.")
def make_batched_features_dataset(file_pattern,
batch_size,
features,
reader=core_readers.TFRecordDataset,
label_key=None,
reader_args=None,
num_epochs=None,
shuffle=True,
shuffle_buffer_size=10000,
shuffle_seed=None,
prefetch_buffer_size=None,
reader_num_threads=None,
parser_num_threads=None,
sloppy_ordering=False,
drop_final_batch=False):
"""Returns a `Dataset` of feature dictionaries from `Example` protos.
  If the `label_key` argument is provided, returns a `Dataset` of tuples
  comprising feature dictionaries and labels.
Example:
```
serialized_examples = [
features {
feature { key: "age" value { int64_list { value: [ 0 ] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
feature { key: "kws" value { bytes_list { value: [ "code", "art" ] } } }
},
features {
feature { key: "age" value { int64_list { value: [] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
feature { key: "kws" value { bytes_list { value: [ "sports" ] } } }
}
]
```
We can use arguments:
```
features: {
"age": FixedLenFeature([], dtype=tf.int64, default_value=-1),
"gender": FixedLenFeature([], dtype=tf.string),
"kws": VarLenFeature(dtype=tf.string),
}
```
And the expected output is:
```python
{
"age": [[0], [-1]],
"gender": [["f"], ["f"]],
"kws": SparseTensor(
indices=[[0, 0], [0, 1], [1, 0]],
        values=["code", "art", "sports"],
dense_shape=[2, 2]),
}
```
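  A call that produces such batches might look like the following minimal
  sketch (the file pattern is hypothetical, and the non-deprecated
  `tf.data.experimental` endpoint is used):
  ```python
  # "/path/to/data/*.tfrecord" is a placeholder file pattern.
  dataset = tf.data.experimental.make_batched_features_dataset(
      file_pattern="/path/to/data/*.tfrecord",
      batch_size=2,
      features=features,
      label_key="age")
  ```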
Args:
file_pattern: List of files or patterns of file paths containing
`Example` records. See `tf.io.gfile.glob` for pattern rules.
batch_size: An int representing the number of records to combine
in a single batch.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values. See `tf.io.parse_example`.
reader: A function or class that can be
called with a `filenames` tensor and (optional) `reader_args` and returns
a `Dataset` of `Example` tensors. Defaults to `tf.data.TFRecordDataset`.
    label_key: (Optional.) A string corresponding to the key under which labels
      are stored in the `tf.Example` protos. If provided, it must be one of the
      `features` keys; otherwise a `ValueError` is raised.
reader_args: Additional arguments to pass to the reader class.
num_epochs: Integer specifying the number of times to read through the
dataset. If None, cycles through the dataset forever. Defaults to `None`.
shuffle: A boolean, indicates whether the input should be shuffled. Defaults
to `True`.
shuffle_buffer_size: Buffer size of the ShuffleDataset. A large capacity
ensures better shuffling but would increase memory usage and startup time.
shuffle_seed: Randomization seed to use for shuffling.
prefetch_buffer_size: Number of feature batches to prefetch in order to
improve performance. Recommended value is the number of batches consumed
per training step. Defaults to auto-tune.
reader_num_threads: Number of threads used to read `Example` records. If >1,
the results will be interleaved. Defaults to `1`.
parser_num_threads: Number of threads to use for parsing `Example` tensors
into a dictionary of `Feature` tensors. Defaults to `2`.
sloppy_ordering: If `True`, reading performance will be improved at
the cost of non-deterministic ordering. If `False`, the order of elements
produced is deterministic prior to shuffling (elements are still
randomized if `shuffle=True`. Note that if the seed is set, then order
of elements after shuffling is deterministic). Defaults to `False`.
drop_final_batch: If `True`, and the batch size does not evenly divide the
input dataset size, the final smaller batch will be dropped. Defaults to
`False`.
Returns:
A dataset of `dict` elements, (or a tuple of `dict` elements and label).
Each `dict` maps feature keys to `Tensor` or `SparseTensor` objects.
Raises:
ValueError: If `label_key` is not one of the `features` keys.
"""
return readers.make_batched_features_dataset(
file_pattern, batch_size, features, reader, label_key, reader_args,
num_epochs, shuffle, shuffle_buffer_size, shuffle_seed,
prefetch_buffer_size, reader_num_threads, parser_num_threads,
sloppy_ordering, drop_final_batch)
@deprecation.deprecated(
None, "Use `tf.data.experimental.make_batched_features_dataset(...)`")
def read_batch_features(file_pattern,
batch_size,
features,
reader=core_readers.TFRecordDataset,
reader_args=None,
randomize_input=True,
num_epochs=None,
capacity=10000):
"""Reads batches of Examples.
Example:
```
serialized_examples = [
features {
feature { key: "age" value { int64_list { value: [ 0 ] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
feature { key: "kws" value { bytes_list { value: [ "code", "art" ] } } }
},
features {
feature { key: "age" value { int64_list { value: [] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
feature { key: "kws" value { bytes_list { value: [ "sports" ] } } }
}
]
```
We can use arguments:
```
features: {
"age": FixedLenFeature([], dtype=tf.int64, default_value=-1),
"gender": FixedLenFeature([], dtype=tf.string),
"kws": VarLenFeature(dtype=tf.string),
}
```
And the expected output is:
```python
{
"age": [[0], [-1]],
"gender": [["f"], ["f"]],
"kws": SparseTensor(
indices=[[0, 0], [0, 1], [1, 0]],
        values=["code", "art", "sports"],
dense_shape=[2, 2]),
}
```
Args:
file_pattern: List of files or patterns of file paths containing
`Example` records. See `tf.io.gfile.glob` for pattern rules.
batch_size: An int representing the number of records to combine
in a single batch.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values. See `tf.io.parse_example`.
reader: A function or class that can be
called with a `filenames` tensor and (optional) `reader_args` and returns
a `Dataset` of `Example` tensors. Defaults to `tf.data.TFRecordDataset`.
reader_args: Additional arguments to pass to the reader class.
randomize_input: Whether the input should be randomized.
num_epochs: Integer specifying the number of times to read through the
dataset. If None, cycles through the dataset forever.
capacity: Buffer size of the ShuffleDataset. A large capacity ensures better
shuffling but would increase memory usage and startup time.
Returns:
A dict from keys in features to `Tensor` or `SparseTensor` objects.
"""
dataset = readers.make_batched_features_dataset(
file_pattern,
batch_size,
features,
reader=reader,
reader_args=reader_args,
shuffle=randomize_input,
num_epochs=num_epochs,
shuffle_buffer_size=capacity)
iterator = dataset_ops.make_one_shot_iterator(dataset)
outputs = iterator.get_next()
return outputs
class SqlDataset(readers.SqlDataset):
"""A `Dataset` consisting of the results from a SQL query."""
@deprecation.deprecated(None, "Use `tf.data.experimental.SqlDataset(...)`.")
def __init__(self, driver_name, data_source_name, query, output_types):
super(SqlDataset, self).__init__(
driver_name, data_source_name, query, output_types)
class LMDBDataset(dataset_ops.DatasetSource):
"""A LMDB Dataset that reads the lmdb file."""
def __init__(self, filenames):
"""Create a `LMDBDataset`.
    `LMDBDataset` allows a user to read data from an mdb file as
    (key, value) pairs sequentially.
For example:
```python
tf.compat.v1.enable_eager_execution()
    dataset = tf.contrib.data.LMDBDataset("/foo/bar.mdb")
# Prints the (key, value) pairs inside a lmdb file.
for key, value in dataset:
print(key, value)
```
Args:
filenames: A `tf.string` tensor containing one or more filenames.
"""
self._filenames = ops.convert_to_tensor(
filenames, dtype=dtypes.string, name="filenames")
if compat.forward_compatible(2019, 8, 3):
variant_tensor = gen_experimental_dataset_ops.lmdb_dataset(
self._filenames, **self._flat_structure)
else:
variant_tensor = gen_experimental_dataset_ops.experimental_lmdb_dataset(
self._filenames, **self._flat_structure)
super(LMDBDataset, self).__init__(variant_tensor)
@property
def element_spec(self):
return (tensor_spec.TensorSpec([], dtypes.string),
tensor_spec.TensorSpec([], dtypes.string))
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/python/ops/readers.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resampling dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import resampling
from tensorflow.python.util import deprecation
@deprecation.deprecated(None,
"Use `tf.data.experimental.rejection_resample(...)`.")
def rejection_resample(class_func, target_dist, initial_dist=None, seed=None):
"""A transformation that resamples a dataset to achieve a target distribution.
**NOTE** Resampling is performed via rejection sampling; some fraction
of the input values will be dropped.
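  For example, a minimal sketch (it assumes `dataset` yields `(features, label)`
  pairs with a scalar `tf.int32` label, and uses the non-deprecated
  `tf.data.experimental` endpoint):
  ```python
  # Resample so that both classes appear with equal probability.
  dataset = dataset.apply(
      tf.data.experimental.rejection_resample(
          class_func=lambda _, label: label,
          target_dist=[0.5, 0.5]))
  ```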
Args:
class_func: A function mapping an element of the input dataset to a scalar
`tf.int32` tensor. Values should be in `[0, num_classes)`.
target_dist: A floating point type tensor, shaped `[num_classes]`.
initial_dist: (Optional.) A floating point type tensor, shaped
`[num_classes]`. If not provided, the true class distribution is
estimated live in a streaming fashion.
seed: (Optional.) Python integer seed for the resampler.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return resampling.rejection_resample(class_func, target_dist, initial_dist,
seed)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/python/ops/resampling.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for prefetching_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import prefetching_ops
from tensorflow.python.util import deprecation
@deprecation.deprecated(None,
"Use `tf.data.experimental.prefetch_to_device(...)`.")
def prefetch_to_device(device, buffer_size=None):
"""A transformation that prefetches dataset values to the given `device`.
NOTE: Although the transformation creates a `tf.data.Dataset`, the
transformation must be the final `Dataset` in the input pipeline.
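  For example, a minimal sketch that prefetches onto the first GPU (using the
  non-deprecated `tf.data.experimental` endpoint):
  ```python
  # Must be the last transformation in the pipeline.
  dataset = dataset.apply(
      tf.data.experimental.prefetch_to_device("/gpu:0"))
  ```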
Args:
device: A string. The name of a device to which elements will be prefetched.
buffer_size: (Optional.) The number of elements to buffer on `device`.
Defaults to an automatically chosen value.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return prefetching_ops.prefetch_to_device(device, buffer_size)
@deprecation.deprecated(None, "Use `tf.data.experimental.copy_to_device(...)`.")
def copy_to_device(target_device, source_device="/cpu:0"):
"""A transformation that copies dataset elements to the given `target_device`.
Args:
target_device: The name of a device to which elements will be copied.
source_device: The original device on which `input_dataset` will be placed.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return prefetching_ops.copy_to_device(target_device, source_device)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/python/ops/prefetching_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Grouping dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import grouping
from tensorflow.python.util import deprecation
@deprecation.deprecated(None,
"Use `tf.data.experimental.group_by_reducer(...)`.")
def group_by_reducer(key_func, reducer):
"""A transformation that groups elements and performs a reduction.
  This transformation maps each element of a dataset to a key using `key_func`
  and groups the elements by key. The `reducer` is used to process each group:
  its `init_func` initializes the state for a group when the group is created,
  its `reduce_func` updates the state every time an element is mapped to the
  matching group, and its `finalize_func` maps the final state to an output
  value.
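  For example, a minimal sketch that counts elements per parity class (it
  assumes `dataset` yields scalar `tf.int64` values, that `np` is NumPy, and
  uses the non-deprecated `tf.data.experimental` endpoint):
  ```python
  # The reducer counts how many elements map to each key.
  reducer = tf.data.experimental.Reducer(
      init_func=lambda _: np.int64(0),
      reduce_func=lambda state, _: state + 1,
      finalize_func=lambda state: state)
  dataset = dataset.apply(
      tf.data.experimental.group_by_reducer(
          key_func=lambda x: x % 2, reducer=reducer))
  ```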
Args:
key_func: A function mapping a nested structure of tensors
(having shapes and types defined by `self.output_shapes` and
`self.output_types`) to a scalar `tf.int64` tensor.
reducer: An instance of `Reducer`, which captures the reduction logic using
the `init_func`, `reduce_func`, and `finalize_func` functions.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return grouping.group_by_reducer(key_func, reducer)
@deprecation.deprecated(None,
"Use `tf.data.experimental.group_by_window(...)`.")
def group_by_window(key_func,
reduce_func,
window_size=None,
window_size_func=None):
"""A transformation that groups windows of elements by key and reduces them.
This transformation maps each consecutive element in a dataset to a key
using `key_func` and groups the elements by key. It then applies
`reduce_func` to at most `window_size_func(key)` elements matching the same
key. All except the final window for each key will contain
`window_size_func(key)` elements; the final window may be smaller.
You may provide either a constant `window_size` or a window size determined by
the key through `window_size_func`.
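  For example, a minimal sketch that batches elements by parity into windows of
  10 (it assumes `dataset` yields scalar `tf.int64` values and uses the
  non-deprecated `tf.data.experimental` endpoint):
  ```python
  # Elements with the same parity are collected into batches of 10.
  dataset = dataset.apply(
      tf.data.experimental.group_by_window(
          key_func=lambda x: x % 2,
          reduce_func=lambda key, window: window.batch(10),
          window_size=10))
  ```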
Args:
key_func: A function mapping a nested structure of tensors
(having shapes and types defined by `self.output_shapes` and
`self.output_types`) to a scalar `tf.int64` tensor.
reduce_func: A function mapping a key and a dataset of up to `window_size`
consecutive elements matching that key to another dataset.
window_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements matching the same key to combine in a single
batch, which will be passed to `reduce_func`. Mutually exclusive with
`window_size_func`.
window_size_func: A function mapping a key to a `tf.int64` scalar
`tf.Tensor`, representing the number of consecutive elements matching
the same key to combine in a single batch, which will be passed to
`reduce_func`. Mutually exclusive with `window_size`.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: if neither or both of {`window_size`, `window_size_func`} are
passed.
"""
return grouping.group_by_window(key_func, reduce_func, window_size,
window_size_func)
@deprecation.deprecated(
None, "Use `tf.data.experimental.bucket_by_sequence_length(...)`.")
def bucket_by_sequence_length(element_length_func,
bucket_boundaries,
bucket_batch_sizes,
padded_shapes=None,
padding_values=None,
pad_to_bucket_boundary=False,
no_padding=False):
"""A transformation that buckets elements in a `Dataset` by length.
Elements of the `Dataset` are grouped together by length and then are padded
and batched.
This is useful for sequence tasks in which the elements have variable length.
Grouping together elements that have similar lengths reduces the total
fraction of padding in a batch which increases training step efficiency.
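  For example, a minimal sketch (it assumes `dataset` yields variable-length
  1-D integer sequences and uses the non-deprecated `tf.data.experimental`
  endpoint):
  ```python
  # Sequences are bucketed at boundaries 10 and 20; each bucket batches 16.
  dataset = dataset.apply(
      tf.data.experimental.bucket_by_sequence_length(
          element_length_func=lambda seq: tf.shape(seq)[0],
          bucket_boundaries=[10, 20],
          bucket_batch_sizes=[16, 16, 16]))
  ```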
Args:
element_length_func: function from element in `Dataset` to `tf.int32`,
determines the length of the element, which will determine the bucket it
goes into.
bucket_boundaries: `list<int>`, upper length boundaries of the buckets.
bucket_batch_sizes: `list<int>`, batch size per bucket. Length should be
`len(bucket_boundaries) + 1`.
padded_shapes: Nested structure of `tf.TensorShape` to pass to
`tf.data.Dataset.padded_batch`. If not provided, will use
`dataset.output_shapes`, which will result in variable length dimensions
being padded out to the maximum length in each batch.
padding_values: Values to pad with, passed to
`tf.data.Dataset.padded_batch`. Defaults to padding with 0.
pad_to_bucket_boundary: bool, if `False`, will pad dimensions with unknown
size to maximum length in batch. If `True`, will pad dimensions with
unknown size to bucket boundary minus 1 (i.e., the maximum length in each
bucket), and caller must ensure that the source `Dataset` does not contain
any elements with length longer than `max(bucket_boundaries)`.
no_padding: `bool`, indicates whether to pad the batch features (features
need to be either of type `tf.SparseTensor` or of same shape).
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: if `len(bucket_batch_sizes) != len(bucket_boundaries) + 1`.
"""
return grouping.bucket_by_sequence_length(
element_length_func, bucket_boundaries, bucket_batch_sizes, padded_shapes,
padding_values, pad_to_bucket_boundary, no_padding)
class Reducer(grouping.Reducer):
"""A reducer is used for reducing a set of elements.
A reducer is represented as a tuple of the three functions:
1) initialization function: key => initial state
2) reduce function: (old state, input) => new state
3) finalization function: state => result
"""
@deprecation.deprecated(None, "Use `tf.data.experimental.Reducer(...)`.")
def __init__(self, init_func, reduce_func, finalize_func):
super(Reducer, self).__init__(init_func, reduce_func, finalize_func)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/python/ops/grouping.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for controlling threading in `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.data.experimental.ops.threadpool import override_threadpool
from tensorflow.python.data.experimental.ops.threadpool import PrivateThreadPool
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/python/ops/threadpool.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental `dataset` API for parsing example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import parsing_ops
from tensorflow.python.util import deprecation
@deprecation.deprecated(
None, "Use `tf.data.experimental.parse_example_dataset(...)`.")
def parse_example_dataset(features, num_parallel_calls=1):
"""A transformation that parses `Example` protos into a `dict` of tensors.
  This transformation parses each batch of serialized `Example` protos in the
  input dataset; each batch is treated as having `batch_size` many entries, each
  of which is an individual serialized `Example` proto.
This op parses serialized examples into a dictionary mapping keys to `Tensor`
and `SparseTensor` objects. `features` is a dict from keys to `VarLenFeature`,
`SparseFeature`, and `FixedLenFeature` objects. Each `VarLenFeature`
and `SparseFeature` is mapped to a `SparseTensor`, and each
`FixedLenFeature` is mapped to a `Tensor`. See `tf.io.parse_example` for more
details about feature dictionaries.
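  For example, a minimal sketch (it assumes `dataset` yields batches of
  serialized `Example` protos, uses a hypothetical "age" feature, and the
  non-deprecated `tf.data.experimental` endpoint):
  ```python
  # "age" is a placeholder feature key.
  features = {"age": tf.io.FixedLenFeature([], tf.int64, default_value=-1)}
  dataset = dataset.apply(
      tf.data.experimental.parse_example_dataset(features))
  ```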
Args:
features: A `dict` mapping feature keys to `FixedLenFeature`,
`VarLenFeature`, and `SparseFeature` values.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number of parsing processes to call in parallel.
Returns:
A dataset transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: if features argument is None.
"""
return parsing_ops.parse_example_dataset(features, num_parallel_calls)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/python/ops/parsing_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unique element dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import unique as experimental_unique
from tensorflow.python.util import deprecation
@deprecation.deprecated(None, "Use `tf.data.experimental.unique()`.")
def unique():
"""Creates a `Dataset` from another `Dataset`, discarding duplicates.
Use this transformation to produce a dataset that contains one instance of
each unique element in the input. For example:
```python
dataset = tf.data.Dataset.from_tensor_slices([1, 37, 2, 37, 2, 1])
# Using `unique()` will drop the duplicate elements.
dataset = dataset.apply(tf.data.experimental.unique()) # ==> { 1, 37, 2 }
```
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return experimental_unique.unique()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/python/ops/unique.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for Datasets and Iterators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.ops import get_single_element as experimental_get_single_element
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.util import deprecation
@deprecation.deprecated(None,
"Use `tf.data.experimental.get_single_element(...)`.")
def get_single_element(dataset):
"""Returns the single element in `dataset` as a nested structure of tensors.
This function enables you to use a `tf.data.Dataset` in a stateless
"tensor-in tensor-out" expression, without creating a
`tf.compat.v1.data.Iterator`.
This can be useful when your preprocessing transformations are expressed
as a `Dataset`, and you want to use the transformation at serving time.
For example:
```python
input_batch = tf.compat.v1.placeholder(tf.string, shape=[BATCH_SIZE])
def preprocessing_fn(input_str):
# ...
return image, label
dataset = (tf.data.Dataset.from_tensor_slices(input_batch)
.map(preprocessing_fn, num_parallel_calls=BATCH_SIZE)
.batch(BATCH_SIZE))
image_batch, label_batch = tf.data.experimental.get_single_element(dataset)
```
Args:
dataset: A `tf.data.Dataset` object containing a single element.
Returns:
A nested structure of `tf.Tensor` objects, corresponding to the single
element of `dataset`.
Raises:
TypeError: if `dataset` is not a `tf.data.Dataset` object.
InvalidArgumentError (at runtime): if `dataset` does not contain exactly
one element.
"""
return experimental_get_single_element.get_single_element(dataset)
@deprecation.deprecated(None, "Use `tf.data.Dataset.reduce(...)`.")
def reduce_dataset(dataset, reducer):
"""Returns the result of reducing the `dataset` using `reducer`.
Args:
dataset: A `tf.data.Dataset` object.
reducer: A `tf.data.experimental.Reducer` object representing the reduce
logic.
Returns:
A nested structure of `tf.Tensor` objects, corresponding to the result
of reducing `dataset` using `reducer`.
Raises:
TypeError: if `dataset` is not a `tf.data.Dataset` object.
"""
if not isinstance(dataset, dataset_ops.Dataset):
raise TypeError("`dataset` must be a `tf.data.Dataset` object.")
return dataset.reduce(reducer.init_func(np.int64(0)), reducer.reduce_func)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/python/ops/get_single_element.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Scan dataset transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import scan_ops
from tensorflow.python.util import deprecation
@deprecation.deprecated(None, "Use `tf.data.experimental.scan(...)`.")
def scan(initial_state, scan_func):
"""A transformation that scans a function across an input dataset.
This transformation is a stateful relative of `tf.data.Dataset.map`.
In addition to mapping `scan_func` across the elements of the input dataset,
`scan()` accumulates one or more state tensors, whose initial values are
`initial_state`.
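  For example, a minimal sketch that emits a running sum (it assumes `dataset`
  yields scalar `tf.int64` values and uses the non-deprecated
  `tf.data.experimental` endpoint):
  ```python
  # scan_func returns (new_state, output_element); here both are the sum so far.
  dataset = dataset.apply(
      tf.data.experimental.scan(
          initial_state=tf.constant(0, dtype=tf.int64),
          scan_func=lambda state, x: (state + x, state + x)))
  ```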
Args:
initial_state: A nested structure of tensors, representing the initial state
of the accumulator.
scan_func: A function that maps `(old_state, input_element)` to
      `(new_state, output_element)`. It must take two arguments and return a
pair of nested structures of tensors. The `new_state` must match the
structure of `initial_state`.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return scan_ops.scan(initial_state, scan_func)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/python/ops/scan_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental shuffle ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import shuffle_ops
from tensorflow.python.util import deprecation
@deprecation.deprecated(None,
"Use `tf.data.experimental.shuffle_and_repeat(...)`.")
def shuffle_and_repeat(buffer_size, count=None, seed=None):
"""Shuffles and repeats a Dataset returning a new permutation for each epoch.
`dataset.apply(tf.data.experimental.shuffle_and_repeat(buffer_size, count))`
is equivalent to
`dataset.shuffle(buffer_size, reshuffle_each_iteration=True).repeat(count)`
The difference is that the latter dataset is not serializable. So,
if you need to checkpoint an input pipeline with reshuffling you must use
this implementation.
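  For example, a minimal sketch that shuffles with a 10,000-element buffer and
  repeats indefinitely (using the non-deprecated `tf.data.experimental`
  endpoint):
  ```python
  dataset = dataset.apply(
      tf.data.experimental.shuffle_and_repeat(buffer_size=10000))
  ```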
Args:
    buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the
      maximum number of elements that will be buffered when prefetching.
count: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
number of times the dataset should be repeated. The default behavior
      (if `count` is `None` or `-1`) is for the dataset to be repeated
indefinitely.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
random seed that will be used to create the distribution. See
`tf.compat.v1.set_random_seed` for behavior.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return shuffle_ops.shuffle_and_repeat(buffer_size, count, seed)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/python/ops/shuffle_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for tf.data writers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import writers
from tensorflow.python.util import deprecation
class TFRecordWriter(writers.TFRecordWriter):
"""Writes data to a TFRecord file."""
@deprecation.deprecated(
None, "Use `tf.data.experimental.TFRecordWriter(...)`.")
def __init__(self, filename, compression_type=None):
super(TFRecordWriter, self).__init__(filename, compression_type)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/python/ops/writers.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Iterator ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import iterator_ops
from tensorflow.python.util import deprecation
@deprecation.deprecated(
None, "Use `tf.data.experimental.make_saveable_from_iterator(...)`.")
def make_saveable_from_iterator(iterator):
"""Returns a SaveableObject for saving/restore iterator state using Saver.
Args:
iterator: Iterator.
For example:
```python
with tf.Graph().as_default():
ds = tf.data.Dataset.range(10)
iterator = ds.make_initializable_iterator()
# Build the iterator SaveableObject.
saveable_obj = tf.data.experimental.make_saveable_from_iterator(iterator)
# Add the SaveableObject to the SAVEABLE_OBJECTS collection so
# it can be automatically saved using Saver.
tf.compat.v1.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, saveable_obj)
saver = tf.compat.v1.train.Saver()
while continue_training:
... Perform training ...
if should_save_checkpoint:
saver.save()
```
Note: When restoring the iterator, the existing iterator state is completely
discarded. This means that any changes you may have made to the Dataset
graph will be discarded as well! This includes the new Dataset graph
that you may have built during validation. So, while running validation,
make sure to run the initializer for the validation input pipeline after
restoring the checkpoint.
Note: Not all iterators support checkpointing yet. Attempting to save the
state of an unsupported iterator will throw an error.
"""
return iterator_ops.make_saveable_from_iterator(iterator)
class CheckpointInputPipelineHook(iterator_ops.CheckpointInputPipelineHook):
"""Checkpoints input pipeline state every N steps or seconds.
This hook saves the state of the iterators in the `Graph` so that when
training is resumed the input pipeline continues from where it left off.
  This could potentially avoid overfitting in certain pipelines where the
  number of training steps per eval is small compared to the dataset size,
  or if the training pipeline is pre-empted.
Differences from `CheckpointSaverHook`:
1. Saves only the input pipelines in the "iterators" collection and not the
global variables or other saveable objects.
2. Does not write the `GraphDef` and `MetaGraphDef` to the summary.
Example of checkpointing the training pipeline:
```python
est = tf.estimator.Estimator(model_fn)
while True:
est.train(
train_input_fn,
hooks=[tf.data.experimental.CheckpointInputPipelineHook(est)],
steps=train_steps_per_eval)
# Note: We do not pass the hook here.
metrics = est.evaluate(eval_input_fn)
if should_stop_the_training(metrics):
break
```
This hook should be used if the input pipeline state needs to be saved
separate from the model checkpoint. Doing so may be useful for a few reasons:
1. The input pipeline checkpoint may be large, if there are large shuffle
or prefetch buffers for instance, and may bloat the checkpoint size.
2. If the input pipeline is shared between training and validation, restoring
the checkpoint during validation may override the validation input
pipeline.
For saving the input pipeline checkpoint alongside the model weights use
`tf.data.experimental.make_saveable_from_iterator` directly to create a
`SaveableObject` and add to the `SAVEABLE_OBJECTS` collection. Note, however,
that you will need to be careful not to restore the training iterator during
  eval. You can do that by not adding the iterator to the SAVEABLE_OBJECTS
  collection when building the eval graph.
"""
@deprecation.deprecated(
None, "Use `tf.data.experimental.CheckpointInputPipelineHook(...)`.")
def __init__(self, estimator):
super(CheckpointInputPipelineHook, self).__init__(estimator)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/python/ops/iterator_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-deterministic dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.util import deprecation
@deprecation.deprecated(None,
"Use `tf.data.experimental.parallel_interleave(...)`.")
def parallel_interleave(map_func,
cycle_length,
block_length=1,
sloppy=False,
buffer_output_elements=None,
prefetch_input_elements=None):
"""A parallel version of the `Dataset.interleave()` transformation.
`parallel_interleave()` maps `map_func` across its input to produce nested
datasets, and outputs their elements interleaved. Unlike
`tf.data.Dataset.interleave`, it gets elements from `cycle_length` nested
datasets in parallel, which increases the throughput, especially in the
presence of stragglers. Furthermore, the `sloppy` argument can be used to
improve performance, by relaxing the requirement that the outputs are produced
in a deterministic order, and allowing the implementation to skip over nested
datasets whose elements are not readily available when requested.
Example usage:
```python
# Preprocess 4 files concurrently.
filenames = tf.data.Dataset.list_files("/path/to/data/train*.tfrecords")
dataset = filenames.apply(
tf.data.experimental.parallel_interleave(
lambda filename: tf.data.TFRecordDataset(filename),
cycle_length=4))
```
WARNING: If `sloppy` is `True`, the order of produced elements is not
deterministic.
Args:
map_func: A function mapping a nested structure of tensors to a `Dataset`.
cycle_length: The number of input `Dataset`s to interleave from in parallel.
block_length: The number of consecutive elements to pull from an input
`Dataset` before advancing to the next input `Dataset`.
sloppy: If false, elements are produced in deterministic order. Otherwise,
the implementation is allowed, for the sake of expediency, to produce
elements in a non-deterministic order.
buffer_output_elements: The number of elements each iterator being
interleaved should buffer (similar to the `.prefetch()` transformation for
each interleaved iterator).
prefetch_input_elements: The number of input elements to transform to
iterators before they are needed for interleaving.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return interleave_ops.parallel_interleave(
map_func, cycle_length, block_length, sloppy, buffer_output_elements,
prefetch_input_elements)
@deprecation.deprecated(
None, "Use `tf.contrib.data.parallel_interleave(..., sloppy=True)`.")
def sloppy_interleave(map_func, cycle_length, block_length=1):
"""A non-deterministic version of the `Dataset.interleave()` transformation.
`sloppy_interleave()` maps `map_func` across `dataset`, and
non-deterministically interleaves the results.
The resulting dataset is almost identical to `interleave`. The key
difference is that if retrieving a value from a given output iterator would
cause `get_next` to block, that iterator will be skipped, and consumed
when next available. If consuming from all iterators would cause the
`get_next` call to block, the `get_next` call blocks until the first value is
available.
If the underlying datasets produce elements as fast as they are consumed, the
`sloppy_interleave` transformation behaves identically to `interleave`.
However, if an underlying dataset would block the consumer,
`sloppy_interleave` can violate the round-robin order (that `interleave`
strictly obeys), producing an element from a different underlying
dataset instead.
Example usage:
```python
# Preprocess 4 files concurrently.
filenames = tf.data.Dataset.list_files("/path/to/data/train*.tfrecords")
dataset = filenames.apply(
tf.contrib.data.sloppy_interleave(
lambda filename: tf.data.TFRecordDataset(filename),
cycle_length=4))
```
WARNING: The order of elements in the resulting dataset is not
deterministic. Use `Dataset.interleave()` if you want the elements to have a
deterministic order.
Args:
map_func: A function mapping a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to a
`Dataset`.
cycle_length: The number of input `Dataset`s to interleave from in parallel.
block_length: The number of consecutive elements to pull from an input
`Dataset` before advancing to the next input `Dataset`. Note:
`sloppy_interleave` will skip the remainder of elements in the
`block_length` in order to avoid blocking.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return interleave_ops.parallel_interleave(
map_func, cycle_length, block_length, sloppy=True)
@deprecation.deprecated(None,
"Use `tf.data.experimental.sample_from_datasets(...)`.")
def sample_from_datasets(datasets, weights=None, seed=None):
"""Samples elements at random from the datasets in `datasets`.
Args:
datasets: A list of `tf.data.Dataset` objects with compatible structure.
weights: (Optional.) A list of `len(datasets)` floating-point values where
`weights[i]` represents the probability with which an element should be
sampled from `datasets[i]`, or a `tf.data.Dataset` object where each
element is such a list. Defaults to a uniform distribution across
`datasets`.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
random seed that will be used to create the distribution. See
`tf.compat.v1.set_random_seed` for behavior.
Returns:
A dataset that interleaves elements from `datasets` at random, according to
`weights` if provided, otherwise with uniform probability.
Raises:
TypeError: If the `datasets` or `weights` arguments have the wrong type.
ValueError: If the `weights` argument is specified and does not match the
length of the `datasets` element.
"""
return interleave_ops.sample_from_datasets(datasets, weights, seed)
@deprecation.deprecated(None,
"Use `tf.data.experimental.choose_from_datasets(...)`.")
def choose_from_datasets(datasets, choice_dataset):
"""Creates a dataset that deterministically chooses elements from `datasets`.
For example, given the following datasets:
```python
datasets = [tf.data.Dataset.from_tensors("foo").repeat(),
tf.data.Dataset.from_tensors("bar").repeat(),
tf.data.Dataset.from_tensors("baz").repeat()]
# Define a dataset containing `[0, 1, 2, 0, 1, 2, 0, 1, 2]`.
choice_dataset = tf.data.Dataset.range(3).repeat(3)
result = tf.data.experimental.choose_from_datasets(datasets, choice_dataset)
```
The elements of `result` will be:
```
"foo", "bar", "baz", "foo", "bar", "baz", "foo", "bar", "baz"
```
Args:
datasets: A list of `tf.data.Dataset` objects with compatible structure.
choice_dataset: A `tf.data.Dataset` of scalar `tf.int64` tensors between
`0` and `len(datasets) - 1`.
Returns:
A dataset that interleaves elements from `datasets` according to the values
of `choice_dataset`.
Raises:
TypeError: If the `datasets` or `choice_dataset` arguments have the wrong
type.
"""
return interleave_ops.choose_from_datasets(datasets, choice_dataset)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/python/ops/interleave_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Enumerate dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import enumerate_ops
from tensorflow.python.util import deprecation
@deprecation.deprecated(None,
"Use `tf.data.experimental.enumerate_dataset(...)`.")
def enumerate_dataset(start=0):
"""A transformation that enumerate the elements of a dataset.
It is Similar to python's `enumerate`.
For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { 1, 2, 3 }
b = { (7, 8), (9, 10) }
  # Enumerating prepends an incrementing index to each element of the dataset.
  a.apply(tf.contrib.data.enumerate_dataset(start=5)) == { (5, 1), (6, 2), (7, 3) }
  b.apply(tf.contrib.data.enumerate_dataset()) == { (0, (7, 8)), (1, (9, 10)) }
```
Args:
start: A `tf.int64` scalar `tf.Tensor`, representing the start
value for enumeration.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return enumerate_ops.enumerate_dataset(start)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/python/ops/enumerate_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ignore_errors dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import error_ops
from tensorflow.python.util import deprecation
@deprecation.deprecated(None, "Use `tf.data.experimental.ignore_errors()`.")
def ignore_errors():
"""Creates a `Dataset` from another `Dataset` and silently ignores any errors.
Use this transformation to produce a dataset that contains the same elements
as the input, but silently drops any elements that caused an error. For
example:
```python
dataset = tf.data.Dataset.from_tensor_slices([1., 2., 0., 4.])
  # Computing `tf.debugging.check_numerics(1. / 0.)` will raise an
  # InvalidArgumentError.
  dataset = dataset.map(lambda x: tf.debugging.check_numerics(1. / x, "error"))
  # Using `ignore_errors()` will drop the element that causes an error.
  dataset = dataset.apply(
      tf.data.experimental.ignore_errors())  # ==> { 1., 0.5, 0.25 }
```
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return error_ops.ignore_errors()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/python/ops/error_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Datasets for random number generators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import random_ops
from tensorflow.python.util import deprecation
class RandomDataset(random_ops.RandomDataset):
"""A `Dataset` of pseudorandom values."""
@deprecation.deprecated(
None, "Use `tf.data.experimental.RandomDataset(...)`.")
def __init__(self, seed=None):
super(RandomDataset, self).__init__(seed)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/python/ops/random_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Counter Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import counter
from tensorflow.python.framework import dtypes
from tensorflow.python.util import deprecation
@deprecation.deprecated(None, "Use `tf.data.experimental.Counter(...)`.")
def Counter(start=0, step=1, dtype=dtypes.int64):
"""Creates a `Dataset` that counts from `start` in steps of size `step`.
For example:
```python
Counter() == [0, 1, 2, ...)
Counter(2) == [2, 3, ...)
Counter(2, 5) == [2, 7, 12, ...)
Counter(0, -1) == [0, -1, -2, ...)
Counter(10, -1) == [10, 9, ...)
```
Args:
start: (Optional.) The starting value for the counter. Defaults to 0.
step: (Optional.) The step size for the counter. Defaults to 1.
dtype: (Optional.) The data type for counter elements. Defaults to
`tf.int64`.
Returns:
A `Dataset` of scalar `dtype` elements.
"""
return counter.Counter(start, step, dtype)
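# A minimal usage sketch, assuming TF 1.x; a common pattern is to zip the
# (infinite) counter with another dataset to attach an index to each element:
#
#   letters = tf.data.Dataset.from_tensor_slices(["a", "b", "c"])
#   indexed = tf.data.Dataset.zip((Counter(), letters))
#   # ==> (0, "a"), (1, "b"), (2, "c")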
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/data/python/ops/counter.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stateless random ops which take seed as a tensor input.
DEPRECATED: Use `tf.random.stateless_uniform` rather than
`tf.contrib.stateless.stateless_random_uniform`, and similarly for the other
routines.
Instead of taking `seed` as an attr which initializes a mutable state within
the op, these random ops take `seed` as an input, and the random numbers are
a deterministic function of `shape` and `seed`.
WARNING: These ops are in contrib, and are not stable. They should be
consistent across multiple runs on the same hardware, but only for the same
version of the code.
@@stateless_multinomial
@@stateless_random_uniform
@@stateless_random_normal
@@stateless_truncated_normal
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.stateless_random_ops import stateless_random_uniform
from tensorflow.python.ops.stateless_random_ops import stateless_random_normal
from tensorflow.python.ops.stateless_random_ops import stateless_truncated_normal
from tensorflow.python.ops.stateless_random_ops import stateless_multinomial
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
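# A minimal usage sketch, assuming TF 1.x; the same `seed` tensor always yields
# the same values, which is what makes these ops "stateless":
#
#   seed = tf.constant([1, 2], dtype=tf.int64)
#   a = stateless_random_uniform(shape=[3], seed=seed)
#   b = stateless_random_uniform(shape=[3], seed=seed)
#   # `a` and `b` evaluate to identical values on every run (same hardware and
#   # code version), unlike `tf.random.uniform`.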
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/stateless/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.stateless API.
The real tests are in python/kernel_tests/random/stateless_random_ops_test.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import stateless
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.platform import test
class StatelessOpsTest(test.TestCase):
def testAPI(self):
self.assertIs(stateless.stateless_random_uniform,
stateless_random_ops.stateless_random_uniform)
self.assertIs(stateless.stateless_random_normal,
stateless_random_ops.stateless_random_normal)
self.assertIs(stateless.stateless_truncated_normal,
stateless_random_ops.stateless_truncated_normal)
self.assertIs(stateless.stateless_multinomial,
stateless_random_ops.stateless_multinomial)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/stateless/python/kernel_tests/stateless_random_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model pruning implementation in tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.model_pruning.python.layers.layers import masked_conv2d
from tensorflow.contrib.model_pruning.python.layers.layers import masked_convolution
from tensorflow.contrib.model_pruning.python.layers.layers import masked_fully_connected
from tensorflow.contrib.model_pruning.python.layers.rnn_cells import MaskedBasicLSTMCell
from tensorflow.contrib.model_pruning.python.layers.rnn_cells import MaskedLSTMCell
from tensorflow.contrib.model_pruning.python.learning import train
from tensorflow.contrib.model_pruning.python.pruning import apply_mask
from tensorflow.contrib.model_pruning.python.pruning import get_masked_weights
from tensorflow.contrib.model_pruning.python.pruning import get_masks
from tensorflow.contrib.model_pruning.python.pruning import get_pruning_hparams
from tensorflow.contrib.model_pruning.python.pruning import get_thresholds
from tensorflow.contrib.model_pruning.python.pruning import get_weight_sparsity
from tensorflow.contrib.model_pruning.python.pruning import get_weights
from tensorflow.contrib.model_pruning.python.pruning import Pruning
from tensorflow.contrib.model_pruning.python.strip_pruning_vars_lib import graph_def_from_checkpoint
from tensorflow.contrib.model_pruning.python.strip_pruning_vars_lib import strip_pruning_vars_fn
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'masked_convolution', 'masked_conv2d', 'masked_fully_connected',
'MaskedBasicLSTMCell', 'MaskedLSTMCell', 'train', 'apply_mask',
'get_masked_weights', 'get_masks', 'get_pruning_hparams', 'get_thresholds',
'get_weights', 'get_weight_sparsity', 'Pruning', 'strip_pruning_vars_fn',
'graph_def_from_checkpoint'
]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/model_pruning/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for strip_pruning_vars."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.model_pruning.python import pruning
from tensorflow.contrib.model_pruning.python import strip_pruning_vars_lib
from tensorflow.contrib.model_pruning.python.layers import layers
from tensorflow.contrib.model_pruning.python.layers import rnn_cells
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell as tf_rnn_cells
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import training_util
def _get_number_pruning_vars(graph_def):
number_vars = 0
for node in graph_def.node:
if re.match(r"^.*(mask$)|(threshold$)", node.name):
number_vars += 1
return number_vars
def _get_node_names(tensor_names):
return [
strip_pruning_vars_lib._node_name(tensor_name)
for tensor_name in tensor_names
]
class StripPruningVarsTest(test.TestCase):
def setUp(self):
param_list = [
"pruning_frequency=1", "begin_pruning_step=1", "end_pruning_step=10",
"nbins=2048", "threshold_decay=0.0"
]
self.initial_graph = ops.Graph()
self.initial_graph_def = None
self.final_graph = ops.Graph()
self.final_graph_def = None
self.pruning_spec = ",".join(param_list)
with self.initial_graph.as_default():
self.sparsity = variables.Variable(0.5, name="sparsity")
self.global_step = training_util.get_or_create_global_step()
self.increment_global_step = state_ops.assign_add(self.global_step, 1)
self.mask_update_op = None
def _build_convolutional_model(self, number_of_layers):
# Create a graph with several conv2d layers
kernel_size = 3
base_depth = 4
depth_step = 7
height, width = 7, 9
with variable_scope.variable_scope("conv_model"):
input_tensor = array_ops.ones((8, height, width, base_depth))
top_layer = input_tensor
for ix in range(number_of_layers):
top_layer = layers.masked_conv2d(
top_layer,
base_depth + (ix + 1) * depth_step,
kernel_size,
scope="Conv_" + str(ix))
return top_layer
def _build_fully_connected_model(self, number_of_layers):
base_depth = 4
depth_step = 7
input_tensor = array_ops.ones((8, base_depth))
top_layer = input_tensor
with variable_scope.variable_scope("fc_model"):
for ix in range(number_of_layers):
top_layer = layers.masked_fully_connected(
top_layer, base_depth + (ix + 1) * depth_step)
return top_layer
def _build_lstm_model(self, number_of_layers):
batch_size = 8
dim = 10
inputs = variables.Variable(random_ops.random_normal([batch_size, dim]))
def lstm_cell():
return rnn_cells.MaskedBasicLSTMCell(
dim, forget_bias=0.0, state_is_tuple=True, reuse=False)
cell = tf_rnn_cells.MultiRNNCell(
[lstm_cell() for _ in range(number_of_layers)], state_is_tuple=True)
outputs = rnn.static_rnn(
cell, [inputs],
initial_state=cell.zero_state(batch_size, dtypes.float32))
return outputs
def _prune_model(self, session):
pruning_hparams = pruning.get_pruning_hparams().parse(self.pruning_spec)
p = pruning.Pruning(pruning_hparams, sparsity=self.sparsity)
self.mask_update_op = p.conditional_mask_update_op()
variables.global_variables_initializer().run()
for _ in range(20):
session.run(self.mask_update_op)
session.run(self.increment_global_step)
def _get_outputs(self, session, input_graph, tensors_list, graph_prefix=None):
outputs = []
for output_tensor in tensors_list:
if graph_prefix:
output_tensor = graph_prefix + "/" + output_tensor
outputs.append(
session.run(session.graph.get_tensor_by_name(output_tensor)))
return outputs
def _get_initial_outputs(self, output_tensor_names_list):
with self.session(graph=self.initial_graph) as sess1:
self._prune_model(sess1)
reference_outputs = self._get_outputs(sess1, self.initial_graph,
output_tensor_names_list)
self.initial_graph_def = graph_util.convert_variables_to_constants(
sess1, sess1.graph.as_graph_def(),
_get_node_names(output_tensor_names_list))
return reference_outputs
def _get_final_outputs(self, output_tensor_names_list):
self.final_graph_def = strip_pruning_vars_lib.strip_pruning_vars_fn(
self.initial_graph_def, _get_node_names(output_tensor_names_list))
_ = importer.import_graph_def(self.final_graph_def, name="final")
with self.test_session(self.final_graph) as sess2:
final_outputs = self._get_outputs(
sess2,
self.final_graph,
output_tensor_names_list,
graph_prefix="final")
return final_outputs
def _check_removal_of_pruning_vars(self, number_masked_layers):
self.assertEqual(
_get_number_pruning_vars(self.initial_graph_def), number_masked_layers)
self.assertEqual(_get_number_pruning_vars(self.final_graph_def), 0)
def _check_output_equivalence(self, initial_outputs, final_outputs):
for initial_output, final_output in zip(initial_outputs, final_outputs):
self.assertAllEqual(initial_output, final_output)
def testConvolutionalModel(self):
with self.initial_graph.as_default():
number_masked_conv_layers = 5
top_layer = self._build_convolutional_model(number_masked_conv_layers)
output_tensor_names = [top_layer.name]
initial_outputs = self._get_initial_outputs(output_tensor_names)
# Remove pruning-related nodes.
with self.final_graph.as_default():
final_outputs = self._get_final_outputs(output_tensor_names)
# Check that the final graph has no pruning-related vars
self._check_removal_of_pruning_vars(number_masked_conv_layers)
# Check that outputs remain the same after removal of pruning-related nodes
self._check_output_equivalence(initial_outputs, final_outputs)
def testFullyConnectedModel(self):
with self.initial_graph.as_default():
number_masked_fc_layers = 3
top_layer = self._build_fully_connected_model(number_masked_fc_layers)
output_tensor_names = [top_layer.name]
initial_outputs = self._get_initial_outputs(output_tensor_names)
# Remove pruning-related nodes.
with self.final_graph.as_default():
final_outputs = self._get_final_outputs(output_tensor_names)
# Check that the final graph has no pruning-related vars
self._check_removal_of_pruning_vars(number_masked_fc_layers)
# Check that outputs remain the same after removal of pruning-related nodes
self._check_output_equivalence(initial_outputs, final_outputs)
def testLSTMModel(self):
with self.initial_graph.as_default():
number_masked_lstm_layers = 2
outputs = self._build_lstm_model(number_masked_lstm_layers)
output_tensor_names = [outputs[0][0].name]
initial_outputs = self._get_initial_outputs(output_tensor_names)
# Remove pruning-related nodes.
with self.final_graph.as_default():
final_outputs = self._get_final_outputs(output_tensor_names)
# Check that the final graph has no pruning-related vars
self._check_removal_of_pruning_vars(number_masked_lstm_layers)
# Check that outputs remain the same after removal of pruning-related nodes
self._check_output_equivalence(initial_outputs, final_outputs)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/model_pruning/python/strip_pruning_vars_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions to add support for magnitude-based model pruning.
# Adds variables and ops to the graph to enable
# elementwise masking of weights
apply_mask(weights)
# Returns a list containing the sparsity of each of the weight tensors
get_weight_sparsity()
# Returns a list of all the masked weight tensorflow variables
get_masked_weights()
# Returns a list of all the mask tensorflow variables
get_masks()
# Returns a list of all the thresholds
get_thresholds()
# Returns a list of all the weight tensors that have been masked
get_weights()
The Pruning class uses a tf.contrib.training.HParams object to set up the
parameters for model pruning. Here's a typical usage:
# Parse pruning hyperparameters
pruning_hparams = pruning.get_pruning_hparams().parse(FLAGS.pruning_hparams)
# Create a pruning object using the pruning_hparams
p = pruning.Pruning(pruning_hparams)
# Add mask update ops to the graph
mask_update_op = p.conditional_mask_update_op()
# Add the summaries
p.add_pruning_summaries()
# Run the op
session.run(mask_update_op)
# A Pruning object also accepts an externally defined sparsity variable:
sparsity = tf.Variable(0.5, name="ConstantSparsity")
p = pruning.Pruning(pruning_hparams, sparsity=sparsity)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.model_pruning.python import pruning_utils
from tensorflow.contrib.model_pruning.python.layers import core_layers as core
from tensorflow.contrib.training.python.training import hparam
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import training_util
_MASK_COLLECTION = core.MASK_COLLECTION
_THRESHOLD_COLLECTION = core.THRESHOLD_COLLECTION
_MASKED_WEIGHT_COLLECTION = core.MASKED_WEIGHT_COLLECTION
_WEIGHT_COLLECTION = core.WEIGHT_COLLECTION
_MASKED_WEIGHT_NAME = core.MASKED_WEIGHT_NAME
def apply_mask(x, scope=''):
"""Apply mask to a given weight tensor.
Args:
x: Input weight tensor
scope: The current variable scope. Defaults to "".
Returns:
Tensor representing masked_weights
"""
mask = pruning_utils.weight_mask_variable(x, scope)
threshold = pruning_utils.weight_threshold_variable(x, scope)
# Add masked_weights in the weights name scope so that it is easier
# for the quantization library to add quant ops.
masked_weights = math_ops.multiply(mask, x, _MASKED_WEIGHT_NAME)
# Make sure the mask for a given variable is not added multiple times to the
# collection. This is particularly important when applying masks to RNN
# weight variables.
if mask not in ops.get_collection_ref(_MASK_COLLECTION):
ops.add_to_collection(_THRESHOLD_COLLECTION, threshold)
ops.add_to_collection(_MASK_COLLECTION, mask)
ops.add_to_collection(_MASKED_WEIGHT_COLLECTION, masked_weights)
ops.add_to_collection(_WEIGHT_COLLECTION, x)
return masked_weights
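# A minimal usage sketch, assuming TF 1.x and a hypothetical `inputs` tensor;
# `apply_mask` wraps an existing weight variable so that pruning can later zero
# out individual elements via the mask:
#
#   with variable_scope.variable_scope('fc1') as scope:
#     weights = variable_scope.get_variable('weights', shape=[784, 256])
#     masked_weights = apply_mask(weights, scope)
#     outputs = math_ops.matmul(inputs, masked_weights)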
def get_masked_weights():
return ops.get_collection(_MASKED_WEIGHT_COLLECTION)
def get_masks():
return ops.get_collection(_MASK_COLLECTION)
def get_thresholds():
return ops.get_collection(_THRESHOLD_COLLECTION)
def get_weights():
return ops.get_collection(_WEIGHT_COLLECTION)
def get_weight_sparsity():
"""Get sparsity of the weights.
Args:
None
Returns:
A list containing the sparsity of each of the weight tensors
"""
masks = get_masks()
return [nn_impl.zero_fraction(mask) for mask in masks]
def get_pruning_hparams():
"""Get a tf.HParams object with the default values for the hyperparameters.
name: string
name of the pruning specification. Used for adding summaries and ops under
a common tensorflow name_scope
begin_pruning_step: integer
the global step at which to begin pruning
end_pruning_step: integer
the global step at which to terminate pruning. Defaults to -1 implying
that pruning continues till the training stops
weight_sparsity_map: list of strings
comma-separated list of {weight_variable_name:target_sparsity} or
{regex:target_sparsity} pairs.
For layers/weights not in this list, the sparsity specified by the
target_sparsity hyperparameter is used.
E.g. [conv1:0.9,conv2/kernel:0.8]
block_dims_map: list of strings
comma-separated list of {weight_variable_name:block_height x block_width}
or {regex:block_height x block_width} pairs. For layers/weights not in
this list, the block dims specified by the block_height and block_width
hyperparameters are used. E.g. [dense1:4x4,dense2:1x16,dense3:1x1]
threshold_decay: float
the decay factor to use for exponential decay of the thresholds
pruning_frequency: integer
How often should the masks be updated? (in # of global_steps)
nbins: integer
number of bins to use for histogram computation
block_height: integer
number of rows in a block (defaults to 1), can be -1 in which
case it is set to the size of the corresponding weight tensor.
block_width: integer
number of cols in a block (defaults to 1), can be -1 in which
case it is set to the size of the corresponding weight tensor.
block_pooling_function: string
Whether to perform average (AVG) or max (MAX) pooling in the block
(default: AVG)
initial_sparsity: float
initial sparsity value
target_sparsity: float
target sparsity value
sparsity_function_begin_step: integer
the global step at which the gradual sparsity function begins to
take effect
sparsity_function_end_step: integer
the global step used as the end point for the gradual sparsity function
sparsity_function_exponent: float
exponent = 1 varies the sparsity linearly between initial and final.
exponent > 1 varies it more slowly towards the end than the beginning
use_tpu: boolean
indicates whether to use TPU (defaults to False)
We use the following sparsity function:
num_steps = (sparsity_function_end_step -
sparsity_function_begin_step)/pruning_frequency
sparsity(step) = (initial_sparsity - target_sparsity)*
[1-step/(num_steps -1)]**exponent + target_sparsity
Args:
None
Returns:
tf.HParams object initialized to default values
"""
return hparam.HParams(
name='model_pruning',
begin_pruning_step=0,
end_pruning_step=-1,
weight_sparsity_map=[''],
block_dims_map=[''],
threshold_decay=0.0,
pruning_frequency=10,
nbins=256,
block_height=1,
block_width=1,
block_pooling_function='AVG',
initial_sparsity=0.0,
target_sparsity=0.5,
sparsity_function_begin_step=0,
sparsity_function_end_step=100,
sparsity_function_exponent=3.0,
use_tpu=False)
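# A minimal usage sketch; the defaults above can be overridden by parsing a
# comma-separated spec string (the values shown are illustrative only):
#
#   hparams = get_pruning_hparams().parse(
#       'begin_pruning_step=1000,end_pruning_step=50000,'
#       'pruning_frequency=100,target_sparsity=0.9')
#
# With initial_sparsity=0.0, target_sparsity=0.9 and the default exponent of
# 3.0, the sparsity halfway through the ramp (p = 0.5) is
# (0.0 - 0.9) * (1 - 0.5)**3 + 0.9 = 0.7875.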
class Pruning(object):
def __init__(self, spec=None, global_step=None, sparsity=None):
"""Set up the specification for model pruning.
If a spec is provided, the sparsity is set up based on the sparsity_function
in the spec. The effect of sparsity_function is overridden if the sparsity
variable is passed to the constructor. This enables setting up arbitrary
sparsity profiles externally and passing them to this pruning function.
Args:
spec: Pruning spec as defined in pruning.proto
global_step: A tensorflow variable that is used while setting up the
sparsity function
sparsity: A tensorflow scalar variable storing the sparsity
"""
# Pruning specification
self._spec = spec if spec else get_pruning_hparams()
# Sanity check for pruning hparams
self._validate_spec()
# A tensorflow variable that tracks the sparsity function.
# If not provided as input, the graph must already contain the global_step
# variable before calling this constructor.
self._global_step = self._setup_global_step(global_step)
# Stores the tensorflow sparsity variable.
# Built using self._setup_sparsity() or provided externally
self._sparsity = (sparsity
if sparsity is not None else self._setup_sparsity())
# List of tensorflow assignments ops for new masks and thresholds
self._assign_ops = []
# Tensorflow variable keeping track of the last global step when the masks
# were updated
self._last_update_step = self._setup_last_update_step()
# Block dimensions
self._block_dims = [self._spec.block_height, self._spec.block_width]
# Block pooling function
self._block_pooling_function = self._spec.block_pooling_function
# Mapping of layer/weight names and block dims
self._block_dims_map = self._get_block_dims_map()
# Mapping of weight names and target sparsity
self._weight_sparsity_map = self._get_weight_sparsity_map()
def _validate_spec(self):
spec = self._spec
if spec.begin_pruning_step < 0:
raise ValueError('Illegal value for begin_pruning_step')
if spec.begin_pruning_step >= spec.end_pruning_step:
if spec.end_pruning_step != -1:
raise ValueError(
'Pruning must begin before it can end. begin_step=%d, end_step=%d. '
'Set end_pruning_step to -1 if pruning is required until training '
'stops.' % (spec.begin_pruning_step, spec.end_pruning_step))
if spec.sparsity_function_begin_step < 0:
raise ValueError('Illegal value for sparsity_function_begin_step')
if spec.sparsity_function_begin_step >= spec.sparsity_function_end_step:
raise ValueError(
'Sparsity function requires begin_step < end_step')
if not 0.0 <= spec.threshold_decay < 1.0:
raise ValueError('threshold_decay must be in range [0,1)')
if not 0.0 <= spec.initial_sparsity < 1.0:
raise ValueError('initial_sparsity must be in range [0,1)')
if not 0.0 <= spec.target_sparsity < 1.0:
raise ValueError('target_sparsity must be in range [0,1)')
def _setup_global_step(self, global_step):
graph_global_step = global_step
if graph_global_step is None:
graph_global_step = training_util.get_global_step()
return math_ops.cast(graph_global_step, dtypes.int32)
def _setup_sparsity(self):
begin_step = self._spec.sparsity_function_begin_step
end_step = self._spec.sparsity_function_end_step
initial_sparsity = self._spec.initial_sparsity
target_sparsity = self._spec.target_sparsity
exponent = self._spec.sparsity_function_exponent
with ops.name_scope(self._spec.name):
p = math_ops.minimum(
1.0,
math_ops.maximum(
0.0,
math_ops.div(
math_ops.cast(self._global_step - begin_step, dtypes.float32),
end_step - begin_step)))
sparsity = math_ops.add(
math_ops.multiply(initial_sparsity - target_sparsity,
math_ops.pow(1 - p, exponent)),
target_sparsity,
name='sparsity')
return sparsity
def _setup_last_update_step(self):
with variable_scope.variable_scope(
self._spec.name, use_resource=self._spec.use_tpu) as scope:
try:
last_update_step = variable_scope.get_variable(
'last_mask_update_step', [],
initializer=init_ops.zeros_initializer(),
trainable=False,
dtype=dtypes.int32)
except ValueError:
scope.reuse_variables()
last_update_step = variable_scope.get_variable(
'last_mask_update_step', dtype=dtypes.int32)
return last_update_step
def _get_block_dims_map(self):
"""Returns the map of layer name: block dims."""
block_dims_map = {}
val_list = self._spec.block_dims_map
filtered_val_list = [l for l in val_list if l]
for val in filtered_val_list:
weight_name, block_dims_str = val.split(':')
block_dims_str = block_dims_str.split('x')
if len(block_dims_str) != 2:
raise ValueError('Expected 2 values for block dim for %s, got %s' %
(weight_name, block_dims_str))
block_dims = [int(block_dims_str[0]), int(block_dims_str[1])]
block_dims_map[re.compile(weight_name)] = block_dims
return block_dims_map
def _get_block_dims(self, weight_name):
"""Returns the block dims for the given layer/weight name."""
block_dims_list = [
block_dims for regexp, block_dims in self._block_dims_map.items()
if regexp.search(weight_name)
]
if not block_dims_list:
return self._block_dims
if len(block_dims_list) > 1:
raise ValueError('Multiple matches in block_dims_map for weight %s' %
weight_name)
return block_dims_list[0]
def _get_weight_sparsity_map(self):
"""Returns the map of weight_name:sparsity parsed from the hparams."""
weight_sparsity_map = {}
val_list = self._spec.weight_sparsity_map
filtered_val_list = [l for l in val_list if l]
for val in filtered_val_list:
weight_name, sparsity = val.split(':')
if float(sparsity) >= 1.0:
raise ValueError('Weight sparsity can not exceed 1.0')
weight_sparsity_map[re.compile(weight_name)] = float(sparsity)
return weight_sparsity_map
def _get_sparsity(self, weight_name):
"""Returns target sparsity for the given layer/weight name."""
target_sparsity = [
sparsity for regexp, sparsity in self._weight_sparsity_map.items()
if regexp.search(weight_name)
]
if not target_sparsity:
return self._sparsity
if len(target_sparsity) > 1:
raise ValueError(
'Multiple matches in weight_sparsity_map for weight %s' % weight_name)
# TODO(suyoggupta): This will work when initial_sparsity = 0. Generalize
# to handle other cases as well.
return math_ops.mul(
self._sparsity,
math_ops.div(target_sparsity[0], self._spec.target_sparsity))
def _update_mask(self, weights, threshold):
"""Updates the mask for a given weight tensor.
This function first computes the cdf of the weight tensor, and estimates
the threshold value such that 'desired_sparsity' fraction of weights
have magnitude less than the threshold.
Args:
weights: The weight tensor that needs to be masked.
threshold: The current threshold value. The function will compute a new
threshold and return the exponential moving average using the current
value of threshold
Returns:
new_threshold: The new value of the threshold based on weights, and
sparsity at the current global_step
new_mask: A tensor of the same size and shape as weights, containing
0 where the weight magnitude falls below the threshold (pruned) and
1 where it does not (kept)
Raises:
ValueError: if sparsity is not defined
"""
if self._sparsity is None:
raise ValueError('Sparsity variable undefined')
sparsity = self._get_sparsity(weights.op.name)
with ops.name_scope(weights.op.name + '_pruning_ops'):
abs_weights = math_ops.abs(weights)
k = math_ops.cast(
math_ops.round(
math_ops.cast(array_ops.size(abs_weights), dtypes.float32) *
(1 - sparsity)), dtypes.int32)
# Sort the entire array
values, _ = nn_ops.top_k(
array_ops.reshape(abs_weights, [-1]), k=array_ops.size(abs_weights))
# Grab the (k-1) th value
current_threshold = array_ops.gather(values, k - 1)
smoothed_threshold = math_ops.add_n([
math_ops.multiply(current_threshold, 1 - self._spec.threshold_decay),
math_ops.multiply(threshold, self._spec.threshold_decay)
])
new_mask = math_ops.cast(
math_ops.greater_equal(abs_weights, smoothed_threshold),
dtypes.float32)
return smoothed_threshold, new_mask
def _maybe_update_block_mask(self, weights, threshold):
"""Performs block-granular masking of the weights.
Block pruning occurs only if the block_height or block_width is > 1 and
if the weight tensor, when squeezed, has ndims = 2. Otherwise, elementwise
pruning occurs.
Args:
weights: The weight tensor that needs to be masked.
threshold: The current threshold value. The function will compute a new
threshold and return the exponential moving average using the current
value of threshold
Returns:
new_threshold: The new value of the threshold based on weights, and
sparsity at the current global_step
new_mask: A tensor of the same size and shape as weights, containing
0 where the weight magnitude falls below the threshold (pruned) and
1 where it does not (kept)
Raises:
ValueError: if block pooling function is not AVG or MAX
"""
block_dims = self._get_block_dims(weights.op.name)
squeezed_weights = array_ops.squeeze(weights)
if squeezed_weights.get_shape().ndims != 2 or block_dims == [1, 1]:
return self._update_mask(weights, threshold)
for i in range(2):
if block_dims[i] == -1:
block_dims[i] = squeezed_weights.get_shape()[i]
if self._block_pooling_function not in ['AVG', 'MAX']:
raise ValueError('Unknown pooling function for block sparsity: %s' %
self._block_pooling_function)
with ops.name_scope(weights.op.name + '_pruning_ops'):
abs_weights = math_ops.abs(squeezed_weights)
pool_window = block_dims
pool_fn = pruning_utils.factorized_pool
squeeze_axis = None
if not self._spec.use_tpu:
pool_fn = nn_ops.pool
abs_weights = array_ops.reshape(
abs_weights,
[1, abs_weights.get_shape()[0],
abs_weights.get_shape()[1], 1])
squeeze_axis = [0, 3]
pooled_weights = pool_fn(
abs_weights,
window_shape=pool_window,
pooling_type=self._block_pooling_function,
strides=pool_window,
padding='SAME',
name=weights.op.name + '_pooled')
if pooled_weights.get_shape().ndims != 2:
pooled_weights = array_ops.squeeze(pooled_weights, axis=squeeze_axis)
smoothed_threshold, new_mask = self._update_mask(pooled_weights,
threshold)
updated_mask = pruning_utils.expand_tensor(new_mask, block_dims)
sliced_mask = array_ops.slice(
updated_mask, [0, 0],
[squeezed_weights.get_shape()[0],
squeezed_weights.get_shape()[1]])
return smoothed_threshold, array_ops.reshape(sliced_mask,
array_ops.shape(weights))
def _get_mask_assign_ops(self):
# Make sure the assignment ops have not already been added to the list
if self._assign_ops:
raise ValueError(
'Assign op list not empty. _get_mask_assign_ops() called twice?')
masks = get_masks()
weights = get_weights()
thresholds = get_thresholds()
if len(masks) != len(thresholds):
raise ValueError(
'Number of masks %s and number of thresholds %s mismatch' %
(len(masks), len(thresholds)))
for index, mask in enumerate(masks):
threshold = thresholds[index]
weight = weights[index]
is_partitioned = isinstance(weight, variables.PartitionedVariable)
if is_partitioned:
weight = weight.as_tensor()
new_threshold, new_mask = self._maybe_update_block_mask(weight, threshold)
self._assign_ops.append(
pruning_utils.variable_assign(threshold, new_threshold))
self._assign_ops.append(
pruning_utils.partitioned_variable_assign(mask, new_mask)
if is_partitioned else pruning_utils.variable_assign(mask, new_mask))
def mask_update_op(self):
with ops.name_scope(self._spec.name):
if not self._assign_ops:
self._get_mask_assign_ops()
with ops.control_dependencies([
state_ops.assign(
self._last_update_step,
self._global_step,
name='last_mask_update_step_assign')
]):
with ops.control_dependencies(self._assign_ops):
logging.info('Updating masks.')
return control_flow_ops.no_op('mask_update')
def conditional_mask_update_op(self):
def maybe_update_masks():
with ops.name_scope(self._spec.name):
is_step_within_pruning_range = math_ops.logical_and(
math_ops.greater_equal(self._global_step,
self._spec.begin_pruning_step),
# If end_pruning_step is negative, keep pruning forever!
math_ops.logical_or(
math_ops.less_equal(self._global_step,
self._spec.end_pruning_step),
math_ops.less(self._spec.end_pruning_step, 0)))
is_pruning_step = math_ops.less_equal(
math_ops.add(self._last_update_step, self._spec.pruning_frequency),
self._global_step)
return math_ops.logical_and(is_step_within_pruning_range,
is_pruning_step)
def mask_update_op():
return self.mask_update_op()
def no_update_op():
return control_flow_ops.no_op()
return control_flow_ops.cond(maybe_update_masks(), mask_update_op,
no_update_op)
def add_pruning_summaries(self):
"""Adds summaries of weight sparsities and thresholds."""
with ops.name_scope(self._spec.name + '_summaries'):
summary.scalar('sparsity', self._sparsity)
summary.scalar('last_mask_update_step', self._last_update_step)
masks = get_masks()
thresholds = get_thresholds()
for mask, threshold in zip(masks, thresholds):
summary.scalar(mask.op.name + '/sparsity', nn_impl.zero_fraction(mask))
summary.scalar(threshold.op.name + '/threshold', threshold)
def print_hparams(self):
logging.info(self._spec.to_json())
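# A minimal end-to-end sketch, assuming TF 1.x, masked layers built with the
# tf.contrib.model_pruning masked_* layers, and hypothetical `FLAGS`,
# `train_op` and `num_steps` supplied by the surrounding training code:
#
#   p = Pruning(get_pruning_hparams().parse(FLAGS.pruning_hparams))
#   mask_update_op = p.conditional_mask_update_op()
#   p.add_pruning_summaries()
#   with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     for _ in range(num_steps):
#       sess.run(train_op)
#       # Masks are actually refreshed only every `pruning_frequency` steps
#       # within the [begin_pruning_step, end_pruning_step] window.
#       sess.run(mask_update_op)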
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/model_pruning/python/pruning.py
|