| python_code | repo_name | file_path |
|---|---|---|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import shutil
import time
import numpy as np
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.slim.python.slim import evaluation
from tensorflow.contrib.training.python.training import evaluation as evaluation_lib
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.wrappers import hooks
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import variables
from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary import summary_iterator
from tensorflow.python.training import input # pylint: disable=redefined-builtin
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
FLAGS = flags.FLAGS
def GenerateTestData(num_classes, batch_size):
inputs = np.random.rand(batch_size, num_classes)
np.random.seed(0)
labels = np.random.randint(low=0, high=num_classes, size=batch_size)
labels = labels.reshape((batch_size,))
return inputs, labels
def TestModel(inputs):
scale = variables.Variable(1.0, trainable=False)
  # Scaling the outputs won't change the result...
outputs = math_ops.multiply(inputs, scale)
return math_ops.argmax(outputs, 1), scale
def GroundTruthAccuracy(inputs, labels, batch_size):
predictions = np.argmax(inputs, 1)
num_correct = np.sum(predictions == labels)
return float(num_correct) / batch_size
class EvaluationTest(test.TestCase):
def setUp(self):
super(EvaluationTest, self).setUp()
num_classes = 8
batch_size = 16
inputs, labels = GenerateTestData(num_classes, batch_size)
self._expected_accuracy = GroundTruthAccuracy(inputs, labels, batch_size)
self._global_step = variables_lib.get_or_create_global_step()
self._inputs = constant_op.constant(inputs, dtype=dtypes.float32)
self._labels = constant_op.constant(labels, dtype=dtypes.int64)
self._predictions, self._scale = TestModel(self._inputs)
def testFinalOpsOnEvaluationLoop(self):
value_op, update_op = metrics.accuracy(
labels=self._labels, predictions=self._predictions)
init_op = control_flow_ops.group(variables.global_variables_initializer(),
variables.local_variables_initializer())
# Create checkpoint and log directories:
chkpt_dir = os.path.join(self.get_temp_dir(), 'tmp_logs/')
gfile.MakeDirs(chkpt_dir)
logdir = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
gfile.MakeDirs(logdir)
# Save initialized variables to a checkpoint directory:
saver = saver_lib.Saver()
with self.cached_session() as sess:
init_op.run()
saver.save(sess, os.path.join(chkpt_dir, 'chkpt'))
class Object(object):
def __init__(self):
self.hook_was_run = False
obj = Object()
# Create a custom session run hook.
class CustomHook(session_run_hook.SessionRunHook):
def __init__(self, obj):
self.obj = obj
def end(self, session):
self.obj.hook_was_run = True
# Now, run the evaluation loop:
accuracy_value = evaluation.evaluation_loop(
'',
chkpt_dir,
logdir,
eval_op=update_op,
final_op=value_op,
hooks=[CustomHook(obj)],
max_number_of_evaluations=1)
self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
# Validate that custom hook ran.
self.assertTrue(obj.hook_was_run)
def _create_names_to_metrics(self, predictions, labels):
accuracy0, update_op0 = metrics.accuracy(
labels=labels, predictions=predictions)
accuracy1, update_op1 = metrics.accuracy(
labels=labels, predictions=predictions + 1)
names_to_values = {'Accuracy': accuracy0, 'Another_accuracy': accuracy1}
names_to_updates = {'Accuracy': update_op0, 'Another_accuracy': update_op1}
return names_to_values, names_to_updates
def _verify_summaries(self, output_dir, names_to_values):
"""Verifies that the given `names_to_values` are found in the summaries.
Args:
output_dir: An existing directory where summaries are found.
names_to_values: A dictionary of strings to values.
"""
    # Check that the results were saved. The events file may have additional
    # entries, e.g. the event version stamp, so we have to parse things a bit.
output_filepath = glob.glob(os.path.join(output_dir, '*'))
self.assertEqual(len(output_filepath), 1)
events = summary_iterator.summary_iterator(output_filepath[0])
summaries = [e.summary for e in events if e.summary.value]
values = []
for summary in summaries:
for value in summary.value:
values.append(value)
saved_results = {v.tag: v.simple_value for v in values}
for name in names_to_values:
self.assertAlmostEqual(names_to_values[name], saved_results[name])
def testLatestCheckpointReturnsNoneAfterTimeout(self):
start = time.time()
ret = evaluation_lib.wait_for_new_checkpoint(
'/non-existent-dir', 'foo', timeout=1.0, seconds_to_sleep=0.5)
end = time.time()
self.assertIsNone(ret)
    # We waited for at least one sleep interval.
self.assertGreater(end, start + 0.5)
# The timeout kicked in.
self.assertLess(end, start + 1.1)
def testTimeoutFnOnEvaluationLoop(self):
    # We require a mutable object (e.g. a list, not an int) to maintain state
    # across calls to the nested function.
timeout_fn_calls = [0]
def _TimeoutFn():
timeout_fn_calls[0] += 1
return timeout_fn_calls[0] >= 3
    # No evaluation is needed; the loop should just call timeout_fn repeatedly.
evaluation.evaluation_loop('', '', '', timeout=0, timeout_fn=_TimeoutFn)
self.assertEqual(timeout_fn_calls[0], 3)
def testMonitorCheckpointsLoopTimeout(self):
ret = list(
evaluation_lib.checkpoints_iterator(
'/non-existent-dir', timeout=0))
self.assertEqual(ret, [])
def testWithEpochLimit(self):
predictions_limited = input.limit_epochs(self._predictions, num_epochs=1)
labels_limited = input.limit_epochs(self._labels, num_epochs=1)
value_op, update_op = metrics.accuracy(
labels=labels_limited, predictions=predictions_limited)
init_op = control_flow_ops.group(variables.global_variables_initializer(),
variables.local_variables_initializer())
# Create checkpoint and log directories:
chkpt_dir = os.path.join(self.get_temp_dir(), 'tmp_logs/')
gfile.MakeDirs(chkpt_dir)
logdir = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
gfile.MakeDirs(logdir)
# Save initialized variables to a checkpoint directory:
saver = saver_lib.Saver()
with self.cached_session() as sess:
init_op.run()
saver.save(sess, os.path.join(chkpt_dir, 'chkpt'))
# Now, run the evaluation loop:
accuracy_value = evaluation.evaluation_loop(
'', chkpt_dir, logdir, eval_op=update_op, final_op=value_op,
max_number_of_evaluations=1, num_evals=10000)
self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
class SingleEvaluationTest(test.TestCase):
def setUp(self):
super(SingleEvaluationTest, self).setUp()
num_classes = 8
batch_size = 16
inputs, labels = GenerateTestData(num_classes, batch_size)
self._expected_accuracy = GroundTruthAccuracy(inputs, labels, batch_size)
self._global_step = variables_lib.get_or_create_global_step()
self._inputs = constant_op.constant(inputs, dtype=dtypes.float32)
self._labels = constant_op.constant(labels, dtype=dtypes.int64)
self._predictions, self._scale = TestModel(self._inputs)
def testErrorRaisedIfCheckpointDoesntExist(self):
checkpoint_path = os.path.join(self.get_temp_dir(),
'this_file_doesnt_exist')
log_dir = os.path.join(self.get_temp_dir(), 'error_raised')
with self.assertRaises(ValueError):
evaluation.evaluate_once('', checkpoint_path, log_dir)
def _prepareCheckpoint(self, checkpoint_path):
init_op = control_flow_ops.group(variables.global_variables_initializer(),
variables.local_variables_initializer())
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V1)
with self.cached_session() as sess:
sess.run(init_op)
saver.save(sess, checkpoint_path)
def testRestoredModelPerformance(self):
checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
log_dir = os.path.join(self.get_temp_dir(), 'log_dir1/')
# First, save out the current model to a checkpoint:
self._prepareCheckpoint(checkpoint_path)
# Next, determine the metric to evaluate:
value_op, update_op = metrics.accuracy(
labels=self._labels, predictions=self._predictions)
# Run the evaluation and verify the results:
accuracy_value = evaluation.evaluate_once(
'', checkpoint_path, log_dir, eval_op=update_op, final_op=value_op)
self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
def testAdditionalHooks(self):
checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
log_dir = os.path.join(self.get_temp_dir(), 'log_dir1/')
# First, save out the current model to a checkpoint:
self._prepareCheckpoint(checkpoint_path)
# Next, determine the metric to evaluate:
value_op, update_op = metrics.accuracy(
labels=self._labels, predictions=self._predictions)
dumping_root = os.path.join(self.get_temp_dir(), 'tfdbg_dump_dir')
dumping_hook = hooks.DumpingDebugHook(dumping_root, log_usage=False)
try:
# Run the evaluation and verify the results:
accuracy_value = evaluation.evaluate_once(
'', checkpoint_path, log_dir, eval_op=update_op, final_op=value_op,
hooks=[dumping_hook])
self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
dump = debug_data.DebugDumpDir(
glob.glob(os.path.join(dumping_root, 'run_*'))[0])
# Here we simply assert that the dumped data has been loaded and is
# non-empty. We do not care about the detailed model-internal tensors or
# their values.
self.assertTrue(dump.dumped_tensor_data)
finally:
if os.path.isdir(dumping_root):
shutil.rmtree(dumping_root)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/contrib/slim/python/slim/evaluation_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains TF-Slim code for training models.
This script contains various functions for training models. These include
manipulating gradients, creating a `train_op` (an operation that computes the
loss and applies the gradients) and a training loop function. The training loop
allows the user to pass in the `train_op` and runs the optimization according
to user-specified arguments. Note that the training loop uses the
tf.compat.v1.train.Supervisor and its managed_session in its implementation to
ensure the ability of worker processes to recover from failures.
************************************
* A simple working training script *
************************************
# Load data and create the model:
images, labels = LoadData(...)
predictions = MyModel(images)
# Define the loss:
slim.losses.log_loss(predictions, labels)
total_loss = slim.losses.get_total_loss()
# Define the optimizer:
optimizer = tf.compat.v1.train.MomentumOptimizer(FLAGS.learning_rate,
FLAGS.momentum)
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Run training.
slim.learning.train(train_op, my_log_dir)
*************************
* Creating the train_op *
*************************
In order to train, TF-Slim's train loop needs a train_op: an `Operation` that
(a) computes the loss, (b) applies the gradients to update the weights and
(c) returns the value of the loss. slim.learning.create_train_op creates
such an `Operation`. This function also provides the ability to manipulate
the gradients using a few arguments:
# Create the train_op and clip the gradient norms:
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
clip_gradient_norm=4)
# Create the train_op and scale the gradients by providing a map from variable
# name (or variable) to a scaling coefficient:
gradient_multipliers = {
'conv0/weights': 1.2,
'fc8/weights': 3.4,
}
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
gradient_multipliers=gradient_multipliers)
****************************************************************
* Performing additional (non-gradient) updates during training *
****************************************************************
Many networks utilize modules, like BatchNorm, that require performing a series
of non-gradient updates during training. slim.learning.create_train_op allows
a user to pass in a list of update_ops to call along with the gradient updates.
train_op = slim.learning.create_train_op(total_loss, optimizer, update_ops)
By default, slim.learning.create_train_op includes all update ops that are
part of the `tf.GraphKeys.UPDATE_OPS` collection. Additionally, TF-Slim's
slim.batch_norm function adds the moving mean and moving variance updates to
this collection. Consequently, users who want to use slim.batch_norm will not
need to take any additional steps to have the moving mean and moving variance
updates computed.
However, users with additional, specialized updates can either override the
default update ops or simply add additional update ops to the
`tf.GraphKeys.UPDATE_OPS` collection:
# Force TF-Slim NOT to use ANY update_ops:
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
update_ops=[])
# Use an alternative set of update ops:
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
update_ops=my_other_update_ops)
# Use an alternative set of update ops in addition to the default updates:
tf.compat.v1.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update0)
tf.compat.v1.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update1)
train_op = slim.learning.create_train_op(
total_loss,
optimizer)
# Which is the same as:
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
update_ops=tf.compat.v1.get_collection(tf.GraphKeys.UPDATE_OPS))
******************************************
* Initializing a model from a checkpoint *
******************************************
It is common to want to 'warm-start' a model from a pre-trained checkpoint.
TF-Slim provides a convenient mechanism for doing so:
...
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Create the initial assignment op
checkpoint_path = '/path/to/old_model_checkpoint'
variables_to_restore = slim.get_model_variables()
init_assign_op, init_feed_dict = slim.assign_from_checkpoint(
checkpoint_path, variables_to_restore)
# Create an initial assignment function.
def InitAssignFn(sess):
sess.run(init_assign_op, init_feed_dict)
# Run training.
slim.learning.train(train_op, my_log_dir, init_fn=InitAssignFn)
***************************************************************************
* Initializing a model from a checkpoint whose variable names don't match *
***************************************************************************
At times, a user may want to initialize a new model with values from a
checkpoint whose variable names do not match those of the current model. In this
case, one needs to create a mapping from the checkpoint variable names to the
current model variables. This requires only a small modification of the code
above:
...
# Creates a model with two variables, var0 and var1
predictions = MyModel(images)
...
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
checkpoint_path = '/path/to/old_model_checkpoint'
# Create the mapping:
variables_to_restore = {
'name_var_0_in_checkpoint': slim.get_unique_variable('var0'),
'name_var_1_in_checkpoint': slim.get_unique_variable('var1')
}
init_assign_op, init_feed_dict = slim.assign_from_checkpoint(
checkpoint_path, variables_to_restore)
# Create an initial assignment function.
def InitAssignFn(sess):
sess.run(init_assign_op, init_feed_dict)
# Run training.
slim.learning.train(train_op, my_log_dir, init_fn=InitAssignFn)
*************************************************
* Fine-Tuning Part of a model from a checkpoint *
*************************************************
Rather than initializing all of the weights of a given model, we sometimes
only want to restore some of the weights from a checkpoint. To do this, one
need only filter the variables to restore, as follows:
...
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
checkpoint_path = '/path/to/old_model_checkpoint'
# Specify the variables to restore via a list of inclusion or exclusion
# patterns:
variables_to_restore = slim.get_variables_to_restore(
include=["conv"], exclude=["fc8", "fc9])
# or
variables_to_restore = slim.get_variables_to_restore(exclude=["conv"])
init_assign_op, init_feed_dict = slim.assign_from_checkpoint(
checkpoint_path, variables_to_restore)
# Create an initial assignment function.
def InitAssignFn(sess):
sess.run(init_assign_op, init_feed_dict)
# Run training.
slim.learning.train(train_op, my_log_dir, init_fn=InitAssignFn)
******************************************************
* Initializing model variables from values in memory *
******************************************************
One may want to initialize the weights of a model from values obtained from an
arbitrary source (a text document, MATLAB file, etc.). While this is technically
feasible using plain TensorFlow, it also results in the values of your weights
being stored in the graph. For large models, the graph becomes prohibitively
large. TF-Slim allows you to perform this initial assignment without having to
store the values of the initial model in the graph itself, by using placeholders
and a feed dictionary:
...
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Create the mapping from variable names to values:
var0_initial_value = ReadFromDisk(...)
var1_initial_value = ReadFromDisk(...)
var_names_to_values = {
'var0': var0_initial_value,
'var1': var1_initial_value,
}
init_assign_op, init_feed_dict = slim.assign_from_values(var_names_to_values)
# Create an initial assignment function.
def InitAssignFn(sess):
sess.run(init_assign_op, init_feed_dict)
# Run training.
slim.learning.train(train_op, my_log_dir, init_fn=InitAssignFn)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
from tensorflow.contrib.training.python.training import training
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import timeline
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import optimizer as tf_optimizer
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import supervisor
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
__all__ = [
'add_gradients_summaries', 'clip_gradient_norms', 'multiply_gradients',
'create_train_op', 'train_step', 'train'
]
def clip_gradient_norms(gradients_to_variables, max_norm):
"""Clips the gradients by the given value.
Args:
gradients_to_variables: A list of gradient to variable pairs (tuples).
max_norm: the maximum norm value.
Returns:
A list of clipped gradient to variable pairs.
"""
clipped_grads_and_vars = []
for grad, var in gradients_to_variables:
if grad is not None:
if isinstance(grad, ops.IndexedSlices):
tmp = clip_ops.clip_by_norm(grad.values, max_norm)
grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
else:
grad = clip_ops.clip_by_norm(grad, max_norm)
clipped_grads_and_vars.append((grad, var))
return clipped_grads_and_vars
def multiply_gradients(grads_and_vars, gradient_multipliers):
"""Multiply specified gradients.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
gradient_multipliers: A map from either `Variables` or `Variable` op names
to the coefficient by which the associated gradient should be scaled.
Returns:
The updated list of gradient to variable pairs.
Raises:
ValueError: If `grads_and_vars` is not a list or if `gradient_multipliers`
is empty or None or if `gradient_multipliers` is not a dictionary.
"""
if not isinstance(grads_and_vars, list):
raise ValueError('`grads_and_vars` must be a list.')
if not gradient_multipliers:
raise ValueError('`gradient_multipliers` is empty.')
if not isinstance(gradient_multipliers, dict):
raise ValueError('`gradient_multipliers` must be a dict.')
multiplied_grads_and_vars = []
for grad, var in grads_and_vars:
if var in gradient_multipliers or var.op.name in gradient_multipliers:
key = var if var in gradient_multipliers else var.op.name
if grad is None:
        raise ValueError('Requested a multiplier for a `None` gradient.')
multiplier = gradient_multipliers[key]
if not isinstance(multiplier, ops.Tensor):
multiplier = constant_op.constant(multiplier, dtype=grad.dtype)
if isinstance(grad, ops.IndexedSlices):
tmp = grad.values * multiplier
grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
else:
grad *= multiplier
multiplied_grads_and_vars.append((grad, var))
return multiplied_grads_and_vars
def add_gradients_summaries(grads_and_vars):
"""Add summaries to gradients.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
Returns:
The list of created summaries.
"""
summaries = []
for grad, var in grads_and_vars:
if grad is not None:
if isinstance(grad, ops.IndexedSlices):
grad_values = grad.values
else:
grad_values = grad
summaries.append(
summary.histogram(var.op.name + '/gradient', grad_values))
summaries.append(
summary.scalar(var.op.name + '/gradient_norm',
clip_ops.global_norm([grad_values])))
else:
logging.info('Var %s has no gradient', var.op.name)
return summaries
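# NOTE: The sketch below is illustrative and not part of the original library.
# It shows how these helpers can be combined when building a training op
# manually instead of via `create_train_op`. `optimizer`, `total_loss` and the
# variable name 'fc8/weights' are hypothetical placeholders:
#
#   grads_and_vars = optimizer.compute_gradients(total_loss)
#   grads_and_vars = clip_gradient_norms(grads_and_vars, max_norm=4.0)
#   grads_and_vars = multiply_gradients(grads_and_vars, {'fc8/weights': 2.0})
#   add_gradients_summaries(grads_and_vars)
#   train_op = optimizer.apply_gradients(grads_and_vars)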
_USE_GLOBAL_STEP = 0
def create_train_op(total_loss,
optimizer,
global_step=_USE_GLOBAL_STEP,
update_ops=None,
variables_to_train=None,
clip_gradient_norm=0,
summarize_gradients=False,
gate_gradients=tf_optimizer.Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
gradient_multipliers=None,
check_numerics=True):
"""Creates an `Operation` that evaluates the gradients and returns the loss.
Args:
total_loss: A `Tensor` representing the total loss.
optimizer: A tf.Optimizer to use for computing the gradients.
global_step: A `Tensor` representing the global step variable. If left as
`_USE_GLOBAL_STEP`, then tf.contrib.framework.global_step() is used.
update_ops: An optional list of updates to execute. If `update_ops` is
`None`, then the update ops are set to the contents of the
`tf.GraphKeys.UPDATE_OPS` collection. If `update_ops` is not `None`, but
it doesn't contain all of the update ops in `tf.GraphKeys.UPDATE_OPS`, a
warning will be displayed.
variables_to_train: an optional list of variables to train. If None, it will
default to all tf.compat.v1.trainable_variables().
    clip_gradient_norm: If greater than 0, the gradients will be clipped by
      this value.
    summarize_gradients: Whether or not to add summaries for each gradient.
gate_gradients: How to gate the computation of gradients. See tf.Optimizer.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: Whether or not to try colocating the gradients
with the ops that generated them.
gradient_multipliers: A dictionary of either `Variables` or `Variable` op
names to the coefficient by which the associated gradient should be
scaled.
check_numerics: Whether or not we apply check_numerics.
Returns:
    A `Tensor` that, when evaluated, computes the gradients and returns the
    total loss value.
"""
def transform_grads_fn(grads):
if gradient_multipliers:
with ops.name_scope('multiply_grads'):
grads = multiply_gradients(grads, gradient_multipliers)
# Clip gradients.
if clip_gradient_norm > 0:
with ops.name_scope('clip_grads'):
grads = clip_gradient_norms(grads, clip_gradient_norm)
return grads
return training.create_train_op(
total_loss=total_loss,
optimizer=optimizer,
global_step=global_step,
update_ops=update_ops,
variables_to_train=variables_to_train,
transform_grads_fn=transform_grads_fn,
summarize_gradients=summarize_gradients,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
check_numerics=check_numerics)
def _wait_for_step(sess, global_step, step):
"""Wait till the global step has reached at least 'step'.
Args:
sess: A session.
global_step: A Tensor.
step: Int. The global step to reach.
"""
while True:
if training_util.global_step(sess, global_step) >= step:
break
time.sleep(1.0)
def train_step(sess, train_op, global_step, train_step_kwargs):
"""Function that takes a gradient step and specifies whether to stop.
Args:
sess: The current session.
train_op: An `Operation` that evaluates the gradients and returns the total
loss.
global_step: A `Tensor` representing the global training step.
train_step_kwargs: A dictionary of keyword arguments.
Returns:
The total loss and a boolean indicating whether or not to stop training.
Raises:
ValueError: if 'should_trace' is in `train_step_kwargs` but `logdir` is not.
"""
start_time = time.time()
trace_run_options = None
run_metadata = None
if 'should_trace' in train_step_kwargs:
if 'logdir' not in train_step_kwargs:
raise ValueError('logdir must be present in train_step_kwargs when '
'should_trace is present')
if sess.run(train_step_kwargs['should_trace']):
trace_run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
total_loss, np_global_step = sess.run([train_op, global_step],
options=trace_run_options,
run_metadata=run_metadata)
time_elapsed = time.time() - start_time
if run_metadata is not None:
tl = timeline.Timeline(run_metadata.step_stats)
trace = tl.generate_chrome_trace_format()
trace_filename = os.path.join(train_step_kwargs['logdir'],
'tf_trace-%d.json' % np_global_step)
logging.info('Writing trace to %s', trace_filename)
file_io.write_string_to_file(trace_filename, trace)
if 'summary_writer' in train_step_kwargs:
train_step_kwargs['summary_writer'].add_run_metadata(
run_metadata, 'run_metadata-%d' % np_global_step)
if 'should_log' in train_step_kwargs:
if sess.run(train_step_kwargs['should_log']):
logging.info('global step %d: loss = %.4f (%.3f sec/step)',
np_global_step, total_loss, time_elapsed)
# TODO(nsilberman): figure out why we can't put this into sess.run. The
# issue right now is that the stop check depends on the global step. The
  # increment of global step often happens via the train op, which is
  # created using optimizer.apply_gradients.
#
# Since running `train_op` causes the global step to be incremented, one
  # would expect that using a control dependency would allow the
# should_stop check to be run in the same session.run call:
#
# with ops.control_dependencies([train_op]):
# should_stop_op = ...
#
# However, this actually seems not to work on certain platforms.
if 'should_stop' in train_step_kwargs:
should_stop = sess.run(train_step_kwargs['should_stop'])
else:
should_stop = False
return total_loss, should_stop
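# NOTE: The sketch below is illustrative and not part of the original library.
# It shows the calling convention a custom `train_step_fn` must follow: it
# receives (sess, train_op, global_step, train_step_kwargs) and returns
# (total_loss, should_stop). The step limit of 1000 is a hypothetical value:
#
#   def custom_train_step(sess, train_op, global_step, train_step_kwargs):
#     total_loss, should_stop = train_step(sess, train_op, global_step,
#                                          train_step_kwargs)
#     if sess.run(global_step) >= 1000:
#       should_stop = True
#     return total_loss, should_stop
#
#   slim.learning.train(train_op, logdir, train_step_fn=custom_train_step)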
_USE_DEFAULT = 0
def train(train_op,
logdir,
train_step_fn=train_step,
train_step_kwargs=_USE_DEFAULT,
log_every_n_steps=1,
graph=None,
master='',
is_chief=True,
global_step=None,
number_of_steps=None,
init_op=_USE_DEFAULT,
init_feed_dict=None,
local_init_op=_USE_DEFAULT,
init_fn=None,
ready_op=_USE_DEFAULT,
summary_op=_USE_DEFAULT,
save_summaries_secs=600,
summary_writer=_USE_DEFAULT,
startup_delay_steps=0,
saver=None,
save_interval_secs=600,
sync_optimizer=None,
session_config=None,
session_wrapper=None,
trace_every_n_steps=None,
ignore_live_threads=False):
"""Runs a training loop using a TensorFlow supervisor.
When the sync_optimizer is supplied, gradient updates are applied
  synchronously. Otherwise, gradient updates are applied asynchronously.
Args:
train_op: A `Tensor` that, when executed, will apply the gradients and
return the loss value.
logdir: The directory where training logs are written to. If None, model
checkpoints and summaries will not be written.
train_step_fn: The function to call in order to execute a single gradient
      step. The function must take exactly four arguments: the current
session, the `train_op` `Tensor`, a global step `Tensor` and a
dictionary.
train_step_kwargs: A dictionary which is passed to the `train_step_fn`. By
default, two `Boolean`, scalar ops called "should_stop" and "should_log"
are provided.
log_every_n_steps: The frequency, in terms of global steps, that the loss
and global step are logged.
graph: The graph to pass to the supervisor. If no graph is supplied the
default graph is used.
master: The address of the tensorflow master.
is_chief: Specifies whether or not the training is being run by the primary
replica during replica training.
global_step: The `Tensor` representing the global step. If left as `None`,
then training_util.get_or_create_global_step(), that is,
tf.contrib.framework.global_step() is used.
number_of_steps: The max number of gradient steps to take during training,
as measured by 'global_step': training will stop if global_step is greater
than 'number_of_steps'. If the value is left as None, training proceeds
indefinitely.
init_op: The initialization operation. If left to its default value, then
the session is initialized by calling
`tf.compat.v1.global_variables_initializer()`.
init_feed_dict: A feed dictionary to use when executing the `init_op`.
local_init_op: The local initialization operation. If left to its default
value, then the session is initialized by calling
`tf.compat.v1.local_variables_initializer()` and
`tf.compat.v1.tables_initializer()`.
init_fn: An optional callable to be executed after `init_op` is called. The
callable must accept one argument, the session being initialized.
ready_op: Operation to check if the model is ready to use. If left to its
default value, then the session checks for readiness by calling
`tf.compat.v1.report_uninitialized_variables()`.
summary_op: The summary operation.
save_summaries_secs: How often, in seconds, to save summaries.
summary_writer: `SummaryWriter` to use. Can be `None` to indicate that no
summaries should be written. If unset, we create a SummaryWriter.
startup_delay_steps: The number of steps to wait for before beginning. Note
that this must be 0 if a sync_optimizer is supplied.
saver: Saver to save checkpoints. If None, a default one will be created and
used.
save_interval_secs: How often, in seconds, to save the model to `logdir`.
sync_optimizer: an instance of tf.compat.v1.train.SyncReplicasOptimizer, or
a list of them. If the argument is supplied, gradient updates will be
synchronous. If left as `None`, gradient updates will be asynchronous.
session_config: An instance of `tf.compat.v1.ConfigProto` that will be used
to configure the `Session`. If left as `None`, the default will be used.
session_wrapper: A function that takes a `tf.compat.v1.Session` object as
the only argument and returns a wrapped session object that has the same
methods that the original object has, or `None`. Iff not `None`, the
wrapped object will be used for training.
trace_every_n_steps: produce and save a `Timeline` in Chrome trace format
and add it to the summaries every `trace_every_n_steps`. If None, no trace
information will be produced or saved.
ignore_live_threads: If `True` ignores threads that remain running after a
grace period when stopping the supervisor, instead of raising a
RuntimeError.
Returns:
the value of the loss function after training.
Raises:
ValueError: if `train_op` is empty or if `startup_delay_steps` is
non-zero when `sync_optimizer` is supplied, if `number_of_steps` is
negative, or if `trace_every_n_steps` is not `None` and no `logdir` is
provided.
"""
if train_op is None:
raise ValueError('train_op cannot be None.')
if logdir is None:
if summary_op != _USE_DEFAULT:
raise ValueError('Cannot provide summary_op because logdir=None')
if saver is not None:
raise ValueError('Cannot provide saver because logdir=None')
if trace_every_n_steps is not None:
raise ValueError('Cannot provide trace_every_n_steps because '
'logdir=None')
if isinstance(sync_optimizer, sync_replicas_optimizer.SyncReplicasOptimizer):
sync_optimizer = [sync_optimizer]
if sync_optimizer is not None and startup_delay_steps > 0:
raise ValueError(
'startup_delay_steps must be zero when sync_optimizer is supplied.')
if number_of_steps is not None and number_of_steps <= 0:
raise ValueError(
'`number_of_steps` must be either None or a positive number.')
graph = graph or ops.get_default_graph()
with graph.as_default():
if global_step is None:
global_step = training_util.get_or_create_global_step()
saver = saver or tf_saver.Saver()
if sync_optimizer is not None:
for opt in sync_optimizer:
if not isinstance(opt, sync_replicas_optimizer.SyncReplicasOptimizer):
raise ValueError(
'`sync_optimizer` must be a tf.train.SyncReplicasOptimizer.')
with ops.name_scope('init_ops'):
if init_op == _USE_DEFAULT:
init_op = variables.global_variables_initializer()
if ready_op == _USE_DEFAULT:
ready_op = variables.report_uninitialized_variables()
if local_init_op == _USE_DEFAULT:
local_init_op = control_flow_ops.group(
variables.local_variables_initializer(),
lookup_ops.tables_initializer())
if sync_optimizer is not None and isinstance(sync_optimizer, list):
with ops.control_dependencies(
[local_init_op] if local_init_op is not None else []):
if is_chief:
local_init_op = control_flow_ops.group(
*[opt.chief_init_op for opt in sync_optimizer])
else:
local_init_op = control_flow_ops.group(
*[opt.local_step_init_op for opt in sync_optimizer])
ready_for_local_init_op = control_flow_ops.group(
*[opt.ready_for_local_init_op for opt in sync_optimizer])
else:
ready_for_local_init_op = None
if summary_op == _USE_DEFAULT:
summary_op = summary.merge_all()
if summary_writer == _USE_DEFAULT:
summary_writer = supervisor.Supervisor.USE_DEFAULT
if is_chief and sync_optimizer is not None:
# Need to create these BEFORE the supervisor finalizes the graph:
init_tokens_op = [opt.get_init_tokens_op() for opt in sync_optimizer]
chief_queue_runner = [
opt.get_chief_queue_runner() for opt in sync_optimizer
]
if train_step_kwargs == _USE_DEFAULT:
with ops.name_scope('train_step'):
train_step_kwargs = {}
if number_of_steps:
should_stop_op = math_ops.greater_equal(global_step, number_of_steps)
else:
should_stop_op = constant_op.constant(False)
train_step_kwargs['should_stop'] = should_stop_op
if log_every_n_steps > 0:
train_step_kwargs['should_log'] = math_ops.equal(
math_ops.mod(global_step, log_every_n_steps), 0)
if is_chief and trace_every_n_steps is not None:
train_step_kwargs['should_trace'] = math_ops.equal(
math_ops.mod(global_step, trace_every_n_steps), 0)
train_step_kwargs['logdir'] = logdir
sv = supervisor.Supervisor(
graph=graph,
is_chief=is_chief,
logdir=logdir,
init_op=init_op,
init_feed_dict=init_feed_dict,
local_init_op=local_init_op,
ready_for_local_init_op=ready_for_local_init_op,
ready_op=ready_op,
summary_op=summary_op,
summary_writer=summary_writer,
global_step=global_step,
saver=saver,
save_summaries_secs=save_summaries_secs,
save_model_secs=save_interval_secs,
init_fn=init_fn)
if summary_writer is not None:
train_step_kwargs['summary_writer'] = sv.summary_writer
total_loss = None
should_retry = True
while should_retry:
try:
should_retry = False
with sv.managed_session(
master, start_standard_services=False, config=session_config) as sess:
logging.info('Starting Session.')
if session_wrapper is not None:
logging.info('Wrapping session with wrapper function: %s',
session_wrapper)
sess = session_wrapper(sess)
if is_chief:
if logdir:
sv.start_standard_services(sess)
elif startup_delay_steps > 0:
# (use sys.maxsize because sys.maxint doesn't exist in Python 3)
_wait_for_step(
sess, global_step,
min(startup_delay_steps, number_of_steps or sys.maxsize))
threads = sv.start_queue_runners(sess)
logging.info('Starting Queues.')
if is_chief and sync_optimizer is not None:
sv.start_queue_runners(sess, chief_queue_runner)
sess.run(init_tokens_op)
try:
while not sv.should_stop():
total_loss, should_stop = train_step_fn(sess, train_op, global_step,
train_step_kwargs)
if should_stop:
logging.info('Stopping Training.')
sv.request_stop()
break
except errors.OutOfRangeError as e:
# OutOfRangeError is thrown when epoch limit per
# tf.compat.v1.train.limit_epochs is reached.
logging.info('Caught OutOfRangeError. Stopping Training. %s', e)
if logdir and sv.is_chief:
logging.info('Finished training! Saving model to disk.')
sv.saver.save(sess, sv.save_path, global_step=sv.global_step)
sv.stop(
threads,
close_summary_writer=True,
ignore_live_threads=ignore_live_threads)
except errors.AbortedError:
# Always re-run on AbortedError as it indicates a restart of one of the
# distributed tensorflow servers.
logging.info('Retrying training!')
should_retry = True
return total_loss
| tensorflow-master | tensorflow/contrib/slim/python/slim/learning.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains helper functions for creating summaries.
This module contains various helper functions for quickly and easily adding
TensorFlow summaries. These allow users to print summary values
automatically as they are computed and add prefixes to collections of summaries.
Example usage:
import tensorflow as tf
slim = tf.contrib.slim
slim.summaries.add_histogram_summaries(slim.variables.get_model_variables())
slim.summaries.add_scalar_summary(total_loss, 'Total Loss')
slim.summaries.add_scalar_summary(learning_rate, 'Learning Rate')
slim.summaries.add_histogram_summaries(my_tensors)
slim.summaries.add_zero_fraction_summaries(my_tensors)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import nn_impl as nn
from tensorflow.python.summary import summary
def _get_summary_name(tensor, name=None, prefix=None, postfix=None):
"""Produces the summary name given.
Args:
tensor: A variable or op `Tensor`.
name: The optional name for the summary.
prefix: An optional prefix for the summary name.
postfix: An optional postfix for the summary name.
Returns:
a summary name.
"""
if not name:
name = tensor.op.name
if prefix:
name = prefix + '/' + name
if postfix:
name = name + '/' + postfix
return name
def add_histogram_summary(tensor, name=None, prefix=None):
"""Adds a histogram summary for the given tensor.
Args:
tensor: A variable or op tensor.
name: The optional name for the summary.
prefix: An optional prefix for the summary names.
Returns:
A scalar `Tensor` of type `string` whose contents are the serialized
`Summary` protocol buffer.
"""
return summary.histogram(
_get_summary_name(tensor, name, prefix), tensor)
def add_image_summary(tensor, name=None, prefix=None, print_summary=False):
"""Adds an image summary for the given tensor.
Args:
tensor: a variable or op tensor with shape [batch,height,width,channels]
name: the optional name for the summary.
prefix: An optional prefix for the summary names.
print_summary: If `True`, the summary is printed to stdout when the summary
is computed.
Returns:
An image `Tensor` of type `string` whose contents are the serialized
`Summary` protocol buffer.
"""
summary_name = _get_summary_name(tensor, name, prefix)
# If print_summary, then we need to make sure that this call doesn't add the
# non-printing op to the collection. We'll add it to the collection later.
collections = [] if print_summary else None
op = summary.image(
name=summary_name, tensor=tensor, collections=collections)
if print_summary:
op = logging_ops.Print(op, [tensor], summary_name)
ops.add_to_collection(ops.GraphKeys.SUMMARIES, op)
return op
def add_scalar_summary(tensor, name=None, prefix=None, print_summary=False):
"""Adds a scalar summary for the given tensor.
Args:
tensor: a variable or op tensor.
name: the optional name for the summary.
prefix: An optional prefix for the summary names.
print_summary: If `True`, the summary is printed to stdout when the summary
is computed.
Returns:
A scalar `Tensor` of type `string` whose contents are the serialized
`Summary` protocol buffer.
"""
collections = [] if print_summary else None
summary_name = _get_summary_name(tensor, name, prefix)
# If print_summary, then we need to make sure that this call doesn't add the
# non-printing op to the collection. We'll add it to the collection later.
op = summary.scalar(
name=summary_name, tensor=tensor, collections=collections)
if print_summary:
op = logging_ops.Print(op, [tensor], summary_name)
ops.add_to_collection(ops.GraphKeys.SUMMARIES, op)
return op
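# NOTE: The example below is illustrative and not part of the original library.
# With a prefix, the summary tag is composed as '<prefix>/<name>'; with
# print_summary=True the value is also printed whenever the summary op runs.
# The tensor and names are hypothetical:
#
#   loss_summary = add_scalar_summary(
#       total_loss, name='total_loss', prefix='losses', print_summary=True)
#   # -> creates a summary tagged 'losses/total_loss' that also prints its
#   #    value each time it is evaluated.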
def add_zero_fraction_summary(tensor, name=None, prefix=None,
print_summary=False):
"""Adds a summary for the percentage of zero values in the given tensor.
Args:
tensor: a variable or op tensor.
name: the optional name for the summary.
prefix: An optional prefix for the summary names.
print_summary: If `True`, the summary is printed to stdout when the summary
is computed.
Returns:
A scalar `Tensor` of type `string` whose contents are the serialized
`Summary` protocol buffer.
"""
name = _get_summary_name(tensor, name, prefix, 'Fraction_of_Zero_Values')
tensor = nn.zero_fraction(tensor)
return add_scalar_summary(tensor, name, print_summary=print_summary)
def add_histogram_summaries(tensors, prefix=None):
"""Adds a histogram summary for each of the given tensors.
Args:
tensors: A list of variable or op tensors.
prefix: An optional prefix for the summary names.
Returns:
A list of scalar `Tensors` of type `string` whose contents are the
serialized `Summary` protocol buffer.
"""
summary_ops = []
for tensor in tensors:
summary_ops.append(add_histogram_summary(tensor, prefix=prefix))
return summary_ops
def add_image_summaries(tensors, prefix=None):
"""Adds an image summary for each of the given tensors.
Args:
tensors: A list of variable or op tensors.
prefix: An optional prefix for the summary names.
Returns:
A list of scalar `Tensors` of type `string` whose contents are the
serialized `Summary` protocol buffer.
"""
summary_ops = []
for tensor in tensors:
summary_ops.append(add_image_summary(tensor, prefix=prefix))
return summary_ops
def add_scalar_summaries(tensors, prefix=None, print_summary=False):
"""Adds a scalar summary for each of the given tensors.
Args:
tensors: a list of variable or op tensors.
prefix: An optional prefix for the summary names.
print_summary: If `True`, the summary is printed to stdout when the summary
is computed.
Returns:
A list of scalar `Tensors` of type `string` whose contents are the
serialized `Summary` protocol buffer.
"""
summary_ops = []
for tensor in tensors:
summary_ops.append(add_scalar_summary(tensor, prefix=prefix,
print_summary=print_summary))
return summary_ops
def add_zero_fraction_summaries(tensors, prefix=None):
"""Adds a scalar zero-fraction summary for each of the given tensors.
Args:
tensors: a list of variable or op tensors.
prefix: An optional prefix for the summary names.
Returns:
A list of scalar `Tensors` of type `string` whose contents are the
serialized `Summary` protocol buffer.
"""
summary_ops = []
for tensor in tensors:
summary_ops.append(add_zero_fraction_summary(tensor, prefix=prefix))
return summary_ops
| tensorflow-master | tensorflow/contrib/slim/python/slim/summaries.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools for analyzing the operations and variables in a TensorFlow graph.
To analyze the operations in a graph:
images, labels = LoadData(...)
predictions = MyModel(images)
slim.model_analyzer.analyze_ops(tf.compat.v1.get_default_graph(),
print_info=True)
To analyze the model variables in a graph:
variables = tf.compat.v1.model_variables()
slim.model_analyzer.analyze_vars(variables, print_info=False)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def tensor_description(var):
"""Returns a compact and informative string about a tensor.
Args:
var: A tensor variable.
Returns:
a string with type and size, e.g.: (float32 1x8x8x1024).
"""
description = '(' + str(var.dtype.name) + ' '
sizes = var.get_shape()
for i, size in enumerate(sizes):
description += str(size)
if i < len(sizes) - 1:
description += 'x'
description += ')'
return description
def analyze_ops(graph, print_info=False):
"""Compute the estimated size of the ops.outputs in the graph.
Args:
graph: the graph containing the operations.
print_info: Optional, if true print ops and their outputs.
Returns:
total size of the ops.outputs
"""
if print_info:
print('---------')
print('Operations: name -> (type shapes) [size]')
print('---------')
total_size = 0
for op in graph.get_operations():
op_size = 0
shapes = []
for output in op.outputs:
      # If num_elements() is None (shape not fully defined), assume size 0.
output_size = output.get_shape().num_elements() or 0
if output.get_shape():
shapes.append(tensor_description(output))
op_size += output_size
if print_info:
print(op.name, '\t->', ', '.join(shapes), '[' + str(op_size) + ']')
total_size += op_size
return total_size
def analyze_vars(variables, print_info=False):
"""Prints the names and shapes of the variables.
Args:
variables: list of variables, for example tf.compat.v1.global_variables().
print_info: Optional, if true print variables and their shape.
Returns:
(total size of the variables, total bytes of the variables)
"""
if print_info:
print('---------')
print('Variables: name (type shape) [size]')
print('---------')
total_size = 0
total_bytes = 0
for var in variables:
    # If num_elements() is None (shape not fully defined), assume size 0.
var_size = var.get_shape().num_elements() or 0
var_bytes = var_size * var.dtype.size
total_size += var_size
total_bytes += var_bytes
if print_info:
print(var.name, tensor_description(var),
'[%d, bytes: %d]' % (var_size, var_bytes))
if print_info:
print('Total size of variables: %d' % total_size)
print('Total bytes of variables: %d' % total_bytes)
return total_size, total_bytes
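# NOTE: Worked example (illustrative, not part of the original library): for a
# float32 variable of shape [8, 8, 3, 64], analyze_vars counts
#
#   var_size  = 8 * 8 * 3 * 64   # 12288 elements
#   var_bytes = var_size * 4     # 49152 bytes, since float32 dtype.size == 4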
| tensorflow-master | tensorflow/contrib/slim/python/slim/model_analyzer.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.resnet_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
from tensorflow.contrib.slim.python.slim.nets import resnet_v2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def create_test_input(batch_size, height, width, channels):
"""Create test input tensor.
Args:
batch_size: The number of images per batch or `None` if unknown.
height: The height of each image or `None` if unknown.
width: The width of each image or `None` if unknown.
channels: The number of channels per image or `None` if unknown.
Returns:
Either a placeholder `Tensor` of dimension
[batch_size, height, width, channels] if any of the inputs are `None` or a
constant `Tensor` with the mesh grid values along the spatial dimensions.
"""
if None in [batch_size, height, width, channels]:
return array_ops.placeholder(dtypes.float32,
(batch_size, height, width, channels))
else:
return math_ops.cast(
np.tile(
np.reshape(
np.reshape(np.arange(height), [height, 1]) + np.reshape(
np.arange(width), [1, width]), [1, height, width, 1]),
[batch_size, 1, 1, channels]),
dtypes.float32)
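# NOTE: Illustrative example, not part of the original test. For fully
# specified dimensions, create_test_input returns a mesh-grid constant whose
# value at spatial position (row, col) is row + col. For example,
# create_test_input(1, 2, 3, 1) has the spatial slice
#
#   [[0., 1., 2.],
#    [1., 2., 3.]]
#
# tiled across the batch and channel dimensions.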
class ResnetUtilsTest(test.TestCase):
def testSubsampleThreeByThree(self):
x = array_ops.reshape(math_ops.cast(math_ops.range(9), dtypes.float32),
[1, 3, 3, 1])
x = resnet_utils.subsample(x, 2)
expected = array_ops.reshape(
constant_op.constant([0, 2, 6, 8]), [1, 2, 2, 1])
with self.cached_session():
self.assertAllClose(x.eval(), expected.eval())
def testSubsampleFourByFour(self):
x = array_ops.reshape(math_ops.cast(math_ops.range(16), dtypes.float32),
[1, 4, 4, 1])
x = resnet_utils.subsample(x, 2)
expected = array_ops.reshape(
constant_op.constant([0, 2, 8, 10]), [1, 2, 2, 1])
with self.cached_session():
self.assertAllClose(x.eval(), expected.eval())
def testConv2DSameEven(self):
n, n2 = 4, 2
# Input image.
x = create_test_input(1, n, n, 1)
# Convolution kernel.
w = create_test_input(1, 3, 3, 1)
w = array_ops.reshape(w, [3, 3, 1, 1])
variable_scope.get_variable('Conv/weights', initializer=w)
variable_scope.get_variable('Conv/biases', initializer=array_ops.zeros([1]))
variable_scope.get_variable_scope().reuse_variables()
y1 = layers.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = math_ops.cast([[14, 28, 43, 26],
[28, 48, 66, 37],
[43, 66, 84, 46],
[26, 37, 46, 22]],
dtypes.float32)
y1_expected = array_ops.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = math_ops.cast([[14, 43], [43, 84]], dtypes.float32)
y2_expected = array_ops.reshape(y2_expected, [1, n2, n2, 1])
y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
y3_expected = y2_expected
y4 = layers.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
y4_expected = math_ops.cast([[48, 37], [37, 22]], dtypes.float32)
y4_expected = array_ops.reshape(y4_expected, [1, n2, n2, 1])
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
self.assertAllClose(y4.eval(), y4_expected.eval())
def testConv2DSameOdd(self):
n, n2 = 5, 3
# Input image.
x = create_test_input(1, n, n, 1)
# Convolution kernel.
w = create_test_input(1, 3, 3, 1)
w = array_ops.reshape(w, [3, 3, 1, 1])
variable_scope.get_variable('Conv/weights', initializer=w)
variable_scope.get_variable('Conv/biases', initializer=array_ops.zeros([1]))
variable_scope.get_variable_scope().reuse_variables()
y1 = layers.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = math_ops.cast([[14, 28, 43, 58, 34],
[28, 48, 66, 84, 46],
[43, 66, 84, 102, 55],
[58, 84, 102, 120, 64],
[34, 46, 55, 64, 30]],
dtypes.float32)
y1_expected = array_ops.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = math_ops.cast([[14, 43, 34],
[43, 84, 55],
[34, 55, 30]],
dtypes.float32)
y2_expected = array_ops.reshape(y2_expected, [1, n2, n2, 1])
y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
y3_expected = y2_expected
y4 = layers.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
y4_expected = y2_expected
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
self.assertAllClose(y4.eval(), y4_expected.eval())
def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
"""A plain ResNet without extra layers before or after the ResNet blocks."""
with variable_scope.variable_scope(scope, values=[inputs]):
with arg_scope([layers.conv2d], outputs_collections='end_points'):
net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
end_points = utils.convert_collection_to_dict('end_points')
return net, end_points
def testEndPointsV2(self):
"""Test the end points of a tiny v2 bottleneck network."""
blocks = [
resnet_v2.resnet_v2_block(
'block1', base_depth=1, num_units=2, stride=2),
resnet_v2.resnet_v2_block(
'block2', base_depth=2, num_units=2, stride=1),
]
inputs = create_test_input(2, 32, 16, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
expected = [
'tiny/block1/unit_1/bottleneck_v2/shortcut',
'tiny/block1/unit_1/bottleneck_v2/conv1',
'tiny/block1/unit_1/bottleneck_v2/conv2',
'tiny/block1/unit_1/bottleneck_v2/conv3',
'tiny/block1/unit_2/bottleneck_v2/conv1',
'tiny/block1/unit_2/bottleneck_v2/conv2',
'tiny/block1/unit_2/bottleneck_v2/conv3',
'tiny/block2/unit_1/bottleneck_v2/shortcut',
'tiny/block2/unit_1/bottleneck_v2/conv1',
'tiny/block2/unit_1/bottleneck_v2/conv2',
'tiny/block2/unit_1/bottleneck_v2/conv3',
'tiny/block2/unit_2/bottleneck_v2/conv1',
'tiny/block2/unit_2/bottleneck_v2/conv2',
'tiny/block2/unit_2/bottleneck_v2/conv3'
]
self.assertItemsEqual(expected, end_points)
def _stack_blocks_nondense(self, net, blocks):
"""A simplified ResNet Block stacker without output stride control."""
for block in blocks:
with variable_scope.variable_scope(block.scope, 'block', [net]):
for i, unit in enumerate(block.args):
with variable_scope.variable_scope('unit_%d' % (i + 1), values=[net]):
net = block.unit_fn(net, rate=1, **unit)
return net
def testAtrousValuesBottleneck(self):
"""Verify the values of dense feature extraction by atrous convolution.
Make sure that dense feature extraction by stack_blocks_dense() followed by
subsampling gives identical results to feature extraction at the nominal
network output stride using the simple self._stack_blocks_nondense() above.
"""
block = resnet_v2.resnet_v2_block
blocks = [
block('block1', base_depth=1, num_units=2, stride=2),
block('block2', base_depth=2, num_units=2, stride=2),
block('block3', base_depth=4, num_units=2, stride=2),
block('block4', base_depth=8, num_units=2, stride=1),
]
nominal_stride = 8
# Test both odd and even input dimensions.
height = 30
width = 31
with arg_scope(resnet_utils.resnet_arg_scope()):
with arg_scope([layers.batch_norm], is_training=False):
for output_stride in [1, 2, 4, 8, None]:
with ops.Graph().as_default():
with self.cached_session() as sess:
random_seed.set_random_seed(0)
inputs = create_test_input(1, height, width, 3)
# Dense feature extraction followed by subsampling.
output = resnet_utils.stack_blocks_dense(inputs, blocks,
output_stride)
if output_stride is None:
factor = 1
else:
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
variable_scope.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected = self._stack_blocks_nondense(inputs, blocks)
sess.run(variables.global_variables_initializer())
output, expected = sess.run([output, expected])
self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
class ResnetCompleteNetworkTest(test.TestCase):
"""Tests with complete small ResNet v2 networks."""
def _resnet_small(self,
inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
reuse=None,
scope='resnet_v2_small'):
"""A shallow and thin ResNet v2 for faster tests."""
block = resnet_v2.resnet_v2_block
blocks = [
block('block1', base_depth=1, num_units=3, stride=2),
block('block2', base_depth=2, num_units=3, stride=2),
block('block3', base_depth=4, num_units=3, stride=2),
block('block4', base_depth=8, num_units=2, stride=1),
]
return resnet_v2.resnet_v2(inputs, blocks, num_classes, is_training,
global_pool, output_stride, include_root_block,
reuse, scope)
def testClassificationEndPoints(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
logits, end_points = self._resnet_small(
inputs, num_classes, global_pool=global_pool, scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
self.assertTrue('predictions' in end_points)
self.assertListEqual(end_points['predictions'].get_shape().as_list(),
[2, 1, 1, num_classes])
def testClassificationShapes(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(
inputs, num_classes, global_pool=global_pool, scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 28, 28, 4],
'resnet/block2': [2, 14, 14, 8],
'resnet/block3': [2, 7, 7, 16],
'resnet/block4': [2, 7, 7, 32]
}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
inputs = create_test_input(2, 321, 321, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(
inputs, num_classes, global_pool=global_pool, scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 41, 41, 4],
'resnet/block2': [2, 21, 21, 8],
'resnet/block3': [2, 11, 11, 16],
'resnet/block4': [2, 11, 11, 32]
}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testRootlessFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
inputs = create_test_input(2, 128, 128, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(
inputs,
num_classes,
global_pool=global_pool,
include_root_block=False,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 64, 64, 4],
'resnet/block2': [2, 32, 32, 8],
'resnet/block3': [2, 16, 16, 16],
'resnet/block4': [2, 16, 16, 32]
}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
output_stride = 8
inputs = create_test_input(2, 321, 321, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(
inputs,
num_classes,
global_pool=global_pool,
output_stride=output_stride,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 41, 41, 4],
'resnet/block2': [2, 41, 41, 8],
'resnet/block3': [2, 41, 41, 16],
'resnet/block4': [2, 41, 41, 32]
}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalValues(self):
"""Verify dense feature extraction with atrous convolution."""
nominal_stride = 32
for output_stride in [4, 8, 16, 32, None]:
with arg_scope(resnet_utils.resnet_arg_scope()):
with ops.Graph().as_default():
with self.cached_session() as sess:
random_seed.set_random_seed(0)
inputs = create_test_input(2, 81, 81, 3)
# Dense feature extraction followed by subsampling.
output, _ = self._resnet_small(
inputs,
None,
is_training=False,
global_pool=False,
output_stride=output_stride)
if output_stride is None:
factor = 1
else:
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
variable_scope.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected, _ = self._resnet_small(
inputs, None, is_training=False, global_pool=False)
sess.run(variables.global_variables_initializer())
self.assertAllClose(
output.eval(), expected.eval(), atol=1e-4, rtol=1e-4)
def testUnknownBatchSize(self):
batch = 2
height, width = 65, 65
global_pool = True
num_classes = 10
inputs = create_test_input(None, height, width, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
logits, _ = self._resnet_small(
inputs, num_classes, global_pool=global_pool, scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, 1, 1, num_classes])
images = create_test_input(batch, height, width, 3)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 1, 1, num_classes))
def testFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
inputs = create_test_input(batch, None, None, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small(inputs, None, global_pool=global_pool)
self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 3, 3, 32))
def testAtrousFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
output_stride = 8
inputs = create_test_input(batch, None, None, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small(
inputs, None, global_pool=global_pool, output_stride=output_stride)
self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 9, 9, 32))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/nets/resnet_v2_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.overfeat."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.slim.python.slim.nets import overfeat
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class OverFeatTest(test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 231, 231
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = overfeat.overfeat(inputs, num_classes)
self.assertEquals(logits.op.name, 'overfeat/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 281, 281
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = overfeat.overfeat(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'overfeat/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 2, 2, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 231, 231
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = overfeat.overfeat(inputs, num_classes)
expected_names = [
'overfeat/conv1', 'overfeat/pool1', 'overfeat/conv2',
'overfeat/pool2', 'overfeat/conv3', 'overfeat/conv4',
'overfeat/conv5', 'overfeat/pool5', 'overfeat/fc6', 'overfeat/fc7',
'overfeat/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testModelVariables(self):
batch_size = 5
height, width = 231, 231
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
overfeat.overfeat(inputs, num_classes)
expected_names = [
'overfeat/conv1/weights',
'overfeat/conv1/biases',
'overfeat/conv2/weights',
'overfeat/conv2/biases',
'overfeat/conv3/weights',
'overfeat/conv3/biases',
'overfeat/conv4/weights',
'overfeat/conv4/biases',
'overfeat/conv5/weights',
'overfeat/conv5/biases',
'overfeat/fc6/weights',
'overfeat/fc6/biases',
'overfeat/fc7/weights',
'overfeat/fc7/biases',
'overfeat/fc8/weights',
'overfeat/fc8/biases',
]
model_variables = [v.op.name for v in variables_lib.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 231, 231
num_classes = 1000
with self.cached_session():
eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = overfeat.overfeat(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = math_ops.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 231, 231
eval_height, eval_width = 281, 281
num_classes = 1000
with self.cached_session():
train_inputs = random_ops.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = overfeat.overfeat(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
variable_scope.get_variable_scope().reuse_variables()
eval_inputs = random_ops.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = overfeat.overfeat(
eval_inputs, is_training=False, spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 2, 2, num_classes])
logits = math_ops.reduce_mean(logits, [1, 2])
predictions = math_ops.argmax(logits, 1)
self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 231, 231
with self.cached_session() as sess:
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = overfeat.overfeat(inputs)
sess.run(variables.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/nets/overfeat_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the original form of Residual Networks.
The 'v1' residual networks (ResNets) implemented in this module were proposed
by:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
Other variants were introduced in:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The networks defined in this module utilize the bottleneck building block of
[1] with projection shortcuts only for increasing depths. They employ batch
normalization *after* every weight layer. This is the architecture used by
MSRA in the Imagenet and MSCOCO 2016 competition models ResNet-101 and
ResNet-152. See [2; Fig. 1a] for a comparison between the current 'v1'
architecture and the alternative 'v2' architecture of [2] which uses batch
normalization *before* every weight layer in the so-called full pre-activation
units.
Typical use:
   from tensorflow.contrib.slim.python.slim.nets import resnet_v1
ResNet-101 for image classification into 1000 classes:
# inputs has shape [batch, 224, 224, 3]
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
net, end_points = resnet_v1.resnet_v1_101(inputs, 1000, is_training=False)
ResNet-101 for semantic segmentation into 21 classes:
# inputs has shape [batch, 513, 513, 3]
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
net, end_points = resnet_v1.resnet_v1_101(inputs,
21,
is_training=False,
global_pool=False,
output_stride=16)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
resnet_arg_scope = resnet_utils.resnet_arg_scope
@add_arg_scope
def bottleneck(inputs,
depth,
depth_bottleneck,
stride,
rate=1,
outputs_collections=None,
scope=None):
"""Bottleneck residual unit variant with BN after convolutions.
This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for
  its definition. Note that we use the bottleneck variant here, which has an
extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
depth_bottleneck: The depth of the bottleneck layers.
stride: The ResNet unit's stride. Determines the amount of downsampling of
      the unit's output compared to its input.
rate: An integer, rate for atrous convolution.
outputs_collections: Collection to add the ResNet unit output.
scope: Optional variable_scope.
Returns:
The ResNet unit's output.
"""
with variable_scope.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
depth_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
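    # Use an identity shortcut (subsampled when stride > 1) if the input depth
    # already matches the unit's output depth; otherwise project the shortcut
    # with a 1x1 convolution.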
if depth == depth_in:
shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
else:
shortcut = layers.conv2d(
inputs,
depth, [1, 1],
stride=stride,
activation_fn=None,
scope='shortcut')
residual = layers.conv2d(
inputs, depth_bottleneck, [1, 1], stride=1, scope='conv1')
residual = resnet_utils.conv2d_same(
residual, depth_bottleneck, 3, stride, rate=rate, scope='conv2')
residual = layers.conv2d(
residual, depth, [1, 1], stride=1, activation_fn=None, scope='conv3')
output = nn_ops.relu(shortcut + residual)
return utils.collect_named_outputs(outputs_collections, sc.name, output)
def resnet_v1(inputs,
blocks,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
reuse=None,
scope=None):
"""Generator for v1 ResNet models.
This function generates a family of ResNet v1 models. See the resnet_v1_*()
methods for specific model instantiations, obtained by selecting different
block instantiations that produce ResNets of various depths.
Training for image classification on Imagenet is usually done with [224, 224]
inputs, resulting in [7, 7] feature maps at the output of the last ResNet
block for the ResNets defined in [1] that have nominal stride equal to 32.
However, for dense prediction tasks we advise that one uses inputs with
spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
this case the feature maps at the ResNet output will have spatial shape
[(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
and corners exactly aligned with the input image corners, which greatly
facilitates alignment of the features to the image. Using as input [225, 225]
images results in [8, 8] feature maps at the output of the last ResNet block.
For dense prediction tasks, the ResNet needs to run in fully-convolutional
(FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
have nominal stride equal to 32 and a good choice in FCN mode is to use
output_stride=16 in order to increase the density of the computed features at
small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
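  For example, a [321, 321] input with output_stride=16 produces feature maps
  of spatial shape [(321 - 1) / 16 + 1, (321 - 1) / 16 + 1] = [21, 21], while
  output_stride=8 produces [41, 41] feature maps.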
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
blocks: A list of length equal to the number of ResNet blocks. Each element
is a resnet_utils.Block object describing the units in the block.
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
is_training: whether batch_norm layers are in training mode.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
include_root_block: If True, include the initial convolution followed by
      max-pooling; if False, excludes it.
reuse: whether or not the network and its variables should be reused. To be
      able to reuse, 'scope' must be given.
scope: Optional variable_scope.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is not None, net contains the pre-softmax
activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: If the target output_stride is not valid.
"""
with variable_scope.variable_scope(
scope, 'resnet_v1', [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
with arg_scope(
[layers.conv2d, bottleneck, resnet_utils.stack_blocks_dense],
outputs_collections=end_points_collection):
with arg_scope([layers.batch_norm], is_training=is_training):
net = inputs
if include_root_block:
if output_stride is not None:
if output_stride % 4 != 0:
raise ValueError('The output_stride needs to be a multiple of 4.')
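            # The root block below already downsamples by a factor of 4 (conv1
            # with stride 2 followed by pool1 with stride 2), so the remaining
            # blocks only need to realize output_stride / 4.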
output_stride /= 4
net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope='pool1')
net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
if global_pool:
# Global average pooling.
net = math_ops.reduce_mean(net, [1, 2], name='pool5', keepdims=True)
if num_classes is not None:
net = layers.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='logits')
# Convert end_points_collection into a dictionary of end_points.
end_points = utils.convert_collection_to_dict(end_points_collection)
if num_classes is not None:
end_points['predictions'] = layers_lib.softmax(
net, scope='predictions')
return net, end_points
resnet_v1.default_image_size = 224
def resnet_v1_block(scope, base_depth, num_units, stride):
"""Helper function for creating a resnet_v1 bottleneck block.
Args:
scope: The scope of the block.
base_depth: The depth of the bottleneck layer for each unit.
num_units: The number of units in the block.
stride: The stride of the block, implemented as a stride in the last unit.
All other units have stride=1.
Returns:
A resnet_v1 bottleneck block.
"""
return resnet_utils.Block(scope, bottleneck, [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': 1
}] * (num_units - 1) + [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': stride
}])
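# For example, resnet_v1_block('block1', base_depth=64, num_units=3, stride=2)
# returns a Block whose units are two copies of
# {'depth': 256, 'depth_bottleneck': 64, 'stride': 1} followed by a final
# {'depth': 256, 'depth_bottleneck': 64, 'stride': 2}.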
def resnet_v1_50(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v1_50'):
"""ResNet-50 model of [1]. See resnet_v1() for arg and return description."""
blocks = [
resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v1_block('block3', base_depth=256, num_units=6, stride=2),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v1(
inputs,
blocks,
num_classes,
is_training,
global_pool,
output_stride,
include_root_block=True,
reuse=reuse,
scope=scope)
def resnet_v1_101(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v1_101'):
"""ResNet-101 model of [1]. See resnet_v1() for arg and return description."""
blocks = [
resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v1_block('block3', base_depth=256, num_units=23, stride=2),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v1(
inputs,
blocks,
num_classes,
is_training,
global_pool,
output_stride,
include_root_block=True,
reuse=reuse,
scope=scope)
def resnet_v1_152(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v1_152'):
"""ResNet-152 model of [1]. See resnet_v1() for arg and return description."""
blocks = [
resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=128, num_units=8, stride=2),
resnet_v1_block('block3', base_depth=256, num_units=36, stride=2),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v1(
inputs,
blocks,
num_classes,
is_training,
global_pool,
output_stride,
include_root_block=True,
reuse=reuse,
scope=scope)
def resnet_v1_200(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v1_200'):
"""ResNet-200 model of [2]. See resnet_v1() for arg and return description."""
blocks = [
resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=128, num_units=24, stride=2),
resnet_v1_block('block3', base_depth=256, num_units=36, stride=2),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v1(
inputs,
blocks,
num_classes,
is_training,
global_pool,
output_stride,
include_root_block=True,
reuse=reuse,
scope=scope)
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/nets/resnet_v1.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains model definitions for versions of the Oxford VGG network.
These model definitions were introduced in the following technical report:
Very Deep Convolutional Networks For Large-Scale Image Recognition
Karen Simonyan and Andrew Zisserman
arXiv technical report, 2015
PDF: http://arxiv.org/pdf/1409.1556.pdf
ILSVRC 2014 Slides: http://www.robots.ox.ac.uk/~karen/pdf/ILSVRC_2014.pdf
CC-BY-4.0
More information can be obtained from the VGG website:
www.robots.ox.ac.uk/~vgg/research/very_deep/
Usage:
with slim.arg_scope(vgg.vgg_arg_scope()):
outputs, end_points = vgg.vgg_a(inputs)
with slim.arg_scope(vgg.vgg_arg_scope()):
outputs, end_points = vgg.vgg_16(inputs)
@@vgg_a
@@vgg_16
@@vgg_19
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
def vgg_arg_scope(weight_decay=0.0005):
"""Defines the VGG arg scope.
Args:
weight_decay: The l2 regularization coefficient.
Returns:
An arg_scope.
"""
with arg_scope(
[layers.conv2d, layers_lib.fully_connected],
activation_fn=nn_ops.relu,
weights_regularizer=regularizers.l2_regularizer(weight_decay),
biases_initializer=init_ops.zeros_initializer()):
with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
return arg_sc
def vgg_a(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='vgg_a'):
"""Oxford Net VGG 11-Layers version A Example.
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
outputs. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
Returns:
the last op containing the log predictions and end_points dict.
"""
with variable_scope.variable_scope(scope, 'vgg_a', [inputs]) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with arg_scope(
[layers.conv2d, layers_lib.max_pool2d],
outputs_collections=end_points_collection):
net = layers_lib.repeat(
inputs, 1, layers.conv2d, 64, [3, 3], scope='conv1')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
net = layers_lib.repeat(net, 1, layers.conv2d, 128, [3, 3], scope='conv2')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
net = layers_lib.repeat(net, 2, layers.conv2d, 256, [3, 3], scope='conv3')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
net = layers_lib.repeat(net, 2, layers.conv2d, 512, [3, 3], scope='conv4')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
net = layers_lib.repeat(net, 2, layers.conv2d, 512, [3, 3], scope='conv5')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
# Use conv2d instead of fully_connected layers.
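      # With the recommended 224x224 input, pool5 is 7x7, so the VALID 7x7
      # convolution below yields a 1x1 spatial map, standing in for the
      # original fc6 layer.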
net = layers.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')
net = layers_lib.dropout(
net, dropout_keep_prob, is_training=is_training, scope='dropout6')
net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
net = layers_lib.dropout(
net, dropout_keep_prob, is_training=is_training, scope='dropout7')
net = layers.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='fc8')
      # Convert end_points_collection into an end_point dict.
end_points = utils.convert_collection_to_dict(end_points_collection)
if spatial_squeeze:
net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
end_points[sc.name + '/fc8'] = net
return net, end_points
vgg_a.default_image_size = 224
def vgg_16(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='vgg_16'):
"""Oxford Net VGG 16-Layers version D Example.
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
outputs. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
Returns:
the last op containing the log predictions and end_points dict.
"""
with variable_scope.variable_scope(scope, 'vgg_16', [inputs]) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with arg_scope(
[layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
outputs_collections=end_points_collection):
net = layers_lib.repeat(
inputs, 2, layers.conv2d, 64, [3, 3], scope='conv1')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
net = layers_lib.repeat(net, 2, layers.conv2d, 128, [3, 3], scope='conv2')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
net = layers_lib.repeat(net, 3, layers.conv2d, 256, [3, 3], scope='conv3')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv4')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv5')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
# Use conv2d instead of fully_connected layers.
net = layers.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')
net = layers_lib.dropout(
net, dropout_keep_prob, is_training=is_training, scope='dropout6')
net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
net = layers_lib.dropout(
net, dropout_keep_prob, is_training=is_training, scope='dropout7')
net = layers.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='fc8')
      # Convert end_points_collection into an end_point dict.
end_points = utils.convert_collection_to_dict(end_points_collection)
if spatial_squeeze:
net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
end_points[sc.name + '/fc8'] = net
return net, end_points
vgg_16.default_image_size = 224
def vgg_19(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='vgg_19'):
"""Oxford Net VGG 19-Layers version E Example.
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
outputs. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
Returns:
the last op containing the log predictions and end_points dict.
"""
with variable_scope.variable_scope(scope, 'vgg_19', [inputs]) as sc:
end_points_collection = sc.name + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with arg_scope(
[layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
outputs_collections=end_points_collection):
net = layers_lib.repeat(
inputs, 2, layers.conv2d, 64, [3, 3], scope='conv1')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
net = layers_lib.repeat(net, 2, layers.conv2d, 128, [3, 3], scope='conv2')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
net = layers_lib.repeat(net, 4, layers.conv2d, 256, [3, 3], scope='conv3')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
net = layers_lib.repeat(net, 4, layers.conv2d, 512, [3, 3], scope='conv4')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
net = layers_lib.repeat(net, 4, layers.conv2d, 512, [3, 3], scope='conv5')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
# Use conv2d instead of fully_connected layers.
net = layers.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')
net = layers_lib.dropout(
net, dropout_keep_prob, is_training=is_training, scope='dropout6')
net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
net = layers_lib.dropout(
net, dropout_keep_prob, is_training=is_training, scope='dropout7')
net = layers.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='fc8')
      # Convert end_points_collection into an end_point dict.
end_points = utils.convert_collection_to_dict(end_points_collection)
if spatial_squeeze:
net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
end_points[sc.name + '/fc8'] = net
return net, end_points
vgg_19.default_image_size = 224
# Alias
vgg_d = vgg_16
vgg_e = vgg_19
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/nets/vgg.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v1 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)
def inception_v1_base(inputs, final_endpoint='Mixed_5c', scope='InceptionV1'):
"""Defines the Inception V1 base architecture.
This architecture is defined in:
Going deeper with convolutions
Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
http://arxiv.org/pdf/1409.4842v1.pdf.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',
'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']
scope: Optional variable_scope.
Returns:
    A tuple (net, end_points), where net is the output tensor at the requested
    final_endpoint and end_points is a dictionary from components of the
    network to the corresponding activation.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values.
"""
end_points = {}
with variable_scope.variable_scope(scope, 'InceptionV1', [inputs]):
with arg_scope(
[layers.conv2d, layers_lib.fully_connected],
weights_initializer=trunc_normal(0.01)):
with arg_scope(
[layers.conv2d, layers_lib.max_pool2d], stride=1, padding='SAME'):
end_point = 'Conv2d_1a_7x7'
net = layers.conv2d(inputs, 64, [7, 7], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'MaxPool_2a_3x3'
net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Conv2d_2b_1x1'
net = layers.conv2d(net, 64, [1, 1], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Conv2d_2c_3x3'
net = layers.conv2d(net, 192, [3, 3], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'MaxPool_3a_3x3'
net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
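        # Each 'Mixed' module below concatenates four parallel branches along
        # the channel axis: a 1x1 conv, a 1x1 conv followed by a 3x3 conv,
        # another 1x1 conv followed by a 3x3 conv, and a 3x3 max pool followed
        # by a 1x1 conv.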
end_point = 'Mixed_3b'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, 128, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, 32, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 32, [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Mixed_3c'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, 192, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'MaxPool_4a_3x3'
net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Mixed_4b'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, 208, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, 48, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Mixed_4c'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Mixed_4d'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, 256, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Mixed_4e'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(net, 144, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, 288, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Mixed_4f'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'MaxPool_5a_2x2'
net = layers_lib.max_pool2d(net, [2, 2], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Mixed_5b'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, 128, [3, 3], scope='Conv2d_0a_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Mixed_5c'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, 384, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
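# For example (a minimal sketch; `images` is a [batch, height, width, 3]
# tensor), building the base network only up to the 'Mixed_4f' endpoint:
#
#   net, end_points = inception_v1_base(images, final_endpoint='Mixed_4f')
#
# `net` is then the 'Mixed_4f' activation and `end_points` holds every
# endpoint constructed up to and including it.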
def inception_v1(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.8,
prediction_fn=layers_lib.softmax,
spatial_squeeze=True,
reuse=None,
scope='InceptionV1'):
"""Defines the Inception V1 architecture.
This architecture is defined in:
Going deeper with convolutions
Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
http://arxiv.org/pdf/1409.4842v1.pdf.
The default image size used to train this network is 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether is training or not.
dropout_keep_prob: the percentage of activation values that are retained.
prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C]; if False, logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
reuse: whether or not the network and its variables should be reused. To be
      able to reuse, 'scope' must be given.
scope: Optional variable_scope.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, num_classes]
end_points: a dictionary from components of the network to the corresponding
activation.
"""
# Final pooling and prediction
with variable_scope.variable_scope(
scope, 'InceptionV1', [inputs, num_classes], reuse=reuse) as scope:
with arg_scope(
[layers_lib.batch_norm, layers_lib.dropout], is_training=is_training):
net, end_points = inception_v1_base(inputs, scope=scope)
with variable_scope.variable_scope('Logits'):
net = layers_lib.avg_pool2d(
net, [7, 7], stride=1, scope='MaxPool_0a_7x7')
net = layers_lib.dropout(net, dropout_keep_prob, scope='Dropout_0b')
logits = layers.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='Conv2d_0c_1x1')
if spatial_squeeze:
logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
inception_v1.default_image_size = 224
def inception_v1_arg_scope(weight_decay=0.00004,
use_batch_norm=True,
batch_norm_var_collection='moving_vars'):
"""Defines the default InceptionV1 arg scope.
  Note: Although the original paper didn't use batch_norm, we found it useful.
Args:
weight_decay: The weight decay to use for regularizing the model.
use_batch_norm: "If `True`, batch_norm is applied after each convolution.
batch_norm_var_collection: The name of the collection for the batch norm
variables.
Returns:
    An `arg_scope` to use for the inception v1 model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.9997,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# collection containing update_ops.
'updates_collections': ops.GraphKeys.UPDATE_OPS,
# collection containing the moving mean and moving variance.
'variables_collections': {
'beta': None,
'gamma': None,
'moving_mean': [batch_norm_var_collection],
'moving_variance': [batch_norm_var_collection],
}
}
if use_batch_norm:
normalizer_fn = layers_lib.batch_norm
normalizer_params = batch_norm_params
else:
normalizer_fn = None
normalizer_params = {}
# Set weight_decay for weights in Conv and FC layers.
with arg_scope(
[layers.conv2d, layers_lib.fully_connected],
weights_regularizer=regularizers.l2_regularizer(weight_decay)):
with arg_scope(
[layers.conv2d],
weights_initializer=initializers.variance_scaling_initializer(),
activation_fn=nn_ops.relu,
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params) as sc:
return sc
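# Typical use (a minimal sketch; `images` is a [batch, 224, 224, 3] tensor):
#
#   with arg_scope(inception_v1_arg_scope()):
#     logits, end_points = inception_v1(images, num_classes=1000)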
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/nets/inception_v1.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.vgg."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.slim.python.slim.nets import vgg
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class VGGATest(test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_a(inputs, num_classes)
self.assertEquals(logits.op.name, 'vgg_a/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 256, 256
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_a(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'vgg_a/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 2, 2, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
for is_training in [True, False]:
with ops.Graph().as_default():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = vgg.vgg_a(inputs, num_classes, is_training=is_training)
expected_names = [
'vgg_a/conv1/conv1_1', 'vgg_a/pool1', 'vgg_a/conv2/conv2_1',
'vgg_a/pool2', 'vgg_a/conv3/conv3_1', 'vgg_a/conv3/conv3_2',
'vgg_a/pool3', 'vgg_a/conv4/conv4_1', 'vgg_a/conv4/conv4_2',
'vgg_a/pool4', 'vgg_a/conv5/conv5_1', 'vgg_a/conv5/conv5_2',
'vgg_a/pool5', 'vgg_a/fc6', 'vgg_a/fc7', 'vgg_a/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testModelVariables(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
vgg.vgg_a(inputs, num_classes)
expected_names = [
'vgg_a/conv1/conv1_1/weights',
'vgg_a/conv1/conv1_1/biases',
'vgg_a/conv2/conv2_1/weights',
'vgg_a/conv2/conv2_1/biases',
'vgg_a/conv3/conv3_1/weights',
'vgg_a/conv3/conv3_1/biases',
'vgg_a/conv3/conv3_2/weights',
'vgg_a/conv3/conv3_2/biases',
'vgg_a/conv4/conv4_1/weights',
'vgg_a/conv4/conv4_1/biases',
'vgg_a/conv4/conv4_2/weights',
'vgg_a/conv4/conv4_2/biases',
'vgg_a/conv5/conv5_1/weights',
'vgg_a/conv5/conv5_1/biases',
'vgg_a/conv5/conv5_2/weights',
'vgg_a/conv5/conv5_2/biases',
'vgg_a/fc6/weights',
'vgg_a/fc6/biases',
'vgg_a/fc7/weights',
'vgg_a/fc7/biases',
'vgg_a/fc8/weights',
'vgg_a/fc8/biases',
]
model_variables = [v.op.name for v in variables_lib.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.cached_session():
eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_a(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = math_ops.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 256, 256
num_classes = 1000
with self.cached_session():
train_inputs = random_ops.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = vgg.vgg_a(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
variable_scope.get_variable_scope().reuse_variables()
eval_inputs = random_ops.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = vgg.vgg_a(
eval_inputs, is_training=False, spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 2, 2, num_classes])
logits = math_ops.reduce_mean(logits, [1, 2])
predictions = math_ops.argmax(logits, 1)
self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.cached_session() as sess:
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_a(inputs)
sess.run(variables.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
class VGG16Test(test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_16(inputs, num_classes)
self.assertEquals(logits.op.name, 'vgg_16/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 256, 256
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_16(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'vgg_16/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 2, 2, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
for is_training in [True, False]:
with ops.Graph().as_default():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = vgg.vgg_16(inputs, num_classes, is_training=is_training)
expected_names = [
'vgg_16/conv1/conv1_1', 'vgg_16/conv1/conv1_2', 'vgg_16/pool1',
'vgg_16/conv2/conv2_1', 'vgg_16/conv2/conv2_2', 'vgg_16/pool2',
'vgg_16/conv3/conv3_1', 'vgg_16/conv3/conv3_2',
'vgg_16/conv3/conv3_3', 'vgg_16/pool3', 'vgg_16/conv4/conv4_1',
'vgg_16/conv4/conv4_2', 'vgg_16/conv4/conv4_3', 'vgg_16/pool4',
'vgg_16/conv5/conv5_1', 'vgg_16/conv5/conv5_2',
'vgg_16/conv5/conv5_3', 'vgg_16/pool5', 'vgg_16/fc6', 'vgg_16/fc7',
'vgg_16/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testModelVariables(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
vgg.vgg_16(inputs, num_classes)
expected_names = [
'vgg_16/conv1/conv1_1/weights',
'vgg_16/conv1/conv1_1/biases',
'vgg_16/conv1/conv1_2/weights',
'vgg_16/conv1/conv1_2/biases',
'vgg_16/conv2/conv2_1/weights',
'vgg_16/conv2/conv2_1/biases',
'vgg_16/conv2/conv2_2/weights',
'vgg_16/conv2/conv2_2/biases',
'vgg_16/conv3/conv3_1/weights',
'vgg_16/conv3/conv3_1/biases',
'vgg_16/conv3/conv3_2/weights',
'vgg_16/conv3/conv3_2/biases',
'vgg_16/conv3/conv3_3/weights',
'vgg_16/conv3/conv3_3/biases',
'vgg_16/conv4/conv4_1/weights',
'vgg_16/conv4/conv4_1/biases',
'vgg_16/conv4/conv4_2/weights',
'vgg_16/conv4/conv4_2/biases',
'vgg_16/conv4/conv4_3/weights',
'vgg_16/conv4/conv4_3/biases',
'vgg_16/conv5/conv5_1/weights',
'vgg_16/conv5/conv5_1/biases',
'vgg_16/conv5/conv5_2/weights',
'vgg_16/conv5/conv5_2/biases',
'vgg_16/conv5/conv5_3/weights',
'vgg_16/conv5/conv5_3/biases',
'vgg_16/fc6/weights',
'vgg_16/fc6/biases',
'vgg_16/fc7/weights',
'vgg_16/fc7/biases',
'vgg_16/fc8/weights',
'vgg_16/fc8/biases',
]
model_variables = [v.op.name for v in variables_lib.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.cached_session():
eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_16(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = math_ops.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 256, 256
num_classes = 1000
with self.cached_session():
train_inputs = random_ops.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = vgg.vgg_16(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
variable_scope.get_variable_scope().reuse_variables()
eval_inputs = random_ops.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = vgg.vgg_16(
eval_inputs, is_training=False, spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 2, 2, num_classes])
logits = math_ops.reduce_mean(logits, [1, 2])
predictions = math_ops.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.cached_session() as sess:
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_16(inputs)
sess.run(variables.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
class VGG19Test(test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_19(inputs, num_classes)
self.assertEqual(logits.op.name, 'vgg_19/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 256, 256
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_19(inputs, num_classes, spatial_squeeze=False)
self.assertEqual(logits.op.name, 'vgg_19/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 2, 2, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
for is_training in [True, False]:
with ops.Graph().as_default():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = vgg.vgg_19(inputs, num_classes, is_training=is_training)
expected_names = [
'vgg_19/conv1/conv1_1', 'vgg_19/conv1/conv1_2', 'vgg_19/pool1',
'vgg_19/conv2/conv2_1', 'vgg_19/conv2/conv2_2', 'vgg_19/pool2',
'vgg_19/conv3/conv3_1', 'vgg_19/conv3/conv3_2',
'vgg_19/conv3/conv3_3', 'vgg_19/conv3/conv3_4', 'vgg_19/pool3',
'vgg_19/conv4/conv4_1', 'vgg_19/conv4/conv4_2',
'vgg_19/conv4/conv4_3', 'vgg_19/conv4/conv4_4', 'vgg_19/pool4',
'vgg_19/conv5/conv5_1', 'vgg_19/conv5/conv5_2',
'vgg_19/conv5/conv5_3', 'vgg_19/conv5/conv5_4', 'vgg_19/pool5',
'vgg_19/fc6', 'vgg_19/fc7', 'vgg_19/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testModelVariables(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
vgg.vgg_19(inputs, num_classes)
expected_names = [
'vgg_19/conv1/conv1_1/weights',
'vgg_19/conv1/conv1_1/biases',
'vgg_19/conv1/conv1_2/weights',
'vgg_19/conv1/conv1_2/biases',
'vgg_19/conv2/conv2_1/weights',
'vgg_19/conv2/conv2_1/biases',
'vgg_19/conv2/conv2_2/weights',
'vgg_19/conv2/conv2_2/biases',
'vgg_19/conv3/conv3_1/weights',
'vgg_19/conv3/conv3_1/biases',
'vgg_19/conv3/conv3_2/weights',
'vgg_19/conv3/conv3_2/biases',
'vgg_19/conv3/conv3_3/weights',
'vgg_19/conv3/conv3_3/biases',
'vgg_19/conv3/conv3_4/weights',
'vgg_19/conv3/conv3_4/biases',
'vgg_19/conv4/conv4_1/weights',
'vgg_19/conv4/conv4_1/biases',
'vgg_19/conv4/conv4_2/weights',
'vgg_19/conv4/conv4_2/biases',
'vgg_19/conv4/conv4_3/weights',
'vgg_19/conv4/conv4_3/biases',
'vgg_19/conv4/conv4_4/weights',
'vgg_19/conv4/conv4_4/biases',
'vgg_19/conv5/conv5_1/weights',
'vgg_19/conv5/conv5_1/biases',
'vgg_19/conv5/conv5_2/weights',
'vgg_19/conv5/conv5_2/biases',
'vgg_19/conv5/conv5_3/weights',
'vgg_19/conv5/conv5_3/biases',
'vgg_19/conv5/conv5_4/weights',
'vgg_19/conv5/conv5_4/biases',
'vgg_19/fc6/weights',
'vgg_19/fc6/biases',
'vgg_19/fc7/weights',
'vgg_19/fc7/biases',
'vgg_19/fc8/weights',
'vgg_19/fc8/biases',
]
model_variables = [v.op.name for v in variables_lib.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.cached_session():
eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_19(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = math_ops.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 256, 256
num_classes = 1000
with self.cached_session():
train_inputs = random_ops.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = vgg.vgg_19(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
variable_scope.get_variable_scope().reuse_variables()
eval_inputs = random_ops.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = vgg.vgg_19(
eval_inputs, is_training=False, spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 2, 2, num_classes])
logits = math_ops.reduce_mean(logits, [1, 2])
predictions = math_ops.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.cached_session() as sess:
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_19(inputs)
sess.run(variables.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/nets/vgg_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.resnet_v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
from tensorflow.contrib.slim.python.slim.nets import resnet_v1
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def create_test_input(batch_size, height, width, channels):
"""Create test input tensor.
Args:
batch_size: The number of images per batch or `None` if unknown.
height: The height of each image or `None` if unknown.
width: The width of each image or `None` if unknown.
channels: The number of channels per image or `None` if unknown.
Returns:
Either a placeholder `Tensor` of dimension
[batch_size, height, width, channels] if any of the inputs are `None` or a
constant `Tensor` with the mesh grid values along the spatial dimensions.
"""
if None in [batch_size, height, width, channels]:
return array_ops.placeholder(dtypes.float32,
(batch_size, height, width, channels))
else:
return math_ops.cast(
np.tile(
np.reshape(
np.reshape(np.arange(height), [height, 1]) + np.reshape(
np.arange(width), [1, width]), [1, height, width, 1]),
[batch_size, 1, 1, channels]), dtypes.float32)
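# Illustrative usage sketch (assumption: this helper is not part of the
# original test file and is never called by the tests): with fully known
# dimensions, create_test_input returns a constant mesh-grid tensor, which
# makes numeric comparisons across equivalent network configurations
# deterministic; with any dimension set to None it falls back to a float32
# placeholder that must be fed at run time.
def _example_create_test_input_usage():
  constant_input = create_test_input(1, 4, 4, 3)  # constant [1, 4, 4, 3]
  placeholder_input = create_test_input(None, 4, 4, 3)  # unknown batch size
  return constant_input, placeholder_input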
class ResnetUtilsTest(test.TestCase):
def testSubsampleThreeByThree(self):
x = array_ops.reshape(math_ops.cast(math_ops.range(9), dtypes.float32),
[1, 3, 3, 1])
x = resnet_utils.subsample(x, 2)
expected = array_ops.reshape(
constant_op.constant([0, 2, 6, 8]), [1, 2, 2, 1])
with self.cached_session():
self.assertAllClose(x.eval(), expected.eval())
def testSubsampleFourByFour(self):
x = array_ops.reshape(math_ops.cast(math_ops.range(16), dtypes.float32),
[1, 4, 4, 1])
x = resnet_utils.subsample(x, 2)
expected = array_ops.reshape(
constant_op.constant([0, 2, 8, 10]), [1, 2, 2, 1])
with self.cached_session():
self.assertAllClose(x.eval(), expected.eval())
def testConv2DSameEven(self):
n, n2 = 4, 2
# Input image.
x = create_test_input(1, n, n, 1)
# Convolution kernel.
w = create_test_input(1, 3, 3, 1)
w = array_ops.reshape(w, [3, 3, 1, 1])
variable_scope.get_variable('Conv/weights', initializer=w)
variable_scope.get_variable('Conv/biases', initializer=array_ops.zeros([1]))
variable_scope.get_variable_scope().reuse_variables()
y1 = layers.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = math_ops.cast([[14, 28, 43, 26], [28, 48, 66, 37],
[43, 66, 84, 46], [26, 37, 46, 22]],
dtypes.float32)
y1_expected = array_ops.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = math_ops.cast([[14, 43], [43, 84]], dtypes.float32)
y2_expected = array_ops.reshape(y2_expected, [1, n2, n2, 1])
y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
y3_expected = y2_expected
y4 = layers.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
y4_expected = math_ops.cast([[48, 37], [37, 22]], dtypes.float32)
y4_expected = array_ops.reshape(y4_expected, [1, n2, n2, 1])
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
self.assertAllClose(y4.eval(), y4_expected.eval())
def testConv2DSameOdd(self):
n, n2 = 5, 3
# Input image.
x = create_test_input(1, n, n, 1)
# Convolution kernel.
w = create_test_input(1, 3, 3, 1)
w = array_ops.reshape(w, [3, 3, 1, 1])
variable_scope.get_variable('Conv/weights', initializer=w)
variable_scope.get_variable('Conv/biases', initializer=array_ops.zeros([1]))
variable_scope.get_variable_scope().reuse_variables()
y1 = layers.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = math_ops.cast([[14, 28, 43, 58, 34],
[28, 48, 66, 84, 46],
[43, 66, 84, 102, 55],
[58, 84, 102, 120, 64],
[34, 46, 55, 64, 30]],
dtypes.float32)
y1_expected = array_ops.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = math_ops.cast([[14, 43, 34],
[43, 84, 55],
[34, 55, 30]],
dtypes.float32)
y2_expected = array_ops.reshape(y2_expected, [1, n2, n2, 1])
y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
y3_expected = y2_expected
y4 = layers.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
y4_expected = y2_expected
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
self.assertAllClose(y4.eval(), y4_expected.eval())
def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
"""A plain ResNet without extra layers before or after the ResNet blocks."""
with variable_scope.variable_scope(scope, values=[inputs]):
with arg_scope([layers.conv2d], outputs_collections='end_points'):
net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
end_points = utils.convert_collection_to_dict('end_points')
return net, end_points
def testEndPointsV1(self):
"""Test the end points of a tiny v1 bottleneck network."""
blocks = [
resnet_v1.resnet_v1_block(
'block1', base_depth=1, num_units=2, stride=2),
resnet_v1.resnet_v1_block(
'block2', base_depth=2, num_units=2, stride=1),
]
inputs = create_test_input(2, 32, 16, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
expected = [
'tiny/block1/unit_1/bottleneck_v1/shortcut',
'tiny/block1/unit_1/bottleneck_v1/conv1',
'tiny/block1/unit_1/bottleneck_v1/conv2',
'tiny/block1/unit_1/bottleneck_v1/conv3',
'tiny/block1/unit_2/bottleneck_v1/conv1',
'tiny/block1/unit_2/bottleneck_v1/conv2',
'tiny/block1/unit_2/bottleneck_v1/conv3',
'tiny/block2/unit_1/bottleneck_v1/shortcut',
'tiny/block2/unit_1/bottleneck_v1/conv1',
'tiny/block2/unit_1/bottleneck_v1/conv2',
'tiny/block2/unit_1/bottleneck_v1/conv3',
'tiny/block2/unit_2/bottleneck_v1/conv1',
'tiny/block2/unit_2/bottleneck_v1/conv2',
'tiny/block2/unit_2/bottleneck_v1/conv3']
self.assertItemsEqual(expected, end_points)
def _stack_blocks_nondense(self, net, blocks):
"""A simplified ResNet Block stacker without output stride control."""
for block in blocks:
with variable_scope.variable_scope(block.scope, 'block', [net]):
for i, unit in enumerate(block.args):
with variable_scope.variable_scope('unit_%d' % (i + 1), values=[net]):
net = block.unit_fn(net, rate=1, **unit)
return net
def testAtrousValuesBottleneck(self):
"""Verify the values of dense feature extraction by atrous convolution.
Make sure that dense feature extraction by stack_blocks_dense() followed by
subsampling gives identical results to feature extraction at the nominal
network output stride using the simple self._stack_blocks_nondense() above.
"""
block = resnet_v1.resnet_v1_block
blocks = [
block('block1', base_depth=1, num_units=2, stride=2),
block('block2', base_depth=2, num_units=2, stride=2),
block('block3', base_depth=4, num_units=2, stride=2),
block('block4', base_depth=8, num_units=2, stride=1),
]
nominal_stride = 8
# Test both odd and even input dimensions.
height = 30
width = 31
with arg_scope(resnet_utils.resnet_arg_scope()):
with arg_scope([layers.batch_norm], is_training=False):
for output_stride in [1, 2, 4, 8, None]:
with ops.Graph().as_default():
with self.cached_session() as sess:
random_seed.set_random_seed(0)
inputs = create_test_input(1, height, width, 3)
# Dense feature extraction followed by subsampling.
output = resnet_utils.stack_blocks_dense(inputs, blocks,
output_stride)
if output_stride is None:
factor = 1
else:
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
variable_scope.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected = self._stack_blocks_nondense(inputs, blocks)
sess.run(variables.global_variables_initializer())
output, expected = sess.run([output, expected])
self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
class ResnetCompleteNetworkTest(test.TestCase):
"""Tests with complete small ResNet v1 networks."""
def _resnet_small(self,
inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
reuse=None,
scope='resnet_v1_small'):
"""A shallow and thin ResNet v1 for faster tests."""
block = resnet_v1.resnet_v1_block
blocks = [
block('block1', base_depth=1, num_units=3, stride=2),
block('block2', base_depth=2, num_units=3, stride=2),
block('block3', base_depth=4, num_units=3, stride=2),
block('block4', base_depth=8, num_units=2, stride=1),
]
return resnet_v1.resnet_v1(inputs, blocks, num_classes, is_training,
global_pool, output_stride, include_root_block,
reuse, scope)
def testClassificationEndPoints(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
logits, end_points = self._resnet_small(
inputs, num_classes, global_pool=global_pool, scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
self.assertTrue('predictions' in end_points)
self.assertListEqual(end_points['predictions'].get_shape().as_list(),
[2, 1, 1, num_classes])
def testClassificationShapes(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(
inputs, num_classes, global_pool=global_pool, scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 28, 28, 4],
'resnet/block2': [2, 14, 14, 8],
'resnet/block3': [2, 7, 7, 16],
'resnet/block4': [2, 7, 7, 32]
}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
inputs = create_test_input(2, 321, 321, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(
inputs, num_classes, global_pool=global_pool, scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 41, 41, 4],
'resnet/block2': [2, 21, 21, 8],
'resnet/block3': [2, 11, 11, 16],
'resnet/block4': [2, 11, 11, 32]
}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testRootlessFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
inputs = create_test_input(2, 128, 128, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(
inputs,
num_classes,
global_pool=global_pool,
include_root_block=False,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 64, 64, 4],
'resnet/block2': [2, 32, 32, 8],
'resnet/block3': [2, 16, 16, 16],
'resnet/block4': [2, 16, 16, 32]
}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
output_stride = 8
inputs = create_test_input(2, 321, 321, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(
inputs,
num_classes,
global_pool=global_pool,
output_stride=output_stride,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 41, 41, 4],
'resnet/block2': [2, 41, 41, 8],
'resnet/block3': [2, 41, 41, 16],
'resnet/block4': [2, 41, 41, 32]
}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalValues(self):
"""Verify dense feature extraction with atrous convolution."""
nominal_stride = 32
for output_stride in [4, 8, 16, 32, None]:
with arg_scope(resnet_utils.resnet_arg_scope()):
with ops.Graph().as_default():
with self.cached_session() as sess:
random_seed.set_random_seed(0)
inputs = create_test_input(2, 81, 81, 3)
# Dense feature extraction followed by subsampling.
output, _ = self._resnet_small(
inputs,
None,
is_training=False,
global_pool=False,
output_stride=output_stride)
if output_stride is None:
factor = 1
else:
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
variable_scope.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected, _ = self._resnet_small(
inputs, None, is_training=False, global_pool=False)
sess.run(variables.global_variables_initializer())
self.assertAllClose(
output.eval(), expected.eval(), atol=2e-4, rtol=1e-4)
def testUnknownBatchSize(self):
batch = 2
height, width = 65, 65
global_pool = True
num_classes = 10
inputs = create_test_input(None, height, width, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
logits, _ = self._resnet_small(
inputs, num_classes, global_pool=global_pool, scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, 1, 1, num_classes])
images = create_test_input(batch, height, width, 3)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 1, 1, num_classes))
def testFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
inputs = create_test_input(batch, None, None, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small(inputs, None, global_pool=global_pool)
self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 3, 3, 32))
def testAtrousFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
output_stride = 8
inputs = create_test_input(batch, None, None, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small(
inputs, None, global_pool=global_pool, output_stride=output_stride)
self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 9, 9, 32))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/nets/resnet_v1_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Brings inception_v1, inception_v2 and inception_v3 under one namespace."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.slim.python.slim.nets.inception_v1 import inception_v1
from tensorflow.contrib.slim.python.slim.nets.inception_v1 import inception_v1_arg_scope
from tensorflow.contrib.slim.python.slim.nets.inception_v1 import inception_v1_base
from tensorflow.contrib.slim.python.slim.nets.inception_v2 import inception_v2
from tensorflow.contrib.slim.python.slim.nets.inception_v2 import inception_v2_arg_scope
from tensorflow.contrib.slim.python.slim.nets.inception_v2 import inception_v2_base
from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3
from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3_arg_scope
from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3_base
# pylint: enable=unused-import
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/nets/inception.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains building blocks for various versions of Residual Networks.
Residual networks (ResNets) were proposed in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385, 2015
More variants were introduced in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027, 2016
We can obtain different ResNet variants by changing the network depth, width,
and form of residual unit. This module implements the infrastructure for
building them. Concrete ResNet units and full ResNet networks are implemented in
the accompanying resnet_v1.py and resnet_v2.py modules.
Compared to https://github.com/KaimingHe/deep-residual-networks, in the current
implementation we subsample the output activations in the last residual unit of
each block, instead of subsampling the input activations in the first residual
unit of each block. The two implementations give identical results, but our
implementation is more memory-efficient.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
"""A named tuple describing a ResNet block.
Its parts are:
scope: The scope of the `Block`.
unit_fn: The ResNet unit function which takes as input a `Tensor` and
returns another `Tensor` with the output of the ResNet unit.
args: A list of length equal to the number of units in the `Block`. The list
contains one (depth, depth_bottleneck, stride) tuple for each unit in the
block to serve as argument to unit_fn.
"""
def subsample(inputs, factor, scope=None):
"""Subsamples the input along the spatial dimensions.
Args:
inputs: A `Tensor` of size [batch, height_in, width_in, channels].
factor: The subsampling factor.
scope: Optional variable_scope.
Returns:
output: A `Tensor` of size [batch, height_out, width_out, channels] with the
input, either intact (if factor == 1) or subsampled (if factor > 1).
"""
if factor == 1:
return inputs
else:
return layers.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
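# Illustrative usage sketch (assumption: hypothetical helper, not part of the
# original module and never called): subsample keeps every `factor`-th
# activation by max-pooling with a 1x1 window and stride `factor`, so a
# [1, 8, 8, 3] input with factor=2 yields a [1, 4, 4, 3] output.
def _example_subsample_usage():
  inputs = array_ops.zeros([1, 8, 8, 3])
  return subsample(inputs, factor=2, scope='example_subsample')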
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
"""Strided 2-D convolution with 'SAME' padding.
When stride > 1, we do explicit zero-padding, followed by conv2d with
'VALID' padding.
Note that
net = conv2d_same(inputs, num_outputs, 3, stride=stride)
is equivalent to
net = tf.contrib.layers.conv2d(inputs, num_outputs, 3, stride=1,
padding='SAME')
net = subsample(net, factor=stride)
whereas
net = tf.contrib.layers.conv2d(inputs, num_outputs, 3, stride=stride,
padding='SAME')
is different when the input's height or width is even, which is why we add the
current function. For more details, see ResnetUtilsTest.testConv2DSameEven().
Args:
inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
num_outputs: An integer, the number of output filters.
kernel_size: An integer, the kernel size of the filters.
stride: An integer, the output stride.
rate: An integer, rate for atrous convolution.
scope: Scope.
Returns:
output: A 4-D tensor of size [batch, height_out, width_out, channels] with
the convolution output.
"""
if stride == 1:
return layers_lib.conv2d(
inputs,
num_outputs,
kernel_size,
stride=1,
rate=rate,
padding='SAME',
scope=scope)
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
inputs = array_ops.pad(
inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return layers_lib.conv2d(
inputs,
num_outputs,
kernel_size,
stride=stride,
rate=rate,
padding='VALID',
scope=scope)
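# Illustrative usage sketch (assumption: hypothetical helper, not part of the
# original module and never called): per the docstring above, this call is
# equivalent to a stride-1 'SAME' convolution followed by subsample(stride=2),
# which an ordinary strided 'SAME' convolution does not guarantee when the
# input height or width is even.
def _example_conv2d_same_usage():
  inputs = array_ops.zeros([1, 224, 224, 3])
  # Output shape is [1, 112, 112, 64].
  return conv2d_same(inputs, 64, 3, stride=2, scope='example_conv')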
@add_arg_scope
def stack_blocks_dense(net,
blocks,
output_stride=None,
outputs_collections=None):
"""Stacks ResNet `Blocks` and controls output feature density.
First, this function creates scopes for the ResNet in the form of
'block_name/unit_1', 'block_name/unit_2', etc.
Second, this function allows the user to explicitly control the ResNet
output_stride, which is the ratio of the input to output spatial resolution.
This is useful for dense prediction tasks such as semantic segmentation or
object detection.
Most ResNets consist of 4 ResNet blocks and subsample the activations by a
factor of 2 when transitioning between consecutive ResNet blocks. This results
in a nominal ResNet output_stride equal to 8. If we set the output_stride to
half the nominal network stride (e.g., output_stride=4), then the responses
are computed at twice the nominal spatial density.
Control of the output feature density is implemented by atrous convolution.
Args:
net: A `Tensor` of size [batch, height, width, channels].
blocks: A list of length equal to the number of ResNet `Blocks`. Each
element is a ResNet `Block` object describing the units in the `Block`.
output_stride: If `None`, then the output will be computed at the nominal
network stride. If output_stride is not `None`, it specifies the requested
ratio of input to output spatial resolution, which needs to be equal to
the product of unit strides from the start up to some level of the ResNet.
For example, if the ResNet employs units with strides 1, 2, 1, 3, 4, 1,
then valid values for the output_stride are 1, 2, 6, 24 or None (which
is equivalent to output_stride=24).
outputs_collections: Collection to add the ResNet block outputs.
Returns:
net: Output tensor with stride equal to the specified output_stride.
Raises:
ValueError: If the target output_stride is not valid.
"""
# The current_stride variable keeps track of the effective stride of the
# activations. This allows us to invoke atrous convolution whenever applying
# the next residual unit would result in the activations having stride larger
# than the target output_stride.
current_stride = 1
# The atrous convolution rate parameter.
rate = 1
for block in blocks:
with variable_scope.variable_scope(block.scope, 'block', [net]) as sc:
for i, unit in enumerate(block.args):
if output_stride is not None and current_stride > output_stride:
raise ValueError('The target output_stride cannot be reached.')
with variable_scope.variable_scope('unit_%d' % (i + 1), values=[net]):
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
if output_stride is not None and current_stride == output_stride:
net = block.unit_fn(net, rate=rate, **dict(unit, stride=1))
rate *= unit.get('stride', 1)
else:
net = block.unit_fn(net, rate=1, **unit)
current_stride *= unit.get('stride', 1)
net = utils.collect_named_outputs(outputs_collections, sc.name, net)
if output_stride is not None and current_stride != output_stride:
raise ValueError('The target output_stride cannot be reached.')
return net
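# Illustrative usage sketch (assumption: hypothetical helper, not part of the
# original module and never called): a caller builds `blocks` with the
# resnet_v1/resnet_v2 block constructors and passes the desired output_stride.
def _example_stack_blocks_dense_usage():
  # Imported locally because resnet_v1 itself imports this module.
  from tensorflow.contrib.slim.python.slim.nets import resnet_v1
  inputs = array_ops.zeros([1, 64, 64, 3])
  blocks = [
      resnet_v1.resnet_v1_block('block1', base_depth=1, num_units=2, stride=2),
      resnet_v1.resnet_v1_block('block2', base_depth=2, num_units=2, stride=2),
  ]
  # With output_stride=2, block1 still strides normally; once the target
  # stride is reached, block2's units run with stride 1 and the atrous rate
  # is multiplied up for use in any subsequent layers.
  return stack_blocks_dense(inputs, blocks, output_stride=2)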
def resnet_arg_scope(weight_decay=0.0001,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True):
"""Defines the default ResNet arg scope.
TODO(gpapan): The batch-normalization related default values above are
appropriate for use in conjunction with the reference ResNet models
released at https://github.com/KaimingHe/deep-residual-networks. When
training ResNets from scratch, they might need to be tuned.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: The moving average decay when estimating layer activation
statistics in batch normalization.
batch_norm_epsilon: Small constant to prevent division by zero when
normalizing activations by their variance in batch normalization.
batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
activations in the batch normalization layer.
Returns:
An `arg_scope` to use for the resnet models.
"""
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'updates_collections': ops.GraphKeys.UPDATE_OPS,
}
with arg_scope(
[layers_lib.conv2d],
weights_regularizer=regularizers.l2_regularizer(weight_decay),
weights_initializer=initializers.variance_scaling_initializer(),
activation_fn=nn_ops.relu,
normalizer_fn=layers.batch_norm,
normalizer_params=batch_norm_params):
with arg_scope([layers.batch_norm], **batch_norm_params):
# The following implies padding='SAME' for pool1, which makes feature
# alignment easier for dense prediction tasks. This is also used in
# https://github.com/facebook/fb.resnet.torch. However the accompanying
# code of 'Deep Residual Learning for Image Recognition' uses
# padding='VALID' for pool1. You can switch to that choice by setting
# tf.contrib.framework.arg_scope([tf.contrib.layers.max_pool2d], padding='VALID').
with arg_scope([layers.max_pool2d], padding='SAME') as arg_sc:
return arg_sc
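# Illustrative usage sketch (assumption: hypothetical helper, not part of the
# original module and never called): callers typically wrap graph construction
# in the returned scope so that every conv2d picks up the weight decay,
# variance-scaling initializer and batch-norm settings defined above.
def _example_resnet_arg_scope_usage():
  inputs = array_ops.zeros([1, 32, 32, 3])
  with arg_scope(resnet_arg_scope(weight_decay=0.0001)):
    net = conv2d_same(inputs, 16, 3, stride=2, scope='example_root_conv')
  return net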
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/nets/resnet_utils.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.alexnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.slim.python.slim.nets import alexnet
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class AlexnetV2Test(test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes)
self.assertEqual(logits.op.name, 'alexnet_v2/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 300, 400
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False)
self.assertEqual(logits.op.name, 'alexnet_v2/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 4, 7, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = alexnet.alexnet_v2(inputs, num_classes)
expected_names = [
'alexnet_v2/conv1', 'alexnet_v2/pool1', 'alexnet_v2/conv2',
'alexnet_v2/pool2', 'alexnet_v2/conv3', 'alexnet_v2/conv4',
'alexnet_v2/conv5', 'alexnet_v2/pool5', 'alexnet_v2/fc6',
'alexnet_v2/fc7', 'alexnet_v2/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testModelVariables(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
alexnet.alexnet_v2(inputs, num_classes)
expected_names = [
'alexnet_v2/conv1/weights',
'alexnet_v2/conv1/biases',
'alexnet_v2/conv2/weights',
'alexnet_v2/conv2/biases',
'alexnet_v2/conv3/weights',
'alexnet_v2/conv3/biases',
'alexnet_v2/conv4/weights',
'alexnet_v2/conv4/biases',
'alexnet_v2/conv5/weights',
'alexnet_v2/conv5/biases',
'alexnet_v2/fc6/weights',
'alexnet_v2/fc6/biases',
'alexnet_v2/fc7/weights',
'alexnet_v2/fc7/biases',
'alexnet_v2/fc8/weights',
'alexnet_v2/fc8/biases',
]
model_variables = [v.op.name for v in variables_lib.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.cached_session():
eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = math_ops.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 300, 400
num_classes = 1000
with self.cached_session():
train_inputs = random_ops.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = alexnet.alexnet_v2(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
variable_scope.get_variable_scope().reuse_variables()
eval_inputs = random_ops.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = alexnet.alexnet_v2(
eval_inputs, is_training=False, spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 4, 7, num_classes])
logits = math_ops.reduce_mean(logits, [1, 2])
predictions = math_ops.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.cached_session() as sess:
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs)
sess.run(variables.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/nets/alexnet_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nets.inception_v3."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.slim.python.slim import model_analyzer
from tensorflow.contrib.slim.python.slim.nets import inception_v3
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class InceptionV3Test(test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, end_points = inception_v3.inception_v3(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 299, 299
inputs = random_ops.random_uniform((batch_size, height, width, 3))
final_endpoint, end_points = inception_v3.inception_v3_base(inputs)
self.assertTrue(final_endpoint.op.name.startswith('InceptionV3/Mixed_7c'))
self.assertListEqual(final_endpoint.get_shape().as_list(),
[batch_size, 8, 8, 2048])
expected_endpoints = [
'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'MaxPool_3a_3x3',
'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3', 'Mixed_5b',
'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'
]
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
height, width = 299, 299
endpoints = [
'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'MaxPool_3a_3x3',
'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3', 'Mixed_5b',
'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'
]
for index, endpoint in enumerate(endpoints):
with ops.Graph().as_default():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
out_tensor, end_points = inception_v3.inception_v3_base(
inputs, final_endpoint=endpoint)
self.assertTrue(
out_tensor.op.name.startswith('InceptionV3/' + endpoint))
self.assertItemsEqual(endpoints[:index + 1], end_points)
def testBuildAndCheckAllEndPointsUptoMixed7c(self):
batch_size = 5
height, width = 299, 299
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = inception_v3.inception_v3_base(
inputs, final_endpoint='Mixed_7c')
endpoints_shapes = {
'Conv2d_1a_3x3': [batch_size, 149, 149, 32],
'Conv2d_2a_3x3': [batch_size, 147, 147, 32],
'Conv2d_2b_3x3': [batch_size, 147, 147, 64],
'MaxPool_3a_3x3': [batch_size, 73, 73, 64],
'Conv2d_3b_1x1': [batch_size, 73, 73, 80],
'Conv2d_4a_3x3': [batch_size, 71, 71, 192],
'MaxPool_5a_3x3': [batch_size, 35, 35, 192],
'Mixed_5b': [batch_size, 35, 35, 256],
'Mixed_5c': [batch_size, 35, 35, 288],
'Mixed_5d': [batch_size, 35, 35, 288],
'Mixed_6a': [batch_size, 17, 17, 768],
'Mixed_6b': [batch_size, 17, 17, 768],
'Mixed_6c': [batch_size, 17, 17, 768],
'Mixed_6d': [batch_size, 17, 17, 768],
'Mixed_6e': [batch_size, 17, 17, 768],
'Mixed_7a': [batch_size, 8, 8, 1280],
'Mixed_7b': [batch_size, 8, 8, 2048],
'Mixed_7c': [batch_size, 8, 8, 2048]
}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 299, 299
inputs = random_ops.random_uniform((batch_size, height, width, 3))
with arg_scope(inception_v3.inception_v3_arg_scope()):
inception_v3.inception_v3_base(inputs)
total_params, _ = model_analyzer.analyze_vars(
variables_lib.get_model_variables())
self.assertAlmostEqual(21802784, total_params)
def testBuildEndPoints(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = inception_v3.inception_v3(inputs, num_classes)
self.assertTrue('Logits' in end_points)
logits = end_points['Logits']
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('AuxLogits' in end_points)
aux_logits = end_points['AuxLogits']
self.assertListEqual(aux_logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Mixed_7c' in end_points)
pre_pool = end_points['Mixed_7c']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 8, 8, 2048])
self.assertTrue('PreLogits' in end_points)
pre_logits = end_points['PreLogits']
self.assertListEqual(pre_logits.get_shape().as_list(),
[batch_size, 1, 1, 2048])
def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = inception_v3.inception_v3(inputs, num_classes)
endpoint_keys = [
key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')
]
_, end_points_with_multiplier = inception_v3.inception_v3(
inputs, num_classes, scope='depth_multiplied_net', depth_multiplier=0.5)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(0.5 * original_depth, new_depth)
def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = inception_v3.inception_v3(inputs, num_classes)
endpoint_keys = [
key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')
]
_, end_points_with_multiplier = inception_v3.inception_v3(
inputs, num_classes, scope='depth_multiplied_net', depth_multiplier=2.0)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(2.0 * original_depth, new_depth)
def testRaiseValueErrorWithInvalidDepthMultiplier(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
with self.assertRaises(ValueError):
_ = inception_v3.inception_v3(inputs, num_classes, depth_multiplier=-0.1)
with self.assertRaises(ValueError):
_ = inception_v3.inception_v3(inputs, num_classes, depth_multiplier=0.0)
def testHalfSizeImages(self):
batch_size = 5
height, width = 150, 150
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, end_points = inception_v3.inception_v3(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7c']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 3, 3, 2048])
def testUnknownImageShape(self):
ops.reset_default_graph()
batch_size = 2
height, width = 299, 299
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.cached_session() as sess:
inputs = array_ops.placeholder(
dtypes.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception_v3.inception_v3(inputs, num_classes)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7c']
feed_dict = {inputs: input_np}
variables.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 8, 2048])
def testUnknownBatchSize(self):
batch_size = 1
height, width = 299, 299
num_classes = 1000
inputs = array_ops.placeholder(dtypes.float32, (None, height, width, 3))
logits, _ = inception_v3.inception_v3(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
self.assertListEqual(logits.get_shape().as_list(), [None, num_classes])
images = random_ops.random_uniform((batch_size, height, width, 3))
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEqual(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 299, 299
num_classes = 1000
eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = inception_v3.inception_v3(
eval_inputs, num_classes, is_training=False)
predictions = math_ops.argmax(logits, 1)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(predictions)
self.assertEqual(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 150, 150
num_classes = 1000
train_inputs = random_ops.random_uniform(
(train_batch_size, height, width, 3))
inception_v3.inception_v3(train_inputs, num_classes)
eval_inputs = random_ops.random_uniform((eval_batch_size, height, width, 3))
logits, _ = inception_v3.inception_v3(
eval_inputs, num_classes, is_training=False, reuse=True)
predictions = math_ops.argmax(logits, 1)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(predictions)
self.assertEqual(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = random_ops.random_uniform([1, 299, 299, 3])
logits, _ = inception_v3.inception_v3(
images, num_classes=num_classes, spatial_squeeze=False)
with self.cached_session() as sess:
variables.global_variables_initializer().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/nets/inception_v3_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v3 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)
def inception_v3_base(inputs,
final_endpoint='Mixed_7c',
min_depth=16,
depth_multiplier=1.0,
scope=None):
"""Inception model from http://arxiv.org/abs/1512.00567.
Constructs an Inception v3 network from inputs to the given final endpoint.
This method can construct the network up to the final inception block
Mixed_7c.
Note that the names of the layers in the paper do not correspond to the names
of the endpoints registered by this function, although they build the same
network.
Here is a mapping from the old_names to the new names:
Old name | New name
=======================================
conv0 | Conv2d_1a_3x3
conv1 | Conv2d_2a_3x3
conv2 | Conv2d_2b_3x3
pool1 | MaxPool_3a_3x3
conv3 | Conv2d_3b_1x1
conv4 | Conv2d_4a_3x3
pool2 | MaxPool_5a_3x3
mixed_35x35x256a | Mixed_5b
mixed_35x35x288a | Mixed_5c
mixed_35x35x288b | Mixed_5d
mixed_17x17x768a | Mixed_6a
mixed_17x17x768b | Mixed_6b
mixed_17x17x768c | Mixed_6c
mixed_17x17x768d | Mixed_6d
mixed_17x17x768e | Mixed_6e
mixed_8x8x1280a | Mixed_7a
mixed_8x8x2048a | Mixed_7b
mixed_8x8x2048b | Mixed_7c
Args:
inputs: a tensor of size [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3',
'Mixed_5b', 'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c',
'Mixed_6d', 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'].
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
scope: Optional variable_scope.
Returns:
tensor_out: output tensor corresponding to the final_endpoint.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
or depth_multiplier <= 0
"""
# end_points will collect relevant activations for external use, for example
# summaries or losses.
end_points = {}
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
depth = lambda d: max(int(d * depth_multiplier), min_depth)
with variable_scope.variable_scope(scope, 'InceptionV3', [inputs]):
with arg_scope(
[layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
stride=1,
padding='VALID'):
# 299 x 299 x 3
end_point = 'Conv2d_1a_3x3'
net = layers.conv2d(inputs, depth(32), [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 149 x 149 x 32
end_point = 'Conv2d_2a_3x3'
net = layers.conv2d(net, depth(32), [3, 3], scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 147 x 147 x 32
end_point = 'Conv2d_2b_3x3'
net = layers.conv2d(
net, depth(64), [3, 3], padding='SAME', scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 147 x 147 x 64
end_point = 'MaxPool_3a_3x3'
net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 73 x 73 x 64
end_point = 'Conv2d_3b_1x1'
net = layers.conv2d(net, depth(80), [1, 1], scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 73 x 73 x 80.
end_point = 'Conv2d_4a_3x3'
net = layers.conv2d(net, depth(192), [3, 3], scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 71 x 71 x 192.
end_point = 'MaxPool_5a_3x3'
net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 35 x 35 x 192.
# Inception blocks
with arg_scope(
[layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
stride=1,
padding='SAME'):
# mixed: 35 x 35 x 256.
end_point = 'Mixed_5b'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(64), [5, 5], scope='Conv2d_0b_5x5')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(32), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_1: 35 x 35 x 288.
end_point = 'Mixed_5c'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(48), [1, 1], scope='Conv2d_0b_1x1')
branch_1 = layers.conv2d(
branch_1, depth(64), [5, 5], scope='Conv_1_0c_5x5')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(64), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_2: 35 x 35 x 288.
end_point = 'Mixed_5d'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(64), [5, 5], scope='Conv2d_0b_5x5')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(64), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_3: 17 x 17 x 768.
end_point = 'Mixed_6a'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net,
depth(384), [3, 3],
stride=2,
padding='VALID',
scope='Conv2d_1a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_1 = layers.conv2d(
branch_1,
depth(96), [3, 3],
stride=2,
padding='VALID',
scope='Conv2d_1a_1x1')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers_lib.max_pool2d(
net, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3')
net = array_ops.concat([branch_0, branch_1, branch_2], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed4: 17 x 17 x 768.
end_point = 'Mixed_6b'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(128), [1, 7], scope='Conv2d_0b_1x7')
branch_1 = layers.conv2d(
branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(128), [7, 1], scope='Conv2d_0b_7x1')
branch_2 = layers.conv2d(
branch_2, depth(128), [1, 7], scope='Conv2d_0c_1x7')
branch_2 = layers.conv2d(
branch_2, depth(128), [7, 1], scope='Conv2d_0d_7x1')
branch_2 = layers.conv2d(
branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_5: 17 x 17 x 768.
end_point = 'Mixed_6c'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(160), [1, 7], scope='Conv2d_0b_1x7')
branch_1 = layers.conv2d(
branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(160), [7, 1], scope='Conv2d_0b_7x1')
branch_2 = layers.conv2d(
branch_2, depth(160), [1, 7], scope='Conv2d_0c_1x7')
branch_2 = layers.conv2d(
branch_2, depth(160), [7, 1], scope='Conv2d_0d_7x1')
branch_2 = layers.conv2d(
branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_6: 17 x 17 x 768.
end_point = 'Mixed_6d'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(160), [1, 7], scope='Conv2d_0b_1x7')
branch_1 = layers.conv2d(
branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(160), [7, 1], scope='Conv2d_0b_7x1')
branch_2 = layers.conv2d(
branch_2, depth(160), [1, 7], scope='Conv2d_0c_1x7')
branch_2 = layers.conv2d(
branch_2, depth(160), [7, 1], scope='Conv2d_0d_7x1')
branch_2 = layers.conv2d(
branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_7: 17 x 17 x 768.
end_point = 'Mixed_6e'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(192), [1, 7], scope='Conv2d_0b_1x7')
branch_1 = layers.conv2d(
branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(192), [7, 1], scope='Conv2d_0b_7x1')
branch_2 = layers.conv2d(
branch_2, depth(192), [1, 7], scope='Conv2d_0c_1x7')
branch_2 = layers.conv2d(
branch_2, depth(192), [7, 1], scope='Conv2d_0d_7x1')
branch_2 = layers.conv2d(
branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_8: 8 x 8 x 1280.
end_point = 'Mixed_7a'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_0 = layers.conv2d(
branch_0,
depth(320), [3, 3],
stride=2,
padding='VALID',
scope='Conv2d_1a_3x3')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(192), [1, 7], scope='Conv2d_0b_1x7')
branch_1 = layers.conv2d(
branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
branch_1 = layers.conv2d(
branch_1,
depth(192), [3, 3],
stride=2,
padding='VALID',
scope='Conv2d_1a_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers_lib.max_pool2d(
net, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3')
net = array_ops.concat([branch_0, branch_1, branch_2], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_9: 8 x 8 x 2048.
end_point = 'Mixed_7b'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = array_ops.concat(
[
layers.conv2d(
branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
layers.conv2d(
branch_1, depth(384), [3, 1], scope='Conv2d_0b_3x1')
],
3)
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = array_ops.concat(
[
layers.conv2d(
branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
layers.conv2d(
branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')
],
3)
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_10: 8 x 8 x 2048.
end_point = 'Mixed_7c'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = array_ops.concat(
[
layers.conv2d(
branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
layers.conv2d(
branch_1, depth(384), [3, 1], scope='Conv2d_0c_3x1')
],
3)
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = array_ops.concat(
[
layers.conv2d(
branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
layers.conv2d(
branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')
],
3)
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
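# Illustrative sketch: inception_v3_base can also serve as a standalone feature
# extractor by requesting an intermediate endpoint. This is a minimal example
# with random 299x299 inputs; the _example_* helper name is made up for
# illustration and is not part of the slim API.
def _example_inception_v3_features(batch_size=2):
  """Builds the network up to 'Mixed_6e' and returns the 17x17x768 features."""
  from tensorflow.python.ops import random_ops  # local import for the sketch
  images = random_ops.random_uniform((batch_size, 299, 299, 3))
  features, end_points = inception_v3_base(images, final_endpoint='Mixed_6e')
  # end_points also holds every earlier endpoint, e.g. end_points['Mixed_5d'].
  return features, end_points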
def inception_v3(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.8,
min_depth=16,
depth_multiplier=1.0,
prediction_fn=layers_lib.softmax,
spatial_squeeze=True,
reuse=None,
scope='InceptionV3'):
"""Inception model from http://arxiv.org/abs/1512.00567.
"Rethinking the Inception Architecture for Computer Vision"
Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
Zbigniew Wojna.
With the default arguments this method constructs the exact model defined in
the paper. However, one can experiment with variations of the inception_v3
network by changing arguments dropout_keep_prob, min_depth and
depth_multiplier.
The default image size used to train this network is 299x299.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
    is_training: whether the model is being trained or not.
    dropout_keep_prob: the fraction of activation values that are retained.
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C]; if False, logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
To use this parameter, the input images must be smaller
than 300x300 pixels, in which case the output logit layer
does not contain spatial information and can be removed.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, num_classes]
end_points: a dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: if 'depth_multiplier' is less than or equal to zero.
"""
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
depth = lambda d: max(int(d * depth_multiplier), min_depth)
with variable_scope.variable_scope(
scope, 'InceptionV3', [inputs, num_classes], reuse=reuse) as scope:
with arg_scope(
[layers_lib.batch_norm, layers_lib.dropout], is_training=is_training):
net, end_points = inception_v3_base(
inputs,
scope=scope,
min_depth=min_depth,
depth_multiplier=depth_multiplier)
# Auxiliary Head logits
with arg_scope(
[layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
stride=1,
padding='SAME'):
aux_logits = end_points['Mixed_6e']
with variable_scope.variable_scope('AuxLogits'):
aux_logits = layers_lib.avg_pool2d(
aux_logits, [5, 5],
stride=3,
padding='VALID',
scope='AvgPool_1a_5x5')
aux_logits = layers.conv2d(
aux_logits, depth(128), [1, 1], scope='Conv2d_1b_1x1')
# Shape of feature map before the final layer.
kernel_size = _reduced_kernel_size_for_small_input(aux_logits, [5, 5])
aux_logits = layers.conv2d(
aux_logits,
depth(768),
kernel_size,
weights_initializer=trunc_normal(0.01),
padding='VALID',
scope='Conv2d_2a_{}x{}'.format(*kernel_size))
aux_logits = layers.conv2d(
aux_logits,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
weights_initializer=trunc_normal(0.001),
scope='Conv2d_2b_1x1')
if spatial_squeeze:
aux_logits = array_ops.squeeze(
aux_logits, [1, 2], name='SpatialSqueeze')
end_points['AuxLogits'] = aux_logits
# Final pooling and prediction
with variable_scope.variable_scope('Logits'):
kernel_size = _reduced_kernel_size_for_small_input(net, [8, 8])
net = layers_lib.avg_pool2d(
net,
kernel_size,
padding='VALID',
scope='AvgPool_1a_{}x{}'.format(*kernel_size))
# 1 x 1 x 2048
net = layers_lib.dropout(
net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
end_points['PreLogits'] = net
# 2048
logits = layers.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='Conv2d_1c_1x1')
if spatial_squeeze:
logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')
# 1000
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
inception_v3.default_image_size = 299
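# Illustrative sketch: the usual way to combine inception_v3 with its default
# arg scope for inference. Minimal example with random inputs; the _example_*
# helper name is made up for illustration and is not part of the slim API.
def _example_inception_v3_classifier(batch_size=2, num_classes=1000):
  """Builds an InceptionV3 classifier under inception_v3_arg_scope (sketch)."""
  from tensorflow.python.ops import random_ops  # local import for the sketch
  size = inception_v3.default_image_size
  images = random_ops.random_uniform((batch_size, size, size, 3))
  with arg_scope(inception_v3_arg_scope()):
    logits, end_points = inception_v3(
        images, num_classes=num_classes, is_training=False)
  # logits has shape [batch_size, num_classes]; end_points['Predictions'] holds
  # the softmax output.
  return logits, end_points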
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
  If the shape of the input images is unknown at graph construction time, this
  function assumes that the input images are large enough.
Args:
input_tensor: input tensor of size [batch_size, height, width, channels].
kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
Returns:
    the kernel size to use, as a list [kernel_height, kernel_width].
TODO(jrru): Make this function work with unknown shapes. Theoretically, this
can be done with the code below. Problems are two-fold: (1) If the shape was
  known, it will be lost. (2) inception.tf.contrib.slim.ops._two_element_tuple
  cannot handle tensors that define the kernel size.
      shape = tf.shape(input_tensor)
      return tf.stack([tf.minimum(shape[1], kernel_size[0]),
                       tf.minimum(shape[2], kernel_size[1])])
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size_out = kernel_size
else:
kernel_size_out = [
min(shape[1], kernel_size[0]), min(shape[2], kernel_size[1])
]
return kernel_size_out
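# Illustrative sketch of the reduction above with statically shaped inputs: a
# feature map smaller than the requested kernel clips the kernel to the map
# size, while a map at least as large (or with unknown spatial dimensions)
# keeps the requested size. The _example_* helper name is made up for
# illustration only.
def _example_reduced_kernel_size():
  small = array_ops.zeros([1, 5, 5, 768])
  large = array_ops.zeros([1, 8, 8, 2048])
  assert _reduced_kernel_size_for_small_input(small, [8, 8]) == [5, 5]
  assert _reduced_kernel_size_for_small_input(large, [8, 8]) == [8, 8]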
def inception_v3_arg_scope(weight_decay=0.00004,
batch_norm_var_collection='moving_vars',
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001,
updates_collections=ops.GraphKeys.UPDATE_OPS,
use_fused_batchnorm=True):
"""Defines the default InceptionV3 arg scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_var_collection: The name of the collection for the batch norm
variables.
    batch_norm_decay: Decay for batch norm moving average.
    batch_norm_epsilon: Small float added to variance to avoid division by zero.
    updates_collections: Collections for the update ops of the layer.
use_fused_batchnorm: Enable fused batchnorm.
Returns:
An `arg_scope` to use for the inception v3 model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
# collection containing update_ops.
'updates_collections': updates_collections,
# Use fused batch norm if possible.
'fused': use_fused_batchnorm,
# collection containing the moving mean and moving variance.
'variables_collections': {
'beta': None,
'gamma': None,
'moving_mean': [batch_norm_var_collection],
'moving_variance': [batch_norm_var_collection],
}
}
# Set weight_decay for weights in Conv and FC layers.
with arg_scope(
[layers.conv2d, layers_lib.fully_connected],
weights_regularizer=regularizers.l2_regularizer(weight_decay)):
with arg_scope(
[layers.conv2d],
weights_initializer=initializers.variance_scaling_initializer(),
activation_fn=nn_ops.relu,
normalizer_fn=layers_lib.batch_norm,
normalizer_params=batch_norm_params) as sc:
return sc
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/nets/inception_v3.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v2 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)
def inception_v2_base(inputs,
final_endpoint='Mixed_5c',
min_depth=16,
depth_multiplier=1.0,
scope=None):
"""Inception v2 (6a2).
Constructs an Inception v2 network from inputs to the given final endpoint.
This method can construct the network up to the layer inception(5b) as
described in http://arxiv.org/abs/1502.03167.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'Mixed_4a',
'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_5a', 'Mixed_5b',
'Mixed_5c'].
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
scope: Optional variable_scope.
Returns:
tensor_out: output tensor corresponding to the final_endpoint.
    end_points: a dictionary of activations for external use, for example
      summaries or losses.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
or depth_multiplier <= 0
"""
# end_points will collect relevant activations for external use, for example
# summaries or losses.
end_points = {}
# Used to find thinned depths for each layer.
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
depth = lambda d: max(int(d * depth_multiplier), min_depth)
with variable_scope.variable_scope(scope, 'InceptionV2', [inputs]):
with arg_scope(
[
layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d,
layers.separable_conv2d
],
stride=1,
padding='SAME'):
# Note that sizes in the comments below assume an input spatial size of
      # 224x224; however, the inputs can be of any size greater than 32x32.
# 224 x 224 x 3
end_point = 'Conv2d_1a_7x7'
# depthwise_multiplier here is different from depth_multiplier.
# depthwise_multiplier determines the output channels of the initial
# depthwise conv (see docs for tf.nn.separable_conv2d), while
# depth_multiplier controls the # channels of the subsequent 1x1
# convolution. Must have
      # in_channels * depthwise_multiplier <= out_channels
# so that the separable convolution is not overparameterized.
depthwise_multiplier = min(int(depth(64) / 3), 8)
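      # For example, with the default depth_multiplier=1.0 and min_depth=16,
      # depth(64) == 64, so depthwise_multiplier = min(64 // 3, 8) = 8 and the
      # constraint holds: in_channels * depthwise_multiplier = 3 * 8 = 24 <= 64.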
net = layers.separable_conv2d(
inputs,
depth(64), [7, 7],
depth_multiplier=depthwise_multiplier,
stride=2,
weights_initializer=trunc_normal(1.0),
scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 112 x 112 x 64
end_point = 'MaxPool_2a_3x3'
net = layers_lib.max_pool2d(net, [3, 3], scope=end_point, stride=2)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 56 x 56 x 64
end_point = 'Conv2d_2b_1x1'
net = layers.conv2d(
net,
depth(64), [1, 1],
scope=end_point,
weights_initializer=trunc_normal(0.1))
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 56 x 56 x 64
end_point = 'Conv2d_2c_3x3'
net = layers.conv2d(net, depth(192), [3, 3], scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 56 x 56 x 192
end_point = 'MaxPool_3a_3x3'
net = layers_lib.max_pool2d(net, [3, 3], scope=end_point, stride=2)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 28 x 28 x 192
# Inception module.
end_point = 'Mixed_3b'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(64), [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net,
depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3,
depth(32), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 28 x 28 x 256
end_point = 'Mixed_3c'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net,
depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3,
depth(64), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 28 x 28 x 320
end_point = 'Mixed_4a'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_0 = layers.conv2d(
branch_0, depth(160), [3, 3], stride=2, scope='Conv2d_1a_3x3')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_1 = layers.conv2d(
branch_1, depth(96), [3, 3], stride=2, scope='Conv2d_1a_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers_lib.max_pool2d(
net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
net = array_ops.concat([branch_0, branch_1, branch_2], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4b'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(224), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net,
depth(96), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(128), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(128), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4c'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(96), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(128), [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net,
depth(96), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(128), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(128), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4d'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(160), [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(160), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(160), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3,
depth(96), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4e'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(96), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(192), [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net,
depth(160), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(192), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(192), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3,
depth(96), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_5a'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_0 = layers.conv2d(
branch_0, depth(192), [3, 3], stride=2, scope='Conv2d_1a_3x3')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(256), [3, 3], scope='Conv2d_0b_3x3')
branch_1 = layers.conv2d(
branch_1, depth(256), [3, 3], stride=2, scope='Conv2d_1a_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers_lib.max_pool2d(
net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
net = array_ops.concat([branch_0, branch_1, branch_2], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 7 x 7 x 1024
end_point = 'Mixed_5b'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net,
depth(160), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 7 x 7 x 1024
end_point = 'Mixed_5c'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net,
depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
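# Illustrative sketch: inception_v2_base used as a standalone feature extractor
# that stops at an intermediate endpoint. Minimal example with random 224x224
# inputs; the _example_* helper name is made up for illustration only.
def _example_inception_v2_features(batch_size=2):
  """Builds the network up to 'Mixed_4e' and returns the 14x14x576 features."""
  from tensorflow.python.ops import random_ops  # local import for the sketch
  images = random_ops.random_uniform((batch_size, 224, 224, 3))
  features, end_points = inception_v2_base(images, final_endpoint='Mixed_4e')
  return features, end_points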
def inception_v2(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.8,
min_depth=16,
depth_multiplier=1.0,
prediction_fn=layers_lib.softmax,
spatial_squeeze=True,
reuse=None,
scope='InceptionV2'):
"""Inception v2 model for classification.
Constructs an Inception v2 network for classification as described in
http://arxiv.org/abs/1502.03167.
The recommended image size used to train this network is 224x224. For image
sizes that differ substantially, it is recommended to use inception_v2_base()
and connect custom final layers to the output.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
num_classes: number of predicted classes.
    is_training: whether the model is being trained or not.
    dropout_keep_prob: the fraction of activation values that are retained.
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C]; if False, logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
Note that input image sizes other than 224x224 might lead to different
spatial dimensions, and hence cannot be squeezed. In this event,
it is best to set spatial_squeeze as False, and perform a reduce_mean
over the resulting spatial dimensions with sizes exceeding 1.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, num_classes]
end_points: a dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: if depth_multiplier <= 0.
"""
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
# Final pooling and prediction
with variable_scope.variable_scope(
scope, 'InceptionV2', [inputs, num_classes], reuse=reuse) as scope:
with arg_scope(
[layers_lib.batch_norm, layers_lib.dropout], is_training=is_training):
net, end_points = inception_v2_base(
inputs,
scope=scope,
min_depth=min_depth,
depth_multiplier=depth_multiplier)
with variable_scope.variable_scope('Logits'):
kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])
net = layers_lib.avg_pool2d(
net,
kernel_size,
padding='VALID',
scope='AvgPool_1a_{}x{}'.format(*kernel_size))
# 1 x 1 x 1024
net = layers_lib.dropout(
net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
logits = layers.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='Conv2d_1c_1x1')
if spatial_squeeze:
logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
inception_v2.default_image_size = 224
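# Illustrative sketch of the spatial_squeeze note above: for inputs much larger
# than 224x224 the logits keep spatial dimensions, which can then be averaged
# away with a reduce_mean rather than squeezed. Minimal example with random
# inputs; the _example_* helper name is made up for illustration only.
def _example_inception_v2_large_input(batch_size=2, num_classes=1000):
  from tensorflow.python.ops import math_ops  # local imports for the sketch
  from tensorflow.python.ops import random_ops
  images = random_ops.random_uniform((batch_size, 448, 448, 3))
  logits, _ = inception_v2(
      images, num_classes=num_classes, is_training=False,
      spatial_squeeze=False)
  # logits has shape [batch_size, height, width, num_classes], with spatial
  # dimensions larger than 1 for 448x448 inputs.
  return math_ops.reduce_mean(logits, [1, 2], name='global_avg_logits')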
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
  If the shape of the input images is unknown at graph construction time, this
  function assumes that the input images are large enough.
Args:
input_tensor: input tensor of size [batch_size, height, width, channels].
kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
Returns:
    the kernel size to use, as a list [kernel_height, kernel_width].
TODO(jrru): Make this function work with unknown shapes. Theoretically, this
can be done with the code below. Problems are two-fold: (1) If the shape was
known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
handle tensors that define the kernel size.
shape = tf.shape(input_tensor)
      return tf.stack([tf.minimum(shape[1], kernel_size[0]),
                       tf.minimum(shape[2], kernel_size[1])])
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size_out = kernel_size
else:
kernel_size_out = [
min(shape[1], kernel_size[0]), min(shape[2], kernel_size[1])
]
return kernel_size_out
def inception_v2_arg_scope(weight_decay=0.00004,
batch_norm_var_collection='moving_vars'):
"""Defines the default InceptionV2 arg scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_var_collection: The name of the collection for the batch norm
variables.
Returns:
    An `arg_scope` to use for the inception v2 model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.9997,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# collection containing update_ops.
'updates_collections': ops.GraphKeys.UPDATE_OPS,
# collection containing the moving mean and moving variance.
'variables_collections': {
'beta': None,
'gamma': None,
'moving_mean': [batch_norm_var_collection],
'moving_variance': [batch_norm_var_collection],
}
}
# Set weight_decay for weights in Conv and FC layers.
with arg_scope(
[layers.conv2d, layers_lib.fully_connected],
weights_regularizer=regularizers.l2_regularizer(weight_decay)):
with arg_scope(
[layers.conv2d],
weights_initializer=initializers.variance_scaling_initializer(),
activation_fn=nn_ops.relu,
normalizer_fn=layers_lib.batch_norm,
normalizer_params=batch_norm_params) as sc:
return sc
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/nets/inception_v2.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nets.inception_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.slim.python.slim import model_analyzer
from tensorflow.contrib.slim.python.slim.nets import inception_v2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class InceptionV2Test(test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, end_points = inception_v2.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 224, 224
inputs = random_ops.random_uniform((batch_size, height, width, 3))
mixed_5c, end_points = inception_v2.inception_v2_base(inputs)
self.assertTrue(mixed_5c.op.name.startswith('InceptionV2/Mixed_5c'))
self.assertListEqual(mixed_5c.get_shape().as_list(),
[batch_size, 7, 7, 1024])
expected_endpoints = [
'Mixed_3b', 'Mixed_3c', 'Mixed_4a', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d',
'Mixed_4e', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Conv2d_1a_7x7',
'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3', 'MaxPool_3a_3x3'
]
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
height, width = 224, 224
endpoints = [
'Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3',
'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'Mixed_4a', 'Mixed_4b',
'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c'
]
for index, endpoint in enumerate(endpoints):
with ops.Graph().as_default():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
out_tensor, end_points = inception_v2.inception_v2_base(
inputs, final_endpoint=endpoint)
self.assertTrue(
out_tensor.op.name.startswith('InceptionV2/' + endpoint))
self.assertItemsEqual(endpoints[:index + 1], end_points)
def testBuildAndCheckAllEndPointsUptoMixed5c(self):
batch_size = 5
height, width = 224, 224
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = inception_v2.inception_v2_base(
inputs, final_endpoint='Mixed_5c')
endpoints_shapes = {
'Mixed_3b': [batch_size, 28, 28, 256],
'Mixed_3c': [batch_size, 28, 28, 320],
'Mixed_4a': [batch_size, 14, 14, 576],
'Mixed_4b': [batch_size, 14, 14, 576],
'Mixed_4c': [batch_size, 14, 14, 576],
'Mixed_4d': [batch_size, 14, 14, 576],
'Mixed_4e': [batch_size, 14, 14, 576],
'Mixed_5a': [batch_size, 7, 7, 1024],
'Mixed_5b': [batch_size, 7, 7, 1024],
'Mixed_5c': [batch_size, 7, 7, 1024],
'Conv2d_1a_7x7': [batch_size, 112, 112, 64],
'MaxPool_2a_3x3': [batch_size, 56, 56, 64],
'Conv2d_2b_1x1': [batch_size, 56, 56, 64],
'Conv2d_2c_3x3': [batch_size, 56, 56, 192],
'MaxPool_3a_3x3': [batch_size, 28, 28, 192]
}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 224, 224
inputs = random_ops.random_uniform((batch_size, height, width, 3))
with arg_scope(inception_v2.inception_v2_arg_scope()):
inception_v2.inception_v2_base(inputs)
total_params, _ = model_analyzer.analyze_vars(
variables_lib.get_model_variables())
self.assertAlmostEqual(10173112, total_params)
def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = inception_v2.inception_v2(inputs, num_classes)
endpoint_keys = [
key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')
]
_, end_points_with_multiplier = inception_v2.inception_v2(
inputs, num_classes, scope='depth_multiplied_net', depth_multiplier=0.5)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(0.5 * original_depth, new_depth)
def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = inception_v2.inception_v2(inputs, num_classes)
endpoint_keys = [
key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')
]
_, end_points_with_multiplier = inception_v2.inception_v2(
inputs, num_classes, scope='depth_multiplied_net', depth_multiplier=2.0)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(2.0 * original_depth, new_depth)
def testRaiseValueErrorWithInvalidDepthMultiplier(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
with self.assertRaises(ValueError):
_ = inception_v2.inception_v2(inputs, num_classes, depth_multiplier=-0.1)
with self.assertRaises(ValueError):
_ = inception_v2.inception_v2(inputs, num_classes, depth_multiplier=0.0)
def testHalfSizeImages(self):
batch_size = 5
height, width = 112, 112
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, end_points = inception_v2.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 4, 4, 1024])
def testUnknownImageShape(self):
ops.reset_default_graph()
batch_size = 2
height, width = 224, 224
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.cached_session() as sess:
inputs = array_ops.placeholder(
dtypes.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception_v2.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
variables.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testUnknownBatchSize(self):
batch_size = 1
height, width = 224, 224
num_classes = 1000
inputs = array_ops.placeholder(dtypes.float32, (None, height, width, 3))
logits, _ = inception_v2.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(), [None, num_classes])
images = random_ops.random_uniform((batch_size, height, width, 3))
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = inception_v2.inception_v2(
eval_inputs, num_classes, is_training=False)
predictions = math_ops.argmax(logits, 1)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 150, 150
num_classes = 1000
train_inputs = random_ops.random_uniform(
(train_batch_size, height, width, 3))
inception_v2.inception_v2(train_inputs, num_classes)
eval_inputs = random_ops.random_uniform((eval_batch_size, height, width, 3))
logits, _ = inception_v2.inception_v2(eval_inputs, num_classes, reuse=True)
predictions = math_ops.argmax(logits, 1)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = random_ops.random_uniform([1, 224, 224, 3])
logits, _ = inception_v2.inception_v2(
images, num_classes=num_classes, spatial_squeeze=False)
with self.cached_session() as sess:
variables.global_variables_initializer().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/nets/inception_v2_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the model definition for the OverFeat network.
The definition for the network was obtained from:
OverFeat: Integrated Recognition, Localization and Detection using
Convolutional Networks
Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
Yann LeCun, 2014
http://arxiv.org/abs/1312.6229
Usage:
with slim.arg_scope(overfeat.overfeat_arg_scope()):
outputs, end_points = overfeat.overfeat(inputs)
@@overfeat
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)
def overfeat_arg_scope(weight_decay=0.0005):
with arg_scope(
[layers.conv2d, layers_lib.fully_connected],
activation_fn=nn_ops.relu,
weights_regularizer=regularizers.l2_regularizer(weight_decay),
biases_initializer=init_ops.zeros_initializer()):
with arg_scope([layers.conv2d], padding='SAME'):
with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:
return arg_sc
def overfeat(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='overfeat'):
"""Contains the model definition for the OverFeat network.
The definition for the network was obtained from:
OverFeat: Integrated Recognition, Localization and Detection using
Convolutional Networks
Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
Yann LeCun, 2014
http://arxiv.org/abs/1312.6229
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 231x231. To use in fully
  convolutional mode, set spatial_squeeze to False.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
outputs. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
Returns:
the last op containing the log predictions and end_points dict.
"""
with variable_scope.variable_scope(scope, 'overfeat', [inputs]) as sc:
end_points_collection = sc.name + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d
with arg_scope(
[layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
outputs_collections=end_points_collection):
net = layers.conv2d(
inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
net = layers.conv2d(net, 256, [5, 5], padding='VALID', scope='conv2')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
net = layers.conv2d(net, 512, [3, 3], scope='conv3')
net = layers.conv2d(net, 1024, [3, 3], scope='conv4')
net = layers.conv2d(net, 1024, [3, 3], scope='conv5')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
with arg_scope(
[layers.conv2d],
weights_initializer=trunc_normal(0.005),
biases_initializer=init_ops.constant_initializer(0.1)):
# Use conv2d instead of fully_connected layers.
net = layers.conv2d(net, 3072, [6, 6], padding='VALID', scope='fc6')
net = layers_lib.dropout(
net, dropout_keep_prob, is_training=is_training, scope='dropout6')
net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
net = layers_lib.dropout(
net, dropout_keep_prob, is_training=is_training, scope='dropout7')
net = layers.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=init_ops.zeros_initializer(),
scope='fc8')
# Convert end_points_collection into a end_point dict.
end_points = utils.convert_collection_to_dict(end_points_collection)
if spatial_squeeze:
net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
end_points[sc.name + '/fc8'] = net
return net, end_points
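# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): build OverFeat in
# fully convolutional mode, as the docstring above suggests. The 281x281 input
# size and the expected output shape are illustrative assumptions.
if __name__ == '__main__':
  from tensorflow.python.ops import random_ops  # demo-only import
  demo_images = random_ops.random_uniform((1, 281, 281, 3))
  with arg_scope(overfeat_arg_scope()):
    demo_logits, _ = overfeat(demo_images, is_training=False,
                              spatial_squeeze=False)
  # With spatial_squeeze=False the logits keep their spatial dimensions, one
  # prediction map per image location (expected [1, 2, 2, 1000] here).
  print(demo_logits.get_shape().as_list())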
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/nets/overfeat.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nets.inception_v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.slim.python.slim import model_analyzer
from tensorflow.contrib.slim.python.slim.nets import inception_v1
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class InceptionV1Test(test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, end_points = inception_v1.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 224, 224
inputs = random_ops.random_uniform((batch_size, height, width, 3))
    mixed_5c, end_points = inception_v1.inception_v1_base(inputs)
    self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))
    self.assertListEqual(mixed_5c.get_shape().as_list(),
[batch_size, 7, 7, 1024])
expected_endpoints = [
'Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3',
'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b',
'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2',
'Mixed_5b', 'Mixed_5c'
]
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
height, width = 224, 224
endpoints = [
'Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3',
'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b',
'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2',
'Mixed_5b', 'Mixed_5c'
]
for index, endpoint in enumerate(endpoints):
with ops.Graph().as_default():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
out_tensor, end_points = inception_v1.inception_v1_base(
inputs, final_endpoint=endpoint)
self.assertTrue(
out_tensor.op.name.startswith('InceptionV1/' + endpoint))
self.assertItemsEqual(endpoints[:index + 1], end_points)
def testBuildAndCheckAllEndPointsUptoMixed5c(self):
batch_size = 5
height, width = 224, 224
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = inception_v1.inception_v1_base(
inputs, final_endpoint='Mixed_5c')
endpoints_shapes = {
'Conv2d_1a_7x7': [5, 112, 112, 64],
'MaxPool_2a_3x3': [5, 56, 56, 64],
'Conv2d_2b_1x1': [5, 56, 56, 64],
'Conv2d_2c_3x3': [5, 56, 56, 192],
'MaxPool_3a_3x3': [5, 28, 28, 192],
'Mixed_3b': [5, 28, 28, 256],
'Mixed_3c': [5, 28, 28, 480],
'MaxPool_4a_3x3': [5, 14, 14, 480],
'Mixed_4b': [5, 14, 14, 512],
'Mixed_4c': [5, 14, 14, 512],
'Mixed_4d': [5, 14, 14, 512],
'Mixed_4e': [5, 14, 14, 528],
'Mixed_4f': [5, 14, 14, 832],
'MaxPool_5a_2x2': [5, 7, 7, 832],
'Mixed_5b': [5, 7, 7, 832],
'Mixed_5c': [5, 7, 7, 1024]
}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 224, 224
inputs = random_ops.random_uniform((batch_size, height, width, 3))
with arg_scope(inception_v1.inception_v1_arg_scope()):
inception_v1.inception_v1_base(inputs)
total_params, _ = model_analyzer.analyze_vars(
variables_lib.get_model_variables())
self.assertAlmostEqual(5607184, total_params)
def testHalfSizeImages(self):
batch_size = 5
height, width = 112, 112
inputs = random_ops.random_uniform((batch_size, height, width, 3))
mixed_5c, _ = inception_v1.inception_v1_base(inputs)
self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))
self.assertListEqual(mixed_5c.get_shape().as_list(),
[batch_size, 4, 4, 1024])
def testUnknownImageShape(self):
ops.reset_default_graph()
batch_size = 2
height, width = 224, 224
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.cached_session() as sess:
inputs = array_ops.placeholder(
dtypes.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception_v1.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
variables.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testUnknownBatchSize(self):
batch_size = 1
height, width = 224, 224
num_classes = 1000
inputs = array_ops.placeholder(dtypes.float32, (None, height, width, 3))
logits, _ = inception_v1.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(), [None, num_classes])
images = random_ops.random_uniform((batch_size, height, width, 3))
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = inception_v1.inception_v1(
eval_inputs, num_classes, is_training=False)
predictions = math_ops.argmax(logits, 1)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 224, 224
num_classes = 1000
train_inputs = random_ops.random_uniform(
(train_batch_size, height, width, 3))
inception_v1.inception_v1(train_inputs, num_classes)
eval_inputs = random_ops.random_uniform((eval_batch_size, height, width, 3))
logits, _ = inception_v1.inception_v1(eval_inputs, num_classes, reuse=True)
predictions = math_ops.argmax(logits, 1)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = random_ops.random_uniform([1, 224, 224, 3])
logits, _ = inception_v1.inception_v1(
images, num_classes=num_classes, spatial_squeeze=False)
with self.cached_session() as sess:
variables.global_variables_initializer().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/nets/inception_v1_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a model definition for AlexNet.
This work was first described in:
ImageNet Classification with Deep Convolutional Neural Networks
Alex Krizhevsky, Ilya Sutskever and Geoffrey E. Hinton
and later refined in:
One weird trick for parallelizing convolutional neural networks
Alex Krizhevsky, 2014
Here we provide the implementation proposed in "One weird trick" and not
"ImageNet Classification", as per the paper, the LRN layers have been removed.
Usage:
with slim.arg_scope(alexnet.alexnet_v2_arg_scope()):
outputs, end_points = alexnet.alexnet_v2(inputs)
@@alexnet_v2
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)
def alexnet_v2_arg_scope(weight_decay=0.0005):
with arg_scope(
[layers.conv2d, layers_lib.fully_connected],
activation_fn=nn_ops.relu,
biases_initializer=init_ops.constant_initializer(0.1),
weights_regularizer=regularizers.l2_regularizer(weight_decay)):
with arg_scope([layers.conv2d], padding='SAME'):
with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:
return arg_sc
def alexnet_v2(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='alexnet_v2'):
"""AlexNet version 2.
Described in: http://arxiv.org/pdf/1404.5997v2.pdf
Parameters from:
github.com/akrizhevsky/cuda-convnet2/blob/master/layers/
layers-imagenet-1gpu.cfg
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 224x224. To use in fully
convolutional mode, set spatial_squeeze to false.
  The LRN layers have been removed and the initializers changed from
  random_normal_initializer to xavier_initializer.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
Returns:
the last op containing the log predictions and end_points dict.
"""
with variable_scope.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with arg_scope(
[layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
outputs_collections=[end_points_collection]):
net = layers.conv2d(
inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool1')
net = layers.conv2d(net, 192, [5, 5], scope='conv2')
net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool2')
net = layers.conv2d(net, 384, [3, 3], scope='conv3')
net = layers.conv2d(net, 384, [3, 3], scope='conv4')
net = layers.conv2d(net, 256, [3, 3], scope='conv5')
net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool5')
# Use conv2d instead of fully_connected layers.
with arg_scope(
[layers.conv2d],
weights_initializer=trunc_normal(0.005),
biases_initializer=init_ops.constant_initializer(0.1)):
net = layers.conv2d(net, 4096, [5, 5], padding='VALID', scope='fc6')
net = layers_lib.dropout(
net, dropout_keep_prob, is_training=is_training, scope='dropout6')
net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
net = layers_lib.dropout(
net, dropout_keep_prob, is_training=is_training, scope='dropout7')
net = layers.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=init_ops.zeros_initializer(),
scope='fc8')
# Convert end_points_collection into a end_point dict.
end_points = utils.convert_collection_to_dict(end_points_collection)
if spatial_squeeze:
net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
end_points[sc.name + '/fc8'] = net
return net, end_points
alexnet_v2.default_image_size = 224
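# Illustrative sketch (an assumption, not part of the original module): the
# default_image_size attribute gives the input resolution expected in
# classification mode, e.g. for building an input placeholder:
#
#   from tensorflow.python.framework import dtypes
#
#   size = alexnet_v2.default_image_size  # 224
#   images = array_ops.placeholder(dtypes.float32, [None, size, size, 3])
#   with arg_scope(alexnet_v2_arg_scope()):
#     logits, _ = alexnet_v2(images, num_classes=1000, is_training=False)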
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/nets/alexnet.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the preactivation form of Residual Networks.
Residual networks (ResNets) were originally proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
The full preactivation 'v2' ResNet variant implemented in this module was
introduced by:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The key difference of the full preactivation 'v2' variant compared to the
'v1' variant in [1] is the use of batch normalization before every weight layer.
Typical use:
   from tensorflow.contrib.slim.python.slim.nets import resnet_v2
ResNet-101 for image classification into 1000 classes:
# inputs has shape [batch, 224, 224, 3]
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
net, end_points = resnet_v2.resnet_v2_101(inputs, 1000, is_training=False)
ResNet-101 for semantic segmentation into 21 classes:
# inputs has shape [batch, 513, 513, 3]
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
net, end_points = resnet_v2.resnet_v2_101(inputs,
21,
is_training=False,
global_pool=False,
output_stride=16)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
resnet_arg_scope = resnet_utils.resnet_arg_scope
@add_arg_scope
def bottleneck(inputs,
depth,
depth_bottleneck,
stride,
rate=1,
outputs_collections=None,
scope=None):
"""Bottleneck residual unit variant with BN before convolutions.
This is the full preactivation residual unit variant proposed in [2]. See
Fig. 1(b) of [2] for its definition. Note that we use here the bottleneck
variant which has an extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
depth_bottleneck: The depth of the bottleneck layers.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the units output compared to its input.
rate: An integer, rate for atrous convolution.
outputs_collections: Collection to add the ResNet unit output.
scope: Optional variable_scope.
Returns:
The ResNet unit's output.
"""
with variable_scope.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
depth_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
preact = layers.batch_norm(
inputs, activation_fn=nn_ops.relu, scope='preact')
if depth == depth_in:
shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
else:
shortcut = layers_lib.conv2d(
preact,
depth, [1, 1],
stride=stride,
normalizer_fn=None,
activation_fn=None,
scope='shortcut')
residual = layers_lib.conv2d(
preact, depth_bottleneck, [1, 1], stride=1, scope='conv1')
residual = resnet_utils.conv2d_same(
residual, depth_bottleneck, 3, stride, rate=rate, scope='conv2')
residual = layers_lib.conv2d(
residual,
depth, [1, 1],
stride=1,
normalizer_fn=None,
activation_fn=None,
scope='conv3')
output = shortcut + residual
return utils.collect_named_outputs(outputs_collections, sc.name, output)
def resnet_v2(inputs,
blocks,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
reuse=None,
scope=None):
"""Generator for v2 (preactivation) ResNet models.
This function generates a family of ResNet v2 models. See the resnet_v2_*()
methods for specific model instantiations, obtained by selecting different
block instantiations that produce ResNets of various depths.
Training for image classification on Imagenet is usually done with [224, 224]
inputs, resulting in [7, 7] feature maps at the output of the last ResNet
block for the ResNets defined in [1] that have nominal stride equal to 32.
However, for dense prediction tasks we advise that one uses inputs with
spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
this case the feature maps at the ResNet output will have spatial shape
[(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
and corners exactly aligned with the input image corners, which greatly
facilitates alignment of the features to the image. Using as input [225, 225]
images results in [8, 8] feature maps at the output of the last ResNet block.
For dense prediction tasks, the ResNet needs to run in fully-convolutional
(FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
have nominal stride equal to 32 and a good choice in FCN mode is to use
output_stride=16 in order to increase the density of the computed features at
small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
blocks: A list of length equal to the number of ResNet blocks. Each element
is a resnet_utils.Block object describing the units in the block.
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
is_training: whether batch_norm layers are in training mode.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
include_root_block: If True, include the initial convolution followed by
max-pooling, if False excludes it. If excluded, `inputs` should be the
results of an activation-less convolution.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is not None, net contains the pre-softmax
activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: If the target output_stride is not valid.
"""
with variable_scope.variable_scope(
scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
with arg_scope(
[layers_lib.conv2d, bottleneck, resnet_utils.stack_blocks_dense],
outputs_collections=end_points_collection):
with arg_scope([layers.batch_norm], is_training=is_training):
net = inputs
if include_root_block:
if output_stride is not None:
if output_stride % 4 != 0:
raise ValueError('The output_stride needs to be a multiple of 4.')
output_stride /= 4
# We do not include batch normalization or activation functions in
# conv1 because the first ResNet unit will perform these. Cf.
# Appendix of [2].
with arg_scope(
[layers_lib.conv2d], activation_fn=None, normalizer_fn=None):
net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
net = layers.max_pool2d(net, [3, 3], stride=2, scope='pool1')
net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
# This is needed because the pre-activation variant does not have batch
# normalization or activation functions in the residual unit output. See
# Appendix of [2].
net = layers.batch_norm(
net, activation_fn=nn_ops.relu, scope='postnorm')
if global_pool:
# Global average pooling.
net = math_ops.reduce_mean(net, [1, 2], name='pool5', keepdims=True)
if num_classes is not None:
net = layers_lib.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='logits')
# Convert end_points_collection into a dictionary of end_points.
end_points = utils.convert_collection_to_dict(end_points_collection)
if num_classes is not None:
end_points['predictions'] = layers.softmax(net, scope='predictions')
return net, end_points
resnet_v2.default_image_size = 224
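# Worked example for the dense-prediction arithmetic described in the module
# docstring (illustrative numbers, not part of the original module): with a
# [batch, 321, 321, 3] input, global_pool=False and output_stride=16, the
# output feature maps have spatial shape
#   (321 - 1) / 16 + 1 = 21
# i.e. [batch, 21, 21, channels_out], with corners aligned to the input image.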
def resnet_v2_block(scope, base_depth, num_units, stride):
"""Helper function for creating a resnet_v2 bottleneck block.
Args:
scope: The scope of the block.
base_depth: The depth of the bottleneck layer for each unit.
num_units: The number of units in the block.
stride: The stride of the block, implemented as a stride in the last unit.
All other units have stride=1.
Returns:
A resnet_v2 bottleneck block.
"""
return resnet_utils.Block(scope, bottleneck, [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': 1
}] * (num_units - 1) + [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': stride
}])
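# For example (illustrative, not part of the original module),
#   resnet_v2_block('block1', base_depth=64, num_units=3, stride=2)
# describes three bottleneck units of output depth 64 * 4 = 256; the first two
# units use stride 1 and only the last uses stride 2, matching the convention
# that downsampling happens in the last unit of a block.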
def resnet_v2_50(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v2_50'):
"""ResNet-50 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v2_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v2_block('block3', base_depth=256, num_units=6, stride=2),
resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v2(
inputs,
blocks,
num_classes,
is_training,
global_pool,
output_stride,
include_root_block=True,
reuse=reuse,
scope=scope)
def resnet_v2_101(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v2_101'):
"""ResNet-101 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v2_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v2_block('block3', base_depth=256, num_units=23, stride=2),
resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v2(
inputs,
blocks,
num_classes,
is_training,
global_pool,
output_stride,
include_root_block=True,
reuse=reuse,
scope=scope)
def resnet_v2_152(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v2_152'):
"""ResNet-152 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v2_block('block2', base_depth=128, num_units=8, stride=2),
resnet_v2_block('block3', base_depth=256, num_units=36, stride=2),
resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v2(
inputs,
blocks,
num_classes,
is_training,
global_pool,
output_stride,
include_root_block=True,
reuse=reuse,
scope=scope)
def resnet_v2_200(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v2_200'):
"""ResNet-200 model of [2]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v2_block('block2', base_depth=128, num_units=24, stride=2),
resnet_v2_block('block3', base_depth=256, num_units=36, stride=2),
resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v2(
inputs,
blocks,
num_classes,
is_training,
global_pool,
output_stride,
include_root_block=True,
reuse=reuse,
scope=scope)
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/nets/resnet_v2.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements a parallel data reader with queues and optional shuffling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import gfile
from tensorflow.python.summary import summary
from tensorflow.python.training import input as tf_input
from tensorflow.python.training import queue_runner
class ParallelReader(io_ops.ReaderBase):
"""Reader class that uses multiple readers in parallel to improve speed.
See ReaderBase for supported methods.
"""
def __init__(self,
reader_class,
common_queue,
num_readers=4,
reader_kwargs=None):
"""ParallelReader creates num_readers instances of the reader_class.
Each instance is created by calling the `reader_class` function passing
the arguments specified in `reader_kwargs` as in:
      reader_class(**reader_kwargs)
When you read from a ParallelReader, with its `read()` method,
you just dequeue examples from the `common_queue`.
The readers will read different files in parallel, asynchronously enqueueing
their output into `common_queue`. The `common_queue.dtypes` must be
[tf.string, tf.string]
Because each reader can read from a different file, the examples in the
`common_queue` could be from different files. Due to the asynchronous
reading there is no guarantee that all the readers will read the same
number of examples.
If the `common_queue` is a shuffling queue, then the examples are shuffled.
Usage:
common_queue = tf.queue.RandomShuffleQueue(
capacity=256,
min_after_dequeue=128,
dtypes=[tf.string, tf.string])
p_reader = ParallelReader(tf.compat.v1.TFRecordReader, common_queue)
common_queue = tf.queue.FIFOQueue(
capacity=256,
dtypes=[tf.string, tf.string])
      p_reader = ParallelReader(
          tf.compat.v1.TFRecordReader, common_queue, num_readers=2)
Args:
reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader
common_queue: a Queue to hold (key, value pairs) with `dtypes` equal to
[tf.string, tf.string]. Must be one of the data_flow_ops.Queues
instances, ex. `tf.queue.FIFOQueue()`, `tf.queue.RandomShuffleQueue()`,
...
      num_readers: an integer, number of instances of reader_class to create.
reader_kwargs: an optional dict of kwargs to create the readers.
Raises:
TypeError: if `common_queue.dtypes` is not [tf.string, tf.string].
"""
if len(common_queue.dtypes) != 2:
raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')
for dtype in common_queue.dtypes:
if not dtype.is_compatible_with(tf_dtypes.string):
raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')
reader_kwargs = reader_kwargs or {}
self._readers = [reader_class(**reader_kwargs) for _ in range(num_readers)]
self._common_queue = common_queue
@property
def num_readers(self):
return len(self._readers)
@property
def common_queue(self):
return self._common_queue
def read(self, queue, name=None):
"""Returns the next record (key, value pair) produced by the reader.
The multiple reader instances are all configured to `read()` from the
filenames listed in `queue` and enqueue their output into the `common_queue`
passed to the constructor, and this method returns the next record dequeued
from that `common_queue`.
Readers dequeue a work unit from `queue` if necessary (e.g. when a
reader needs to start reading from a new file since it has finished with
the previous file).
A queue runner for enqueuing in the `common_queue` is automatically added
to the TF QueueRunners collection.
Args:
queue: A Queue or a mutable string Tensor representing a handle to a
Queue, with string work items.
name: A name for the operation (optional).
Returns:
The next record (i.e. (key, value pair)) from the common_queue.
"""
self._configure_readers_by(queue)
return self._common_queue.dequeue(name=name)
def read_up_to(self, queue, num_records, name=None):
"""Returns up to num_records (key, value pairs) produced by a reader.
Will dequeue a work unit from queue if necessary (e.g., when the
Reader needs to start reading from a new file since it has
finished with the previous file).
    It may return fewer than num_records even before the last batch.
**Note** This operation is not supported by all types of `common_queue`s.
If a `common_queue` does not support `dequeue_up_to()`, then a
`tf.errors.UnimplementedError` is raised.
Args:
queue: A Queue or a mutable string Tensor representing a handle to a
Queue, with string work items.
num_records: Number of records to read.
name: A name for the operation (optional).
Returns:
A tuple of Tensors (keys, values) from common_queue.
keys: A 1-D string Tensor.
values: A 1-D string Tensor.
"""
self._configure_readers_by(queue)
return self._common_queue.dequeue_up_to(num_records, name)
def _configure_readers_by(self, queue):
enqueue_ops = []
for reader in self._readers:
enqueue_ops.append(self._common_queue.enqueue(reader.read(queue)))
queue_runner.add_queue_runner(
queue_runner.QueueRunner(self._common_queue, enqueue_ops))
def num_records_produced(self, name=None):
"""Returns the number of records this reader has produced.
Args:
name: A name for the operation (optional).
Returns:
An int64 Tensor.
"""
num_records = [r.num_records_produced() for r in self._readers]
return math_ops.add_n(num_records, name=name)
def num_work_units_completed(self, name=None):
"""Returns the number of work units this reader has finished processing.
Args:
name: A name for the operation (optional).
Returns:
An int64 Tensor.
"""
num_work_units = [r.num_work_units_completed() for r in self._readers]
return math_ops.add_n(num_work_units, name=name)
def parallel_read(data_sources,
reader_class,
num_epochs=None,
num_readers=4,
reader_kwargs=None,
shuffle=True,
dtypes=None,
capacity=256,
min_after_dequeue=128,
seed=None,
scope=None):
"""Reads multiple records in parallel from data_sources using n readers.
  It uses a ParallelReader to read from multiple files in parallel using
  multiple readers created using `reader_class` with `reader_kwargs`.
  If shuffle is True the common_queue will be a RandomShuffleQueue, otherwise
  it will be a FIFOQueue.
Usage:
data_sources = ['path_to/train*']
    key, value = parallel_read(
        data_sources, tf.compat.v1.TFRecordReader, num_readers=4)
Args:
data_sources: a list/tuple of files or the location of the data, i.e.
/path/to/train@128, /path/to/train* or /tmp/.../train*
reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader
num_epochs: The number of times each data source is read. If left as None,
the data will be cycled through indefinitely.
    num_readers: an integer, number of Readers to create.
    reader_kwargs: an optional dict of kwargs for the reader.
    shuffle: boolean, whether to shuffle the files and the records by using a
      RandomShuffleQueue as the common_queue.
dtypes: A list of types. The length of dtypes must equal the number of
elements in each record. If it is None it will default to [tf.string,
tf.string] for (key, value).
capacity: integer, capacity of the common_queue.
min_after_dequeue: integer, minimum number of records in the common_queue
after dequeue. Needed for a good shuffle.
seed: A seed for RandomShuffleQueue.
scope: Optional name scope for the ops.
Returns:
key, value: a tuple of keys and values from the data_source.
"""
data_files = get_data_files(data_sources)
with ops.name_scope(scope, 'parallel_read'):
filename_queue = tf_input.string_input_producer(
data_files,
num_epochs=num_epochs,
shuffle=shuffle,
seed=seed,
name='filenames')
dtypes = dtypes or [tf_dtypes.string, tf_dtypes.string]
if shuffle:
common_queue = data_flow_ops.RandomShuffleQueue(
capacity=capacity,
min_after_dequeue=min_after_dequeue,
dtypes=dtypes,
seed=seed,
name='common_queue')
else:
common_queue = data_flow_ops.FIFOQueue(
capacity=capacity, dtypes=dtypes, name='common_queue')
summary.scalar(
'fraction_of_%d_full' % capacity,
math_ops.cast(common_queue.size(), tf_dtypes.float32) * (1. / capacity))
return ParallelReader(
reader_class,
common_queue,
num_readers=num_readers,
reader_kwargs=reader_kwargs).read(filename_queue)
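# A minimal usage sketch (the file pattern below is an assumption):
#
#   key, value = parallel_read(
#       ['/path/to/train-*.tfrecord'],
#       reader_class=io_ops.TFRecordReader,
#       num_readers=4,
#       shuffle=True)
#
# key/value are dequeued from the shuffling common_queue fed by four readers;
# the associated queue runners must be started in a session before evaluation.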
def single_pass_read(data_sources, reader_class, reader_kwargs=None,
scope=None):
"""Reads sequentially the data_sources using the reader, doing a single pass.
Args:
data_sources: a list/tuple of files or the location of the data, i.e.
/path/to/train@128, /path/to/train* or /tmp/.../train*
reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader.
    reader_kwargs: an optional dict of kwargs for the reader.
scope: Optional name scope for the ops.
Returns:
key, value: a tuple of keys and values from the data_source.
"""
data_files = get_data_files(data_sources)
with ops.name_scope(scope, 'single_pass_read'):
filename_queue = tf_input.string_input_producer(
data_files, num_epochs=1, shuffle=False, capacity=1, name='filenames')
reader_kwargs = reader_kwargs or {}
return reader_class(**reader_kwargs).read(filename_queue)
def get_data_files(data_sources):
"""Get data_files from data_sources.
Args:
data_sources: a list/tuple of files or the location of the data, i.e.
/path/to/train@128, /path/to/train* or /tmp/.../train*
Returns:
a list of data_files.
Raises:
ValueError: if data files are not found
"""
if isinstance(data_sources, (list, tuple)):
data_files = []
for source in data_sources:
data_files += get_data_files(source)
else:
if '*' in data_sources or '?' in data_sources or '[' in data_sources:
data_files = gfile.Glob(data_sources)
else:
data_files = [data_sources]
if not data_files:
raise ValueError('No data files found in %s' % (data_sources,))
return data_files
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/data/parallel_reader.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains test utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import image_ops
def _encoded_int64_feature(ndarray):
return feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=ndarray.flatten().tolist()))
def _encoded_bytes_feature(tf_encoded):
encoded = tf_encoded.eval()
def string_to_bytes(value):
return feature_pb2.BytesList(value=[value])
return feature_pb2.Feature(bytes_list=string_to_bytes(encoded))
def _string_feature(value):
value = value.encode('utf-8')
return feature_pb2.Feature(bytes_list=feature_pb2.BytesList(value=[value]))
def _encoder(image, image_format):
assert image_format in ['jpeg', 'png']
if image_format == 'jpeg':
tf_image = constant_op.constant(image, dtype=dtypes.uint8)
return image_ops.encode_jpeg(tf_image)
if image_format == 'png':
tf_image = constant_op.constant(image, dtype=dtypes.uint8)
return image_ops.encode_png(tf_image)
def generate_image(image_shape, image_format='jpeg', label=0):
"""Generates an image and an example containing the encoded image.
  generate_image must be called within an active session.
Args:
image_shape: the shape of the image to generate.
image_format: the encoding format of the image.
label: the int64 labels for the image.
Returns:
image: the generated image.
example: a TF-example with a feature key 'image/encoded' set to the
serialized image and a feature key 'image/format' set to the image
encoding format ['jpeg', 'png'].
"""
  # np.random.randint's upper bound is exclusive, so 256 keeps values in [0, 255].
  image = np.random.randint(0, 256, size=image_shape)
tf_encoded = _encoder(image, image_format)
example = example_pb2.Example(features=feature_pb2.Features(feature={
'image/encoded': _encoded_bytes_feature(tf_encoded),
'image/format': _string_feature(image_format),
'image/class/label': _encoded_int64_feature(np.array(label)),
}))
return image, example.SerializeToString()
def create_tfrecord_files(output_dir, num_files=3, num_records_per_file=10):
"""Creates TFRecords files.
The method must be called within an active session.
Args:
output_dir: The directory where the files are stored.
num_files: The number of files to create.
num_records_per_file: The number of records per file.
Returns:
A list of the paths to the TFRecord files.
"""
tfrecord_paths = []
for i in range(num_files):
path = os.path.join(output_dir,
'flowers.tfrecord-%d-of-%s' % (i, num_files))
tfrecord_paths.append(path)
writer = tf_record.TFRecordWriter(path)
for _ in range(num_records_per_file):
_, example = generate_image(image_shape=(10, 10, 3))
writer.write(example)
writer.close()
return tfrecord_paths
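# Usage sketch (illustrative; the output directory is an assumption and must
# already exist): both helpers above evaluate the encoding ops eagerly via
# .eval(), so they need an active default session.
#
#   from tensorflow.python.client import session
#
#   with session.Session():
#     paths = create_tfrecord_files('/tmp/slim_test_records', num_files=2)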
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/data/test_utils.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.data.prefetch_queue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.slim.python.slim.data import prefetch_queue
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
class PrefetchQueueTest(test.TestCase):
def testOneThread(self):
with self.cached_session() as sess:
batch_size = 10
image_size = 32
num_batches = 5
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
image = random_ops.random_normal(
[image_size, image_size, 3], dtype=dtypes.float32, name='images')
label = random_ops.random_uniform(
[1], 0, 10, dtype=dtypes.int32, name='labels')
batches = input_lib.batch(
[counter, image, label], batch_size=batch_size, num_threads=1)
batches = prefetch_queue.prefetch_queue(batches).dequeue()
variables.global_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
for i in range(num_batches):
results = sess.run(batches)
self.assertAllEqual(results[0],
np.arange(i * batch_size, (i + 1) * batch_size))
self.assertEquals(results[1].shape,
(batch_size, image_size, image_size, 3))
self.assertEquals(results[2].shape, (batch_size, 1))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batches)
for thread in threads:
thread.join()
def testMultiThread(self):
with self.cached_session() as sess:
batch_size = 10
image_size = 32
num_batches = 5
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
image = random_ops.random_normal(
[image_size, image_size, 3], dtype=dtypes.float32, name='images')
label = random_ops.random_uniform(
[1], 0, 10, dtype=dtypes.int32, name='labels')
batches = input_lib.batch(
[counter, image, label], batch_size=batch_size, num_threads=4)
batches = prefetch_queue.prefetch_queue(batches).dequeue()
variables.global_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
value_counter = []
for _ in range(num_batches):
results = sess.run(batches)
value_counter.append(results[0])
self.assertEqual(results[1].shape,
(batch_size, image_size, image_size, 3))
self.assertEqual(results[2].shape, (batch_size, 1))
self.assertAllEqual(
np.sort(np.concatenate(value_counter)),
np.arange(0, num_batches * batch_size))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batches)
for thread in threads:
thread.join()
def testMultipleDequeue(self):
with self.cached_session() as sess:
batch_size = 10
image_size = 32
num_batches = 4
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
image = random_ops.random_normal(
[image_size, image_size, 3], dtype=dtypes.float32, name='images')
label = random_ops.random_uniform(
[1], 0, 10, dtype=dtypes.int32, name='labels')
batches = input_lib.batch(
[counter, image, label], batch_size=batch_size, num_threads=4)
batcher = prefetch_queue.prefetch_queue(batches)
batches_list = [batcher.dequeue() for _ in range(2)]
variables.global_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
value_counter = []
for _ in range(int(num_batches / 2)):
for batches in batches_list:
results = sess.run(batches)
value_counter.append(results[0])
self.assertEquals(results[1].shape,
(batch_size, image_size, image_size, 3))
self.assertEquals(results[2].shape, (batch_size, 1))
self.assertAllEqual(
np.sort(np.concatenate(value_counter)),
np.arange(0, num_batches * batch_size))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batches)
for thread in threads:
thread.join()
def testDynamicPad_failure(self):
with ops.Graph().as_default():
variable_tensor = array_ops.placeholder(dtypes.int32, shape=[None, 3])
with self.assertRaisesRegexp(ValueError, 'shapes must be fully defined'):
prefetch_queue.prefetch_queue([variable_tensor])
def testDynamicPad(self):
with self.cached_session() as sess:
# Create 3 tensors of variable but compatible shapes.
var_shape = [None, 2]
p1 = constant_op.constant([[1, 2], [3, 4]])
p1.set_shape(var_shape)
p2 = constant_op.constant([[5, 6], [7, 8], [9, 10]])
p2.set_shape(var_shape)
p3 = constant_op.constant([[11, 12]])
p3.set_shape(var_shape)
batch = [p1, p2, p3]
batch_size = len(batch)
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(batch_size)
# Create a PaddingFIFOQueue to enqueue these tensors.
q = data_flow_ops.PaddingFIFOQueue(
capacity=10, dtypes=[dtypes.int32], shapes=[var_shape])
for tensor in [p1, p2, p3]:
q.enqueue([tensor]).run()
# Dequeue from the queue and batch them using batch().
batches = input_lib.batch([q.dequeue(), counter], batch_size=batch_size,
num_threads=1, dynamic_pad=True)
self.assertEqual([batch_size, None, 2], batches[0].shape.as_list())
# Finally, assemble them into prefetch_queue with dynamic_pad.
batcher = prefetch_queue.prefetch_queue(batches, dynamic_pad=True)
batches = batcher.dequeue()
self.assertEqual([batch_size, None, 2], batches[0].shape.as_list())
variables.global_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
values, _ = sess.run(batches)
# We enqueued 3 tensors of [None, 2] shapes, so using dynamic_pad
# they should be padded to the fixed size [3, 3, 2], where 3
# is the maximum length of the batch.
self.assertTrue(np.array_equal(
np.array([[[1, 2], [3, 4], [0, 0]],
[[5, 6], [7, 8], [9, 10]],
[[11, 12], [0, 0], [0, 0]]]),
values))
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batches)
for thread in threads:
thread.join()
def testDictConstruction(self):
with ops.Graph().as_default():
batches = {
'first': constant_op.constant([1]),
'second': constant_op.constant([2.0, 2.1])
}
prefetcher = prefetch_queue.prefetch_queue(batches)
dequeued = prefetcher.dequeue()
self.assertTrue(isinstance(dequeued, dict))
self.assertEqual(2, len(dequeued))
self.assertEqual(dtypes.int32, dequeued['first'].dtype)
self.assertEqual(dtypes.float32, dequeued['second'].dtype)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/data/prefetch_queue_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.data.dataset_data_provider."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from tensorflow.contrib.slim.python.slim import queues
from tensorflow.contrib.slim.python.slim.data import dataset
from tensorflow.contrib.slim.python.slim.data import dataset_data_provider
from tensorflow.contrib.slim.python.slim.data import test_utils
from tensorflow.contrib.slim.python.slim.data import tfexample_decoder
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
def _resize_image(image, height, width):
image = array_ops.expand_dims(image, 0)
image = image_ops.resize_bilinear(image, [height, width])
return array_ops.squeeze(image, [0])
def _create_tfrecord_dataset(tmpdir):
if not gfile.Exists(tmpdir):
gfile.MakeDirs(tmpdir)
data_sources = test_utils.create_tfrecord_files(tmpdir, num_files=1)
keys_to_features = {
'image/encoded':
parsing_ops.FixedLenFeature(
shape=(), dtype=dtypes.string, default_value=''),
'image/format':
parsing_ops.FixedLenFeature(
shape=(), dtype=dtypes.string, default_value='jpeg'),
'image/class/label':
parsing_ops.FixedLenFeature(
shape=[1],
dtype=dtypes.int64,
default_value=array_ops.zeros(
[1], dtype=dtypes.int64))
}
items_to_handlers = {
'image': tfexample_decoder.Image(),
'label': tfexample_decoder.Tensor('image/class/label'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
return dataset.Dataset(
data_sources=data_sources,
reader=io_ops.TFRecordReader,
decoder=decoder,
num_samples=100,
items_to_descriptions=None)
class DatasetDataProviderTest(test.TestCase):
def testTFRecordDataset(self):
dataset_dir = tempfile.mkdtemp(prefix=os.path.join(self.get_temp_dir(),
'tfrecord_dataset'))
height = 300
width = 280
with self.cached_session():
test_dataset = _create_tfrecord_dataset(dataset_dir)
provider = dataset_data_provider.DatasetDataProvider(test_dataset)
key, image, label = provider.get(['record_key', 'image', 'label'])
image = _resize_image(image, height, width)
with session.Session('') as sess:
with queues.QueueRunners(sess):
key, image, label = sess.run([key, image, label])
split_key = key.decode('utf-8').split(':')
self.assertEqual(2, len(split_key))
self.assertEqual(test_dataset.data_sources[0], split_key[0])
self.assertTrue(split_key[1].isdigit())
self.assertListEqual([height, width, 3], list(image.shape))
self.assertListEqual([1], list(label.shape))
def testTFRecordSeparateGetDataset(self):
dataset_dir = tempfile.mkdtemp(prefix=os.path.join(self.get_temp_dir(),
'tfrecord_separate_get'))
height = 300
width = 280
with self.cached_session():
provider = dataset_data_provider.DatasetDataProvider(
_create_tfrecord_dataset(dataset_dir))
[image] = provider.get(['image'])
[label] = provider.get(['label'])
image = _resize_image(image, height, width)
with session.Session('') as sess:
with queues.QueueRunners(sess):
image, label = sess.run([image, label])
self.assertListEqual([height, width, 3], list(image.shape))
self.assertListEqual([1], list(label.shape))
def testConflictingRecordKeyItem(self):
dataset_dir = tempfile.mkdtemp(prefix=os.path.join(self.get_temp_dir(),
'tfrecord_dataset'))
with self.cached_session():
with self.assertRaises(ValueError):
dataset_data_provider.DatasetDataProvider(
_create_tfrecord_dataset(dataset_dir), record_key='image')
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/data/dataset_data_provider_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains helper functions and classes necessary for decoding data.
While data providers read data from disk, sstables or other formats, data
decoders decode the data (if necessary). A data decoder is provided with a
serialized or encoded piece of data as well as a list of items and
returns a set of tensors, each of which corresponds to the requested list of
items extracted from the data:
def Decode(self, data, items):
...
For example, if data is a compressed map, the implementation might be:
def Decode(self, data, items):
decompressed_map = _Decompress(data)
outputs = []
for item in items:
outputs.append(decompressed_map[item])
    return outputs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class DataDecoder(object):
"""An abstract class which is used to decode data for a provider."""
@abc.abstractmethod
def decode(self, data, items):
"""Decodes the data to returns the tensors specified by the list of items.
Args:
data: A possibly encoded data format.
items: A list of strings, each of which indicate a particular data type.
Returns:
A list of `Tensors`, whose length matches the length of `items`, where
each `Tensor` corresponds to each item.
Raises:
ValueError: If any of the items cannot be satisfied.
"""
pass
@abc.abstractmethod
def list_items(self):
"""Lists the names of the items that the decoder can decode.
Returns:
A list of string names.
"""
pass
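# A minimal concrete decoder sketch (an illustrative assumption, not part of
# the original module): records are already dicts mapping item names to
# tensors, so decoding reduces to a lookup.
class _DictDecoder(DataDecoder):
  """Toy decoder over a fixed dict of item-name -> tensor."""
  def __init__(self, items_to_tensors):
    self._items_to_tensors = items_to_tensors
  def decode(self, data, items):
    # `data` is ignored because the tensors were precomputed.
    return [self._items_to_tensors[item] for item in items]
  def list_items(self):
    return list(self._items_to_tensors.keys())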
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/data/data_decoder.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.data.parallel_reader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.slim.python.slim import queues
from tensorflow.contrib.slim.python.slim.data import parallel_reader
from tensorflow.contrib.slim.python.slim.data import test_utils
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import supervisor
class ParallelReaderTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def _verify_all_data_sources_read(self, shared_queue):
with self.cached_session():
tfrecord_paths = test_utils.create_tfrecord_files(
self.get_temp_dir(), num_files=3)
num_readers = len(tfrecord_paths)
p_reader = parallel_reader.ParallelReader(
io_ops.TFRecordReader, shared_queue, num_readers=num_readers)
data_files = parallel_reader.get_data_files(tfrecord_paths)
filename_queue = input_lib.string_input_producer(data_files)
key, value = p_reader.read(filename_queue)
count0 = 0
count1 = 0
count2 = 0
num_reads = 50
sv = supervisor.Supervisor(logdir=self.get_temp_dir())
with sv.prepare_or_wait_for_session() as sess:
sv.start_queue_runners(sess)
for _ in range(num_reads):
current_key, _ = sess.run([key, value])
if '0-of-3' in str(current_key):
count0 += 1
if '1-of-3' in str(current_key):
count1 += 1
if '2-of-3' in str(current_key):
count2 += 1
self.assertGreater(count0, 0)
self.assertGreater(count1, 0)
self.assertGreater(count2, 0)
self.assertEquals(count0 + count1 + count2, num_reads)
def _verify_read_up_to_out(self, shared_queue):
with self.cached_session():
num_files = 3
num_records_per_file = 7
tfrecord_paths = test_utils.create_tfrecord_files(
self.get_temp_dir(),
num_files=num_files,
num_records_per_file=num_records_per_file)
p_reader = parallel_reader.ParallelReader(
io_ops.TFRecordReader, shared_queue, num_readers=5)
data_files = parallel_reader.get_data_files(tfrecord_paths)
filename_queue = input_lib.string_input_producer(data_files, num_epochs=1)
key, value = p_reader.read_up_to(filename_queue, 4)
count0 = 0
count1 = 0
count2 = 0
all_keys_count = 0
all_values_count = 0
sv = supervisor.Supervisor(logdir=self.get_temp_dir())
with sv.prepare_or_wait_for_session() as sess:
sv.start_queue_runners(sess)
while True:
try:
current_keys, current_values = sess.run([key, value])
self.assertEquals(len(current_keys), len(current_values))
all_keys_count += len(current_keys)
all_values_count += len(current_values)
for current_key in current_keys:
if '0-of-3' in str(current_key):
count0 += 1
if '1-of-3' in str(current_key):
count1 += 1
if '2-of-3' in str(current_key):
count2 += 1
except errors_impl.OutOfRangeError:
break
self.assertEquals(count0, num_records_per_file)
self.assertEquals(count1, num_records_per_file)
self.assertEquals(count2, num_records_per_file)
self.assertEquals(
all_keys_count,
num_files * num_records_per_file)
self.assertEquals(all_values_count, all_keys_count)
self.assertEquals(
count0 + count1 + count2,
all_keys_count)
def testRandomShuffleQueue(self):
shared_queue = data_flow_ops.RandomShuffleQueue(
capacity=256,
min_after_dequeue=128,
dtypes=[dtypes_lib.string, dtypes_lib.string])
self._verify_all_data_sources_read(shared_queue)
def testFIFOSharedQueue(self):
shared_queue = data_flow_ops.FIFOQueue(
capacity=256, dtypes=[dtypes_lib.string, dtypes_lib.string])
self._verify_all_data_sources_read(shared_queue)
def testReadUpToFromRandomShuffleQueue(self):
shared_queue = data_flow_ops.RandomShuffleQueue(
capacity=55,
min_after_dequeue=28,
dtypes=[dtypes_lib.string, dtypes_lib.string],
shapes=[tensor_shape.scalar(), tensor_shape.scalar()])
self._verify_read_up_to_out(shared_queue)
def testReadUpToFromFIFOQueue(self):
shared_queue = data_flow_ops.FIFOQueue(
capacity=99,
dtypes=[dtypes_lib.string, dtypes_lib.string],
shapes=[tensor_shape.scalar(), tensor_shape.scalar()])
self._verify_read_up_to_out(shared_queue)
class ParallelReadTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testTFRecordReader(self):
with self.cached_session():
self._tfrecord_paths = test_utils.create_tfrecord_files(
self.get_temp_dir(), num_files=3)
key, value = parallel_reader.parallel_read(
self._tfrecord_paths, reader_class=io_ops.TFRecordReader, num_readers=3)
sv = supervisor.Supervisor(logdir=self.get_temp_dir())
with sv.prepare_or_wait_for_session() as sess:
sv.start_queue_runners(sess)
flowers = 0
num_reads = 100
for _ in range(num_reads):
current_key, _ = sess.run([key, value])
if 'flowers' in str(current_key):
flowers += 1
self.assertGreater(flowers, 0)
self.assertEquals(flowers, num_reads)
class SinglePassReadTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testOutOfRangeError(self):
with self.cached_session():
[tfrecord_path] = test_utils.create_tfrecord_files(
self.get_temp_dir(), num_files=1)
key, value = parallel_reader.single_pass_read(
tfrecord_path, reader_class=io_ops.TFRecordReader)
init_op = variables.local_variables_initializer()
with self.cached_session() as sess:
sess.run(init_op)
with queues.QueueRunners(sess):
num_reads = 11
with self.assertRaises(errors_impl.OutOfRangeError):
for _ in range(num_reads):
sess.run([key, value])
def testTFRecordReader(self):
with self.cached_session():
[tfrecord_path] = test_utils.create_tfrecord_files(
self.get_temp_dir(), num_files=1)
key, value = parallel_reader.single_pass_read(
tfrecord_path, reader_class=io_ops.TFRecordReader)
init_op = variables.local_variables_initializer()
with self.cached_session() as sess:
sess.run(init_op)
with queues.QueueRunners(sess):
flowers = 0
num_reads = 9
for _ in range(num_reads):
current_key, _ = sess.run([key, value])
if 'flowers' in str(current_key):
flowers += 1
self.assertGreater(flowers, 0)
self.assertEquals(flowers, num_reads)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/data/parallel_reader_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition of a Dataset.
A Dataset is a collection of several components: (1) a list of data sources
(2) a Reader class that can read those sources and return possibly encoded
samples of data (3) a decoder that decodes each sample of data provided by the
reader (4) the total number of samples and (5) an optional dictionary mapping
the list of returned items to a description of those items.
Data can be loaded from a dataset specification using a dataset_data_provider:
dataset = CreateMyDataset(...)
provider = dataset_data_provider.DatasetDataProvider(
dataset, shuffle=False)
image, label = provider.get(['image', 'label'])
See slim.data.dataset_data_provider for additional examples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Dataset(object):
"""Represents a Dataset specification."""
def __init__(self, data_sources, reader, decoder, num_samples,
items_to_descriptions, **kwargs):
"""Initializes the dataset.
Args:
data_sources: A list of files that make up the dataset.
reader: The reader class, a subclass of BaseReader such as TextLineReader
or TFRecordReader.
decoder: An instance of a data_decoder.
num_samples: The number of samples in the dataset.
items_to_descriptions: A map from the items that the dataset provides to
the descriptions of those items.
**kwargs: Any remaining dataset-specific fields.
"""
kwargs['data_sources'] = data_sources
kwargs['reader'] = reader
kwargs['decoder'] = decoder
kwargs['num_samples'] = num_samples
kwargs['items_to_descriptions'] = items_to_descriptions
self.__dict__.update(kwargs)
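# Editor's sketch (assumption, not original code): assembling a Dataset from
# TFRecord files plus a TF-Example decoder. The file list, sample count and
# item descriptions below are placeholders only.
def _make_example_dataset():
  from tensorflow.contrib.slim.python.slim.data import tfexample_decoder
  from tensorflow.python.framework import dtypes
  from tensorflow.python.ops import io_ops
  from tensorflow.python.ops import parsing_ops

  keys_to_features = {
      'image/encoded':
          parsing_ops.FixedLenFeature((), dtypes.string, default_value=''),
      'image/format':
          parsing_ops.FixedLenFeature((), dtypes.string, default_value='jpeg'),
      'label':
          parsing_ops.FixedLenFeature([], dtypes.int64, default_value=0),
  }
  items_to_handlers = {
      'image': tfexample_decoder.Image(),
      'label': tfexample_decoder.Tensor('label'),
  }
  decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
                                               items_to_handlers)
  return Dataset(
      data_sources=['/tmp/my_dataset/train.tfrecord'],  # placeholder path
      reader=io_ops.TFRecordReader,
      decoder=decoder,
      num_samples=10000,  # placeholder count
      items_to_descriptions={
          'image': 'A color image.',
          'label': 'An integer class id.'
      })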
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/data/dataset.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.data.tfexample_decoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sys
from tensorflow.contrib.slim.python.slim.data import tfexample_decoder
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
class TFExampleDecoderTest(test.TestCase):
def _EncodedFloatFeature(self, ndarray):
return feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=ndarray.flatten().tolist()))
def _EncodedInt64Feature(self, ndarray):
return feature_pb2.Feature(
int64_list=feature_pb2.Int64List(value=ndarray.flatten().tolist()))
def _EncodedBytesFeature(self, tf_encoded):
with self.cached_session():
encoded = tf_encoded.eval()
def BytesList(value):
return feature_pb2.BytesList(value=[value])
return feature_pb2.Feature(bytes_list=BytesList(encoded))
def _BytesFeature(self, ndarray):
values = ndarray.flatten().tolist()
for i in range(len(values)):
values[i] = values[i].encode('utf-8')
return feature_pb2.Feature(bytes_list=feature_pb2.BytesList(value=values))
def _StringFeature(self, value):
value = value.encode('utf-8')
return feature_pb2.Feature(bytes_list=feature_pb2.BytesList(value=[value]))
def _Encoder(self, image, image_format):
assert image_format in ['jpeg', 'JPEG', 'png', 'PNG', 'raw', 'RAW']
if image_format in ['jpeg', 'JPEG']:
tf_image = constant_op.constant(image, dtype=dtypes.uint8)
return image_ops.encode_jpeg(tf_image)
if image_format in ['png', 'PNG']:
tf_image = constant_op.constant(image, dtype=dtypes.uint8)
return image_ops.encode_png(tf_image)
if image_format in ['raw', 'RAW']:
# If the machine is big endian, convert float32 data to little-endian byte
# order so that the raw bytes are interpreted consistently when decoded.
if image.dtype == np.float32 and sys.byteorder == 'big':
image = image.astype('<f4')
return constant_op.constant(image.tostring(), dtype=dtypes.string)
def GenerateImage(self, image_format, image_shape, image_dtype=np.uint8):
"""Generates an image and an example containing the encoded image.
Args:
image_format: the encoding format of the image.
image_shape: the shape of the image to generate.
image_dtype: the dtype of values in the image. Only 'raw' image can have
type different than uint8.
Returns:
image: the generated image.
example: a TF-example with a feature key 'image/encoded' set to the
serialized image and a feature key 'image/format' set to the image
encoding format ['jpeg', 'JPEG', 'png', 'PNG', 'raw'].
"""
assert image_format in ['raw', 'RAW'] or image_dtype == np.uint8
num_pixels = image_shape[0] * image_shape[1] * image_shape[2]
image = np.linspace(
0, num_pixels - 1,
num=num_pixels).reshape(image_shape).astype(image_dtype)
tf_encoded = self._Encoder(image, image_format)
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'image/encoded': self._EncodedBytesFeature(tf_encoded),
'image/format': self._StringFeature(image_format)
}))
return image, example.SerializeToString()
def DecodeExample(self, serialized_example, item_handler, image_format):
"""Decodes the given serialized example with the specified item handler.
Args:
serialized_example: a serialized TF example string.
item_handler: the item handler used to decode the image.
image_format: the image format being decoded.
Returns:
the decoded image found in the serialized Example.
"""
serialized_example = array_ops.reshape(serialized_example, shape=[])
decoder = tfexample_decoder.TFExampleDecoder(
keys_to_features={
'image/encoded':
parsing_ops.FixedLenFeature(
(), dtypes.string, default_value=''),
'image/format':
parsing_ops.FixedLenFeature(
(), dtypes.string, default_value=image_format),
},
items_to_handlers={'image': item_handler})
[tf_image] = decoder.decode(serialized_example, ['image'])
return tf_image
def RunDecodeExample(self, serialized_example, item_handler, image_format):
tf_image = self.DecodeExample(serialized_example, item_handler,
image_format)
with self.cached_session():
decoded_image = tf_image.eval()
# We need to recast them here to avoid some issues with uint8.
return decoded_image.astype(np.float32)
def testDecodeExampleWithJpegEncoding(self):
image_shape = (2, 3, 3)
image, serialized_example = self.GenerateImage(
image_format='jpeg', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example, tfexample_decoder.Image(), image_format='jpeg')
# Need to use a tolerance of 1 because of noise in the jpeg encode/decode
self.assertAllClose(image, decoded_image, atol=1.001)
def testDecodeExampleWithJPEGEncoding(self):
test_image_channels = [1, 3]
for channels in test_image_channels:
image_shape = (2, 3, channels)
image, serialized_example = self.GenerateImage(
image_format='JPEG', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(channels=channels),
image_format='JPEG')
# Need to use a tolerance of 1 because of noise in the jpeg encode/decode
self.assertAllClose(image, decoded_image, atol=1.001)
def testDecodeExampleWithNoShapeInfo(self):
test_image_channels = [1, 3]
for channels in test_image_channels:
image_shape = (2, 3, channels)
_, serialized_example = self.GenerateImage(
image_format='jpeg', image_shape=image_shape)
tf_decoded_image = self.DecodeExample(
serialized_example,
tfexample_decoder.Image(shape=None, channels=channels),
image_format='jpeg')
self.assertEqual(tf_decoded_image.get_shape().ndims, 3)
def testDecodeExampleWithPngEncoding(self):
test_image_channels = [1, 3, 4]
for channels in test_image_channels:
image_shape = (2, 3, channels)
image, serialized_example = self.GenerateImage(
image_format='png', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(channels=channels),
image_format='png')
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithPNGEncoding(self):
test_image_channels = [1, 3, 4]
for channels in test_image_channels:
image_shape = (2, 3, channels)
image, serialized_example = self.GenerateImage(
image_format='PNG', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(channels=channels),
image_format='PNG')
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithRawEncoding(self):
image_shape = (2, 3, 3)
image, serialized_example = self.GenerateImage(
image_format='raw', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(shape=image_shape),
image_format='raw')
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithRAWEncoding(self):
image_shape = (2, 3, 3)
image, serialized_example = self.GenerateImage(
image_format='RAW', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(shape=image_shape),
image_format='RAW')
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithRawEncodingFloatDtype(self):
image_shape = (2, 3, 3)
image, serialized_example = self.GenerateImage(
image_format='raw', image_shape=image_shape, image_dtype=np.float32)
decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(shape=image_shape, dtype=dtypes.float32),
image_format='raw')
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithJpegEncodingAt16BitDoesNotCauseError(self):
image_shape = (2, 3, 3)
# Image has type uint8 but decoding at uint16 should not cause problems.
image, serialized_example = self.GenerateImage(
image_format='jpeg', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(dtype=dtypes.uint16),
image_format='jpeg')
self.assertAllClose(image, decoded_image, atol=1.001)
def testDecodeExampleWithStringTensor(self):
tensor_shape = (2, 3, 1)
np_array = np.array([[['ab'], ['cd'], ['ef']],
[['ghi'], ['jkl'], ['mnop']]])
example = example_pb2.Example(
features=feature_pb2.Features(feature={
'labels': self._BytesFeature(np_array),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'labels':
parsing_ops.FixedLenFeature(
tensor_shape,
dtypes.string,
default_value=constant_op.constant(
'', shape=tensor_shape, dtype=dtypes.string))
}
items_to_handlers = {
'labels': tfexample_decoder.Tensor('labels'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
labels = labels.astype(np_array.dtype)
self.assertTrue(np.array_equal(np_array, labels))
def testDecodeExampleWithFloatTensor(self):
np_array = np.random.rand(2, 3, 1).astype('f')
example = example_pb2.Example(
features=feature_pb2.Features(feature={
'array': self._EncodedFloatFeature(np_array),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'array': parsing_ops.FixedLenFeature(np_array.shape, dtypes.float32)
}
items_to_handlers = {
'array': tfexample_decoder.Tensor('array'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_array] = decoder.decode(serialized_example, ['array'])
self.assertAllEqual(tf_array.eval(), np_array)
def testDecodeExampleWithInt64Tensor(self):
np_array = np.random.randint(1, 10, size=(2, 3, 1))
example = example_pb2.Example(
features=feature_pb2.Features(feature={
'array': self._EncodedInt64Feature(np_array),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'array': parsing_ops.FixedLenFeature(np_array.shape, dtypes.int64)
}
items_to_handlers = {
'array': tfexample_decoder.Tensor('array'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_array] = decoder.decode(serialized_example, ['array'])
self.assertAllEqual(tf_array.eval(), np_array)
def testDecodeExampleWithVarLenTensor(self):
np_array = np.array([[[1], [2], [3]], [[4], [5], [6]]])
example = example_pb2.Example(
features=feature_pb2.Features(feature={
'labels': self._EncodedInt64Feature(np_array),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'labels': parsing_ops.VarLenFeature(dtype=dtypes.int64),
}
items_to_handlers = {
'labels': tfexample_decoder.Tensor('labels'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels, np_array.flatten())
def testDecodeExampleWithFixLenTensorWithShape(self):
np_array = np.array([[1, 2, 3], [4, 5, 6]])
example = example_pb2.Example(
features=feature_pb2.Features(feature={
'labels': self._EncodedInt64Feature(np_array),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'labels':
parsing_ops.FixedLenFeature(np_array.shape, dtype=dtypes.int64),
}
items_to_handlers = {
'labels': tfexample_decoder.Tensor('labels', shape=np_array.shape),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels, np_array)
def testDecodeExampleWithVarLenTensorToDense(self):
np_array = np.array([[1, 2, 3], [4, 5, 6]])
example = example_pb2.Example(
features=feature_pb2.Features(feature={
'labels': self._EncodedInt64Feature(np_array),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'labels': parsing_ops.VarLenFeature(dtype=dtypes.int64),
}
items_to_handlers = {
'labels': tfexample_decoder.Tensor('labels', shape=np_array.shape),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels, np_array)
def testDecodeExampleShapeKeyTensor(self):
np_image = np.random.rand(2, 3, 1).astype('f')
np_labels = np.array([[[1], [2], [3]], [[4], [5], [6]]])
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'image':
self._EncodedFloatFeature(np_image),
'image/shape':
self._EncodedInt64Feature(np.array(np_image.shape)),
'labels':
self._EncodedInt64Feature(np_labels),
'labels/shape':
self._EncodedInt64Feature(np.array(np_labels.shape)),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'image': parsing_ops.VarLenFeature(dtype=dtypes.float32),
'image/shape': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'labels': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'labels/shape': parsing_ops.VarLenFeature(dtype=dtypes.int64),
}
items_to_handlers = {
'image':
tfexample_decoder.Tensor('image', shape_keys='image/shape'),
'labels':
tfexample_decoder.Tensor('labels', shape_keys='labels/shape'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_image, tf_labels] = decoder.decode(serialized_example,
['image', 'labels'])
self.assertAllEqual(tf_image.eval(), np_image)
self.assertAllEqual(tf_labels.eval(), np_labels)
def testDecodeExampleMultiShapeKeyTensor(self):
np_image = np.random.rand(2, 3, 1).astype('f')
np_labels = np.array([[[1], [2], [3]], [[4], [5], [6]]])
height, width, depth = np_labels.shape
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'image':
self._EncodedFloatFeature(np_image),
'image/shape':
self._EncodedInt64Feature(np.array(np_image.shape)),
'labels':
self._EncodedInt64Feature(np_labels),
'labels/height':
self._EncodedInt64Feature(np.array([height])),
'labels/width':
self._EncodedInt64Feature(np.array([width])),
'labels/depth':
self._EncodedInt64Feature(np.array([depth])),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'image': parsing_ops.VarLenFeature(dtype=dtypes.float32),
'image/shape': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'labels': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'labels/height': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'labels/width': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'labels/depth': parsing_ops.VarLenFeature(dtype=dtypes.int64),
}
items_to_handlers = {
'image':
tfexample_decoder.Tensor('image', shape_keys='image/shape'),
'labels':
tfexample_decoder.Tensor(
'labels',
shape_keys=['labels/height', 'labels/width', 'labels/depth']),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_image, tf_labels] = decoder.decode(serialized_example,
['image', 'labels'])
self.assertAllEqual(tf_image.eval(), np_image)
self.assertAllEqual(tf_labels.eval(), np_labels)
def testDecodeExampleWithSparseTensor(self):
np_indices = np.array([[1], [2], [5]])
np_values = np.array([0.1, 0.2, 0.6]).astype('f')
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'indices': self._EncodedInt64Feature(np_indices),
'values': self._EncodedFloatFeature(np_values),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'indices': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'values': parsing_ops.VarLenFeature(dtype=dtypes.float32),
}
items_to_handlers = {
'labels': tfexample_decoder.SparseTensor(),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels.indices, np_indices)
self.assertAllEqual(labels.values, np_values)
self.assertAllEqual(labels.dense_shape, np_values.shape)
def testDecodeExampleWithSparseTensorWithKeyShape(self):
np_indices = np.array([[1], [2], [5]])
np_values = np.array([0.1, 0.2, 0.6]).astype('f')
np_shape = np.array([6])
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'indices': self._EncodedInt64Feature(np_indices),
'values': self._EncodedFloatFeature(np_values),
'shape': self._EncodedInt64Feature(np_shape),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'indices': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'values': parsing_ops.VarLenFeature(dtype=dtypes.float32),
'shape': parsing_ops.VarLenFeature(dtype=dtypes.int64),
}
items_to_handlers = {
'labels': tfexample_decoder.SparseTensor(shape_key='shape'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels.indices, np_indices)
self.assertAllEqual(labels.values, np_values)
self.assertAllEqual(labels.dense_shape, np_shape)
def testDecodeExampleWithSparseTensorWithGivenShape(self):
np_indices = np.array([[1], [2], [5]])
np_values = np.array([0.1, 0.2, 0.6]).astype('f')
np_shape = np.array([6])
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'indices': self._EncodedInt64Feature(np_indices),
'values': self._EncodedFloatFeature(np_values),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'indices': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'values': parsing_ops.VarLenFeature(dtype=dtypes.float32),
}
items_to_handlers = {
'labels': tfexample_decoder.SparseTensor(shape=np_shape),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels.indices, np_indices)
self.assertAllEqual(labels.values, np_values)
self.assertAllEqual(labels.dense_shape, np_shape)
def testDecodeExampleWithSparseTensorToDense(self):
np_indices = np.array([1, 2, 5])
np_values = np.array([0.1, 0.2, 0.6]).astype('f')
np_shape = np.array([6])
np_dense = np.array([0.0, 0.1, 0.2, 0.0, 0.0, 0.6]).astype('f')
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'indices': self._EncodedInt64Feature(np_indices),
'values': self._EncodedFloatFeature(np_values),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'indices': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'values': parsing_ops.VarLenFeature(dtype=dtypes.float32),
}
items_to_handlers = {
'labels':
tfexample_decoder.SparseTensor(shape=np_shape, densify=True),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllClose(labels, np_dense)
def testDecodeExampleWithTensor(self):
tensor_shape = (2, 3, 1)
np_array = np.random.rand(2, 3, 1)
example = example_pb2.Example(
features=feature_pb2.Features(feature={
'image/depth_map': self._EncodedFloatFeature(np_array),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'image/depth_map':
parsing_ops.FixedLenFeature(
tensor_shape,
dtypes.float32,
default_value=array_ops.zeros(tensor_shape))
}
items_to_handlers = {'depth': tfexample_decoder.Tensor('image/depth_map')}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_depth] = decoder.decode(serialized_example, ['depth'])
depth = tf_depth.eval()
self.assertAllClose(np_array, depth)
def testDecodeExampleWithItemHandlerCallback(self):
np.random.seed(0)
tensor_shape = (2, 3, 1)
np_array = np.random.rand(2, 3, 1)
example = example_pb2.Example(
features=feature_pb2.Features(feature={
'image/depth_map': self._EncodedFloatFeature(np_array),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'image/depth_map':
parsing_ops.FixedLenFeature(
tensor_shape,
dtypes.float32,
default_value=array_ops.zeros(tensor_shape))
}
def HandleDepth(keys_to_tensors):
depth = list(keys_to_tensors.values())[0]
depth += 1
return depth
items_to_handlers = {
'depth':
tfexample_decoder.ItemHandlerCallback('image/depth_map',
HandleDepth)
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_depth] = decoder.decode(serialized_example, ['depth'])
depth = tf_depth.eval()
self.assertAllClose(np_array, depth - 1)
def testDecodeImageWithItemHandlerCallback(self):
image_shape = (2, 3, 3)
for image_encoding in ['jpeg', 'png']:
image, serialized_example = self.GenerateImage(
image_format=image_encoding, image_shape=image_shape)
with self.cached_session():
def ConditionalDecoding(keys_to_tensors):
"""See base class."""
image_buffer = keys_to_tensors['image/encoded']
image_format = keys_to_tensors['image/format']
def DecodePng():
return image_ops.decode_png(image_buffer, 3)
def DecodeJpg():
return image_ops.decode_jpeg(image_buffer, 3)
image = control_flow_ops.case(
{
math_ops.equal(image_format, 'png'): DecodePng,
},
default=DecodeJpg,
exclusive=True)
image = array_ops.reshape(image, image_shape)
return image
keys_to_features = {
'image/encoded':
parsing_ops.FixedLenFeature(
(), dtypes.string, default_value=''),
'image/format':
parsing_ops.FixedLenFeature(
(), dtypes.string, default_value='jpeg')
}
items_to_handlers = {
'image':
tfexample_decoder.ItemHandlerCallback(
['image/encoded', 'image/format'], ConditionalDecoding)
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_image] = decoder.decode(serialized_example, ['image'])
decoded_image = tf_image.eval()
if image_encoding == 'jpeg':
# For jenkins:
image = image.astype(np.float32)
decoded_image = decoded_image.astype(np.float32)
self.assertAllClose(image, decoded_image, rtol=.5, atol=1.001)
else:
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithBoundingBoxSparse(self):
num_bboxes = 10
np_ymin = np.random.rand(num_bboxes, 1)
np_xmin = np.random.rand(num_bboxes, 1)
np_ymax = np.random.rand(num_bboxes, 1)
np_xmax = np.random.rand(num_bboxes, 1)
np_bboxes = np.hstack([np_ymin, np_xmin, np_ymax, np_xmax])
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'image/object/bbox/ymin': self._EncodedFloatFeature(np_ymin),
'image/object/bbox/xmin': self._EncodedFloatFeature(np_xmin),
'image/object/bbox/ymax': self._EncodedFloatFeature(np_ymax),
'image/object/bbox/xmax': self._EncodedFloatFeature(np_xmax),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'image/object/bbox/ymin': parsing_ops.VarLenFeature(dtypes.float32),
'image/object/bbox/xmin': parsing_ops.VarLenFeature(dtypes.float32),
'image/object/bbox/ymax': parsing_ops.VarLenFeature(dtypes.float32),
'image/object/bbox/xmax': parsing_ops.VarLenFeature(dtypes.float32),
}
items_to_handlers = {
'object/bbox':
tfexample_decoder.BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'],
'image/object/bbox/'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_bboxes] = decoder.decode(serialized_example, ['object/bbox'])
bboxes = tf_bboxes.eval()
self.assertAllClose(np_bboxes, bboxes)
def testDecodeExampleWithBoundingBoxDense(self):
num_bboxes = 10
np_ymin = np.random.rand(num_bboxes, 1)
np_xmin = np.random.rand(num_bboxes, 1)
np_ymax = np.random.rand(num_bboxes, 1)
np_xmax = np.random.rand(num_bboxes, 1)
np_bboxes = np.hstack([np_ymin, np_xmin, np_ymax, np_xmax])
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'image/object/bbox/ymin': self._EncodedFloatFeature(np_ymin),
'image/object/bbox/xmin': self._EncodedFloatFeature(np_xmin),
'image/object/bbox/ymax': self._EncodedFloatFeature(np_ymax),
'image/object/bbox/xmax': self._EncodedFloatFeature(np_xmax),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'image/object/bbox/ymin':
parsing_ops.FixedLenSequenceFeature(
[], dtypes.float32, allow_missing=True),
'image/object/bbox/xmin':
parsing_ops.FixedLenSequenceFeature(
[], dtypes.float32, allow_missing=True),
'image/object/bbox/ymax':
parsing_ops.FixedLenSequenceFeature(
[], dtypes.float32, allow_missing=True),
'image/object/bbox/xmax':
parsing_ops.FixedLenSequenceFeature(
[], dtypes.float32, allow_missing=True),
}
items_to_handlers = {
'object/bbox':
tfexample_decoder.BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'],
'image/object/bbox/'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_bboxes] = decoder.decode(serialized_example, ['object/bbox'])
bboxes = tf_bboxes.eval()
self.assertAllClose(np_bboxes, bboxes)
def testDecodeExampleWithRepeatedImages(self):
image_shape = (2, 3, 3)
image_format = 'png'
image, _ = self.GenerateImage(
image_format=image_format, image_shape=image_shape)
tf_encoded = self._Encoder(image, image_format)
with self.cached_session():
tf_string = tf_encoded.eval()
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'image/encoded':
feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[tf_string, tf_string])),
'image/format':
self._StringFeature(image_format),
}))
serialized_example = example.SerializeToString()
with self.cached_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
decoder = tfexample_decoder.TFExampleDecoder(
keys_to_features={
'image/encoded':
parsing_ops.FixedLenFeature((2,), dtypes.string),
'image/format':
parsing_ops.FixedLenFeature(
(), dtypes.string, default_value=image_format),
},
items_to_handlers={'image': tfexample_decoder.Image(repeated=True)})
[tf_image] = decoder.decode(serialized_example, ['image'])
output_image = tf_image.eval()
self.assertEqual(output_image.shape, (2, 2, 3, 3))
self.assertAllEqual(np.squeeze(output_image[0, :, :, :]), image)
self.assertAllEqual(np.squeeze(output_image[1, :, :, :]), image)
def testDecodeExampleWithLookup(self):
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'image/object/class/text':
self._BytesFeature(np.array(['cat', 'dog', 'guinea pig'])),
}))
serialized_example = example.SerializeToString()
# 'dog' -> 0, 'guinea pig' -> 1, 'cat' -> 2
table = lookup_ops.index_table_from_tensor(
constant_op.constant(['dog', 'guinea pig', 'cat']))
with self.cached_session() as sess:
sess.run(lookup_ops.tables_initializer())
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'image/object/class/text': parsing_ops.VarLenFeature(dtypes.string),
}
items_to_handlers = {
'labels':
tfexample_decoder.LookupTensor('image/object/class/text', table),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
obtained_class_ids = decoder.decode(serialized_example)[0].eval()
self.assertAllClose([2, 0, 1], obtained_class_ids)
def testDecodeExampleWithBackupHandlerLookup(self):
example1 = example_pb2.Example(
features=feature_pb2.Features(
feature={
'image/object/class/text':
self._BytesFeature(np.array(['cat', 'dog', 'guinea pig'])),
'image/object/class/label':
self._EncodedInt64Feature(np.array([42, 10, 900]))
}))
example2 = example_pb2.Example(
features=feature_pb2.Features(
feature={
'image/object/class/text':
self._BytesFeature(np.array(['cat', 'dog', 'guinea pig'])),
}))
example3 = example_pb2.Example(
features=feature_pb2.Features(
feature={
'image/object/class/label':
self._EncodedInt64Feature(np.array([42, 10, 901]))
}))
# 'dog' -> 0, 'guinea pig' -> 1, 'cat' -> 2
table = lookup_ops.index_table_from_tensor(
constant_op.constant(['dog', 'guinea pig', 'cat']))
keys_to_features = {
'image/object/class/text': parsing_ops.VarLenFeature(dtypes.string),
'image/object/class/label': parsing_ops.VarLenFeature(dtypes.int64),
}
backup_handler = tfexample_decoder.BackupHandler(
handler=tfexample_decoder.Tensor('image/object/class/label'),
backup=tfexample_decoder.LookupTensor('image/object/class/text', table))
items_to_handlers = {
'labels': backup_handler,
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
obtained_class_ids_each_example = []
with self.cached_session() as sess:
sess.run(lookup_ops.tables_initializer())
for example in [example1, example2, example3]:
serialized_example = array_ops.reshape(
example.SerializeToString(), shape=[])
obtained_class_ids_each_example.append(
decoder.decode(serialized_example)[0].eval())
self.assertAllClose([42, 10, 900], obtained_class_ids_each_example[0])
self.assertAllClose([2, 0, 1], obtained_class_ids_each_example[1])
self.assertAllClose([42, 10, 901], obtained_class_ids_each_example[2])
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/data/tfexample_decoder_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A DataProvider that provides data from a Dataset.
DatasetDataProviders provide data from datasets. The provider can be configured
to use multiple readers simultaneously or read via a single reader.
Additionally, the data being read can be optionally shuffled.
For example, to read data using a single thread without shuffling:
pascal_voc_data_provider = DatasetDataProvider(
slim.datasets.pascal_voc.get_split('train'),
shuffle=False)
images, labels = pascal_voc_data_provider.get(['images', 'labels'])
To read data using multiple readers simultaneously with shuffling:
pascal_voc_data_provider = DatasetDataProvider(
slim.datasets.pascal_voc.Dataset(),
num_readers=10,
shuffle=True)
images, labels = pascal_voc_data_provider.get(['images', 'labels'])
Equivalently, one may request different fields of the same sample separately:
[images] = pascal_voc_data_provider.get(['images'])
[labels] = pascal_voc_data_provider.get(['labels'])
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.slim.python.slim.data import data_provider
from tensorflow.contrib.slim.python.slim.data import parallel_reader
class DatasetDataProvider(data_provider.DataProvider):
def __init__(self,
dataset,
num_readers=1,
reader_kwargs=None,
shuffle=True,
num_epochs=None,
common_queue_capacity=256,
common_queue_min=128,
record_key='record_key',
seed=None,
scope=None):
"""Creates a DatasetDataProvider.
Note: if `num_epochs` is not `None`, local counter `epochs` will be created
by relevant function. Use `local_variables_initializer()` to initialize
local variables.
Args:
dataset: An instance of the Dataset class.
num_readers: The number of parallel readers to use.
reader_kwargs: An optional dict of kwargs for the reader.
shuffle: Whether to shuffle the data sources and common queue when
reading.
num_epochs: The number of times each data source is read. If left as None,
the data will be cycled through indefinitely.
common_queue_capacity: The capacity of the common queue.
common_queue_min: The minimum number of elements in the common queue after
a dequeue.
record_key: The item name to use for the dataset record keys in the
provided tensors.
seed: The seed to use if shuffling.
scope: Optional name scope for the ops.
Raises:
ValueError: If `record_key` matches one of the items in the dataset.
"""
key, data = parallel_reader.parallel_read(
dataset.data_sources,
reader_class=dataset.reader,
num_epochs=num_epochs,
num_readers=num_readers,
reader_kwargs=reader_kwargs,
shuffle=shuffle,
capacity=common_queue_capacity,
min_after_dequeue=common_queue_min,
seed=seed,
scope=scope)
items = dataset.decoder.list_items()
tensors = dataset.decoder.decode(data, items)
items_to_tensors = dict(zip(items, tensors))
if record_key in items_to_tensors:
raise ValueError('The item name used for `record_key` cannot also be '
                 'used for a dataset item: %s' % record_key)
items_to_tensors[record_key] = key
super(DatasetDataProvider, self).__init__(
items_to_tensors=items_to_tensors,
num_samples=dataset.num_samples)
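# Editor's sketch (assumption, not original code): end-to-end wiring of a
# Dataset with this provider. `my_dataset` stands for any Dataset instance and
# the item names 'image'/'label' are assumptions about its decoder; when
# `num_epochs` is set, local variables must be initialized before use.
def _example_usage(my_dataset):
  from tensorflow.python.ops import variables as variables_lib

  provider = DatasetDataProvider(
      my_dataset, num_readers=4, shuffle=True, num_epochs=1)
  [image, label] = provider.get(['image', 'label'])
  init_local_op = variables_lib.local_variables_initializer()
  return image, label, init_local_op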
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/data/dataset_data_provider.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains code for the DataProvider.
A DataProvider is a class which provides some predefined data types from some
source (TFRecord, etc). The most basic function of a
data provider is the `Get` operation where one requests one or more types of
data, or 'items':
provider.get(items=['image', 'sentence', 'class'])
More concretely, a data provider (a subclass of DataProvider) returns a
single tensor for each requested item (data type):
provider = MyDataProvider(...)
image, sentence, clazz = provider.get(['image', 'sentence', 'class'])
In this example, the provider `MyDataProvider` must know how to load each item.
A data provider may be written in a way that the logic necessary to map from
each item to tensor is completely encapsulated within the data_provider itself.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class DataProvider(object):
"""Maps a list of requested data items to tensors from a data source.
All data providers must inherit from DataProvider and implement the Get
method which returns arbitrary types of data. No assumption is made about the
source of the data nor the mechanism for providing it.
"""
def __init__(self, items_to_tensors, num_samples):
"""Constructs the Data Provider.
Args:
items_to_tensors: a dictionary of names to tensors.
num_samples: the number of samples in the dataset being provided.
"""
self._items_to_tensors = items_to_tensors
self._num_samples = num_samples
def get(self, items):
"""Returns a list of tensors specified by the given list of items.
The list of items is arbitrary; different data providers satisfy different
lists of items. For example, the Pascal VOC provider might accept items 'image' and
'semantics', whereas the NYUDepthV2 data provider might accept items
'image', 'depths' and 'normals'.
Args:
items: a list of strings, each of which indicate a particular data type.
Returns:
a list of tensors, whose length matches the length of `items`, where each
tensor corresponds to each item.
Raises:
ValueError: if any of the items cannot be satisfied.
"""
self._validate_items(items)
return [self._items_to_tensors[item] for item in items]
def list_items(self):
"""Returns the list of item names that can be provided by the data provider.
Returns:
a list of item names that can be passed to Get([items]).
"""
return self._items_to_tensors.keys()
def num_samples(self):
"""Returns the number of data samples in the dataset.
Returns:
a positive whole number.
"""
return self._num_samples
def _validate_items(self, items):
"""Verifies that each given item is a member of the list from ListItems().
Args:
items: a list or tuple of strings.
Raises:
ValueError: if `items` is not a tuple or list or if any of the elements of
`items` is not found in the list provided by self.ListItems().
"""
if not isinstance(items, (list, tuple)):
raise ValueError('items must be a list or tuple')
valid_items = self.list_items()
for item in items:
if item not in valid_items:
raise ValueError('Item [%s] is invalid. Valid entries include: %s' %
(item, valid_items))
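# Editor's sketch (assumption, not original code): a trivial DataProvider
# subclass backed by constant tensors, the kind of stub that is handy in
# tests. The item names and values are illustrative only.
class _ConstantDataProvider(DataProvider):
  """Toy provider that serves two constant tensors."""

  def __init__(self):
    from tensorflow.python.framework import constant_op
    super(_ConstantDataProvider, self).__init__(
        items_to_tensors={
            'image': constant_op.constant([[0.0, 1.0], [2.0, 3.0]]),
            'label': constant_op.constant(1),
        },
        num_samples=1)

# Usage sketch: [image, label] = _ConstantDataProvider().get(['image', 'label'])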
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/data/data_provider.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements a simple prefetch_queue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
def _which_queue(dynamic_pad):
return (data_flow_ops.PaddingFIFOQueue
if dynamic_pad else data_flow_ops.FIFOQueue)
def prefetch_queue(tensors,
capacity=8,
num_threads=1,
dynamic_pad=False,
shared_name=None,
name=None):
"""Creates a queue to prefetch tensors from `tensors`.
A queue runner for enqueuing tensors into the prefetch_queue is automatically
added to the TF QueueRunners collection.
Example:
This is for example useful to pre-assemble input batches read with
`tf.compat.v1.train.batch()` and enqueue the pre-assembled batches. Ops that
dequeue from the pre-assembled queue will not pay the cost of assembling the
batch.
images, labels = tf.compat.v1.train.batch([image, label], batch_size=32,
num_threads=4)
batch_queue = prefetch_queue([images, labels])
images, labels = batch_queue.dequeue()
logits = Net(images)
loss = Loss(logits, labels)
Args:
tensors: A list or dictionary of `Tensors` to enqueue in the buffer.
capacity: An integer. The maximum number of elements in the queue.
num_threads: An integer. Number of threads running the enqueue op.
dynamic_pad: Boolean. Whether to allow variable dimensions in input shapes.
shared_name: (optional). If set, this queue will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A queue from which you can dequeue tensors with the same type and shape
as `tensors`.
"""
if isinstance(tensors, dict):
# Need to wrap the keys and values in list() since Python3 returns views.
# We sort the keys so the order is consistent across runs.
names = list(sorted(tensors.keys()))
tensor_list = list([tensors[n] for n in names])
else:
names = None
tensor_list = tensors
with ops.name_scope(name, "prefetch_queue", tensor_list) as name:
dtypes = [t.dtype for t in tensor_list]
shapes = [t.get_shape() for t in tensor_list]
queue = _which_queue(dynamic_pad)(
capacity=capacity,
dtypes=dtypes,
shapes=shapes,
names=names,
shared_name=shared_name)
enqueue_op = queue.enqueue(tensors)
queue_runner.add_queue_runner(
queue_runner.QueueRunner(queue, [enqueue_op] * num_threads))
summary.scalar(
"fraction_of_%d_full" % capacity,
math_ops.cast(queue.size(), _dtypes.float32) * (1. / capacity))
return queue
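# Editor's sketch (assumption, not original code): typical use of
# prefetch_queue after batching. `image` and `label` stand for single-example
# tensors produced earlier in the input pipeline.
def _example_prefetch(image, label):
  from tensorflow.python.training import input as input_lib

  # Assemble batches in background threads, then prefetch whole batches so
  # downstream ops do not wait for batch assembly.
  images, labels = input_lib.batch(
      [image, label], batch_size=32, num_threads=4)
  batch_queue = prefetch_queue([images, labels], capacity=8)
  images, labels = batch_queue.dequeue()
  return images, labels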
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/data/prefetch_queue.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the TFExampleDecoder its associated helper classes.
The TFExampleDecode is a DataDecoder used to decode TensorFlow Example protos.
In order to do so each requested item must be paired with one or more Example
features that are parsed to produce the Tensor-based manifestation of the item.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.slim.python.slim.data import data_decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
@six.add_metaclass(abc.ABCMeta)
class ItemHandler(object):
"""Specifies the item-to-Features mapping for tf.parse_example.
An ItemHandler both specifies a list of Features used for parsing an Example
proto as well as a function that post-processes the results of Example
parsing.
"""
def __init__(self, keys):
"""Constructs the handler with the name of the tf.Feature keys to use.
See third_party/tensorflow/core/example/feature.proto
Args:
keys: the name of the TensorFlow Example Feature.
"""
if not isinstance(keys, (tuple, list)):
keys = [keys]
self._keys = keys
@property
def keys(self):
return self._keys
@abc.abstractmethod
def tensors_to_item(self, keys_to_tensors):
"""Maps the given dictionary of tensors to the requested item.
Args:
keys_to_tensors: a mapping of TF-Example keys to parsed tensors.
Returns:
the final tensor representing the item being handled.
"""
pass
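# Editor's sketch (assumption, not original code): a small custom ItemHandler
# that scales a parsed float feature. The key name and scale factor are
# illustrative only; see ItemHandlerCallback below for a function-based
# alternative.
class _ScaledTensor(ItemHandler):
  """Returns a parsed float tensor multiplied by a fixed scale."""

  def __init__(self, key, scale=2.0):
    self._key = key
    self._scale = scale
    super(_ScaledTensor, self).__init__([key])

  def tensors_to_item(self, keys_to_tensors):
    tensor = keys_to_tensors[self._key]
    # VarLenFeature parsing yields a SparseTensor; densify before scaling.
    if isinstance(tensor, sparse_tensor.SparseTensor):
      tensor = sparse_ops.sparse_tensor_to_dense(tensor)
    return math_ops.cast(tensor, dtypes.float32) * self._scale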
class ItemHandlerCallback(ItemHandler):
"""An ItemHandler that converts the parsed tensors via a given function.
Unlike other ItemHandlers, the ItemHandlerCallback resolves its item via
a callback function rather than using prespecified behavior.
"""
def __init__(self, keys, func):
"""Initializes the ItemHandler.
Args:
keys: a list of TF-Example keys.
func: a function that takes as an argument a dictionary from `keys` to
parsed Tensors.
"""
super(ItemHandlerCallback, self).__init__(keys)
self._func = func
def tensors_to_item(self, keys_to_tensors):
return self._func(keys_to_tensors)
class BoundingBox(ItemHandler):
"""An ItemHandler that concatenates a set of parsed Tensors to Bounding Boxes.
"""
def __init__(self, keys=None, prefix=''):
"""Initialize the bounding box handler.
Args:
keys: A list of four key names representing the ymin, xmin, ymax, xmax
coordinates, in that order.
prefix: An optional prefix for each of the bounding box keys.
If provided, `prefix` is prepended to each key in `keys`.
Raises:
ValueError: if keys is not `None` and also not a list of exactly 4 keys
"""
if keys is None:
keys = ['ymin', 'xmin', 'ymax', 'xmax']
elif len(keys) != 4:
raise ValueError('BoundingBox expects 4 keys but got {}'.format(
len(keys)))
self._prefix = prefix
self._keys = keys
self._full_keys = [prefix + k for k in keys]
super(BoundingBox, self).__init__(self._full_keys)
def tensors_to_item(self, keys_to_tensors):
"""Maps the given dictionary of tensors to a concatenated list of bboxes.
Args:
keys_to_tensors: a mapping of TF-Example keys to parsed tensors.
Returns:
[num_boxes, 4] tensor of bounding box coordinates,
i.e. 1 bounding box per row, in order [y_min, x_min, y_max, x_max].
"""
sides = []
for key in self._full_keys:
side = keys_to_tensors[key]
if isinstance(side, sparse_tensor.SparseTensor):
side = side.values
side = array_ops.expand_dims(side, 0)
sides.append(side)
bounding_box = array_ops.concat(sides, 0)
return array_ops.transpose(bounding_box)
class Tensor(ItemHandler):
"""An ItemHandler that returns a parsed Tensor."""
def __init__(self, tensor_key, shape_keys=None, shape=None, default_value=0):
"""Initializes the Tensor handler.
Tensors are, by default, returned without any reshaping. However, there are
two mechanisms which allow reshaping to occur at load time. If `shape_keys`
is provided, both the `Tensor` corresponding to `tensor_key` and
`shape_keys` is loaded and the former `Tensor` is reshaped with the values
of the latter. Alternatively, if a fixed `shape` is provided, the `Tensor`
corresponding to `tensor_key` is loaded and reshaped accordingly.
If neither `shape_keys` nor `shape` are provided, the `Tensor` will be
returned without any reshaping.
Args:
tensor_key: the name of the `TFExample` feature to read the tensor from.
shape_keys: Optional name or list of names of the TF-Example feature in
which the tensor shape is stored. If a list, then each corresponds to
one dimension of the shape.
shape: Optional output shape of the `Tensor`. If provided, the `Tensor` is
reshaped accordingly.
default_value: The value used when the `tensor_key` is not found in a
particular `TFExample`.
Raises:
ValueError: if both `shape_keys` and `shape` are specified.
"""
if shape_keys and shape is not None:
raise ValueError('Cannot specify both shape_keys and shape parameters.')
if shape_keys and not isinstance(shape_keys, list):
shape_keys = [shape_keys]
self._tensor_key = tensor_key
self._shape_keys = shape_keys
self._shape = shape
self._default_value = default_value
keys = [tensor_key]
if shape_keys:
keys.extend(shape_keys)
super(Tensor, self).__init__(keys)
def tensors_to_item(self, keys_to_tensors):
tensor = keys_to_tensors[self._tensor_key]
shape = self._shape
if self._shape_keys:
shape_dims = []
for k in self._shape_keys:
shape_dim = keys_to_tensors[k]
if isinstance(shape_dim, sparse_tensor.SparseTensor):
shape_dim = sparse_ops.sparse_tensor_to_dense(shape_dim)
shape_dims.append(shape_dim)
shape = array_ops.reshape(array_ops.stack(shape_dims), [-1])
if isinstance(tensor, sparse_tensor.SparseTensor):
if shape is not None:
tensor = sparse_ops.sparse_reshape(tensor, shape)
tensor = sparse_ops.sparse_tensor_to_dense(tensor, self._default_value)
else:
if shape is not None:
tensor = array_ops.reshape(tensor, shape)
return tensor
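# Editor's sketch (assumption, not original code): a Tensor handler that
# reshapes a variable-length 'labels' feature using a companion 'labels/shape'
# feature. The key names are placeholders.
def _example_tensor_with_shape_keys():
  keys_to_features = {
      'labels': parsing_ops.VarLenFeature(dtype=dtypes.int64),
      'labels/shape': parsing_ops.VarLenFeature(dtype=dtypes.int64),
  }
  items_to_handlers = {
      'labels': Tensor('labels', shape_keys='labels/shape'),
  }
  return keys_to_features, items_to_handlers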
class LookupTensor(Tensor):
"""An ItemHandler that returns a parsed Tensor, the result of a lookup."""
def __init__(self,
tensor_key,
table,
shape_keys=None,
shape=None,
default_value=''):
"""Initializes the LookupTensor handler.
See Tensor. Simply calls a vocabulary (most often, a label mapping) lookup.
Args:
tensor_key: the name of the `TFExample` feature to read the tensor from.
table: A tf.lookup table.
shape_keys: Optional name or list of names of the TF-Example feature in
which the tensor shape is stored. If a list, then each corresponds to
one dimension of the shape.
shape: Optional output shape of the `Tensor`. If provided, the `Tensor` is
reshaped accordingly.
default_value: The value used when the `tensor_key` is not found in a
particular `TFExample`.
Raises:
ValueError: if both `shape_keys` and `shape` are specified.
"""
self._table = table
super(LookupTensor, self).__init__(tensor_key, shape_keys, shape,
default_value)
def tensors_to_item(self, keys_to_tensors):
unmapped_tensor = super(LookupTensor, self).tensors_to_item(keys_to_tensors)
return self._table.lookup(unmapped_tensor)
class BackupHandler(ItemHandler):
"""An ItemHandler that tries two ItemHandlers in order."""
def __init__(self, handler, backup):
"""Initializes the BackupHandler handler.
If the first Handler's tensors_to_item returns a Tensor with no elements,
the second Handler is used.
Args:
handler: The primary ItemHandler.
backup: The backup ItemHandler.
Raises:
ValueError: if either is not an ItemHandler.
"""
if not isinstance(handler, ItemHandler):
raise ValueError('Primary handler is of type %s instead of ItemHandler'
% type(handler))
if not isinstance(backup, ItemHandler):
raise ValueError('Backup handler is of type %s instead of ItemHandler'
% type(backup))
self._handler = handler
self._backup = backup
super(BackupHandler, self).__init__(handler.keys + backup.keys)
def tensors_to_item(self, keys_to_tensors):
item = self._handler.tensors_to_item(keys_to_tensors)
return control_flow_ops.cond(
pred=math_ops.equal(math_ops.reduce_prod(array_ops.shape(item)), 0),
true_fn=lambda: self._backup.tensors_to_item(keys_to_tensors),
false_fn=lambda: item)
class SparseTensor(ItemHandler):
"""An ItemHandler for SparseTensors."""
def __init__(self,
indices_key=None,
values_key=None,
shape_key=None,
shape=None,
densify=False,
default_value=0):
"""Initializes the Tensor handler.
Args:
indices_key: the name of the TF-Example feature that contains the ids.
Defaults to 'indices'.
values_key: the name of the TF-Example feature that contains the values.
Defaults to 'values'.
      shape_key: the name of the TF-Example feature that contains the shape.
        If provided, it is used as the shape of the `SparseTensor`.
      shape: the output shape of the `SparseTensor`. If `shape_key` is not
        provided, this `shape` is used instead.
densify: whether to convert the SparseTensor into a dense Tensor.
default_value: Scalar value to set when making dense for indices not
specified in the `SparseTensor`.
"""
indices_key = indices_key or 'indices'
values_key = values_key or 'values'
self._indices_key = indices_key
self._values_key = values_key
self._shape_key = shape_key
self._shape = shape
self._densify = densify
self._default_value = default_value
keys = [indices_key, values_key]
if shape_key:
keys.append(shape_key)
super(SparseTensor, self).__init__(keys)
def tensors_to_item(self, keys_to_tensors):
indices = keys_to_tensors[self._indices_key]
values = keys_to_tensors[self._values_key]
if self._shape_key:
shape = keys_to_tensors[self._shape_key]
if isinstance(shape, sparse_tensor.SparseTensor):
shape = sparse_ops.sparse_tensor_to_dense(shape)
elif self._shape:
shape = self._shape
else:
shape = indices.dense_shape
indices_shape = array_ops.shape(indices.indices)
rank = indices_shape[1]
ids = math_ops.cast(indices.values, dtypes.int64)
indices_columns_to_preserve = array_ops.slice(
indices.indices, [0, 0], array_ops.stack([-1, rank - 1]))
new_indices = array_ops.concat(
[indices_columns_to_preserve, array_ops.reshape(ids, [-1, 1])], 1)
tensor = sparse_tensor.SparseTensor(new_indices, values.values, shape)
if self._densify:
tensor = sparse_ops.sparse_tensor_to_dense(tensor, self._default_value)
return tensor
class Image(ItemHandler):
"""An ItemHandler that decodes a parsed Tensor as an image."""
def __init__(self,
image_key=None,
format_key=None,
shape=None,
channels=3,
dtype=dtypes.uint8,
repeated=False,
dct_method=''):
"""Initializes the image.
Args:
image_key: the name of the TF-Example feature in which the encoded image
is stored.
format_key: the name of the TF-Example feature in which the image format
is stored.
shape: the output shape of the image as 1-D `Tensor`
[height, width, channels]. If provided, the image is reshaped
accordingly. If left as None, no reshaping is done. A shape should
be supplied only if all the stored images have the same shape.
channels: the number of channels in the image.
      dtype: images will be decoded at this bit depth. Different formats
        support different bit depths. See tf.image.decode_image and
        tf.io.decode_raw.
repeated: if False, decodes a single image. If True, decodes a
variable number of image strings from a 1D tensor of strings.
      dct_method: An optional string. Defaults to the empty string. It only
        takes effect when the image format is jpeg, and is used to specify a
        hint about the algorithm to use for jpeg decompression. Currently valid
        values are ['INTEGER_FAST', 'INTEGER_ACCURATE']. The hint may be
        ignored, for example, if the jpeg library does not have that specific
        option.
"""
if not image_key:
image_key = 'image/encoded'
if not format_key:
format_key = 'image/format'
super(Image, self).__init__([image_key, format_key])
self._image_key = image_key
self._format_key = format_key
self._shape = shape
self._channels = channels
self._dtype = dtype
self._repeated = repeated
self._dct_method = dct_method
def tensors_to_item(self, keys_to_tensors):
"""See base class."""
image_buffer = keys_to_tensors[self._image_key]
image_format = keys_to_tensors[self._format_key]
if self._repeated:
return map_fn.map_fn(lambda x: self._decode(x, image_format),
image_buffer, dtype=self._dtype)
else:
return self._decode(image_buffer, image_format)
def _decode(self, image_buffer, image_format):
"""Decodes the image buffer.
Args:
      image_buffer: The tensor representing the encoded image.
      image_format: The image format for the image in `image_buffer`. If the
        image format is `raw`, all images are expected to be in this format;
        otherwise this op can decode a mix of `jpg` and `png` formats.
Returns:
      A tensor that represents the decoded image of shape self._shape, or
      (?, ?, self._channels) if self._shape is not specified.
"""
def decode_image():
"""Decodes a image based on the headers."""
return math_ops.cast(
image_ops.decode_image(image_buffer, channels=self._channels),
self._dtype)
def decode_jpeg():
"""Decodes a jpeg image with specified '_dct_method'."""
return math_ops.cast(
image_ops.decode_jpeg(
image_buffer,
channels=self._channels,
dct_method=self._dct_method), self._dtype)
def check_jpeg():
"""Checks if an image is jpeg."""
      # For jpeg, we directly use image_ops.decode_jpeg rather than decode_image
      # in order to feed the jpeg-specific parameter 'dct_method'.
return control_flow_ops.cond(
image_ops.is_jpeg(image_buffer),
decode_jpeg,
decode_image,
name='cond_jpeg')
def decode_raw():
"""Decodes a raw image."""
return parsing_ops.decode_raw(image_buffer, out_type=self._dtype)
pred_fn_pairs = {
math_ops.logical_or(
math_ops.equal(image_format, 'raw'),
math_ops.equal(image_format, 'RAW')): decode_raw,
}
image = control_flow_ops.case(
pred_fn_pairs, default=check_jpeg, exclusive=True)
image.set_shape([None, None, self._channels])
if self._shape is not None:
image = array_ops.reshape(image, self._shape)
return image
class TFExampleDecoder(data_decoder.DataDecoder):
"""A decoder for TensorFlow Examples.
  Decoding Example proto buffers consists of two stages: (1) Example parsing
  and (2) tensor manipulation.
  In the first stage, the tf.io.parse_example function is called with a list of
  FixedLenFeature and VarLenFeature instances. These instances tell TF how to
  parse the example. The output of this stage is a set of tensors.
  In the second stage, the resulting tensors are manipulated to provide the
  requested 'item' tensors.
  To perform this decoding operation, an ExampleDecoder is given a list of
  ItemHandlers. Each ItemHandler indicates the set of features for stage 1 and
  contains the instructions for post-processing its tensors for stage 2.
"""
def __init__(self, keys_to_features, items_to_handlers):
"""Constructs the decoder.
Args:
keys_to_features: a dictionary from TF-Example keys to either
tf.io.VarLenFeature or tf.io.FixedLenFeature instances. See tensorflow's
parsing_ops.py.
      items_to_handlers: a dictionary from items (strings) to ItemHandler
        instances. Note that the ItemHandlers are provided the keys that they
        use to return the final item Tensors.
"""
self._keys_to_features = keys_to_features
self._items_to_handlers = items_to_handlers
def list_items(self):
"""See base class."""
return list(self._items_to_handlers.keys())
def decode(self, serialized_example, items=None):
"""Decodes the given serialized TF-example.
Args:
serialized_example: a serialized TF-example tensor.
items: the list of items to decode. These must be a subset of the item
keys in self._items_to_handlers. If `items` is left as None, then all
of the items in self._items_to_handlers are decoded.
Returns:
      the decoded items, a list of tensors.
"""
example = parsing_ops.parse_single_example(serialized_example,
self._keys_to_features)
# Reshape non-sparse elements just once, adding the reshape ops in
# deterministic order.
for k in sorted(self._keys_to_features):
v = self._keys_to_features[k]
if isinstance(v, parsing_ops.FixedLenFeature):
example[k] = array_ops.reshape(example[k], v.shape)
if not items:
items = self._items_to_handlers.keys()
outputs = []
for item in items:
handler = self._items_to_handlers[item]
keys_to_tensors = {key: example[key] for key in handler.keys}
outputs.append(handler.tensors_to_item(keys_to_tensors))
return outputs
|
tensorflow-master
|
tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py
|
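Illustrative usage of the decoder module above, as a self-contained sketch. The feature keys ('image/encoded', 'image/format', 'image/class/label') and the tiny in-memory PNG are assumptions chosen for demonstration only; a real dataset defines its own keys and handlers.

import tensorflow as tf
from tensorflow.contrib.slim.python.slim.data import tfexample_decoder

# Build one serialized tf.Example in-process so the sketch needs no external data.
png = tf.image.encode_png(tf.zeros([4, 4, 3], dtype=tf.uint8))
with tf.Session() as sess:
  png_bytes = sess.run(png)
example = tf.train.Example(features=tf.train.Features(feature={
    'image/encoded': tf.train.Feature(
        bytes_list=tf.train.BytesList(value=[png_bytes])),
    'image/format': tf.train.Feature(
        bytes_list=tf.train.BytesList(value=[b'png'])),
    'image/class/label': tf.train.Feature(
        int64_list=tf.train.Int64List(value=[7])),
}))

keys_to_features = {
    'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
    'image/format': tf.FixedLenFeature((), tf.string, default_value='png'),
    'image/class/label': tf.FixedLenFeature([1], tf.int64),
}
items_to_handlers = {
    'image': tfexample_decoder.Image(),
    'label': tfexample_decoder.Tensor('image/class/label'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features, items_to_handlers)
image, label = decoder.decode(
    tf.constant(example.SerializeToString()), items=['image', 'label'])
with tf.Session() as sess:
  decoded_image, decoded_label = sess.run([image, label])  # (4, 4, 3) and [7]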
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Libsvm decoder.
@@decode_libsvm
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.libsvm.python.ops.libsvm_ops import decode_libsvm
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
"decode_libsvm",
]
remove_undocumented(__name__)
|
tensorflow-master
|
tensorflow/contrib/libsvm/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeLibsvm op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.libsvm.python.ops import libsvm_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class DecodeLibsvmOpTest(test.TestCase):
def testBasic(self):
with self.cached_session() as sess:
content = [
"1 1:3.4 2:0.5 4:0.231", "1 2:2.5 3:inf 5:0.503",
"2 3:2.5 2:nan 1:0.105"
]
sparse_features, labels = libsvm_ops.decode_libsvm(
content, num_features=6)
features = sparse_ops.sparse_tensor_to_dense(
sparse_features, validate_indices=False)
self.assertAllEqual(labels.get_shape().as_list(), [3])
features, labels = sess.run([features, labels])
self.assertAllEqual(labels, [1, 1, 2])
self.assertAllClose(
features, [[0, 3.4, 0.5, 0, 0.231, 0], [0, 0, 2.5, np.inf, 0, 0.503],
[0, 0.105, np.nan, 2.5, 0, 0]])
def testNDimension(self):
with self.cached_session() as sess:
content = [["1 1:3.4 2:0.5 4:0.231", "1 1:3.4 2:0.5 4:0.231"],
["1 2:2.5 3:inf 5:0.503", "1 2:2.5 3:inf 5:0.503"],
["2 3:2.5 2:nan 1:0.105", "2 3:2.5 2:nan 1:0.105"]]
sparse_features, labels = libsvm_ops.decode_libsvm(
content, num_features=6, label_dtype=dtypes.float64)
features = sparse_ops.sparse_tensor_to_dense(
sparse_features, validate_indices=False)
self.assertAllEqual(labels.get_shape().as_list(), [3, 2])
features, labels = sess.run([features, labels])
self.assertAllEqual(labels, [[1, 1], [1, 1], [2, 2]])
self.assertAllClose(
features, [[[0, 3.4, 0.5, 0, 0.231, 0], [0, 3.4, 0.5, 0, 0.231, 0]], [
[0, 0, 2.5, np.inf, 0, 0.503], [0, 0, 2.5, np.inf, 0, 0.503]
], [[0, 0.105, np.nan, 2.5, 0, 0], [0, 0.105, np.nan, 2.5, 0, 0]]])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/libsvm/python/kernel_tests/decode_libsvm_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Libsvm decoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.libsvm.ops import gen_libsvm_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.platform import resource_loader
from tensorflow.python.util.deprecation import deprecated
_libsvm_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_libsvm_ops.so"))
@deprecated(None,
'tf.contrib.libsvm will be removed in 2.0, the support for libsvm '
'format will continue to be provided in tensorflow-io: '
'https://github.com/tensorflow/io')
def decode_libsvm(content, num_features, dtype=None, label_dtype=None):
"""Convert Libsvm records to a tensor of label and a tensor of feature.
Args:
content: A `Tensor` of type `string`. Each string is a record/row in
the Libsvm format.
num_features: The number of features.
    dtype: The type of the output feature tensor. Defaults to tf.float32.
    label_dtype: The type of the output label tensor. Defaults to tf.int64.
Returns:
features: A `SparseTensor` of the shape `[input_shape, num_features]`.
labels: A `Tensor` of the same shape as content.
"""
labels, indices, values, shape = gen_libsvm_ops.decode_libsvm(
content, num_features, dtype=dtype, label_dtype=label_dtype)
return sparse_tensor.SparseTensor(indices, values, shape), labels
ops.NotDifferentiable("DecodeLibSVM")
|
tensorflow-master
|
tensorflow/contrib/libsvm/python/ops/libsvm_ops.py
|
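A short usage sketch of decode_libsvm as wrapped above. The record strings are made up for illustration; as in the kernel test earlier, feature index i lands in column i of the densified tensor.

import tensorflow as tf
from tensorflow.contrib.libsvm import decode_libsvm

records = tf.constant(["1 1:3.4 2:0.5 4:0.231",
                       "0 2:2.5 5:0.503"])
sparse_features, labels = decode_libsvm(records, num_features=6)
dense_features = tf.sparse_tensor_to_dense(sparse_features,
                                           validate_indices=False)
with tf.Session() as sess:
  feature_values, label_values = sess.run([dense_features, labels])
  # feature_values has shape (2, 6); label_values is [1, 0] (int64 by default).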
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops and modules related to RPC.
@@rpc
@@try_rpc
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.rpc.python.ops.rpc_op import rpc
from tensorflow.contrib.rpc.python.ops.rpc_op import try_rpc
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
tensorflow-master
|
tensorflow/contrib/rpc/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Base class for RpcOp tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.contrib.rpc.python.kernel_tests import test_example_pb2
from tensorflow.contrib.rpc.python.ops import rpc_op
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import proto_ops
__all__ = ['I_WARNED_YOU', 'RpcOpTestBase']
I_WARNED_YOU = 'I warned you!'
class RpcOpTestBase(object):
# pylint: disable=missing-docstring,invalid-name
"""Base class for RpcOp tests."""
def get_method_name(self, suffix):
raise NotImplementedError
def rpc(self, *args, **kwargs):
return rpc_op.rpc(*args, protocol=self._protocol, **kwargs)
def try_rpc(self, *args, **kwargs):
return rpc_op.try_rpc(*args, protocol=self._protocol, **kwargs)
def testScalarHostPortRpc(self):
with self.cached_session() as sess:
request_tensors = (
test_example_pb2.TestCase(values=[1, 2, 3]).SerializeToString())
response_tensors = self.rpc(
method=self.get_method_name('Increment'),
address=self._address,
request=request_tensors)
self.assertEqual(response_tensors.shape, ())
response_values = sess.run(response_tensors)
response_message = test_example_pb2.TestCase()
self.assertTrue(response_message.ParseFromString(response_values))
self.assertAllEqual([2, 3, 4], response_message.values)
def testScalarHostPortTryRpc(self):
with self.cached_session() as sess:
request_tensors = (
test_example_pb2.TestCase(values=[1, 2, 3]).SerializeToString())
response_tensors, status_code, status_message = self.try_rpc(
method=self.get_method_name('Increment'),
address=self._address,
request=request_tensors)
self.assertEqual(status_code.shape, ())
self.assertEqual(status_message.shape, ())
self.assertEqual(response_tensors.shape, ())
response_values, status_code_values, status_message_values = (
sess.run((response_tensors, status_code, status_message)))
response_message = test_example_pb2.TestCase()
self.assertTrue(response_message.ParseFromString(response_values))
self.assertAllEqual([2, 3, 4], response_message.values)
# For the base Rpc op, don't expect to get error status back.
self.assertEqual(errors.OK, status_code_values)
self.assertEqual(b'', status_message_values)
def testEmptyHostPortRpc(self):
with self.cached_session() as sess:
request_tensors = []
response_tensors = self.rpc(
method=self.get_method_name('Increment'),
address=self._address,
request=request_tensors)
self.assertAllEqual(response_tensors.shape, [0])
response_values = sess.run(response_tensors)
self.assertAllEqual(response_values.shape, [0])
def testInvalidMethod(self):
for method in [
'/InvalidService.Increment',
self.get_method_name('InvalidMethodName')
]:
with self.cached_session() as sess:
with self.assertRaisesOpError(self.invalid_method_string):
sess.run(self.rpc(method=method, address=self._address, request=''))
_, status_code_value, status_message_value = sess.run(
self.try_rpc(method=method, address=self._address, request=''))
self.assertEqual(errors.UNIMPLEMENTED, status_code_value)
self.assertTrue(
self.invalid_method_string in status_message_value.decode('ascii'))
def testInvalidAddress(self):
# This covers the case of address='' and address='localhost:293874293874'
address = 'unix:/tmp/this_unix_socket_doesnt_exist_97820348!!@'
with self.cached_session() as sess:
with self.assertRaises(errors.UnavailableError):
sess.run(
self.rpc(
method=self.get_method_name('Increment'),
address=address,
request=''))
_, status_code_value, status_message_value = sess.run(
self.try_rpc(
method=self.get_method_name('Increment'),
address=address,
request=''))
self.assertEqual(errors.UNAVAILABLE, status_code_value)
def testAlwaysFailingMethod(self):
with self.cached_session() as sess:
response_tensors = self.rpc(
method=self.get_method_name('AlwaysFailWithInvalidArgument'),
address=self._address,
request='')
self.assertEqual(response_tensors.shape, ())
with self.assertRaisesOpError(I_WARNED_YOU):
sess.run(response_tensors)
response_tensors, status_code, status_message = self.try_rpc(
method=self.get_method_name('AlwaysFailWithInvalidArgument'),
address=self._address,
request='')
self.assertEqual(response_tensors.shape, ())
self.assertEqual(status_code.shape, ())
self.assertEqual(status_message.shape, ())
status_code_value, status_message_value = sess.run((status_code,
status_message))
self.assertEqual(errors.INVALID_ARGUMENT, status_code_value)
self.assertTrue(I_WARNED_YOU in status_message_value.decode('ascii'))
def testSometimesFailingMethodWithManyRequests(self):
with self.cached_session() as sess:
# Fail hard by default.
response_tensors = self.rpc(
method=self.get_method_name('SometimesFailWithInvalidArgument'),
address=self._address,
request=[''] * 20)
self.assertEqual(response_tensors.shape, (20,))
with self.assertRaisesOpError(I_WARNED_YOU):
sess.run(response_tensors)
# Don't fail hard, use TryRpc - return the failing status instead.
response_tensors, status_code, status_message = self.try_rpc(
method=self.get_method_name('SometimesFailWithInvalidArgument'),
address=self._address,
request=[''] * 20)
self.assertEqual(response_tensors.shape, (20,))
self.assertEqual(status_code.shape, (20,))
self.assertEqual(status_message.shape, (20,))
status_code_values, status_message_values = sess.run((status_code,
status_message))
      self.assertTrue(
          all(x in (errors.OK, errors.INVALID_ARGUMENT)
              for x in status_code_values))
expected_message_values = np.where(
status_code_values == errors.INVALID_ARGUMENT,
I_WARNED_YOU.encode('ascii'), b'')
for msg, expected in zip(status_message_values, expected_message_values):
self.assertTrue(expected in msg,
'"%s" did not contain "%s"' % (msg, expected))
def testVecHostPortRpc(self):
with self.cached_session() as sess:
request_tensors = [
test_example_pb2.TestCase(
values=[i, i + 1, i + 2]).SerializeToString() for i in range(20)
]
response_tensors = self.rpc(
method=self.get_method_name('Increment'),
address=self._address,
request=request_tensors)
self.assertEqual(response_tensors.shape, (20,))
response_values = sess.run(response_tensors)
self.assertEqual(response_values.shape, (20,))
for i in range(20):
response_message = test_example_pb2.TestCase()
self.assertTrue(response_message.ParseFromString(response_values[i]))
self.assertAllEqual([i + 1, i + 2, i + 3], response_message.values)
def testVecHostPortManyParallelRpcs(self):
with self.cached_session() as sess:
request_tensors = [
test_example_pb2.TestCase(
values=[i, i + 1, i + 2]).SerializeToString() for i in range(20)
]
many_response_tensors = [
self.rpc(
method=self.get_method_name('Increment'),
address=self._address,
request=request_tensors) for _ in range(10)
]
      # Launch 10 parallel calls to the RpcOp, each containing 20 rpc requests.
many_response_values = sess.run(many_response_tensors)
self.assertEqual(10, len(many_response_values))
for response_values in many_response_values:
self.assertEqual(response_values.shape, (20,))
for i in range(20):
response_message = test_example_pb2.TestCase()
self.assertTrue(response_message.ParseFromString(response_values[i]))
self.assertAllEqual([i + 1, i + 2, i + 3], response_message.values)
def testVecHostPortRpcUsingEncodeAndDecodeProto(self):
with self.cached_session() as sess:
request_tensors = proto_ops.encode_proto(
message_type='tensorflow.contrib.rpc.TestCase',
field_names=['values'],
sizes=[[3]] * 20,
values=[
[[i, i + 1, i + 2] for i in range(20)],
])
response_tensor_strings = self.rpc(
method=self.get_method_name('Increment'),
address=self._address,
request=request_tensors)
_, (response_shape,) = proto_ops.decode_proto(
bytes=response_tensor_strings,
message_type='tensorflow.contrib.rpc.TestCase',
field_names=['values'],
output_types=[dtypes.int32])
response_shape_values = sess.run(response_shape)
self.assertAllEqual([[i + 1, i + 2, i + 3]
for i in range(20)], response_shape_values)
def testVecHostPortRpcCancelsUponSessionTimeOutWhenSleepingForever(self):
with self.cached_session() as sess:
request_tensors = [''] * 25 # This will launch 25 RPC requests.
response_tensors = self.rpc(
method=self.get_method_name('SleepForever'),
address=self._address,
request=request_tensors)
for timeout_ms in [1, 500, 1000]:
options = config_pb2.RunOptions(timeout_in_ms=timeout_ms)
with self.assertRaises((errors.UnavailableError,
errors.DeadlineExceededError)):
sess.run(response_tensors, options=options)
def testVecHostPortRpcCancelsUponConfiguredTimeOutWhenSleepingForever(self):
with self.cached_session() as sess:
request_tensors = [''] * 25 # This will launch 25 RPC requests.
response_tensors = self.rpc(
method=self.get_method_name('SleepForever'),
address=self._address,
timeout_in_ms=1000,
request=request_tensors)
with self.assertRaises(errors.DeadlineExceededError):
sess.run(response_tensors)
def testTryRpcPropagatesDeadlineErrorWithSometimesTimingOutRequests(self):
with self.cached_session() as sess:
response_tensors, status_code, status_message = self.try_rpc(
method=self.get_method_name('SometimesSleepForever'),
timeout_in_ms=1000,
address=self._address,
request=[''] * 20)
self.assertEqual(response_tensors.shape, (20,))
self.assertEqual(status_code.shape, (20,))
self.assertEqual(status_message.shape, (20,))
status_code_values = sess.run(status_code)
      self.assertTrue(
          all(x in (errors.OK, errors.DEADLINE_EXCEEDED)
              for x in status_code_values))
def testTryRpcWithMultipleAddressesSingleRequest(self):
flatten = lambda x: list(itertools.chain.from_iterable(x))
with self.cached_session() as sess:
addresses = flatten([[
self._address, 'unix:/tmp/this_unix_socket_doesnt_exist_97820348!!@'
] for _ in range(10)])
request = test_example_pb2.TestCase(values=[0, 1, 2]).SerializeToString()
response_tensors, status_code, _ = self.try_rpc(
method=self.get_method_name('Increment'),
address=addresses,
request=request)
response_tensors_values, status_code_values = sess.run((response_tensors,
status_code))
self.assertAllEqual(
flatten([errors.OK, errors.UNAVAILABLE] for _ in range(10)),
status_code_values)
for i in range(10):
self.assertTrue(response_tensors_values[2 * i])
self.assertFalse(response_tensors_values[2 * i + 1])
def testTryRpcWithMultipleMethodsSingleRequest(self):
flatten = lambda x: list(itertools.chain.from_iterable(x))
with self.cached_session() as sess:
methods = flatten(
[[self.get_method_name('Increment'), 'InvalidMethodName']
for _ in range(10)])
request = test_example_pb2.TestCase(values=[0, 1, 2]).SerializeToString()
response_tensors, status_code, _ = self.try_rpc(
method=methods, address=self._address, request=request)
response_tensors_values, status_code_values = sess.run((response_tensors,
status_code))
self.assertAllEqual(
flatten([errors.OK, errors.UNIMPLEMENTED] for _ in range(10)),
status_code_values)
for i in range(10):
self.assertTrue(response_tensors_values[2 * i])
self.assertFalse(response_tensors_values[2 * i + 1])
def testTryRpcWithMultipleAddressesAndRequests(self):
flatten = lambda x: list(itertools.chain.from_iterable(x))
with self.cached_session() as sess:
addresses = flatten([[
self._address, 'unix:/tmp/this_unix_socket_doesnt_exist_97820348!!@'
] for _ in range(10)])
requests = [
test_example_pb2.TestCase(
values=[i, i + 1, i + 2]).SerializeToString() for i in range(20)
]
response_tensors, status_code, _ = self.try_rpc(
method=self.get_method_name('Increment'),
address=addresses,
request=requests)
response_tensors_values, status_code_values = sess.run((response_tensors,
status_code))
self.assertAllEqual(
flatten([errors.OK, errors.UNAVAILABLE] for _ in range(10)),
status_code_values)
for i in range(20):
if i % 2 == 1:
self.assertFalse(response_tensors_values[i])
else:
response_message = test_example_pb2.TestCase()
self.assertTrue(
response_message.ParseFromString(response_tensors_values[i]))
self.assertAllEqual([i + 1, i + 2, i + 3], response_message.values)
|
tensorflow-master
|
tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_base.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Test servicer for RpcOp tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import grpc
from tensorflow.contrib.rpc.python.kernel_tests import rpc_op_test_base
from tensorflow.contrib.rpc.python.kernel_tests import test_example_pb2_grpc
class RpcOpTestServicer(test_example_pb2_grpc.TestCaseServiceServicer):
"""Test servicer for RpcOp tests."""
def Increment(self, request, context):
"""Increment the entries in the `values` attribute of request.
Args:
request: input TestCase.
context: the rpc context.
Returns:
output TestCase.
"""
for i in range(len(request.values)):
request.values[i] += 1
return request
def AlwaysFailWithInvalidArgument(self, request, context):
"""Always fails with an InvalidArgument status.
Args:
request: input TestCase.
context: the rpc context.
Returns:
output TestCase.
"""
del request
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details(rpc_op_test_base.I_WARNED_YOU)
def SometimesFailWithInvalidArgument(self, request, context):
"""Sometimes fails with an InvalidArgument status.
Args:
request: input TestCase.
context: the rpc context.
Returns:
output TestCase.
"""
if random.randint(0, 1) == 1:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details(rpc_op_test_base.I_WARNED_YOU)
return request
def SleepForever(self, request, context):
"""Sleeps forever.
Args:
request: input TestCase.
context: the rpc context.
Returns:
output TestCase.
"""
# TODO(ebrevdo): Make this async wait like the stubby version.
time.sleep(5)
def SometimesSleepForever(self, request, context):
"""Sometimes sleeps forever.
Args:
request: input TestCase.
context: the rpc context.
Returns:
output TestCase.
"""
if random.randint(0, 1) == 1:
time.sleep(5)
return request
|
tensorflow-master
|
tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_servicer.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for RpcOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes as ct
import os
import grpc
from grpc.framework.foundation import logging_pool
import portpicker
from tensorflow.contrib.rpc.python.kernel_tests import rpc_op_test_base
from tensorflow.contrib.rpc.python.kernel_tests import rpc_op_test_servicer
from tensorflow.contrib.rpc.python.kernel_tests import test_example_pb2_grpc
from tensorflow.python.platform import test
class RpcOpTest(test.TestCase, rpc_op_test_base.RpcOpTestBase):
_protocol = 'grpc'
invalid_method_string = 'Method not found'
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
super(RpcOpTest, self).__init__(methodName)
lib = os.path.join(os.path.dirname(__file__), 'libtestexample.so')
if os.path.isfile(lib):
ct.cdll.LoadLibrary(lib)
def get_method_name(self, suffix):
return '/tensorflow.contrib.rpc.TestCaseService/%s' % suffix
def setUp(self):
super(RpcOpTest, self).setUp()
service_port = portpicker.pick_unused_port()
server = grpc.server(logging_pool.pool(max_workers=25))
servicer = rpc_op_test_servicer.RpcOpTestServicer()
test_example_pb2_grpc.add_TestCaseServiceServicer_to_server(
servicer, server)
self._address = 'localhost:%d' % service_port
server.add_insecure_port(self._address)
server.start()
self._server = server
def tearDown(self):
self._server.stop(grace=None)
super(RpcOpTest, self).tearDown()
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=wildcard-import,unused-import
"""RPC communication."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.rpc.python.ops.gen_rpc_op import rpc
from tensorflow.contrib.rpc.python.ops.gen_rpc_op import try_rpc
from tensorflow.python.framework import ops
ops.NotDifferentiable("Rpc")
ops.NotDifferentiable("TryRpc")
|
tensorflow-master
|
tensorflow/contrib/rpc/python/ops/rpc_op.py
|
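A hedged sketch of calling the ops exported above against a gRPC service. The method path, address, and empty request strings are placeholders; a real caller supplies serialized request protos for its own service.

import tensorflow as tf
from tensorflow.contrib import rpc

request_bytes = tf.constant([b'', b''])  # placeholder serialized request protos
response_bytes, status_code, status_message = rpc.try_rpc(
    method='/my.package.MyService/MyMethod',  # placeholder service/method
    address='localhost:8500',                 # placeholder address
    request=request_bytes,
    protocol='grpc',
    timeout_in_ms=1000)
with tf.Session() as sess:
  responses, codes, messages = sess.run(
      [response_bytes, status_code, status_message])
  # try_rpc returns per-element status instead of raising, as in the tests above.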
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstract base class for all predictors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class Predictor(object):
"""Abstract base class for all predictors."""
@property
def graph(self):
return self._graph
@property
def session(self):
return self._session
@property
def feed_tensors(self):
return self._feed_tensors
@property
def fetch_tensors(self):
return self._fetch_tensors
def __repr__(self):
return '{} with feed tensors {} and fetch_tensors {}'.format(
type(self).__name__, self._feed_tensors, self._fetch_tensors)
def __call__(self, input_dict):
"""Returns predictions based on `input_dict`.
Args:
input_dict: a `dict` mapping strings to numpy arrays. These keys
must match `self._feed_tensors.keys()`.
Returns:
A `dict` mapping strings to numpy arrays. The keys match
`self.fetch_tensors.keys()`.
Raises:
ValueError: `input_dict` does not match `feed_tensors`.
"""
# TODO(jamieas): make validation optional?
input_keys = set(input_dict.keys())
expected_keys = set(self.feed_tensors.keys())
unexpected_keys = input_keys - expected_keys
if unexpected_keys:
raise ValueError(
'Got unexpected keys in input_dict: {}\nexpected: {}'.format(
unexpected_keys, expected_keys))
feed_dict = {}
for key in self.feed_tensors.keys():
value = input_dict.get(key)
if value is not None:
feed_dict[self.feed_tensors[key]] = value
return self._session.run(fetches=self.fetch_tensors, feed_dict=feed_dict)
|
tensorflow-master
|
tensorflow/contrib/predictor/predictor.py
|
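To make the base-class contract above concrete, here is a toy subclass that wires its own graph, session, and feed/fetch dictionaries. It is purely illustrative and not part of the library; real code normally goes through the factory functions in predictor_factories.py.

import numpy as np
import tensorflow as tf
from tensorflow.contrib.predictor import predictor


class _DoublingPredictor(predictor.Predictor):
  """Illustrative subclass: doubles its single input tensor."""

  def __init__(self):
    self._graph = tf.Graph()
    with self._graph.as_default():
      x = tf.placeholder(tf.float32, shape=[None], name='x')
      y = tf.multiply(x, 2.0, name='y')
      self._session = tf.Session(graph=self._graph)
    self._feed_tensors = {'x': x}
    self._fetch_tensors = {'y': y}


pred = _DoublingPredictor()
print(pred({'x': np.array([1.0, 2.0])}))  # {'y': array([2., 4.], dtype=float32)}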
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Factory functions for `Predictor`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.predictor import contrib_estimator_predictor
from tensorflow.contrib.predictor import core_estimator_predictor
from tensorflow.contrib.predictor import saved_model_predictor
from tensorflow.contrib.learn.python.learn.estimators import estimator as contrib_estimator
from tensorflow.python.estimator import estimator as core_estimator
def from_contrib_estimator(estimator,
prediction_input_fn,
input_alternative_key=None,
output_alternative_key=None,
graph=None,
config=None):
"""Constructs a `Predictor` from a `tf.contrib.learn.Estimator`.
Args:
estimator: an instance of `tf.contrib.learn.Estimator`.
prediction_input_fn: a function that takes no arguments and returns an
instance of `InputFnOps`.
input_alternative_key: Optional. Specify the input alternative used for
prediction.
output_alternative_key: Specify the output alternative used for
prediction. Not needed for single-headed models but required for
multi-headed models.
graph: Optional. The Tensorflow `graph` in which prediction should be
done.
config: `ConfigProto` proto used to configure the session.
Returns:
An initialized `Predictor`.
Raises:
TypeError: if `estimator` is a core `Estimator` instead of a contrib
`Estimator`.
"""
if isinstance(estimator, core_estimator.Estimator):
raise TypeError('Expected estimator to be of type '
'tf.contrib.learn.Estimator, but got type '
'tf.python.estimator.Estimator. You likely want to call '
'from_estimator.')
return contrib_estimator_predictor.ContribEstimatorPredictor(
estimator,
prediction_input_fn,
input_alternative_key=input_alternative_key,
output_alternative_key=output_alternative_key,
graph=graph,
config=config)
def from_estimator(estimator,
serving_input_receiver_fn,
output_key=None,
graph=None,
config=None):
"""Constructs a `Predictor` from a `tf.python.estimator.Estimator`.
Args:
estimator: an instance of `learn.python.estimator.Estimator`.
serving_input_receiver_fn: a function that takes no arguments and returns
an instance of `ServingInputReceiver` compatible with `estimator`.
output_key: Optional string specifying the export output to use. If
`None`, then `DEFAULT_SERVING_SIGNATURE_DEF_KEY` is used.
graph: Optional. The Tensorflow `graph` in which prediction should be
done.
config: `ConfigProto` proto used to configure the session.
Returns:
An initialized `Predictor`.
Raises:
TypeError: if `estimator` is a contrib `Estimator` instead of a core
`Estimator`.
"""
if isinstance(estimator, contrib_estimator.Estimator):
raise TypeError('Expected estimator to be of type '
'tf.python.estimator.Estimator, but got type '
'tf.contrib.learn.Estimator. You likely want to call '
'from_contrib_estimator.')
return core_estimator_predictor.CoreEstimatorPredictor(
estimator,
serving_input_receiver_fn,
output_key=output_key,
graph=graph,
config=config)
def from_saved_model(export_dir,
signature_def_key=None,
signature_def=None,
input_names=None,
output_names=None,
tags=None,
graph=None,
config=None):
"""Constructs a `Predictor` from a `SavedModel` on disk.
Args:
export_dir: a path to a directory containing a `SavedModel`.
    signature_def_key: Optional string specifying the signature to use. If
      `None`, then `DEFAULT_SERVING_SIGNATURE_DEF_KEY` is used. Only one of
      `signature_def_key` and `signature_def` should be specified.
signature_def: A `SignatureDef` proto specifying the inputs and outputs
for prediction. Only one of `signature_def_key` and `signature_def`
should be specified.
input_names: A dictionary mapping strings to `Tensor`s in the `SavedModel`
that represent the input. The keys can be any string of the user's
choosing.
output_names: A dictionary mapping strings to `Tensor`s in the
`SavedModel` that represent the output. The keys can be any string of
the user's choosing.
tags: Optional. Tags that will be used to retrieve the correct
`SignatureDef`. Defaults to `DEFAULT_TAGS`.
graph: Optional. The Tensorflow `graph` in which prediction should be
done.
config: `ConfigProto` proto used to configure the session.
Returns:
An initialized `Predictor`.
Raises:
ValueError: More than one of `signature_def_key` and `signature_def` is
specified.
"""
return saved_model_predictor.SavedModelPredictor(
export_dir,
signature_def_key=signature_def_key,
signature_def=signature_def,
input_names=input_names,
output_names=output_names,
tags=tags,
graph=graph,
config=config)
|
tensorflow-master
|
tensorflow/contrib/predictor/predictor_factories.py
|
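A hedged sketch of the factory most users reach for, from_saved_model. The export directory and the 'inputs'/'scores' keys are placeholders; they depend entirely on how the SavedModel was exported.

from tensorflow.contrib import predictor

pred = predictor.from_saved_model('/tmp/my_saved_model')  # placeholder path
outputs = pred({'inputs': [[1.0, 2.0, 3.0]]})  # keys must match the signature
print(outputs['scores'])                       # placeholder output key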
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for predictor.contrib_estimator_predictor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
from tensorflow.contrib.predictor import contrib_estimator_predictor
from tensorflow.contrib.predictor import testing_common
from tensorflow.python.platform import test
KEYS_AND_OPS = (('sum', lambda x, y: x + y),
('product', lambda x, y: x * y,),
('difference', lambda x, y: x - y))
class ContribEstimatorPredictorTest(test.TestCase):
"""Test fixture for `ContribEstimatorPredictor`."""
def setUp(self):
model_dir = tempfile.mkdtemp()
self._estimator = testing_common.get_arithmetic_estimator(
core=False, model_dir=model_dir)
self._prediction_input_fn = testing_common.get_arithmetic_input_fn(
core=False, train=False)
def testSpecifiedSignatureKey(self):
"""Test prediction with spedicified signatures."""
np.random.seed(1234)
for key, op in KEYS_AND_OPS:
x = np.random.rand()
y = np.random.rand()
expected_output = op(x, y)
predictor = contrib_estimator_predictor.ContribEstimatorPredictor(
estimator=self._estimator,
prediction_input_fn=self._prediction_input_fn,
output_alternative_key=key)
output_tensor_name = predictor.fetch_tensors[key].name
self.assertRegexpMatches(
output_tensor_name,
key,
msg='Unexpected fetch tensor.')
output = predictor({'x': x, 'y': y})[key]
self.assertAlmostEqual(
expected_output, output, places=3,
msg='Failed for output key "{}." '
'Got output {} for x = {} and y = {}'.format(
key, output, x, y))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/predictor/contrib_estimator_predictor_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `Predictor constructed from a `tf.contrib.learn.Estimator`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.predictor import predictor
from tensorflow.python.framework import ops
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import monitored_session
class ContribEstimatorPredictor(predictor.Predictor):
"""A `Predictor constructed from a `tf.contrib.learn.Estimator`."""
def __init__(self,
estimator,
prediction_input_fn,
input_alternative_key=None,
output_alternative_key=None,
graph=None,
config=None):
"""Initialize a `ContribEstimatorPredictor`.
Args:
estimator: an instance of `tf.contrib.learn.Estimator`.
prediction_input_fn: a function that takes no arguments and returns an
instance of `InputFnOps`.
input_alternative_key: Optional. Specify the input alternative used for
prediction.
output_alternative_key: Specify the output alternative used for
prediction. Not needed for single-headed models but required for
multi-headed models.
graph: Optional. The Tensorflow `graph` in which prediction should be
done.
config: `ConfigProto` proto used to configure the session.
"""
self._graph = graph or ops.Graph()
with self._graph.as_default():
input_fn_ops = prediction_input_fn()
# pylint: disable=protected-access
model_fn_ops = estimator._get_predict_ops(input_fn_ops.features)
# pylint: enable=protected-access
checkpoint_path = checkpoint_management.latest_checkpoint(
estimator.model_dir)
self._session = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
config=config,
checkpoint_filename_with_path=checkpoint_path))
input_alternative_key = (
input_alternative_key or
saved_model_export_utils.DEFAULT_INPUT_ALTERNATIVE_KEY)
input_alternatives, _ = saved_model_export_utils.get_input_alternatives(
input_fn_ops)
self._feed_tensors = input_alternatives[input_alternative_key]
(output_alternatives,
output_alternative_key) = saved_model_export_utils.get_output_alternatives(
model_fn_ops, output_alternative_key)
_, fetch_tensors = output_alternatives[output_alternative_key]
self._fetch_tensors = fetch_tensors
|
tensorflow-master
|
tensorflow/contrib/predictor/contrib_estimator_predictor.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `Predictor` constructed from an `learn.python.estimator.Estimator`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.predictor import predictor
from tensorflow.python.estimator import model_fn
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import monitored_session
def _get_signature_def(
serving_input_receiver, estimator, output_key=None):
"""Construct a `SignatureDef` proto."""
if output_key is None:
output_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
# pylint: disable=protected-access
estimator_spec = estimator.model_fn(
serving_input_receiver.features, None, model_fn.ModeKeys.PREDICT,
estimator.config)
# pylint: enable=protected-access
export_outputs = estimator_spec.export_outputs
export_output = export_outputs.get(output_key)
if export_output is None:
raise KeyError('output_key must be one of {}; got {}'.format(
export_outputs.keys(), output_key))
return export_output.as_signature_def(serving_input_receiver.receiver_tensors)
class CoreEstimatorPredictor(predictor.Predictor):
"""A `Predictor` constructed from an `learn.python.estimator.Estimator`."""
def __init__(self,
estimator,
serving_input_receiver_fn,
output_key=None,
graph=None,
config=None):
"""Initialize a `CoreEstimatorPredictor`.
Args:
estimator: an instance of `learn.python.estimator.Estimator`.
serving_input_receiver_fn: a function that takes no arguments and returns
an instance of `ServingInputReceiver` compatible with `estimator`.
output_key: Optional string specifying the export output to use. If
`None`, then `DEFAULT_SERVING_SIGNATURE_DEF_KEY` is used.
graph: Optional. The Tensorflow `graph` in which prediction should be
done.
config: `ConfigProto` proto used to configure the session.
"""
self._graph = graph or ops.Graph()
with self._graph.as_default():
serving_input_receiver = serving_input_receiver_fn()
signature_def = _get_signature_def(
serving_input_receiver, estimator, output_key)
checkpoint_dir = estimator.model_dir
self._session = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
config=config,
checkpoint_dir=checkpoint_dir))
feed_tensor_info = signature_def.inputs
self._feed_tensors = {k: self._graph.get_tensor_by_name(v.name)
for k, v in feed_tensor_info.items()}
fetch_tensor_info = signature_def.outputs
self._fetch_tensors = {k: self._graph.get_tensor_by_name(v.name)
for k, v in fetch_tensor_info.items()}
|
tensorflow-master
|
tensorflow/contrib/predictor/core_estimator_predictor.py
|
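The class above is usually reached through predictor.from_estimator. The sketch below is self-contained but deliberately minimal and only an assumption-laden illustration: the model_fn has no variables and simply doubles the assumed feature 'x', so no training or checkpoint is required.

import tensorflow as tf
from tensorflow.contrib import predictor


def model_fn(features, labels, mode):
  """Toy model: no variables, doubles the input feature 'x'."""
  del labels
  predictions = {'doubled': 2.0 * features['x']}
  return tf.estimator.EstimatorSpec(
      mode,
      predictions=predictions,
      export_outputs={
          'serving_default': tf.estimator.export.PredictOutput(predictions)
      })


def serving_input_receiver_fn():
  x = tf.placeholder(dtype=tf.float32, shape=[None, 1], name='x')
  return tf.estimator.export.ServingInputReceiver({'x': x}, {'x': x})


estimator = tf.estimator.Estimator(model_fn)
pred = predictor.from_estimator(estimator, serving_input_receiver_fn)
print(pred({'x': [[1.0], [2.0]]}))  # {'doubled': array([[2.], [4.]], ...)}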
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for predictor.predictor_factories."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.predictor import predictor_factories
from tensorflow.contrib.predictor import testing_common
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.platform import test
MODEL_DIR_NAME = 'contrib/predictor/test_export_dir'
class PredictorFactoriesTest(test.TestCase):
@classmethod
def setUpClass(cls):
# Load a saved model exported from the arithmetic `Estimator`.
# See `testing_common.py`.
cls._export_dir = test.test_src_dir_path(MODEL_DIR_NAME)
def testFromSavedModel(self):
"""Test loading from_saved_model."""
predictor_factories.from_saved_model(self._export_dir)
def testFromSavedModelWithTags(self):
"""Test loading from_saved_model with tags."""
predictor_factories.from_saved_model(self._export_dir, tags='serve')
def testFromSavedModelWithSessionConfig(self):
"""Test loading from_saved_model with session config."""
predictor_factories.from_saved_model(
self._export_dir, config=config_pb2.ConfigProto())
def testFromSavedModelWithBadTags(self):
"""Test that loading fails for bad tags."""
bad_tags_regex = ('.*? could not be found in SavedModel')
with self.assertRaisesRegexp(RuntimeError, bad_tags_regex):
predictor_factories.from_saved_model(self._export_dir, tags='bad_tag')
def testFromContribEstimator(self):
estimator = testing_common.get_arithmetic_estimator(core=False)
input_fn = testing_common.get_arithmetic_input_fn(core=False)
predictor_factories.from_contrib_estimator(
estimator, input_fn, output_alternative_key='sum')
def testFromContribEstimatorWithSessionConfig(self):
estimator = testing_common.get_arithmetic_estimator(core=False)
input_fn = testing_common.get_arithmetic_input_fn(core=False)
predictor_factories.from_contrib_estimator(
estimator, input_fn, output_alternative_key='sum',
config=config_pb2.ConfigProto())
def testFromContribEstimatorWithCoreEstimatorRaises(self):
estimator = testing_common.get_arithmetic_estimator(core=True)
input_fn = testing_common.get_arithmetic_input_fn(core=True)
with self.assertRaises(TypeError):
predictor_factories.from_contrib_estimator(estimator, input_fn)
def testFromCoreEstimator(self):
estimator = testing_common.get_arithmetic_estimator(core=True)
input_fn = testing_common.get_arithmetic_input_fn(core=True)
predictor_factories.from_estimator(estimator, input_fn)
def testFromCoreEstimatorWithSessionConfig(self):
estimator = testing_common.get_arithmetic_estimator(core=True)
input_fn = testing_common.get_arithmetic_input_fn(core=True)
predictor_factories.from_estimator(
estimator, input_fn, config=config_pb2.ConfigProto())
def testFromCoreEstimatorWithContribEstimatorRaises(self):
estimator = testing_common.get_arithmetic_estimator(core=False)
input_fn = testing_common.get_arithmetic_input_fn(core=False)
with self.assertRaises(TypeError):
predictor_factories.from_estimator(estimator, input_fn)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/predictor/predictor_factories_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Modules for `Predictor`s.
@@from_contrib_estimator
@@from_estimator
@@from_saved_model
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.predictor.predictor_factories import from_contrib_estimator
from tensorflow.contrib.predictor.predictor_factories import from_estimator
from tensorflow.contrib.predictor.predictor_factories import from_saved_model
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
tensorflow-master
|
tensorflow/contrib/predictor/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `Predictor` constructed from a `SavedModel`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from tensorflow.contrib.predictor import predictor
from tensorflow.contrib.saved_model.python.saved_model import reader
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import signature_constants
DEFAULT_TAGS = 'serve'
_DEFAULT_INPUT_ALTERNATIVE_FORMAT = 'default_input_alternative:{}'
def get_meta_graph_def(saved_model_dir, tags):
"""Gets `MetaGraphDef` from a directory containing a `SavedModel`.
Returns the `MetaGraphDef` for the given tag-set and SavedModel directory.
Args:
saved_model_dir: Directory containing the SavedModel.
tags: Comma separated list of tags used to identify the correct
`MetaGraphDef`.
Raises:
ValueError: An error when the given tags cannot be found.
Returns:
A `MetaGraphDef` corresponding to the given tags.
"""
saved_model = reader.read_saved_model(saved_model_dir)
set_of_tags = set([tag.strip() for tag in tags.split(',')])
for meta_graph_def in saved_model.meta_graphs:
if set(meta_graph_def.meta_info_def.tags) == set_of_tags:
return meta_graph_def
raise ValueError('Could not find MetaGraphDef with tags {}'.format(tags))
def _get_signature_def(signature_def_key, export_dir, tags):
"""Construct a `SignatureDef` proto."""
signature_def_key = (
signature_def_key or
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)
metagraph_def = get_meta_graph_def(export_dir, tags)
try:
signature_def = metagraph_def.signature_def[signature_def_key]
except KeyError as e:
formatted_key = _DEFAULT_INPUT_ALTERNATIVE_FORMAT.format(
signature_def_key)
try:
signature_def = metagraph_def.signature_def[formatted_key]
except KeyError:
raise ValueError(
'Got signature_def_key "{}". Available signatures are {}. '
'Original error:\n{}'.format(
signature_def_key, list(metagraph_def.signature_def), e))
logging.warning('Could not find signature def "%s". '
'Using "%s" instead', signature_def_key, formatted_key)
return signature_def
def _check_signature_arguments(signature_def_key,
signature_def,
input_names,
output_names):
"""Validates signature arguments for `SavedModelPredictor`."""
signature_def_key_specified = signature_def_key is not None
signature_def_specified = signature_def is not None
input_names_specified = input_names is not None
output_names_specified = output_names is not None
if input_names_specified != output_names_specified:
raise ValueError(
'input_names and output_names must both be specified or both be '
'unspecified.'
)
if (signature_def_key_specified + signature_def_specified +
input_names_specified > 1):
raise ValueError(
        'You must specify at most one of signature_def_key OR signature_def OR '
        '(input_names AND output_names).'
)
class SavedModelPredictor(predictor.Predictor):
"""A `Predictor` constructed from a `SavedModel`."""
def __init__(self,
export_dir,
signature_def_key=None,
signature_def=None,
input_names=None,
output_names=None,
tags=None,
graph=None,
config=None):
"""Initialize a `CoreEstimatorPredictor`.
Args:
export_dir: a path to a directory containing a `SavedModel`.
signature_def_key: Optional string specifying the signature to use. If
`None`, then `DEFAULT_SERVING_SIGNATURE_DEF_KEY` is used. Only one of
`signature_def_key` and `signature_def` should be specified.
signature_def: A `SignatureDef` proto specifying the inputs and outputs
for prediction. Only one of `signature_def_key` and `signature_def`
should be specified.
      input_names: A dictionary mapping strings to the names of `Tensor`s in
        the `SavedModel` that represent the inputs. The keys can be any string
        of the user's choosing.
      output_names: A dictionary mapping strings to the names of `Tensor`s in
        the `SavedModel` that represent the outputs. The keys can be any string
        of the user's choosing.
tags: Optional. Comma separated list of tags that will be used to retrieve
the correct `SignatureDef`. Defaults to `DEFAULT_TAGS`.
graph: Optional. The Tensorflow `graph` in which prediction should be
done.
config: `ConfigProto` proto used to configure the session.
Raises:
ValueError: If more than one of signature_def_key OR signature_def OR
(input_names AND output_names) is specified.
"""
_check_signature_arguments(
signature_def_key, signature_def, input_names, output_names)
tags = tags or DEFAULT_TAGS
self._graph = graph or ops.Graph()
with self._graph.as_default():
self._session = session.Session(config=config)
loader.load(self._session, tags.split(','), export_dir)
if input_names is None:
if signature_def is None:
signature_def = _get_signature_def(signature_def_key, export_dir, tags)
input_names = {k: v.name for k, v in signature_def.inputs.items()}
output_names = {k: v.name for k, v in signature_def.outputs.items()}
self._feed_tensors = {k: self._graph.get_tensor_by_name(v)
for k, v in input_names.items()}
self._fetch_tensors = {k: self._graph.get_tensor_by_name(v)
for k, v in output_names.items()}
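# Editorial note: a minimal usage sketch, not part of the original module.
# The export directory and the 'x'/'y' feed keys are assumptions; they must
# match a real SavedModel and its serving SignatureDef.
if __name__ == '__main__':
  sketch_predictor = SavedModelPredictor(export_dir='/tmp/arithmetic_export')
  print(sketch_predictor.feed_tensors)   # signature input keys -> graph Tensors
  print(sketch_predictor.fetch_tensors)  # signature output keys -> graph Tensors
  print(sketch_predictor({'x': 3.0, 'y': 4.0}))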
|
tensorflow-master
|
tensorflow/contrib/predictor/saved_model_predictor.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common code used for testing `Predictor`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator as contrib_estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn as contrib_model_fn
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.python.estimator import estimator as core_estimator
from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator.export import export_lib
from tensorflow.python.estimator.export import export_output
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.saved_model import signature_constants
def get_arithmetic_estimator(core=True, model_dir=None):
"""Returns an `Estimator` that performs basic arithmetic.
Args:
core: if `True`, returns a `tensorflow.python.estimator.Estimator`.
Otherwise, returns a `tensorflow.contrib.learn.Estimator`.
model_dir: directory in which to export checkpoints and saved models.
Returns:
An `Estimator` that performs arithmetic operations on its inputs.
"""
def _model_fn(features, labels, mode):
_ = labels
x = features['x']
y = features['y']
with ops.name_scope('outputs'):
predictions = {'sum': math_ops.add(x, y, name='sum'),
'product': math_ops.multiply(x, y, name='product'),
'difference': math_ops.subtract(x, y, name='difference')}
if core:
export_outputs = {k: export_output.PredictOutput({k: v})
for k, v in predictions.items()}
export_outputs[signature_constants.
DEFAULT_SERVING_SIGNATURE_DEF_KEY] = export_outputs['sum']
return model_fn.EstimatorSpec(mode=mode,
predictions=predictions,
export_outputs=export_outputs,
loss=constant_op.constant(0),
train_op=control_flow_ops.no_op())
else:
output_alternatives = {k: (constants.ProblemType.UNSPECIFIED, {k: v})
for k, v in predictions.items()}
return contrib_model_fn.ModelFnOps(
mode=mode,
predictions=predictions,
output_alternatives=output_alternatives,
loss=constant_op.constant(0),
train_op=control_flow_ops.no_op())
if core:
return core_estimator.Estimator(_model_fn)
else:
return contrib_estimator.Estimator(_model_fn, model_dir=model_dir)
def get_arithmetic_input_fn(core=True, train=False):
"""Returns a input functions or serving input receiver function."""
def _input_fn():
with ops.name_scope('inputs'):
x = array_ops.placeholder_with_default(0.0, shape=[], name='x')
y = array_ops.placeholder_with_default(0.0, shape=[], name='y')
label = constant_op.constant(0.0)
features = {'x': x, 'y': y}
if core:
if train:
return features, label
return export_lib.ServingInputReceiver(
features=features,
receiver_tensors=features)
else:
if train:
return features, label
return input_fn_utils.InputFnOps(
features=features,
labels={},
default_inputs=features)
return _input_fn
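# Editorial note: a short sketch of how these helpers are typically wired
# together (mirroring core_estimator_predictor_test.py); not part of the
# original module. The model_dir value is an assumption.
if __name__ == '__main__':
  from tensorflow.contrib.predictor import core_estimator_predictor
  sketch_estimator = get_arithmetic_estimator(core=True, model_dir='/tmp/arith')
  sketch_input_fn = get_arithmetic_input_fn(core=True, train=False)
  sketch_predictor = core_estimator_predictor.CoreEstimatorPredictor(
      estimator=sketch_estimator,
      serving_input_receiver_fn=sketch_input_fn)
  print(sketch_predictor({'x': 2.0, 'y': 3.0})['sum'])  # expect roughly 5.0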
|
tensorflow-master
|
tensorflow/contrib/predictor/testing_common.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for predictor.core_estimator_predictor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
from tensorflow.contrib.predictor import core_estimator_predictor
from tensorflow.contrib.predictor import testing_common
from tensorflow.python.platform import test
KEYS_AND_OPS = (('sum', lambda x, y: x + y),
('product', lambda x, y: x * y,),
('difference', lambda x, y: x - y))
class CoreEstimatorPredictorTest(test.TestCase):
"""Test fixture for `CoreEstimatorPredictor`."""
def setUp(self):
model_dir = tempfile.mkdtemp()
self._estimator = testing_common.get_arithmetic_estimator(
core=True, model_dir=model_dir)
self._serving_input_receiver_fn = testing_common.get_arithmetic_input_fn(
core=True, train=False)
def testDefault(self):
"""Test prediction with default signature."""
np.random.seed(1111)
x = np.random.rand()
y = np.random.rand()
predictor = core_estimator_predictor.CoreEstimatorPredictor(
estimator=self._estimator,
serving_input_receiver_fn=self._serving_input_receiver_fn)
output = predictor({'x': x, 'y': y})['sum']
self.assertAlmostEqual(output, x + y, places=3)
def testSpecifiedSignatureKey(self):
"""Test prediction with spedicified signatures."""
np.random.seed(1234)
for output_key, op in KEYS_AND_OPS:
x = np.random.rand()
y = np.random.rand()
expected_output = op(x, y)
predictor = core_estimator_predictor.CoreEstimatorPredictor(
estimator=self._estimator,
serving_input_receiver_fn=self._serving_input_receiver_fn,
output_key=output_key)
output_tensor_name = predictor.fetch_tensors[output_key].name
self.assertRegexpMatches(
output_tensor_name,
output_key,
msg='Unexpected fetch tensor.')
output = predictor({'x': x, 'y': y})[output_key]
self.assertAlmostEqual(
expected_output, output, places=3,
msg='Failed for output key "{}." '
'Got output {} for x = {} and y = {}'.format(
output_key, output, x, y))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/predictor/core_estimator_predictor_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for predictor.saved_model_predictor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.predictor import saved_model_predictor
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_def_utils
KEYS_AND_OPS = (('sum', lambda x, y: x + y),
('product', lambda x, y: x * y,),
('difference', lambda x, y: x - y))
MODEL_DIR_NAME = 'contrib/predictor/test_export_dir'
class SavedModelPredictorTest(test.TestCase):
@classmethod
def setUpClass(cls):
# Load a saved model exported from the arithmetic `Estimator`.
# See `testing_common.py`.
cls._export_dir = test.test_src_dir_path(MODEL_DIR_NAME)
def testDefault(self):
"""Test prediction with default signature."""
np.random.seed(1111)
x = np.random.rand()
y = np.random.rand()
predictor = saved_model_predictor.SavedModelPredictor(
export_dir=self._export_dir)
output = predictor({'x': x, 'y': y})['outputs']
self.assertAlmostEqual(output, x + y, places=3)
def testSpecifiedSignatureKey(self):
"""Test prediction with spedicified signature key."""
np.random.seed(1234)
for signature_def_key, op in KEYS_AND_OPS:
x = np.random.rand()
y = np.random.rand()
expected_output = op(x, y)
predictor = saved_model_predictor.SavedModelPredictor(
export_dir=self._export_dir,
signature_def_key=signature_def_key)
output_tensor_name = predictor.fetch_tensors['outputs'].name
self.assertRegexpMatches(
output_tensor_name,
signature_def_key,
msg='Unexpected fetch tensor.')
output = predictor({'x': x, 'y': y})['outputs']
self.assertAlmostEqual(
expected_output, output, places=3,
msg='Failed for signature "{}." '
'Got output {} for x = {} and y = {}'.format(
signature_def_key, output, x, y))
def testSpecifiedSignature(self):
"""Test prediction with spedicified signature definition."""
np.random.seed(4444)
for key, op in KEYS_AND_OPS:
x = np.random.rand()
y = np.random.rand()
expected_output = op(x, y)
inputs = {
'x': meta_graph_pb2.TensorInfo(
name='inputs/x:0',
dtype=types_pb2.DT_FLOAT,
tensor_shape=tensor_shape_pb2.TensorShapeProto()),
'y': meta_graph_pb2.TensorInfo(
name='inputs/y:0',
dtype=types_pb2.DT_FLOAT,
tensor_shape=tensor_shape_pb2.TensorShapeProto())}
outputs = {
key: meta_graph_pb2.TensorInfo(
name='outputs/{}:0'.format(key),
dtype=types_pb2.DT_FLOAT,
tensor_shape=tensor_shape_pb2.TensorShapeProto())}
signature_def = signature_def_utils.build_signature_def(
inputs=inputs,
outputs=outputs,
method_name='tensorflow/serving/regress')
predictor = saved_model_predictor.SavedModelPredictor(
export_dir=self._export_dir,
signature_def=signature_def)
output_tensor_name = predictor.fetch_tensors[key].name
self.assertRegexpMatches(
output_tensor_name,
key,
msg='Unexpected fetch tensor.')
output = predictor({'x': x, 'y': y})[key]
self.assertAlmostEqual(
expected_output, output, places=3,
msg='Failed for signature "{}". '
'Got output {} for x = {} and y = {}'.format(key, output, x, y))
def testSpecifiedTensors(self):
"""Test prediction with spedicified `Tensor`s."""
np.random.seed(987)
for key, op in KEYS_AND_OPS:
x = np.random.rand()
y = np.random.rand()
expected_output = op(x, y)
input_names = {'x': 'inputs/x:0',
'y': 'inputs/y:0'}
output_names = {key: 'outputs/{}:0'.format(key)}
predictor = saved_model_predictor.SavedModelPredictor(
export_dir=self._export_dir,
input_names=input_names,
output_names=output_names)
output_tensor_name = predictor.fetch_tensors[key].name
self.assertRegexpMatches(
output_tensor_name,
key,
msg='Unexpected fetch tensor.')
output = predictor({'x': x, 'y': y})[key]
self.assertAlmostEqual(
expected_output, output, places=3,
msg='Failed for signature "{}". '
'Got output {} for x = {} and y = {}'.format(key, output, x, y))
def testBadTagsFail(self):
"""Test that predictor construction fails for bad tags."""
bad_tags_regex = ('.* could not be found in SavedModel')
with self.assertRaisesRegexp(RuntimeError, bad_tags_regex):
_ = saved_model_predictor.SavedModelPredictor(
export_dir=self._export_dir,
tags=('zomg, bad, tags'))
def testSpecifiedGraph(self):
"""Test that the predictor remembers a specified `Graph`."""
g = ops.Graph()
predictor = saved_model_predictor.SavedModelPredictor(
export_dir=self._export_dir,
graph=g)
self.assertEqual(predictor.graph, g)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/predictor/saved_model_predictor_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Random forest implementation in tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.tensor_forest.client import *
from tensorflow.contrib.tensor_forest.python import *
# pylint: enable=unused-import,wildcard-import
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extremely random forest graph builder. go/brain-tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numbers
import random
from google.protobuf import text_format
from tensorflow.contrib.decision_trees.proto import generic_tree_model_pb2 as _tree_proto
from tensorflow.contrib.framework.python.ops import variables as framework_variables
from tensorflow.contrib.tensor_forest.proto import tensor_forest_params_pb2 as _params_proto
from tensorflow.contrib.tensor_forest.python.ops import data_ops
from tensorflow.contrib.tensor_forest.python.ops import model_ops
from tensorflow.contrib.tensor_forest.python.ops import stats_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
# Stores tuples of (leaf model type, stats model type)
CLASSIFICATION_LEAF_MODEL_TYPES = {
'all_dense': (_params_proto.MODEL_DENSE_CLASSIFICATION,
_params_proto.STATS_DENSE_GINI),
'all_sparse': (_params_proto.MODEL_SPARSE_CLASSIFICATION,
_params_proto.STATS_SPARSE_GINI),
'sparse_then_dense': (_params_proto.MODEL_SPARSE_OR_DENSE_CLASSIFICATION,
_params_proto.STATS_SPARSE_THEN_DENSE_GINI),
}
REGRESSION_MODEL_TYPE = (_params_proto.MODEL_REGRESSION,
_params_proto.STATS_LEAST_SQUARES_REGRESSION,
_params_proto.COLLECTION_BASIC)
FINISH_TYPES = {
'basic': _params_proto.SPLIT_FINISH_BASIC,
'hoeffding': _params_proto.SPLIT_FINISH_DOMINATE_HOEFFDING,
'bootstrap': _params_proto.SPLIT_FINISH_DOMINATE_BOOTSTRAP
}
PRUNING_TYPES = {
'none': _params_proto.SPLIT_PRUNE_NONE,
'half': _params_proto.SPLIT_PRUNE_HALF,
'quarter': _params_proto.SPLIT_PRUNE_QUARTER,
'10_percent': _params_proto.SPLIT_PRUNE_10_PERCENT,
'hoeffding': _params_proto.SPLIT_PRUNE_HOEFFDING,
}
SPLIT_TYPES = {
'less_or_equal': _tree_proto.InequalityTest.LESS_OR_EQUAL,
'less': _tree_proto.InequalityTest.LESS_THAN
}
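# Used by build_params_proto below for parameters that may be given either as
# plain numbers (stored as constant values), digit strings (converted to int),
# or text-format protos (e.g. depth-dependent schedules) merged into `proto`.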
def parse_number_or_string_to_proto(proto, param):
if isinstance(param, numbers.Number):
proto.constant_value = param
else: # assume it's a string
if param.isdigit():
proto.constant_value = int(param)
else:
text_format.Merge(param, proto)
def build_params_proto(params):
"""Build a TensorForestParams proto out of the V4ForestHParams object."""
proto = _params_proto.TensorForestParams()
proto.num_trees = params.num_trees
proto.max_nodes = params.max_nodes
proto.is_regression = params.regression
proto.num_outputs = params.num_classes
proto.num_features = params.num_features
proto.leaf_type = params.leaf_model_type
proto.stats_type = params.stats_model_type
proto.collection_type = _params_proto.COLLECTION_BASIC
proto.pruning_type.type = params.pruning_type
proto.finish_type.type = params.finish_type
proto.inequality_test_type = params.split_type
proto.drop_final_class = False
proto.collate_examples = params.collate_examples
proto.checkpoint_stats = params.checkpoint_stats
proto.use_running_stats_method = params.use_running_stats_method
proto.initialize_average_splits = params.initialize_average_splits
proto.inference_tree_paths = params.inference_tree_paths
parse_number_or_string_to_proto(proto.pruning_type.prune_every_samples,
params.prune_every_samples)
parse_number_or_string_to_proto(proto.finish_type.check_every_steps,
params.early_finish_check_every_samples)
parse_number_or_string_to_proto(proto.split_after_samples,
params.split_after_samples)
parse_number_or_string_to_proto(proto.num_splits_to_consider,
params.num_splits_to_consider)
proto.dominate_fraction.constant_value = params.dominate_fraction
if params.param_file:
with open(params.param_file) as f:
text_format.Merge(f.read(), proto)
return proto
# A convenience class for holding random forest hyperparameters.
#
# To just get some good default parameters, use:
# hparams = ForestHParams(num_classes=2, num_features=40).fill()
#
# Note that num_classes can not be inferred and so must always be specified.
# Also, either num_splits_to_consider or num_features should be set.
#
# To override specific values, pass them to the constructor:
# hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill()
#
# TODO(thomaswc): Inherit from tf.HParams when that is publicly available.
class ForestHParams(object):
"""A base class for holding hyperparameters and calculating good defaults."""
def __init__(
self,
num_trees=100,
max_nodes=10000,
bagging_fraction=1.0,
num_splits_to_consider=0,
feature_bagging_fraction=1.0,
max_fertile_nodes=0, # deprecated, unused.
split_after_samples=250,
valid_leaf_threshold=1,
dominate_method='bootstrap',
dominate_fraction=0.99,
model_name='all_dense',
split_finish_name='basic',
split_pruning_name='none',
prune_every_samples=0,
early_finish_check_every_samples=0,
collate_examples=False,
checkpoint_stats=False,
use_running_stats_method=False,
initialize_average_splits=False,
inference_tree_paths=False,
param_file=None,
split_name='less_or_equal',
**kwargs):
self.num_trees = num_trees
self.max_nodes = max_nodes
self.bagging_fraction = bagging_fraction
self.feature_bagging_fraction = feature_bagging_fraction
self.num_splits_to_consider = num_splits_to_consider
self.max_fertile_nodes = max_fertile_nodes
self.split_after_samples = split_after_samples
self.valid_leaf_threshold = valid_leaf_threshold
self.dominate_method = dominate_method
self.dominate_fraction = dominate_fraction
self.model_name = model_name
self.split_finish_name = split_finish_name
self.split_pruning_name = split_pruning_name
self.collate_examples = collate_examples
self.checkpoint_stats = checkpoint_stats
self.use_running_stats_method = use_running_stats_method
self.initialize_average_splits = initialize_average_splits
self.inference_tree_paths = inference_tree_paths
self.param_file = param_file
self.split_name = split_name
self.early_finish_check_every_samples = early_finish_check_every_samples
self.prune_every_samples = prune_every_samples
for name, value in kwargs.items():
setattr(self, name, value)
def values(self):
return self.__dict__
def fill(self):
"""Intelligently sets any non-specific parameters."""
# Fail fast if num_classes or num_features isn't set.
_ = getattr(self, 'num_classes')
_ = getattr(self, 'num_features')
self.bagged_num_features = int(self.feature_bagging_fraction *
self.num_features)
self.bagged_features = None
if self.feature_bagging_fraction < 1.0:
self.bagged_features = [
random.sample(range(self.num_features), self.bagged_num_features)
for _ in range(self.num_trees)
]
self.regression = getattr(self, 'regression', False)
# Num_outputs is the actual number of outputs (a single prediction for
    # classification, an N-dimensional point for regression).
self.num_outputs = self.num_classes if self.regression else 1
# Add an extra column to classes for storing counts, which is needed for
# regression and avoids having to recompute sums for classification.
self.num_output_columns = self.num_classes + 1
# Our experiments have found that num_splits_to_consider = num_features
# gives good accuracy.
self.num_splits_to_consider = self.num_splits_to_consider or min(
max(10, math.floor(math.sqrt(self.num_features))), 1000)
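    # For example (matching tensor_forest_test.py): num_features=60 yields
    # max(10, floor(sqrt(60))) = max(10, 7) = 10 splits to consider, while
    # num_features=1000 yields max(10, 31) = 31.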
# If base_random_seed is 0, the current time will be used to seed the
# random number generators for each tree. If non-zero, the i-th tree
# will be seeded with base_random_seed + i.
self.base_random_seed = getattr(self, 'base_random_seed', 0)
# How to store leaf models.
self.leaf_model_type = (
REGRESSION_MODEL_TYPE[0] if self.regression else
CLASSIFICATION_LEAF_MODEL_TYPES[self.model_name][0])
# How to store stats objects.
self.stats_model_type = (
REGRESSION_MODEL_TYPE[1] if self.regression else
CLASSIFICATION_LEAF_MODEL_TYPES[self.model_name][1])
self.finish_type = (
_params_proto.SPLIT_FINISH_BASIC
if self.regression else FINISH_TYPES[self.split_finish_name])
self.pruning_type = PRUNING_TYPES[self.split_pruning_name]
if self.pruning_type == _params_proto.SPLIT_PRUNE_NONE:
self.prune_every_samples = 0
else:
if (not self.prune_every_samples and
          not (isinstance(self.split_after_samples, numbers.Number) or
self.split_after_samples.isdigit())):
logging.error(
'Must specify prune_every_samples if using a depth-dependent '
'split_after_samples')
# Pruning half-way through split_after_samples seems like a decent
# default, making it easy to select the number being pruned with
# pruning_type while not paying the cost of pruning too often. Note that
# this only holds if not using a depth-dependent split_after_samples.
self.prune_every_samples = (
self.prune_every_samples or int(self.split_after_samples) / 2)
if self.finish_type == _params_proto.SPLIT_FINISH_BASIC:
self.early_finish_check_every_samples = 0
else:
if (not self.early_finish_check_every_samples and
          not (isinstance(self.split_after_samples, numbers.Number) or
self.split_after_samples.isdigit())):
logging.error(
            'Must specify early_finish_check_every_samples if using a '
            'depth-dependent split_after_samples')
# Checking for early finish every quarter through split_after_samples
# seems like a decent default. We don't want to incur the checking cost
# too often, but (at least for hoeffding) it's lower than the cost of
# pruning so we can do it a little more frequently.
self.early_finish_check_every_samples = (
self.early_finish_check_every_samples or
int(self.split_after_samples) / 4)
self.split_type = SPLIT_TYPES[self.split_name]
return self
def get_epoch_variable():
"""Returns the epoch variable, or [0] if not defined."""
# Grab epoch variable defined in
# //third_party/tensorflow/python/training/input.py::limit_epochs
for v in tf_variables.local_variables():
if 'limit_epochs/epoch' in v.op.name:
return array_ops.reshape(v, [1])
# TODO(thomaswc): Access epoch from the data feeder.
return [0]
# A simple container to hold the training variables for a single tree.
class TreeVariables(object):
"""Stores tf.Variables for training a single random tree.
  Uses tf.compat.v1.get_variable to get tree-specific names so that this can
  be used with a tf.learn-style implementation (one that trains a model,
  saves it, then relies on restoring that model to evaluate).
"""
def __init__(self, params, tree_num, training, tree_config='', tree_stat=''):
if (not hasattr(params, 'params_proto') or
not isinstance(params.params_proto, _params_proto.TensorForestParams)):
params.params_proto = build_params_proto(params)
params.serialized_params_proto = params.params_proto.SerializeToString()
self.stats = None
if training:
# TODO(gilberth): Manually shard this to be able to fit it on
# multiple machines.
self.stats = stats_ops.fertile_stats_variable(
params, tree_stat, self.get_tree_name('stats', tree_num))
self.tree = model_ops.tree_variable(params, tree_config, self.stats,
self.get_tree_name('tree', tree_num))
def get_tree_name(self, name, num):
return '{0}-{1}'.format(name, num)
class ForestVariables(object):
"""A container for a forests training data, consisting of multiple trees.
  Instantiates a TreeVariables object for each tree. We override the
  __getitem__ and __setitem__ functions so that usage looks like this:
    forest_variables = ForestVariables(params)
    ... forest_variables[tree_num].tree ...
"""
def __init__(self,
params,
device_assigner,
training=True,
tree_variables_class=TreeVariables,
tree_configs=None,
tree_stats=None):
self.variables = []
# Set up some scalar variables to run through the device assigner, then
# we can use those to colocate everything related to a tree.
self.device_dummies = []
with ops.device(device_assigner):
for i in range(params.num_trees):
self.device_dummies.append(
variable_scope.get_variable(name='device_dummy_%d' % i, shape=0))
for i in range(params.num_trees):
with ops.device(self.device_dummies[i].device):
kwargs = {}
if tree_configs is not None:
kwargs.update(dict(tree_config=tree_configs[i]))
if tree_stats is not None:
kwargs.update(dict(tree_stat=tree_stats[i]))
self.variables.append(
tree_variables_class(params, i, training, **kwargs))
def __setitem__(self, t, val):
self.variables[t] = val
def __getitem__(self, t):
return self.variables[t]
class RandomForestGraphs(object):
"""Builds TF graphs for random forest training and inference."""
def __init__(self,
params,
tree_configs=None,
tree_stats=None,
device_assigner=None,
variables=None,
tree_variables_class=TreeVariables,
tree_graphs=None,
training=True):
self.params = params
self.device_assigner = (
device_assigner or framework_variables.VariableDeviceChooser())
logging.info('Constructing forest with params = ')
logging.info(self.params.__dict__)
self.variables = variables or ForestVariables(
self.params,
device_assigner=self.device_assigner,
training=training,
tree_variables_class=tree_variables_class,
tree_configs=tree_configs,
tree_stats=tree_stats)
tree_graph_class = tree_graphs or RandomTreeGraphs
self.trees = [
tree_graph_class(self.variables[i], self.params, i)
for i in range(self.params.num_trees)
]
def _bag_features(self, tree_num, input_data):
split_data = array_ops.split(
value=input_data, num_or_size_splits=self.params.num_features, axis=1)
return array_ops.concat(
[split_data[ind] for ind in self.params.bagged_features[tree_num]], 1)
def get_all_resource_handles(self):
return ([self.variables[i].tree for i in range(len(self.trees))] +
[self.variables[i].stats for i in range(len(self.trees))])
def training_graph(self,
input_data,
input_labels,
num_trainers=1,
trainer_id=0,
**tree_kwargs):
"""Constructs a TF graph for training a random forest.
Args:
input_data: A tensor or dict of string->Tensor for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
num_trainers: Number of parallel trainers to split trees among.
trainer_id: Which trainer this instance is.
**tree_kwargs: Keyword arguments passed to each tree's training_graph.
Returns:
The last op in the random forest training graph.
Raises:
NotImplementedError: If trying to use bagging with sparse features.
"""
processed_dense_features, processed_sparse_features, data_spec = (
data_ops.ParseDataTensorOrDict(input_data))
if input_labels is not None:
labels = data_ops.ParseLabelTensorOrDict(input_labels)
data_spec = data_spec or self.get_default_data_spec(input_data)
tree_graphs = []
trees_per_trainer = self.params.num_trees / num_trainers
tree_start = int(trainer_id * trees_per_trainer)
tree_end = int((trainer_id + 1) * trees_per_trainer)
for i in range(tree_start, tree_end):
with ops.device(self.variables.device_dummies[i].device):
seed = self.params.base_random_seed
if seed != 0:
seed += i
# If using bagging, randomly select some of the input.
tree_data = processed_dense_features
tree_labels = labels
if self.params.bagging_fraction < 1.0:
# TODO(gilberth): Support bagging for sparse features.
if processed_sparse_features is not None:
raise NotImplementedError(
'Bagging not supported with sparse features.')
# TODO(thomaswc): This does sampling without replacement. Consider
# also allowing sampling with replacement as an option.
batch_size = array_ops.strided_slice(
array_ops.shape(processed_dense_features), [0], [1])
r = random_ops.random_uniform(batch_size, seed=seed)
mask = math_ops.less(
r,
array_ops.ones_like(r) * self.params.bagging_fraction)
gather_indices = array_ops.squeeze(array_ops.where(mask), axis=[1])
# TODO(thomaswc): Calculate out-of-bag data and labels, and store
# them for use in calculating statistics later.
tree_data = array_ops.gather(processed_dense_features, gather_indices)
tree_labels = array_ops.gather(labels, gather_indices)
if self.params.bagged_features:
if processed_sparse_features is not None:
raise NotImplementedError(
'Feature bagging not supported with sparse features.')
tree_data = self._bag_features(i, tree_data)
tree_graphs.append(self.trees[i].training_graph(
tree_data,
tree_labels,
seed,
data_spec=data_spec,
sparse_features=processed_sparse_features,
**tree_kwargs))
return control_flow_ops.group(*tree_graphs, name='train')
def inference_graph(self, input_data, **inference_args):
"""Constructs a TF graph for evaluating a random forest.
Args:
input_data: A tensor or dict of string->Tensor for the input data. This
input_data must generate the same spec as the
input_data used in training_graph: the dict must have the
same keys, for example, and all tensors must have the same
size in their first dimension.
**inference_args: Keyword arguments to pass through to each tree.
Returns:
A tuple of (probabilities, tree_paths, variance).
Raises:
NotImplementedError: If trying to use feature bagging with sparse
features.
"""
processed_dense_features, processed_sparse_features, data_spec = (
data_ops.ParseDataTensorOrDict(input_data))
probabilities = []
paths = []
for i in range(self.params.num_trees):
with ops.device(self.variables.device_dummies[i].device):
tree_data = processed_dense_features
if self.params.bagged_features:
if processed_sparse_features is not None:
raise NotImplementedError(
'Feature bagging not supported with sparse features.')
tree_data = self._bag_features(i, tree_data)
probs, path = self.trees[i].inference_graph(
tree_data,
data_spec,
sparse_features=processed_sparse_features,
**inference_args)
probabilities.append(probs)
paths.append(path)
with ops.device(self.variables.device_dummies[0].device):
# shape of all_predict should be [batch_size, num_trees, num_outputs]
all_predict = array_ops.stack(probabilities, axis=1)
average_values = math_ops.div(
math_ops.reduce_sum(all_predict, 1),
self.params.num_trees,
name='probabilities')
tree_paths = array_ops.stack(paths, axis=1)
expected_squares = math_ops.div(
math_ops.reduce_sum(all_predict * all_predict, 1),
self.params.num_trees)
regression_variance = math_ops.maximum(
0., expected_squares - average_values * average_values)
return average_values, tree_paths, regression_variance
def average_size(self):
"""Constructs a TF graph for evaluating the average size of a forest.
Returns:
The average number of nodes over the trees.
"""
sizes = []
for i in range(self.params.num_trees):
with ops.device(self.variables.device_dummies[i].device):
sizes.append(self.trees[i].size())
return math_ops.reduce_mean(
math_ops.cast(array_ops.stack(sizes), dtypes.float32))
# pylint: disable=unused-argument
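  # Editorial note: a random forest has no differentiable loss, so the
  # negative average tree size is used as a proxy; it decreases monotonically
  # as the forest grows, which keeps monitors that expect a shrinking loss
  # usable.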
def training_loss(self, features, labels, name='training_loss'):
return math_ops.negative(self.average_size(), name=name)
# pylint: disable=unused-argument
def validation_loss(self, features, labels):
return math_ops.negative(self.average_size())
def average_impurity(self):
"""Constructs a TF graph for evaluating the leaf impurity of a forest.
Returns:
The last op in the graph.
"""
impurities = []
for i in range(self.params.num_trees):
with ops.device(self.variables.device_dummies[i].device):
impurities.append(self.trees[i].average_impurity())
return math_ops.reduce_mean(array_ops.stack(impurities))
def feature_importances(self):
tree_counts = [
self.trees[i].feature_usage_counts()
for i in range(self.params.num_trees)
]
total_counts = math_ops.reduce_sum(array_ops.stack(tree_counts, 0), 0)
return total_counts / math_ops.reduce_sum(total_counts)
class RandomTreeGraphs(object):
"""Builds TF graphs for random tree training and inference."""
def __init__(self, variables, params, tree_num):
self.variables = variables
self.params = params
self.tree_num = tree_num
def training_graph(self,
input_data,
input_labels,
random_seed,
data_spec,
sparse_features=None,
input_weights=None):
"""Constructs a TF graph for training a random tree.
Args:
input_data: A tensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
random_seed: The random number generator seed to use for this tree. 0
means use the current time as the seed.
data_spec: A data_ops.TensorForestDataSpec object specifying the original
feature/columns of the data.
sparse_features: A tf.SparseTensor for sparse input data.
input_weights: A float tensor or placeholder holding per-input weights, or
None if all inputs are to be weighted equally.
Returns:
The last op in the random tree training graph.
"""
# TODO(gilberth): Use this.
unused_epoch = math_ops.cast(get_epoch_variable(), dtypes.int32)
if input_weights is None:
input_weights = []
sparse_indices = []
sparse_values = []
sparse_shape = []
if sparse_features is not None:
sparse_indices = sparse_features.indices
sparse_values = sparse_features.values
sparse_shape = sparse_features.dense_shape
if input_data is None:
input_data = []
leaf_ids = model_ops.traverse_tree_v4(
self.variables.tree,
input_data,
sparse_indices,
sparse_values,
sparse_shape,
input_spec=data_spec.SerializeToString(),
params=self.params.serialized_params_proto)
update_model = model_ops.update_model_v4(
self.variables.tree,
leaf_ids,
input_labels,
input_weights,
params=self.params.serialized_params_proto)
finished_nodes = stats_ops.process_input_v4(
self.variables.tree,
self.variables.stats,
input_data,
sparse_indices,
sparse_values,
sparse_shape,
input_labels,
input_weights,
leaf_ids,
input_spec=data_spec.SerializeToString(),
random_seed=random_seed,
params=self.params.serialized_params_proto)
with ops.control_dependencies([update_model]):
return stats_ops.grow_tree_v4(
self.variables.tree,
self.variables.stats,
finished_nodes,
params=self.params.serialized_params_proto)
def inference_graph(self, input_data, data_spec, sparse_features=None):
"""Constructs a TF graph for evaluating a random tree.
Args:
input_data: A tensor or placeholder for input data.
data_spec: A TensorForestDataSpec proto specifying the original input
columns.
sparse_features: A tf.SparseTensor for sparse input data.
Returns:
A tuple of (probabilities, tree_paths).
"""
sparse_indices = []
sparse_values = []
sparse_shape = []
if sparse_features is not None:
sparse_indices = sparse_features.indices
sparse_values = sparse_features.values
sparse_shape = sparse_features.dense_shape
if input_data is None:
input_data = []
return model_ops.tree_predictions_v4(
self.variables.tree,
input_data,
sparse_indices,
sparse_values,
sparse_shape,
input_spec=data_spec.SerializeToString(),
params=self.params.serialized_params_proto)
def size(self):
"""Constructs a TF graph for evaluating the current number of nodes.
Returns:
The current number of nodes in the tree.
"""
return model_ops.tree_size(self.variables.tree)
def feature_usage_counts(self):
return model_ops.feature_usage_counts(
self.variables.tree, params=self.params.serialized_params_proto)
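# Editorial note: a compact usage sketch mirroring tensor_forest_test.py; the
# toy inputs below are illustrative only and not part of the original module.
if __name__ == '__main__':
  sketch_params = ForestHParams(
      num_classes=2, num_features=2, num_trees=10, max_nodes=1000,
      split_after_samples=25).fill()
  sketch_builder = RandomForestGraphs(sketch_params)
  sketch_train_op = sketch_builder.training_graph([[-1., 0.], [1., 0.]], [0, 1])
  sketch_probs, sketch_paths, sketch_var = sketch_builder.inference_graph(
      [[-1., 0.], [1., 0.]])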
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/python/tensor_forest.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.tensor_forest.ops.tensor_forest."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf.json_format import ParseDict
from tensorflow.contrib.decision_trees.proto import generic_tree_model_pb2 as _tree_proto
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class TensorForestTest(test_util.TensorFlowTestCase):
def testForestHParams(self):
hparams = tensor_forest.ForestHParams(
num_classes=2,
num_trees=100,
max_nodes=1000,
split_after_samples=25,
num_features=60).fill()
self.assertEquals(2, hparams.num_classes)
self.assertEquals(3, hparams.num_output_columns)
self.assertEquals(10, hparams.num_splits_to_consider)
# Default value of valid_leaf_threshold
self.assertEquals(1, hparams.valid_leaf_threshold)
self.assertEquals(0, hparams.base_random_seed)
def testForestHParamsBigTree(self):
hparams = tensor_forest.ForestHParams(
num_classes=2,
num_trees=100,
max_nodes=1000000,
split_after_samples=25,
num_features=1000).fill()
self.assertEquals(31, hparams.num_splits_to_consider)
def testForestHParamsStringParams(self):
hparams = tensor_forest.ForestHParams(
num_classes=2,
num_trees=100,
max_nodes=1000000,
split_after_samples="25",
num_splits_to_consider="1000000",
num_features=1000).fill()
self.assertEquals("1000000", hparams.num_splits_to_consider)
def testTrainingConstructionClassification(self):
input_data = [[-1., 0.], [-1., 2.], # node 1
[1., 0.], [1., -2.]] # node 2
input_labels = [0, 1, 2, 3]
params = tensor_forest.ForestHParams(
num_classes=4,
num_features=2,
num_trees=10,
max_nodes=1000,
split_after_samples=25).fill()
graph_builder = tensor_forest.RandomForestGraphs(params)
graph = graph_builder.training_graph(input_data, input_labels)
self.assertTrue(isinstance(graph, ops.Operation))
def testTrainingConstructionRegression(self):
input_data = [[-1., 0.], [-1., 2.], # node 1
[1., 0.], [1., -2.]] # node 2
input_labels = [0, 1, 2, 3]
params = tensor_forest.ForestHParams(
num_classes=4,
num_features=2,
num_trees=10,
max_nodes=1000,
split_after_samples=25,
regression=True).fill()
graph_builder = tensor_forest.RandomForestGraphs(params)
graph = graph_builder.training_graph(input_data, input_labels)
self.assertTrue(isinstance(graph, ops.Operation))
def testInferenceConstruction(self):
input_data = [[-1., 0.], [-1., 2.], # node 1
[1., 0.], [1., -2.]] # node 2
params = tensor_forest.ForestHParams(
num_classes=4,
num_features=2,
num_trees=10,
max_nodes=1000,
split_after_samples=25).fill()
graph_builder = tensor_forest.RandomForestGraphs(params)
probs, paths, var = graph_builder.inference_graph(input_data)
self.assertTrue(isinstance(probs, ops.Tensor))
self.assertTrue(isinstance(paths, ops.Tensor))
self.assertTrue(isinstance(var, ops.Tensor))
  def testInferenceFromRestoredModel(self):
input_data = [[-1., 0.], [-1., 2.], # node 1
[1., 0.], [1., -2.]] # node 2
expected_prediction = [[0.0, 1.0], [0.0, 1.0],
[0.0, 1.0], [0.0, 1.0]]
hparams = tensor_forest.ForestHParams(
num_classes=2,
num_features=2,
num_trees=1,
max_nodes=1000,
split_after_samples=25).fill()
tree_weight = {'decisionTree':
{'nodes':
[{'binaryNode':
{'rightChildId': 2,
'leftChildId': 1,
'inequalityLeftChildTest':
{'featureId': {'id': '0'},
'threshold': {'floatValue': 0}}}},
{'leaf': {'vector':
{'value': [{'floatValue': 0.0},
{'floatValue': 1.0}]}},
'nodeId': 1},
{'leaf': {'vector':
{'value': [{'floatValue': 0.0},
{'floatValue': 1.0}]}},
'nodeId': 2}]}}
restored_tree_param = ParseDict(tree_weight,
_tree_proto.Model()).SerializeToString()
graph_builder = tensor_forest.RandomForestGraphs(hparams,
[restored_tree_param])
probs, paths, var = graph_builder.inference_graph(input_data)
self.assertTrue(isinstance(probs, ops.Tensor))
self.assertTrue(isinstance(paths, ops.Tensor))
self.assertTrue(isinstance(var, ops.Tensor))
with self.cached_session():
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
self.assertEquals(probs.eval().shape, (4, 2))
self.assertEquals(probs.eval().tolist(), expected_prediction)
def testTrainingConstructionClassificationSparse(self):
input_data = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 3], [1, 0], [1, 7], [2, 1], [3, 9]],
values=[-1.0, 0.0, -1., 2., 1., -2.0],
dense_shape=[4, 10])
input_labels = [0, 1, 2, 3]
params = tensor_forest.ForestHParams(
num_classes=4,
num_features=10,
num_trees=10,
max_nodes=1000,
split_after_samples=25).fill()
graph_builder = tensor_forest.RandomForestGraphs(params)
graph = graph_builder.training_graph(input_data, input_labels)
self.assertTrue(isinstance(graph, ops.Operation))
def testInferenceConstructionSparse(self):
input_data = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 3],
[1, 0], [1, 7],
[2, 1],
[3, 9]],
values=[-1.0, 0.0,
-1., 2.,
1.,
-2.0],
dense_shape=[4, 10])
params = tensor_forest.ForestHParams(
num_classes=4,
num_features=10,
num_trees=10,
max_nodes=1000,
regression=True,
split_after_samples=25).fill()
graph_builder = tensor_forest.RandomForestGraphs(params)
probs, paths, var = graph_builder.inference_graph(input_data)
self.assertTrue(isinstance(probs, ops.Tensor))
self.assertTrue(isinstance(paths, ops.Tensor))
self.assertTrue(isinstance(var, ops.Tensor))
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/python/tensor_forest_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Random forest implementation in tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.contrib.tensor_forest.python.ops import data_ops
from tensorflow.contrib.tensor_forest.python.ops import model_ops
from tensorflow.contrib.tensor_forest.python.ops import stats_ops
from tensorflow.contrib.tensor_forest.python.ops import tensor_forest_ops
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/python/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.tensor_forest.ops.scatter_add_ndim_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.python.ops import tensor_forest_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class ScatterAddNdimTest(test_util.TensorFlowTestCase):
def test1dim(self):
input_data = variables.VariableV1(
[1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.])
indices = [[1], [10]]
updates = [100., 200.]
with self.cached_session():
variables.global_variables_initializer().run()
tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
self.assertAllEqual(
[1., 102., 3., 4., 5., 6., 7., 8., 9., 10., 211., 12.],
input_data.eval())
def test3dim(self):
input_data = variables.VariableV1([[[1., 2., 3.], [4., 5., 6.]],
[[7., 8., 9.], [10., 11., 12.]]])
indices = [[0, 0, 1], [1, 1, 2]]
updates = [100., 200.]
with self.cached_session():
variables.global_variables_initializer().run()
tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
self.assertAllEqual([[[1., 102., 3.], [4., 5., 6.]],
[[7., 8., 9.], [10., 11., 212.]]], input_data.eval())
def testNoUpdates(self):
init_val = [[[1., 2., 3.], [4., 5., 6.]], [[7., 8., 9.], [10., 11., 12.]]]
input_data = variables.VariableV1(init_val)
indices = []
updates = []
with self.cached_session():
variables.global_variables_initializer().run()
tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
self.assertAllEqual(init_val, input_data.eval())
def testBadInput(self):
init_val = [[[1., 2., 3.], [4., 5., 6.]], [[7., 8., 9.], [10., 11., 12.]]]
input_data = variables.VariableV1(init_val)
indices = [[0, 0, 1], [1, 1, 2]]
updates = [100.]
with self.cached_session():
variables.global_variables_initializer().run()
with self.assertRaisesOpError(
'Number of updates should be same as number of indices.'):
tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
self.assertAllEqual(init_val, input_data.eval())
def testIncompleteIndices(self):
input_data = variables.VariableV1([[[1., 2., 3.], [4., 5., 6.]],
[[7., 8., 9.], [10., 11., 12.]]])
indices = [[0, 0], [1, 1]]
updates = [[100., 200., 300.], [400., 500., 600.]]
with self.cached_session():
variables.global_variables_initializer().run()
tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
self.assertAllEqual([[[101., 202., 303.], [4., 5., 6.]],
[[7., 8., 9.], [410., 511., 612.]]],
input_data.eval())
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/python/kernel_tests/scatter_add_ndim_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stats ops python wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.contrib.tensor_forest.python.ops import gen_stats_ops
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.python.ops.gen_stats_ops import finalize_tree
from tensorflow.contrib.tensor_forest.python.ops.gen_stats_ops import grow_tree_v4
from tensorflow.contrib.tensor_forest.python.ops.gen_stats_ops import process_input_v4
# pylint: enable=unused-import
from tensorflow.contrib.util import loader
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import resource_loader
from tensorflow.python.training import saver
from tensorflow.python.training.tracking import tracking
_stats_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_stats_ops.so"))
ops.NotDifferentiable("FertileStatsVariable")
ops.NotDifferentiable("FertileStatsSerialize")
ops.NotDifferentiable("FertileStatsDeserialize")
ops.NotDifferentiable("GrowTreeV4")
ops.NotDifferentiable("ProcessInputV4")
ops.NotDifferentiable("FinalizeTree")
class FertileStatsVariableSavable(saver.BaseSaverBuilder.SaveableObject):
"""SaveableObject implementation for FertileStatsVariable."""
def __init__(self, params, stats_handle, create_op, name):
"""Creates a FertileStatsVariableSavable object.
Args:
params: A TensorForestParams object.
stats_handle: handle to the tree variable.
create_op: the op to initialize the variable.
name: the name to save the tree variable under.
"""
self.params = params
tensor = gen_stats_ops.fertile_stats_serialize(
stats_handle, params=params.serialized_params_proto)
# slice_spec is useful for saving a slice from a variable.
    # It's not meaningful for the tree variable, so we just pass an empty
    # value.
slice_spec = ""
specs = [saver.BaseSaverBuilder.SaveSpec(tensor, slice_spec, name),]
super(FertileStatsVariableSavable,
self).__init__(stats_handle, specs, name)
self._stats_handle = stats_handle
self._create_op = create_op
def restore(self, restored_tensors, unused_restored_shapes):
"""Restores the associated tree from 'restored_tensors'.
Args:
restored_tensors: the tensors that were loaded from a checkpoint.
unused_restored_shapes: the shapes this object should conform to after
restore. Not meaningful for trees.
Returns:
The operation that restores the state of the tree variable.
"""
with ops.control_dependencies([self._create_op]):
return gen_stats_ops.fertile_stats_deserialize(
self._stats_handle, restored_tensors[0],
params=self.params.serialized_params_proto)
class FertileStatsVariable(tracking.TrackableResource):
"""A Fertile stats variable."""
def __init__(self, params, stats_config, name, container=None):
self._params = params
self._stats_config = stats_config
self._name = name
self._container = container
self._init_op = None
super(FertileStatsVariable, self).__init__()
self._resource_handle = self._create_resource()
def _create_resource(self):
if context.executing_eagerly():
# TODO(allenl): This will leak memory due to kernel caching by the
# shared_name attribute value (but is better than the alternative of
# sharing everything by default when executing eagerly; hopefully creating
      # these variables in a loop is uncommon).
shared_name = "fertile_stats_variable_%d" % (ops.uid(),)
else:
shared_name = self._name
return gen_stats_ops.fertile_stats_resource_handle_op(
self._container, shared_name=shared_name, name=self._name)
def _initialize(self):
return gen_stats_ops.create_fertile_stats_variable(
self.resource_handle,
self._stats_config,
params=self._params.serialized_params_proto)
@property
def initializer(self):
if self._init_op is None:
self._init_op = self._initialize()
return self._init_op
def is_initialized(self):
return gen_stats_ops.fertile_stats_is_initialized_op(self.resource_handle)
def _gather_saveables_for_checkpoint(self):
"""For object-based checkpointing."""
return {
"fertile_stats_variable":
functools.partial(
FertileStatsVariableSavable,
params=self._params,
stats_handle=self.resource_handle,
create_op=self.initializer)
}
def fertile_stats_variable(params, stats_config, name, container=None):
r"""Creates a stats object and returns a handle to it.
Args:
params: A TensorForestParams object.
stats_config: A `Tensor` of type `string`. Serialized proto of the stats.
name: A name for the variable.
container: An optional `string`. Defaults to `""`.
Returns:
A `Tensor` of type mutable `string`. The handle to the stats.
"""
with ops.name_scope(name, "FertileStatsVariable") as name:
fertile_stats_var = FertileStatsVariable(params, stats_config, name,
container)
resource_handle = fertile_stats_var.resource_handle
create_op = fertile_stats_var.initializer
is_initialized_op = fertile_stats_var.is_initialized()
# Adds the variable to the savable list.
saveable = (
fertile_stats_var._gather_saveables_for_checkpoint()[ # pylint: disable=protected-access
"fertile_stats_variable"](name=resource_handle.name))
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
resources.register_resource(resource_handle, create_op, is_initialized_op)
return resource_handle
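# Illustrative usage sketch (not part of the original module): initializing the
# FertileStatsVariable resource created by fertile_stats_variable(). `params`
# (any object exposing a serialized_params_proto attribute, as TensorForestParams
# does), `stats_proto` and `sess` are hypothetical caller-supplied inputs;
# assumes TF 1.x graph mode.
def _example_init_fertile_stats(params, stats_proto, sess):
  handle = fertile_stats_variable(params, stats_proto, name='example_stats')
  # register_resource() above added the create op to the shared-resources
  # collection, so one initialize_resources() call covers it.
  sess.run(resources.initialize_resources(resources.shared_resources()))
  return handle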
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/python/ops/stats_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom ops used by tensorforest."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.tensor_forest.python.ops.gen_tensor_forest_ops import *
# pylint: enable=wildcard-import
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
_tensor_forest_ops = loader.load_op_library(
resource_loader.get_path_to_datafile('_tensor_forest_ops.so'))
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/python/ops/tensor_forest_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model ops python wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.contrib.tensor_forest.python.ops import gen_model_ops
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.python.ops.gen_model_ops import feature_usage_counts
from tensorflow.contrib.tensor_forest.python.ops.gen_model_ops import traverse_tree_v4
from tensorflow.contrib.tensor_forest.python.ops.gen_model_ops import tree_predictions_v4
from tensorflow.contrib.tensor_forest.python.ops.gen_model_ops import tree_size
from tensorflow.contrib.tensor_forest.python.ops.gen_model_ops import update_model_v4
# pylint: enable=unused-import
from tensorflow.contrib.util import loader
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import resource_loader
from tensorflow.python.training import saver
from tensorflow.python.training.tracking import tracking
_model_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_model_ops.so"))
ops.NotDifferentiable("TreeVariable")
ops.NotDifferentiable("TreeSerialize")
ops.NotDifferentiable("TreeDeserialize")
ops.NotDifferentiable("TreeSize")
ops.NotDifferentiable("TreePredictionsV4")
ops.NotDifferentiable("FeatureUsageCounts")
class TreeVariableSavable(saver.BaseSaverBuilder.SaveableObject):
"""SaveableObject implementation for TreeVariable."""
def __init__(self, params, tree_handle, stats_handle, create_op, name):
"""Creates a TreeVariableSavable object.
Args:
params: A TensorForestParams object.
tree_handle: handle to the tree variable.
stats_handle: handle to the stats variable.
create_op: the op to initialize the variable.
name: the name to save the tree variable under.
"""
self.params = params
tensor = gen_model_ops.tree_serialize(tree_handle)
# slice_spec is useful for saving a slice from a variable.
    # It's not meaningful for the tree variable. So we just pass an empty value.
slice_spec = ""
specs = [saver.BaseSaverBuilder.SaveSpec(tensor, slice_spec, name),]
super(TreeVariableSavable,
self).__init__(tree_handle, specs, name)
self._tree_handle = tree_handle
self._create_op = create_op
def restore(self, restored_tensors, unused_restored_shapes):
"""Restores the associated tree from 'restored_tensors'.
Args:
restored_tensors: the tensors that were loaded from a checkpoint.
unused_restored_shapes: the shapes this object should conform to after
restore. Not meaningful for trees.
Returns:
The operation that restores the state of the tree variable.
"""
with ops.control_dependencies([self._create_op]):
return gen_model_ops.tree_deserialize(
self._tree_handle,
restored_tensors[0],
params=self.params.serialized_params_proto)
class TreeVariable(tracking.TrackableResource):
"""A tree model."""
def __init__(self, params, tree_config, stats_handle, name, container=None):
self._params = params
self._tree_config = tree_config
self._stats_handle = stats_handle
self._name = name
self._container = container
self._init_op = None
super(TreeVariable, self).__init__()
self._resource_handle = self._create_resource()
def _create_resource(self):
if context.executing_eagerly():
# TODO(allenl): This will leak memory due to kernel caching by the
# shared_name attribute value (but is better than the alternative of
# sharing everything by default when executing eagerly; hopefully creating
# tables in a loop is uncommon).
shared_name = "tree_variable_%d" % (ops.uid(),)
else:
shared_name = self._name
return gen_model_ops.decision_tree_resource_handle_op(
self._container, shared_name=shared_name, name=self._name)
def _initialize(self):
return gen_model_ops.create_tree_variable(
self.resource_handle,
self._tree_config,
params=self._params.serialized_params_proto)
@property
def initializer(self):
if self._init_op is None:
self._init_op = self._initialize()
return self._init_op
def is_initialized(self):
return gen_model_ops.tree_is_initialized_op(self.resource_handle)
def _gather_saveables_for_checkpoint(self):
"""For object-based checkpointing."""
return {
"tree_variable":
functools.partial(
TreeVariableSavable,
params=self._params,
tree_handle=self.resource_handle,
stats_handle=self._stats_handle,
                create_op=self.initializer)
}
def tree_variable(params, tree_config, stats_handle, name, container=None):
r"""Creates a tree model and returns a handle to it.
Args:
params: A TensorForestParams object.
tree_config: A `Tensor` of type `string`. Serialized proto of the tree.
stats_handle: Resource handle to the stats object.
name: A name for the variable.
container: An optional `string`. Defaults to `""`.
Returns:
A `Tensor` of type mutable `string`. The handle to the tree.
"""
with ops.name_scope(name, "TreeVariable") as name:
tree_var = TreeVariable(params, tree_config, stats_handle, name, container)
resource_handle = tree_var.resource_handle
create_op = tree_var.initializer
is_initialized_op = tree_var.is_initialized()
# Adds the variable to the savable list.
saveable = tree_var._gather_saveables_for_checkpoint()["tree_variable"]( # pylint: disable=protected-access
name=resource_handle.name)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
resources.register_resource(resource_handle, create_op, is_initialized_op)
return resource_handle
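# Illustrative usage sketch (not part of the original module): because
# tree_variable() adds a TreeVariableSavable to GraphKeys.SAVEABLE_OBJECTS, a
# plain Saver built afterwards checkpoints the tree through
# tree_serialize/tree_deserialize. `params`, `tree_proto`, `stats_handle`,
# `checkpoint_path` and `sess` are hypothetical caller-supplied inputs; assumes
# TF 1.x graph mode.
def _example_checkpoint_tree(params, tree_proto, stats_handle,
                             checkpoint_path, sess):
  handle = tree_variable(params, tree_proto, stats_handle, name='example_tree')
  sess.run(resources.initialize_resources(resources.shared_resources()))
  tree_saver = saver.Saver()  # picks up SAVEABLE_OBJECTS automatically
  tree_saver.save(sess, checkpoint_path)
  return handle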
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/python/ops/model_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for preprocessing data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.python.ops import tensor_forest_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import tf_logging as logging
# Data column types for indicating categorical or other non-float values.
DATA_FLOAT = 0
DATA_CATEGORICAL = 1
DTYPE_TO_FTYPE = {
dtypes.string: DATA_CATEGORICAL,
dtypes.int32: DATA_CATEGORICAL,
dtypes.int64: DATA_CATEGORICAL,
dtypes.float32: DATA_FLOAT,
dtypes.float64: DATA_FLOAT
}
def CastToFloat(tensor):
if tensor.dtype == dtypes.string:
return tensor_forest_ops.reinterpret_string_to_float(tensor)
elif tensor.dtype.is_integer:
return math_ops.cast(tensor, dtypes.float32)
else:
return tensor
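# Illustrative sketch (not part of the original module): CastToFloat leaves
# float tensors untouched, casts integer tensors to float32, and reinterprets
# string (categorical) tensors bitwise via reinterpret_string_to_float. The
# column values here are made up.
def _example_cast_int_column():
  int_column = ops.convert_to_tensor([[1], [2], [3]], dtype=dtypes.int64)
  return CastToFloat(int_column)  # -> float32 tensor of the same shape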
# TODO(gilberth): If protos are ever allowed in dynamically loaded custom
# op libraries, convert this to a proto like a sane person.
class TensorForestDataSpec(object):
def __init__(self):
self.sparse = DataColumnCollection()
self.dense = DataColumnCollection()
self.dense_features_size = 0
def SerializeToString(self):
return 'dense_features_size: %d dense: [%s] sparse: [%s]' % (
self.dense_features_size, self.dense.SerializeToString(),
self.sparse.SerializeToString())
class DataColumnCollection(object):
"""Collection of DataColumns, meant to mimic a proto repeated field."""
def __init__(self):
self.cols = []
def add(self): # pylint: disable=invalid-name
self.cols.append(DataColumn())
return self.cols[-1]
def size(self): # pylint: disable=invalid-name
return len(self.cols)
def SerializeToString(self):
ret = ''
for c in self.cols:
ret += '{%s}' % c.SerializeToString()
return ret
class DataColumn(object):
def __init__(self):
self.name = ''
self.original_type = ''
self.size = 0
def SerializeToString(self):
return 'name: {0} original_type: {1} size: {2}'.format(self.name,
self.original_type,
self.size)
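# Illustrative sketch (not part of the original module): building a data spec
# by hand, mirroring what ParseDataTensorOrDict does below for a single dense
# float column of width 4.
def _example_manual_spec():
  spec = TensorForestDataSpec()
  col = spec.dense.add()
  col.name = 'dense_features'
  col.original_type = DATA_FLOAT
  col.size = 4
  spec.dense_features_size = 4
  return spec.SerializeToString()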
def GetColumnName(column_key, col_num):
if isinstance(column_key, str):
return column_key
else:
return getattr(column_key, 'column_name', str(col_num))
def ParseDataTensorOrDict(data):
"""Return a tensor to use for input data.
The incoming features can be a dict where keys are the string names of the
columns, which we turn into a single 2-D tensor.
Args:
data: `Tensor` or `dict` of `Tensor` objects.
Returns:
A 2-D tensor for input to tensor_forest, a keys tensor for the
tf.Examples if they exist, and a list of the type of each column
(e.g. continuous float, categorical).
"""
data_spec = TensorForestDataSpec()
if isinstance(data, dict):
dense_features_size = 0
dense_features = []
sparse_features = []
for k in sorted(data.keys()):
is_sparse = isinstance(data[k], sparse_tensor.SparseTensor)
if is_sparse:
# TODO(gilberth): support sparse continuous.
if data[k].dtype == dtypes.float32:
logging.info('TensorForest does not support sparse continuous.')
continue
elif data_spec.sparse.size() == 0:
col_spec = data_spec.sparse.add()
col_spec.original_type = DATA_CATEGORICAL
col_spec.name = 'all_sparse'
col_spec.size = -1
sparse_features.append(
sparse_tensor.SparseTensor(data[
k].indices, CastToFloat(data[k].values), data[k].dense_shape))
else:
col_spec = data_spec.dense.add()
col_spec.original_type = DTYPE_TO_FTYPE[data[k].dtype]
col_spec.name = GetColumnName(k, len(dense_features))
# the second dimension of get_shape should always be known.
shape = data[k].get_shape()
if len(shape) == 1:
col_spec.size = 1
else:
col_spec.size = shape[1].value
dense_features_size += col_spec.size
dense_features.append(CastToFloat(data[k]))
processed_dense_features = None
processed_sparse_features = None
if dense_features:
processed_dense_features = array_ops.concat(dense_features, 1)
data_spec.dense_features_size = dense_features_size
if sparse_features:
processed_sparse_features = sparse_ops.sparse_concat(1, sparse_features)
logging.info(data_spec.SerializeToString())
return processed_dense_features, processed_sparse_features, data_spec
elif isinstance(data, sparse_tensor.SparseTensor):
col_spec = data_spec.sparse.add()
col_spec.name = 'sparse_features'
col_spec.original_type = DTYPE_TO_FTYPE[data.dtype]
col_spec.size = -1
data_spec.dense_features_size = 0
return None, data, data_spec
else:
data = ops.convert_to_tensor(data)
col_spec = data_spec.dense.add()
col_spec.name = 'dense_features'
col_spec.original_type = DTYPE_TO_FTYPE[data.dtype]
col_spec.size = data.get_shape()[1]
data_spec.dense_features_size = col_spec.size
return data, None, data_spec
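# Illustrative sketch (not part of the original module): parsing a dict of
# dense feature columns. The column names and values here are made up; assumes
# TF 1.x graph mode.
def _example_parse_data_dict():
  features = {
      'age': ops.convert_to_tensor([[25.0], [40.0]], dtype=dtypes.float32),
      'height': ops.convert_to_tensor([[1.7], [1.8]], dtype=dtypes.float32),
  }
  dense, sparse, spec = ParseDataTensorOrDict(features)
  # dense is the [2, 2] float32 concatenation of the columns (sorted by key),
  # sparse is None, and spec records each column's type and size.
  return dense, sparse, spec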
def ParseLabelTensorOrDict(labels):
"""Return a tensor to use for input labels to tensor_forest.
The incoming targets can be a dict where keys are the string names of the
columns, which we turn into a single 1-D tensor for classification or
2-D tensor for regression.
Converts sparse tensors to dense ones.
Args:
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A 2-D tensor for labels/outputs.
"""
if isinstance(labels, dict):
return math_ops.cast(
array_ops.concat(
[
sparse_ops.sparse_tensor_to_dense(
labels[k], default_value=-1) if isinstance(
                        labels[k], sparse_tensor.SparseTensor) else labels[k]
for k in sorted(labels.keys())
],
1),
dtypes.float32)
else:
if isinstance(labels, sparse_tensor.SparseTensor):
return math_ops.cast(
sparse_ops.sparse_tensor_to_dense(labels, default_value=-1),
dtypes.float32)
else:
return math_ops.cast(labels, dtypes.float32)
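# Illustrative sketch (not part of the original module): label dicts are
# concatenated column-wise into a single 2-D float32 tensor, one column per
# output. The names and values here are made up; assumes TF 1.x graph mode.
def _example_parse_label_dict():
  labels = {
      'output_a': ops.convert_to_tensor([[0], [1]], dtype=dtypes.int64),
      'output_b': ops.convert_to_tensor([[2], [0]], dtype=dtypes.int64),
  }
  # Returns a [2, 2] float32 tensor with the columns in sorted key order.
  return ParseLabelTensorOrDict(labels)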
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/python/ops/data_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A collection of functions to be used as evaluation metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import losses
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import nn
INFERENCE_PROB_NAME = prediction_key.PredictionKey.PROBABILITIES
INFERENCE_PRED_NAME = prediction_key.PredictionKey.CLASSES
FEATURE_IMPORTANCE_NAME = 'global_feature_importance'
def _top_k_generator(k):
def _top_k(probabilities, targets):
targets = math_ops.cast(targets, dtypes.int32)
if targets.get_shape().ndims > 1:
targets = array_ops.squeeze(targets, axis=[1])
return metrics.mean(nn.in_top_k(probabilities, targets, k))
return _top_k
def _accuracy(predictions, targets, weights=None):
return metrics.accuracy(
labels=targets, predictions=predictions, weights=weights)
def _r2(probabilities, targets, weights=None):
targets = math_ops.cast(targets, dtypes.float32)
y_mean = math_ops.reduce_mean(targets, 0)
squares_total = math_ops.reduce_sum(
math_ops.squared_difference(targets, y_mean), 0)
squares_residuals = math_ops.reduce_sum(
math_ops.squared_difference(targets, probabilities), 0)
score = 1 - math_ops.reduce_sum(squares_residuals / squares_total)
return metrics.mean(score, weights=weights)
def _squeeze_and_onehot(targets, depth):
targets = array_ops.squeeze(targets, axis=[1])
return array_ops.one_hot(math_ops.cast(targets, dtypes.int32), depth)
def _sigmoid_entropy(probabilities, targets, weights=None):
return metrics.mean(
losses.sigmoid_cross_entropy(probabilities,
_squeeze_and_onehot(
targets,
array_ops.shape(probabilities)[1])),
weights=weights)
def _softmax_entropy(probabilities, targets, weights=None):
return metrics.mean(
losses.sparse_softmax_cross_entropy(probabilities,
math_ops.cast(targets, dtypes.int32)),
weights=weights)
def _predictions(predictions, unused_targets, **unused_kwargs):
return predictions
def _class_log_loss(probabilities, targets, weights=None):
return metrics.mean(
losses.log_loss(probabilities,
_squeeze_and_onehot(targets,
array_ops.shape(probabilities)[1])),
weights=weights)
def _precision(predictions, targets, weights=None):
return metrics.precision(
labels=targets, predictions=predictions, weights=weights)
def _precision_at_thresholds(predictions, targets, weights=None):
return metrics.precision_at_thresholds(
labels=targets,
predictions=array_ops.slice(predictions, [0, 1], [-1, 1]),
thresholds=np.arange(0, 1, 0.01, dtype=np.float32),
weights=weights)
def _recall(predictions, targets, weights=None):
return metrics.recall(
labels=targets, predictions=predictions, weights=weights)
def _recall_at_thresholds(predictions, targets, weights=None):
return metrics.recall_at_thresholds(
labels=targets,
predictions=array_ops.slice(predictions, [0, 1], [-1, 1]),
thresholds=np.arange(0, 1, 0.01, dtype=np.float32),
weights=weights)
def _auc(probs, targets, weights=None):
return metrics.auc(
labels=targets,
predictions=array_ops.slice(probs, [0, 1], [-1, 1]),
weights=weights)
_EVAL_METRICS = {
'auc': _auc,
'sigmoid_entropy': _sigmoid_entropy,
'softmax_entropy': _softmax_entropy,
'accuracy': _accuracy,
'r2': _r2,
'predictions': _predictions,
'classification_log_loss': _class_log_loss,
'precision': _precision,
'precision_at_thresholds': _precision_at_thresholds,
'recall': _recall,
'recall_at_thresholds': _recall_at_thresholds,
'top_5': _top_k_generator(5)
}
_PREDICTION_KEYS = {
'auc': INFERENCE_PROB_NAME,
'sigmoid_entropy': INFERENCE_PROB_NAME,
'softmax_entropy': INFERENCE_PROB_NAME,
'accuracy': INFERENCE_PRED_NAME,
'r2': prediction_key.PredictionKey.SCORES,
'predictions': INFERENCE_PRED_NAME,
'classification_log_loss': INFERENCE_PROB_NAME,
'precision': INFERENCE_PRED_NAME,
'precision_at_thresholds': INFERENCE_PROB_NAME,
'recall': INFERENCE_PRED_NAME,
'recall_at_thresholds': INFERENCE_PROB_NAME,
'top_5': INFERENCE_PROB_NAME
}
def get_metric(metric_name):
"""Given a metric name, return the corresponding metric function."""
return _EVAL_METRICS[metric_name]
def get_prediction_key(metric_name):
return _PREDICTION_KEYS[metric_name]
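# Illustrative usage sketch (not part of the original module): metrics in this
# module are streaming, so each lookup yields a (value_op, update_op) pair and
# update_op must run before value_op is read. 'accuracy' is just one of the
# keys in _EVAL_METRICS.
def _example_metric_lookup(predictions, targets):
  metric_fn = get_metric('accuracy')         # -> _accuracy
  pred_key = get_prediction_key('accuracy')  # -> PredictionKey.CLASSES
  value_op, update_op = metric_fn(predictions, targets)
  return pred_key, value_op, update_op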
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/client/eval_metrics.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Random forest implementation in tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.client import eval_metrics
from tensorflow.contrib.tensor_forest.client import random_forest
# pylint: enable=unused-import
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/client/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.tensor_forest.client.eval_metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.client import eval_metrics
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class EvalMetricsTest(test_util.TensorFlowTestCase):
def testTop2(self):
top_2_fn = eval_metrics._top_k_generator(2)
probabilities = constant_op.constant([[0.1, 0.2, 0.3], [0.4, 0.7, 0.5],
[0.9, 0.8, 0.2], [0.6, 0.4, 0.8]])
targets = constant_op.constant([[0], [2], [1], [1]])
in_top_2_op, update_op = top_2_fn(probabilities, targets)
with self.cached_session():
# initializes internal accuracy vars
variables.local_variables_initializer().run()
# need to call in order to run the in_top_2_op internal operations because
# it is a streaming function
update_op.eval()
self.assertNear(0.5, in_top_2_op.eval(), 0.0001)
def testTop3(self):
top_3_fn = eval_metrics._top_k_generator(3)
probabilities = constant_op.constant([[0.1, 0.2, 0.6, 0.3, 0.5, 0.5],
[0.1, 0.4, 0.7, 0.3, 0.5, 0.2],
[0.1, 0.3, 0.8, 0.7, 0.4, 0.9],
[0.9, 0.8, 0.1, 0.8, 0.2, 0.7],
[0.3, 0.6, 0.9, 0.4, 0.8, 0.6]])
targets = constant_op.constant([3, 0, 2, 5, 1])
in_top_3_op, update_op = top_3_fn(probabilities, targets)
with self.cached_session():
# initializes internal accuracy vars
variables.local_variables_initializer().run()
# need to call in order to run the in_top_3_op internal operations because
# it is a streaming function
update_op.eval()
self.assertNear(0.4, in_top_3_op.eval(), 0.0001)
def testAccuracy(self):
predictions = constant_op.constant([0, 1, 3, 6, 5, 2, 7, 6, 4, 9])
targets = constant_op.constant([0, 1, 4, 6, 5, 1, 7, 5, 4, 8])
accuracy_op, update_op = eval_metrics._accuracy(predictions, targets)
with self.cached_session():
variables.local_variables_initializer().run()
# need to call in order to run the accuracy_op internal operations because
# it is a streaming function
update_op.eval()
self.assertNear(0.6, accuracy_op.eval(), 0.0001)
def testR2(self):
scores = constant_op.constant(
[1.2, 3.9, 2.1, 0.9, 2.2, 0.1, 6.0, 4.0, 0.9])
targets = constant_op.constant(
[1.0, 4.3, 2.6, 0.5, 1.1, 0.7, 5.1, 3.4, 1.8])
r2_op, update_op = eval_metrics._r2(scores, targets)
with self.cached_session():
# initializes internal accuracy vars
variables.local_variables_initializer().run()
# need to call in order to run the r2_op internal operations because
# it is a streaming function
update_op.eval()
self.assertNear(0.813583, r2_op.eval(), 0.0001)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/client/eval_metrics_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TensorForestTrainer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.tensor_forest.client import random_forest
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column_lib as core_feature_column
from tensorflow.python.framework import ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_utils
def _get_classification_input_fns():
iris = base.load_iris()
data = iris.data.astype(np.float32)
labels = iris.target.astype(np.int32)
train_input_fn = numpy_io.numpy_input_fn(
x=data, y=labels, batch_size=150, num_epochs=None, shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x=data[:1,], y=None, batch_size=1, num_epochs=1, shuffle=False)
return train_input_fn, predict_input_fn
def _get_regression_input_fns():
boston = base.load_boston()
data = boston.data.astype(np.float32)
labels = boston.target.astype(np.int32)
train_input_fn = numpy_io.numpy_input_fn(
x=data, y=labels, batch_size=506, num_epochs=None, shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x=data[:1,], y=None, batch_size=1, num_epochs=1, shuffle=False)
return train_input_fn, predict_input_fn
class TensorForestTrainerTests(test.TestCase):
def testClassification(self):
"""Tests multi-class classification using matrix data as input."""
hparams = tensor_forest.ForestHParams(
num_trees=3,
max_nodes=1000,
num_classes=3,
num_features=4,
split_after_samples=20,
inference_tree_paths=True)
classifier = random_forest.TensorForestEstimator(hparams.fill())
input_fn, predict_input_fn = _get_classification_input_fns()
classifier.fit(input_fn=input_fn, steps=100)
res = classifier.evaluate(input_fn=input_fn, steps=10)
self.assertEqual(1.0, res['accuracy'])
self.assertAllClose(0.55144483, res['loss'])
predictions = list(classifier.predict(input_fn=predict_input_fn))
self.assertAllClose([[0.576117, 0.211942, 0.211942]],
[pred['probabilities'] for pred in predictions])
def testRegression(self):
"""Tests regression using matrix data as input."""
hparams = tensor_forest.ForestHParams(
num_trees=5,
max_nodes=1000,
num_classes=1,
num_features=13,
regression=True,
split_after_samples=20)
regressor = random_forest.TensorForestEstimator(hparams.fill())
input_fn, predict_input_fn = _get_regression_input_fns()
regressor.fit(input_fn=input_fn, steps=100)
res = regressor.evaluate(input_fn=input_fn, steps=10)
self.assertGreaterEqual(0.1, res['loss'])
predictions = list(regressor.predict(input_fn=predict_input_fn))
self.assertAllClose([24.], [pred['scores'] for pred in predictions], atol=1)
def testAdditionalOutputs(self):
"""Tests multi-class classification using matrix data as input."""
hparams = tensor_forest.ForestHParams(
num_trees=1,
max_nodes=100,
num_classes=3,
num_features=4,
split_after_samples=20,
inference_tree_paths=True)
classifier = random_forest.TensorForestEstimator(
hparams.fill(), keys_column='keys', include_all_in_serving=True)
iris = base.load_iris()
data = iris.data.astype(np.float32)
labels = iris.target.astype(np.int32)
input_fn = numpy_io.numpy_input_fn(
x={
'x': data,
'keys': np.arange(len(iris.data)).reshape(150, 1)
},
y=labels,
batch_size=10,
num_epochs=1,
shuffle=False)
classifier.fit(input_fn=input_fn, steps=100)
predictions = list(classifier.predict(input_fn=input_fn))
# Check that there is a key column, tree paths and var.
for pred in predictions:
self.assertTrue('keys' in pred)
self.assertTrue('tree_paths' in pred)
self.assertTrue('prediction_variance' in pred)
def _assert_checkpoint(self, model_dir, global_step):
reader = checkpoint_utils.load_checkpoint(model_dir)
self.assertLessEqual(
reader.get_tensor(ops.GraphKeys.GLOBAL_STEP), global_step)
def testEarlyStopping(self):
"""Tests multi-class classification using matrix data as input."""
hparams = tensor_forest.ForestHParams(
num_trees=100,
max_nodes=10000,
num_classes=3,
num_features=4,
split_after_samples=20,
inference_tree_paths=True)
classifier = random_forest.TensorForestEstimator(
hparams.fill(),
# Set a crazy threshold - 30% loss change.
early_stopping_loss_threshold=0.3,
early_stopping_rounds=2)
input_fn, _ = _get_classification_input_fns()
classifier.fit(input_fn=input_fn, steps=100)
# We stopped early.
self._assert_checkpoint(classifier.model_dir, global_step=5)
class CoreTensorForestTests(test.TestCase):
def testTrainEvaluateInferDoesNotThrowErrorForClassifier(self):
head_fn = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3, loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
hparams = tensor_forest.ForestHParams(
num_trees=3,
max_nodes=1000,
num_classes=3,
num_features=4,
split_after_samples=20,
inference_tree_paths=True)
est = random_forest.CoreTensorForestEstimator(hparams.fill(), head=head_fn)
input_fn, predict_input_fn = _get_classification_input_fns()
est.train(input_fn=input_fn, steps=100)
res = est.evaluate(input_fn=input_fn, steps=1)
self.assertEqual(1.0, res['accuracy'])
self.assertAllClose(0.55144483, res['loss'])
predictions = list(est.predict(input_fn=predict_input_fn))
self.assertAllClose([[0.576117, 0.211942, 0.211942]],
[pred['probabilities'] for pred in predictions])
def testRegression(self):
"""Tests regression using matrix data as input."""
head_fn = head_lib._regression_head(
label_dimension=1,
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
hparams = tensor_forest.ForestHParams(
num_trees=5,
max_nodes=1000,
num_classes=1,
num_features=13,
regression=True,
split_after_samples=20)
regressor = random_forest.CoreTensorForestEstimator(
hparams.fill(), head=head_fn)
input_fn, predict_input_fn = _get_regression_input_fns()
regressor.train(input_fn=input_fn, steps=100)
res = regressor.evaluate(input_fn=input_fn, steps=10)
self.assertGreaterEqual(0.1, res['loss'])
predictions = list(regressor.predict(input_fn=predict_input_fn))
self.assertAllClose(
[[24.]], [pred['predictions'] for pred in predictions], atol=1)
def testWithFeatureColumns(self):
head_fn = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3, loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
hparams = tensor_forest.ForestHParams(
num_trees=3,
max_nodes=1000,
num_classes=3,
num_features=4,
split_after_samples=20,
inference_tree_paths=True)
est = random_forest.CoreTensorForestEstimator(
hparams.fill(),
head=head_fn,
feature_columns=[core_feature_column.numeric_column('x')])
iris = base.load_iris()
data = {'x': iris.data.astype(np.float32)}
labels = iris.target.astype(np.int32)
input_fn = numpy_io.numpy_input_fn(
x=data, y=labels, batch_size=150, num_epochs=None, shuffle=False)
est.train(input_fn=input_fn, steps=100)
res = est.evaluate(input_fn=input_fn, steps=1)
self.assertEqual(1.0, res['accuracy'])
self.assertAllClose(0.55144483, res['loss'])
def testAutofillsClassificationHead(self):
hparams = tensor_forest.ForestHParams(
num_trees=3,
max_nodes=1000,
num_classes=3,
num_features=4,
split_after_samples=20,
inference_tree_paths=True)
est = random_forest.CoreTensorForestEstimator(hparams.fill())
input_fn, _ = _get_classification_input_fns()
est.train(input_fn=input_fn, steps=100)
res = est.evaluate(input_fn=input_fn, steps=1)
self.assertEqual(1.0, res['accuracy'])
self.assertAllClose(0.55144483, res['loss'])
def testAutofillsRegressionHead(self):
hparams = tensor_forest.ForestHParams(
num_trees=5,
max_nodes=1000,
num_classes=1,
num_features=13,
regression=True,
split_after_samples=20)
regressor = random_forest.CoreTensorForestEstimator(hparams.fill())
input_fn, predict_input_fn = _get_regression_input_fns()
regressor.train(input_fn=input_fn, steps=100)
res = regressor.evaluate(input_fn=input_fn, steps=10)
self.assertGreaterEqual(0.1, res['loss'])
predictions = list(regressor.predict(input_fn=predict_input_fn))
self.assertAllClose(
[[24.]], [pred['predictions'] for pred in predictions], atol=1)
def testAdditionalOutputs(self):
"""Tests multi-class classification using matrix data as input."""
hparams = tensor_forest.ForestHParams(
num_trees=1,
max_nodes=100,
num_classes=3,
num_features=4,
split_after_samples=20,
inference_tree_paths=True)
classifier = random_forest.CoreTensorForestEstimator(
hparams.fill(), keys_column='keys', include_all_in_serving=True)
iris = base.load_iris()
data = iris.data.astype(np.float32)
labels = iris.target.astype(np.int32)
input_fn = numpy_io.numpy_input_fn(
x={
'x': data,
'keys': np.arange(len(iris.data)).reshape(150, 1)
},
y=labels,
batch_size=10,
num_epochs=1,
shuffle=False)
classifier.train(input_fn=input_fn, steps=100)
predictions = list(classifier.predict(input_fn=input_fn))
# Check that there is a key column, tree paths and var.
for pred in predictions:
self.assertTrue('keys' in pred)
self.assertTrue('tree_paths' in pred)
self.assertTrue('prediction_variance' in pred)
def _assert_checkpoint(self, model_dir, global_step):
reader = checkpoint_utils.load_checkpoint(model_dir)
self.assertLessEqual(
reader.get_tensor(ops.GraphKeys.GLOBAL_STEP), global_step)
def testEarlyStopping(self):
head_fn = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=3, loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
hparams = tensor_forest.ForestHParams(
num_trees=3,
max_nodes=1000,
num_classes=3,
num_features=4,
split_after_samples=20,
inference_tree_paths=True)
est = random_forest.CoreTensorForestEstimator(
hparams.fill(),
head=head_fn,
# Set a crazy threshold - 30% loss change.
early_stopping_loss_threshold=0.3,
early_stopping_rounds=2)
input_fn, _ = _get_classification_input_fns()
est.train(input_fn=input_fn, steps=100)
# We stopped early.
self._assert_checkpoint(est.model_dir, global_step=8)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/client/random_forest_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A tf.learn implementation of online extremely random forests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.estimator.python.estimator import head as core_head_lib
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.tensor_forest.client import eval_metrics
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.estimator import estimator as core_estimator
from tensorflow.python.estimator.export.export_output import PredictOutput
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
KEYS_NAME = 'keys'
LOSS_NAME = 'rf_training_loss'
TREE_PATHS_PREDICTION_KEY = 'tree_paths'
VARIANCE_PREDICTION_KEY = 'prediction_variance'
ALL_SERVING_KEY = 'tensorforest_all'
EPSILON = 0.000001
class ModelBuilderOutputType(object):
MODEL_FN_OPS = 0
ESTIMATOR_SPEC = 1
class TensorForestRunOpAtEndHook(session_run_hook.SessionRunHook):
def __init__(self, op_dict):
"""Ops is a dict of {name: op} to run before the session is destroyed."""
self._ops = op_dict
def end(self, session):
for name in sorted(self._ops.keys()):
logging.info('{0}: {1}'.format(name, session.run(self._ops[name])))
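# Illustrative sketch (not part of the original module): ops handed to
# TensorForestRunOpAtEndHook run once when the session ends, in sorted key
# order, which is why get_model_fn() below prefixes its keys with '1:' and
# '9:'. The two op arguments here are hypothetical.
def _example_end_hook(feature_importances_op, cleanup_op):
  return TensorForestRunOpAtEndHook({
      '1: feature_importances': feature_importances_op,
      '9: clean up resources': cleanup_op,
  })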
class TensorForestLossHook(session_run_hook.SessionRunHook):
"""Monitor to request stop when loss stops decreasing."""
def __init__(self,
early_stopping_rounds,
early_stopping_loss_threshold=None,
loss_op=None):
self.early_stopping_rounds = early_stopping_rounds
self.early_stopping_loss_threshold = early_stopping_loss_threshold
self.loss_op = loss_op
self.min_loss = None
self.last_step = -1
# self.steps records the number of steps for which the loss has been
# non-decreasing
self.steps = 0
def before_run(self, run_context):
loss = (self.loss_op if self.loss_op is not None else
run_context.session.graph.get_operation_by_name(
LOSS_NAME).outputs[0])
return session_run_hook.SessionRunArgs(
{'global_step': training_util.get_global_step(),
'current_loss': loss})
def after_run(self, run_context, run_values):
current_loss = run_values.results['current_loss']
current_step = run_values.results['global_step']
self.steps += 1
# Guard against the global step going backwards, which might happen
# if we recover from something.
if self.last_step == -1 or self.last_step > current_step:
logging.info('TensorForestLossHook resetting last_step.')
self.last_step = current_step
self.steps = 0
self.min_loss = None
return
self.last_step = current_step
if (self.min_loss is None or current_loss <
(self.min_loss - self.min_loss * self.early_stopping_loss_threshold)):
self.min_loss = current_loss
self.steps = 0
if self.steps > self.early_stopping_rounds:
logging.info('TensorForestLossHook requesting stop.')
run_context.request_stop()
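# Illustrative sketch (not part of the original module): constructing the loss
# hook by hand with a 5% improvement threshold over 10 steps. When loss_op is
# None the hook instead looks up the graph operation named LOSS_NAME.
def _example_loss_hook(loss_op=None):
  return TensorForestLossHook(
      early_stopping_rounds=10,
      early_stopping_loss_threshold=0.05,
      loss_op=loss_op)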
def _get_default_head(params, weights_name, output_type, name=None):
"""Creates a default head based on a type of a problem."""
if output_type == ModelBuilderOutputType.MODEL_FN_OPS:
if params.regression:
return head_lib.regression_head(
weight_column_name=weights_name,
label_dimension=params.num_outputs,
enable_centered_bias=False,
head_name=name)
else:
return head_lib.multi_class_head(
params.num_classes,
weight_column_name=weights_name,
enable_centered_bias=False,
head_name=name)
else:
if params.regression:
return core_head_lib.regression_head(
weight_column=weights_name,
label_dimension=params.num_outputs,
name=name,
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
else:
if params.num_classes == 2:
return core_head_lib.binary_classification_head(
weight_column=weights_name,
name=name,
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
else:
return core_head_lib.multi_class_head(
n_classes=params.num_classes,
weight_column=weights_name,
name=name,
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
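# Illustrative sketch (not part of the original module): for non-regression
# params with more than two classes and ESTIMATOR_SPEC output,
# _get_default_head picks the core multi_class_head; two classes get the
# binary head, and regression params get the regression head. `params` here is
# a hypothetical filled ForestHParams.
def _example_default_head(params):
  return _get_default_head(
      params,
      weights_name=None,
      output_type=ModelBuilderOutputType.ESTIMATOR_SPEC)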
def get_model_fn(params,
graph_builder_class,
device_assigner,
feature_columns=None,
weights_name=None,
model_head=None,
keys_name=None,
early_stopping_rounds=100,
early_stopping_loss_threshold=0.001,
num_trainers=1,
trainer_id=0,
report_feature_importances=False,
local_eval=False,
head_scope=None,
include_all_in_serving=False,
output_type=ModelBuilderOutputType.MODEL_FN_OPS):
"""Return a model function given a way to construct a graph builder."""
if model_head is None:
model_head = _get_default_head(params, weights_name, output_type)
def _model_fn(features, labels, mode):
"""Function that returns predictions, training loss, and training op."""
if (isinstance(features, ops.Tensor) or
isinstance(features, sparse_tensor.SparseTensor)):
features = {'features': features}
if feature_columns:
features = features.copy()
if output_type == ModelBuilderOutputType.MODEL_FN_OPS:
features.update(layers.transform_features(features, feature_columns))
else:
for fc in feature_columns:
tensor = fc_core._transform_features(features, [fc])[fc] # pylint: disable=protected-access
features[fc.name] = tensor
weights = None
if weights_name and weights_name in features:
weights = features.pop(weights_name)
keys = None
if keys_name and keys_name in features:
keys = features.pop(keys_name)
# If we're doing eval, optionally ignore device_assigner.
# Also ignore device assigner if we're exporting (mode == INFER)
dev_assn = device_assigner
if (mode == model_fn_lib.ModeKeys.INFER or
(local_eval and mode == model_fn_lib.ModeKeys.EVAL)):
dev_assn = None
graph_builder = graph_builder_class(params,
device_assigner=dev_assn)
logits, tree_paths, regression_variance = graph_builder.inference_graph(
features)
summary.scalar('average_tree_size', graph_builder.average_size())
# For binary classification problems, convert probabilities to logits.
# Includes hack to get around the fact that a probability might be 0 or 1.
if not params.regression and params.num_classes == 2:
class_1_probs = array_ops.slice(logits, [0, 1], [-1, 1])
logits = math_ops.log(
math_ops.maximum(class_1_probs / math_ops.maximum(
1.0 - class_1_probs, EPSILON), EPSILON))
# labels might be None if we're doing prediction (which brings up the
# question of why we force everything to adhere to a single model_fn).
training_graph = None
training_hooks = []
if labels is not None and mode == model_fn_lib.ModeKeys.TRAIN:
with ops.control_dependencies([logits.op]):
training_graph = control_flow_ops.group(
graph_builder.training_graph(
features, labels, input_weights=weights,
num_trainers=num_trainers,
trainer_id=trainer_id),
state_ops.assign_add(training_util.get_global_step(), 1))
# Put weights back in
if weights is not None:
features[weights_name] = weights
# TensorForest's training graph isn't calculated directly from the loss
# like many other models.
def _train_fn(unused_loss):
return training_graph
      # Ops are run in lexicographical order of their keys. Run the resource
# clean-up op last.
all_handles = graph_builder.get_all_resource_handles()
ops_at_end = {
'9: clean up resources':
control_flow_ops.group(*[
resource_variable_ops.destroy_resource_op(handle)
for handle in all_handles
])
}
if report_feature_importances:
ops_at_end['1: feature_importances'] = (
graph_builder.feature_importances())
training_hooks = [TensorForestRunOpAtEndHook(ops_at_end)]
if output_type == ModelBuilderOutputType.MODEL_FN_OPS:
model_ops = model_head.create_model_fn_ops(
features=features,
labels=labels,
mode=mode,
train_op_fn=_train_fn,
logits=logits,
scope=head_scope)
if early_stopping_rounds:
training_hooks.append(
TensorForestLossHook(
early_stopping_rounds,
early_stopping_loss_threshold=early_stopping_loss_threshold,
loss_op=model_ops.loss))
model_ops.training_hooks.extend(training_hooks)
if keys is not None:
model_ops.predictions[keys_name] = keys
if params.inference_tree_paths:
model_ops.predictions[TREE_PATHS_PREDICTION_KEY] = tree_paths
model_ops.predictions[VARIANCE_PREDICTION_KEY] = regression_variance
if include_all_in_serving:
# In order to serve the variance we need to add the prediction dict
# to output_alternatives dict.
if not model_ops.output_alternatives:
model_ops.output_alternatives = {}
model_ops.output_alternatives[ALL_SERVING_KEY] = (
constants.ProblemType.UNSPECIFIED, model_ops.predictions)
return model_ops
else:
# Estimator spec
estimator_spec = model_head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_fn,
logits=logits)
if early_stopping_rounds:
training_hooks.append(
TensorForestLossHook(
early_stopping_rounds,
early_stopping_loss_threshold=early_stopping_loss_threshold,
loss_op=estimator_spec.loss))
estimator_spec = estimator_spec._replace(
training_hooks=training_hooks + list(estimator_spec.training_hooks))
if keys is not None:
estimator_spec.predictions[keys_name] = keys
if params.inference_tree_paths:
estimator_spec.predictions[TREE_PATHS_PREDICTION_KEY] = tree_paths
estimator_spec.predictions[VARIANCE_PREDICTION_KEY] = regression_variance
if include_all_in_serving:
outputs = estimator_spec.export_outputs
if not outputs:
outputs = {}
        outputs[ALL_SERVING_KEY] = PredictOutput(estimator_spec.predictions)
# In order to serve the variance we need to add the prediction dict
# to output_alternatives dict.
estimator_spec = estimator_spec._replace(export_outputs=outputs)
return estimator_spec
return _model_fn
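# Illustrative sketch (not part of the original module): wiring the model_fn
# returned by get_model_fn() into a core Estimator by hand, which is what
# CoreTensorForestEstimator below does internally. The hyperparameter values
# are made up.
def _example_core_estimator(model_dir=None):
  params = tensor_forest.ForestHParams(
      num_classes=3, num_features=4, num_trees=10, max_nodes=1000).fill()
  model_fn = get_model_fn(
      params,
      tensor_forest.RandomForestGraphs,
      device_assigner=None,
      output_type=ModelBuilderOutputType.ESTIMATOR_SPEC)
  return core_estimator.Estimator(model_fn=model_fn, model_dir=model_dir)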
class TensorForestEstimator(estimator.Estimator):
"""An estimator that can train and evaluate a random forest.
Example:
```python
params = tf.contrib.tensor_forest.python.tensor_forest.ForestHParams(
num_classes=2, num_features=40, num_trees=10, max_nodes=1000)
# Estimator using the default graph builder.
estimator = TensorForestEstimator(params, model_dir=model_dir)
# Or estimator using TrainingLossForest as the graph builder.
estimator = TensorForestEstimator(
params, graph_builder_class=tensor_forest.TrainingLossForest,
model_dir=model_dir)
# Input builders
  def input_fn_train(): # returns x, y
...
  def input_fn_eval(): # returns x, y
...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
# Predict returns an iterable of dicts.
results = list(estimator.predict(x=x))
prob0 = results[0][eval_metrics.INFERENCE_PROB_NAME]
prediction0 = results[0][eval_metrics.INFERENCE_PRED_NAME]
```
"""
def __init__(self,
params,
device_assigner=None,
model_dir=None,
feature_columns=None,
graph_builder_class=tensor_forest.RandomForestGraphs,
config=None,
weight_column=None,
keys_column=None,
feature_engineering_fn=None,
early_stopping_rounds=100,
early_stopping_loss_threshold=0.001,
num_trainers=1,
trainer_id=0,
report_feature_importances=False,
local_eval=False,
version=None,
head=None,
include_all_in_serving=False):
"""Initializes a TensorForestEstimator instance.
Args:
params: ForestHParams object that holds random forest hyperparameters.
These parameters will be passed into `model_fn`.
device_assigner: An `object` instance that controls how trees get
assigned to devices. If `None`, will use
`tensor_forest.RandomForestDeviceAssigner`.
model_dir: Directory to save model parameters, graph, etc. To continue
training a previously saved model, load checkpoints saved to this
directory into an estimator.
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `_FeatureColumn`.
graph_builder_class: An `object` instance that defines how TF graphs for
random forest training and inference are built. By default will use
`tensor_forest.RandomForestGraphs`. Can be overridden by version
kwarg.
config: `RunConfig` object to configure the runtime settings.
weight_column: A string defining feature column name representing
weights. Will be multiplied by the loss of the example. Used to
downweight or boost examples during training.
keys_column: A string naming one of the features to strip out and
pass through into the inference/eval results dict. Useful for
associating specific examples with their prediction.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
early_stopping_rounds: Allows training to terminate early if the forest is
no longer growing. 100 by default. Set to a Falsy value to disable
the default training hook.
early_stopping_loss_threshold: Percentage (as fraction) that loss must
improve by within early_stopping_rounds steps, otherwise training will
terminate.
num_trainers: Number of training jobs, which will partition trees
among them.
trainer_id: Which trainer this instance is.
report_feature_importances: If True, print out feature importances
during evaluation.
local_eval: If True, don't use a device assigner for eval. This is to
support some common setups where eval is done on a single machine, even
though training might be distributed.
version: Unused.
head: A heads_lib.Head object that calculates losses and such. If None,
one will be automatically created based on params.
      include_all_in_serving: If True, prepare the complete prediction dict,
        including the variance, for export for serving with the Servo lib.
        This also requires calling export_savedmodel with
        default_output_alternative_key=ALL_SERVING_KEY, i.e.
        estimator.export_savedmodel(export_dir_base=your_export_dir,
          serving_input_fn=your_export_input_fn,
          default_output_alternative_key=ALL_SERVING_KEY)
        If False, use the default behavior, i.e. export scores and
        probabilities but no variances. In this case
        default_output_alternative_key should be None when calling
        export_savedmodel().
        Note that, for backward compatibility, include_all_in_serving cannot
        always be set to True: calling export_savedmodel() without
        default_output_alternative_key=ALL_SERVING_KEY (the legacy behavior)
        would make saved_model_export_utils.get_output_alternatives() raise a
        ValueError.
Returns:
A `TensorForestEstimator` instance.
"""
# Override default number of trainers if config is provided.
if num_trainers == 1 and config is not None:
num_trainers = max(1, config.num_worker_replicas)
super(TensorForestEstimator, self).__init__(
model_fn=get_model_fn(
params.fill(),
graph_builder_class,
device_assigner,
feature_columns=feature_columns,
model_head=head,
weights_name=weight_column,
keys_name=keys_column,
early_stopping_rounds=early_stopping_rounds,
early_stopping_loss_threshold=early_stopping_loss_threshold,
num_trainers=num_trainers,
trainer_id=trainer_id,
report_feature_importances=report_feature_importances,
local_eval=local_eval,
include_all_in_serving=include_all_in_serving,
),
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
def get_combined_model_fn(model_fns):
"""Get a combined model function given a list of other model fns.
The model function returned will call the individual model functions and
combine them appropriately. For:
training ops: tf.group them.
loss: average them.
predictions: concat probabilities such that predictions[*][0-C1] are the
probabilities for output 1 (where C1 is the number of classes in output 1),
predictions[*][C1-(C1+C2)] are the probabilities for output 2 (where C2
is the number of classes in output 2), etc. Also stack predictions such
that predictions[i][j] is the class prediction for example i and output j.
This assumes that labels are 2-dimensional, with labels[i][j] being the
label for example i and output j, where forest j is trained using only
output j.
Args:
model_fns: A list of model functions obtained from get_model_fn.
Returns:
A ModelFnOps instance.
"""
def _model_fn(features, labels, mode):
"""Function that returns predictions, training loss, and training op."""
model_fn_ops = []
for i in range(len(model_fns)):
with variable_scope.variable_scope('label_{0}'.format(i)):
sliced_labels = array_ops.slice(labels, [0, i], [-1, 1])
model_fn_ops.append(
model_fns[i](features, sliced_labels, mode))
training_hooks = []
for mops in model_fn_ops:
training_hooks += mops.training_hooks
predictions = {}
if (mode == model_fn_lib.ModeKeys.EVAL or
mode == model_fn_lib.ModeKeys.INFER):
# Flatten the probabilities into one dimension.
predictions[eval_metrics.INFERENCE_PROB_NAME] = array_ops.concat(
[mops.predictions[eval_metrics.INFERENCE_PROB_NAME]
for mops in model_fn_ops], axis=1)
predictions[eval_metrics.INFERENCE_PRED_NAME] = array_ops.stack(
[mops.predictions[eval_metrics.INFERENCE_PRED_NAME]
for mops in model_fn_ops], axis=1)
loss = None
if (mode == model_fn_lib.ModeKeys.EVAL or
mode == model_fn_lib.ModeKeys.TRAIN):
loss = math_ops.reduce_sum(
array_ops.stack(
[mops.loss for mops in model_fn_ops])) / len(model_fn_ops)
train_op = None
if mode == model_fn_lib.ModeKeys.TRAIN:
train_op = control_flow_ops.group(
*[mops.train_op for mops in model_fn_ops])
return model_fn_lib.ModelFnOps(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
training_hooks=training_hooks,
scaffold=None,
output_alternatives=None)
return _model_fn
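# Illustrative sketch (not part of the original module): combining per-output
# model_fns into one. Labels are expected to be 2-D, with column i feeding
# forest i; MultiForestMultiHeadEstimator below builds its model_fns list in
# much the same way. `params_list` is a hypothetical list of ForestHParams.
def _example_combined_model_fn(params_list):
  model_fns = [
      get_model_fn(p.fill(), tensor_forest.RandomForestGraphs,
                   device_assigner=None, head_scope='output{0}'.format(i))
      for i, p in enumerate(params_list)
  ]
  return get_combined_model_fn(model_fns)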
class MultiForestMultiHeadEstimator(estimator.Estimator):
"""An estimator that can train a forest for a multi-headed problems.
This class essentially trains separate forests (each with their own
ForestHParams) for each output.
For multi-headed regression, a single-headed TensorForestEstimator can
be used to train a single model that predicts all outputs. This class can
be used to train separate forests for each output.
"""
def __init__(self,
params_list,
device_assigner=None,
model_dir=None,
feature_columns=None,
graph_builder_class=tensor_forest.RandomForestGraphs,
config=None,
weight_column=None,
keys_column=None,
feature_engineering_fn=None,
early_stopping_rounds=100,
num_trainers=1,
trainer_id=0,
report_feature_importances=False,
local_eval=False):
"""See TensorForestEstimator.__init__."""
model_fns = []
# Override default number of trainers if config is provided.
if num_trainers == 1 and config is not None:
num_trainers = max(1, config.num_worker_replicas)
for i in range(len(params_list)):
params = params_list[i].fill()
model_fns.append(
get_model_fn(
params,
graph_builder_class,
device_assigner,
model_head=_get_default_head(
params,
weight_column,
name='head{0}'.format(i),
output_type=ModelBuilderOutputType.MODEL_FN_OPS),
weights_name=weight_column,
keys_name=keys_column,
early_stopping_rounds=early_stopping_rounds,
num_trainers=num_trainers,
trainer_id=trainer_id,
report_feature_importances=report_feature_importances,
local_eval=local_eval,
head_scope='output{0}'.format(i)))
super(MultiForestMultiHeadEstimator, self).__init__(
model_fn=get_combined_model_fn(model_fns),
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
class CoreTensorForestEstimator(core_estimator.Estimator):
"""A CORE estimator that can train and evaluate a random forest.
Example:
```python
params = tf.contrib.tensor_forest.python.tensor_forest.ForestHParams(
num_classes=2, num_features=40, num_trees=10, max_nodes=1000)
# Estimator using the default graph builder.
estimator = CoreTensorForestEstimator(params, model_dir=model_dir)
# Or estimator using TrainingLossForest as the graph builder.
estimator = CoreTensorForestEstimator(
params, graph_builder_class=tensor_forest.TrainingLossForest,
model_dir=model_dir)
# Input builders
  def input_fn_train():  # returns x, y
    ...
  def input_fn_eval():  # returns x, y
    ...
estimator.train(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
# Predict returns an iterable of dicts.
  results = list(estimator.predict(input_fn=input_fn_eval))
prob0 = results[0][eval_metrics.INFERENCE_PROB_NAME]
prediction0 = results[0][eval_metrics.INFERENCE_PRED_NAME]
```
"""
def __init__(self,
params,
device_assigner=None,
model_dir=None,
feature_columns=None,
graph_builder_class=tensor_forest.RandomForestGraphs,
config=None,
weight_column=None,
keys_column=None,
feature_engineering_fn=None,
early_stopping_rounds=100,
early_stopping_loss_threshold=0.001,
num_trainers=1,
trainer_id=0,
report_feature_importances=False,
local_eval=False,
version=None,
head=None,
include_all_in_serving=False):
"""Initializes a TensorForestEstimator instance.
Args:
params: ForestHParams object that holds random forest hyperparameters.
These parameters will be passed into `model_fn`.
device_assigner: An `object` instance that controls how trees get
assigned to devices. If `None`, will use
`tensor_forest.RandomForestDeviceAssigner`.
model_dir: Directory to save model parameters, graph, etc. To continue
training a previously saved model, load checkpoints saved to this
directory into an estimator.
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `_FeatureColumn`.
graph_builder_class: An `object` instance that defines how TF graphs for
random forest training and inference are built. By default will use
`tensor_forest.RandomForestGraphs`. Can be overridden by version
kwarg.
config: `RunConfig` object to configure the runtime settings.
weight_column: A string defining feature column name representing
weights. Will be multiplied by the loss of the example. Used to
downweight or boost examples during training.
keys_column: A string naming one of the features to strip out and
pass through into the inference/eval results dict. Useful for
associating specific examples with their prediction.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
      early_stopping_rounds: Allows training to terminate early if the forest
        is no longer growing. 100 by default. Set to a falsy value to disable
        the default training hook.
      early_stopping_loss_threshold: Fraction by which the loss must improve
        within `early_stopping_rounds` steps; otherwise training terminates.
      num_trainers: Number of training jobs, among which the trees will be
        partitioned.
      trainer_id: The index of this trainer within the training jobs.
report_feature_importances: If True, print out feature importances
during evaluation.
local_eval: If True, don't use a device assigner for eval. This is to
support some common setups where eval is done on a single machine, even
though training might be distributed.
version: Unused.
      head: A heads_lib.Head object that computes losses and related ops. If
        None, one will be created automatically based on params.
      include_all_in_serving: If True, prepare the complete prediction dict,
        including the variance, to be exported for serving with the Servo lib;
        this also requires calling export_savedmodel with
        default_output_alternative_key=ALL_SERVING_KEY, i.e.
        estimator.export_savedmodel(export_dir_base=your_export_dir,
                                    serving_input_fn=your_export_input_fn,
                                    default_output_alternative_key=ALL_SERVING_KEY).
        If False, fall back to the default behavior: export scores and
        probabilities but no variances. In that case
        default_output_alternative_key should be None when calling
        export_savedmodel().
        Note that for backward compatibility we cannot always set
        include_all_in_serving to True: calling export_savedmodel() without
        default_output_alternative_key=ALL_SERVING_KEY (the legacy behavior)
        would make saved_model_export_utils.get_output_alternatives() raise a
        ValueError.
    Returns:
      A `CoreTensorForestEstimator` instance.
"""
# Override default number of trainers if config is provided.
if num_trainers == 1 and config is not None:
num_trainers = max(1, config.num_worker_replicas)
if trainer_id == 0 and config is not None:
trainer_id = config.global_id_in_cluster
super(CoreTensorForestEstimator, self).__init__(
model_fn=get_model_fn(
params.fill(),
graph_builder_class,
device_assigner,
feature_columns=feature_columns,
model_head=head,
weights_name=weight_column,
keys_name=keys_column,
early_stopping_rounds=early_stopping_rounds,
early_stopping_loss_threshold=early_stopping_loss_threshold,
num_trainers=num_trainers,
trainer_id=trainer_id,
report_feature_importances=report_feature_importances,
local_eval=local_eval,
include_all_in_serving=include_all_in_serving,
output_type=ModelBuilderOutputType.ESTIMATOR_SPEC),
model_dir=model_dir,
config=config)
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/client/random_forest.py
|
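MultiForestMultiHeadEstimator has no usage example of its own; below is a minimal sketch in the same spirit as the docstring example above, assuming two heads and a hypothetical `my_input_fn` whose label tensor has shape `[batch, 2]` (one column per head). The hyperparameter values are illustrative only.

```python
from tensorflow.contrib.tensor_forest.client import random_forest
from tensorflow.contrib.tensor_forest.python import tensor_forest

# One ForestHParams per output head; the values here are illustrative only.
params_list = [
    tensor_forest.ForestHParams(
        num_classes=2, num_features=40, num_trees=10, max_nodes=1000)
    for _ in range(2)
]
estimator = random_forest.MultiForestMultiHeadEstimator(
    params_list, model_dir='/tmp/multi_head_forest')
# estimator.fit(input_fn=my_input_fn)  # labels: [batch, 2], one column per head
```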
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Initialize tensor_forest/hybrid."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.tensor_forest.hybrid.python import *
# pylint: enable=unused-import,wildcard-import
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/hybrid/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines the layer abstraction for hybrid models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import variables as framework_variables
class HybridLayer(object):
"""Layers are building blocks for hybrid models."""
def _define_vars(self,
params,
**kwargs):
"""Override to define the TensorFlow variables for the layer."""
raise NotImplementedError
# pylint: disable=unused-argument
def __init__(self, params, layer_num, device_assigner, *args, **kwargs):
self.layer_num = layer_num
self.device_assigner = (
device_assigner or framework_variables.VariableDeviceChooser())
self.params = params
self._define_vars(params, **kwargs)
def inference_graph(self, data, data_spec=None):
raise NotImplementedError
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/hybrid/python/hybrid_layer.py
|
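`HybridLayer` itself only wires up `params`, `layer_num`, and the device assigner; subclasses must supply `_define_vars` and `inference_graph`. A minimal illustrative subclass (hypothetical, not part of the library) might look like this:

```python
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_layer


class IdentityLayer(hybrid_layer.HybridLayer):
  """Toy layer that declares no variables and passes data through."""

  def _define_vars(self, params, **kwargs):
    pass  # no TensorFlow variables needed for this layer

  def inference_graph(self, data, data_spec=None):
    return data  # identity transformation of the batch
```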
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the hybrid tensor forest model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_model
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class HybridLayerTest(test_util.TensorFlowTestCase):
def setUp(self):
self.params = tensor_forest.ForestHParams(
num_classes=3,
num_features=7,
layer_size=11,
num_layers=13,
num_trees=17,
connection_probability=0.1,
hybrid_tree_depth=4,
regularization_strength=0.01,
regularization="",
weight_init_mean=0.0,
weight_init_std=0.1)
self.params.num_nodes = 2**self.params.hybrid_tree_depth - 1
self.params.num_leaves = 2**(self.params.hybrid_tree_depth - 1)
def testLayerNums(self):
l1 = fully_connected.FullyConnectedLayer(self.params, 0, None)
self.assertEquals(l1.layer_num, 0)
l2 = fully_connected.FullyConnectedLayer(self.params, 1, None)
self.assertEquals(l2.layer_num, 1)
l3 = fully_connected.FullyConnectedLayer(self.params, 2, None)
self.assertEquals(l3.layer_num, 2)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/hybrid/python/hybrid_layer_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines the model abstraction for hybrid models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import variables as framework_variables
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import adagrad
class HybridModel(object):
"""Defines a hybrid model.
Models chain together the results of inference layers and provide training
capabilities.
"""
# pylint: disable=unused-argument
def __init__(self,
params,
device_assigner=None,
optimizer_class=adagrad.AdagradOptimizer,
**kwargs):
self.device_assigner = (
device_assigner or framework_variables.VariableDeviceChooser())
self.params = params
self.optimizer = optimizer_class(self.params.learning_rate)
self.is_regression = params.regression
self.regularizer = None
if params.regularization == "l1":
self.regularizer = layers.l1_regularizer(
self.params.regularization_strength)
elif params.regularization == "l2":
self.regularizer = layers.l2_regularizer(
self.params.regularization_strength)
def _do_layer_inference(self, layer, data):
# If this is a collection of layers, return the mean of their inference
# results.
if isinstance(layer, collections.Iterable):
return math_ops.reduce_mean(
array_ops.stack([l.inference_graph(data) for l in layer]), 0)
# If this is a single layer, return its inference result.
else:
return layer.inference_graph(data)
def _base_inference(self, data, data_spec=None):
"""Returns an op that performs inference without a softmax."""
inference_result = self._do_layer_inference(self.layers[0], data)
for layer in self.layers[1:]:
inference_result = self._do_layer_inference(layer, inference_result)
output_size = 1 if self.is_regression else self.params.num_classes
output = layers.fully_connected(
inference_result, output_size, activation_fn=array_ops.identity)
return output
def inference_graph(self, data, data_spec=None):
"""Returns the op that performs inference on a batch of data."""
return nn_ops.softmax(self._base_inference(data, data_spec=data_spec))
def training_inference_graph(self, data, data_spec=None):
"""Returns an inference-without-softmax op for training purposes."""
return self._base_inference(data, data_spec=data_spec)
def predict_proba(self, data, data_spec=None):
inference_result = self.inference_graph(data, data_spec=data_spec)
probabilities = nn_ops.softmax(inference_result, name="probabilities")
return probabilities
def training_graph(self, data, labels, data_spec=None, epoch=None):
"""Returns the op that trains the hybrid model."""
return self.optimizer.minimize(self.training_loss(data, labels))
def loss(self, data, labels):
"""The loss to minimize while training."""
if self.is_regression:
diff = self.training_inference_graph(data) - math_ops.cast(
labels, dtypes.float32)
mean_squared_error = math_ops.reduce_mean(diff * diff)
root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
loss = root_mean_squared_error
else:
loss = math_ops.reduce_mean(
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=array_ops.squeeze(math_ops.cast(labels, dtypes.int32)),
logits=self.training_inference_graph(data)),
name="loss")
if self.regularizer:
loss += layers.apply_regularization(self.regularizer,
variables.trainable_variables())
return loss
def training_loss(self, data, labels):
return self.loss(data, labels)
def validation_loss(self, data, labels):
return self.loss(data, labels)
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/hybrid/python/hybrid_model.py
|
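The interesting branch in `_do_layer_inference` is the iterable case: a collection of layers is reduced to the element-wise mean of their outputs. A plain-NumPy sketch of that intended semantics (illustrative shapes, not library code):

```python
import numpy as np

# Three per-layer outputs of shape [batch, width]; a collection of layers is
# collapsed to their element-wise mean, mirroring _do_layer_inference.
layer_outputs = [np.full((4, 8), value) for value in (0.0, 1.0, 2.0)]
mean_output = np.mean(np.stack(layer_outputs), axis=0)
assert mean_output.shape == (4, 8)
assert np.allclose(mean_output, 1.0)
```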
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Initialize tensor_forest/hybrid/python."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python import layers
from tensorflow.contrib.tensor_forest.hybrid.python import models
from tensorflow.contrib.tensor_forest.hybrid.python.ops import training_ops
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/hybrid/python/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Initialize tensor_forest/hybrid/python/layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/hybrid/python/layers/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Neural network components for hybrid models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_layer
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
class FullyConnectedLayer(hybrid_layer.HybridLayer):
"""A stacked, fully-connected feed-forward neural network layer."""
def _define_vars(self, params):
pass
def inference_graph(self, data):
with ops.device(self.device_assigner):
# Compute activations for the neural network.
nn_activations = layers.fully_connected(data, self.params.layer_size)
for _ in range(1, self.params.num_layers):
# pylint: disable=W0106
nn_activations = layers.fully_connected(nn_activations,
self.params.layer_size)
return nn_activations
class ManyToOneLayer(hybrid_layer.HybridLayer):
def _define_vars(self, params):
pass
def inference_graph(self, data):
with ops.device(self.device_assigner):
# Compute activations for the neural network.
nn_activations = layers.fully_connected(data, 1)
# There is always one activation per instance by definition, so squeeze
# away the extra dimension.
return array_ops.squeeze(nn_activations, axis=[1])
class FlattenedFullyConnectedLayer(hybrid_layer.HybridLayer):
"""A stacked, fully-connected flattened feed-forward neural network layer."""
def _define_vars(self, params):
pass
def inference_graph(self, data):
with ops.device(self.device_assigner):
# Compute activations for the neural network.
nn_activations = [layers.fully_connected(data, self.params.layer_size)]
for _ in range(1, self.params.num_layers):
# pylint: disable=W0106
nn_activations.append(
layers.fully_connected(
nn_activations[-1],
self.params.layer_size))
nn_activations_tensor = array_ops.concat(
nn_activations, 1, name="flattened_nn_activations")
return nn_activations_tensor
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/hybrid/python/layers/fully_connected.py
|
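The practical difference between the two stacked variants is the output width: `FullyConnectedLayer` returns only the last hidden activation, while `FlattenedFullyConnectedLayer` concatenates every hidden activation along axis 1. A quick sketch of that width arithmetic, using the hyperparameter names from above with illustrative numbers:

```python
batch, layer_size, num_layers = 32, 11, 13

stacked_width = layer_size                 # FullyConnectedLayer output width
flattened_width = layer_size * num_layers  # FlattenedFullyConnectedLayer width
print((batch, stacked_width), (batch, flattened_width))  # (32, 11) (32, 143)
```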
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.hybrid.python.layers import decisions_to_data
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.framework.ops import Operation
from tensorflow.python.framework.ops import Tensor
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import googletest
class DecisionsToDataTest(test_util.TensorFlowTestCase):
def setUp(self):
self.params = tensor_forest.ForestHParams(
num_classes=2,
num_features=31,
layer_size=11,
num_layers=13,
num_trees=17,
connection_probability=0.1,
hybrid_tree_depth=4,
regularization_strength=0.01,
regularization="",
learning_rate=0.01,
weight_init_mean=0.0,
weight_init_std=0.1)
self.params.regression = False
self.params.num_nodes = 2**self.params.hybrid_tree_depth - 1
self.params.num_leaves = 2**(self.params.hybrid_tree_depth - 1)
# pylint: disable=W0612
self.input_data = constant_op.constant(
[[random.uniform(-1, 1) for i in range(self.params.num_features)]
for _ in range(100)])
def testInferenceConstruction(self):
with variable_scope.variable_scope(
"DecisionsToDataTest_testInferenceContruction"):
graph_builder = decisions_to_data.DecisionsToDataLayer(self.params, 0,
None)
unused_graph = graph_builder.inference_graph(self.input_data)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/hybrid/python/layers/decisions_to_data_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Treats a decision tree as a representation transformation layer.
A decision tree transformer takes features as input and returns the probability
of reaching each leaf as output. The routing throughout the tree is learnable
via backpropagation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.ops import gen_training_ops
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_layer
from tensorflow.contrib.tensor_forest.hybrid.python.ops import training_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
class DecisionsToDataLayer(hybrid_layer.HybridLayer):
"""A layer that treats soft decisions as data."""
def _define_vars(self, params, **kwargs):
with ops.device(self.device_assigner):
self.tree_parameters = variable_scope.get_variable(
name='tree_parameters_%d' % self.layer_num,
shape=[params.num_nodes, params.num_features],
initializer=init_ops.truncated_normal_initializer(
mean=params.weight_init_mean, stddev=params.weight_init_std))
self.tree_thresholds = variable_scope.get_variable(
name='tree_thresholds_%d' % self.layer_num,
shape=[params.num_nodes],
initializer=init_ops.truncated_normal_initializer(
mean=params.weight_init_mean, stddev=params.weight_init_std))
def __init__(self, params, layer_num, device_assigner,
*args, **kwargs):
super(DecisionsToDataLayer, self).__init__(
params, layer_num, device_assigner, *args, **kwargs)
self._training_ops = training_ops.Load()
def inference_graph(self, data):
with ops.device(self.device_assigner):
routing_probabilities = gen_training_ops.routing_function(
data,
self.tree_parameters,
self.tree_thresholds,
max_nodes=self.params.num_nodes)
output = array_ops.slice(
routing_probabilities,
[0, self.params.num_nodes - self.params.num_leaves - 1],
[-1, self.params.num_leaves])
return output
class KFeatureDecisionsToDataLayer(hybrid_layer.HybridLayer):
"""A layer that treats soft decisions made on single features as data."""
def _define_vars(self, params, **kwargs):
with ops.device(self.device_assigner):
self.tree_parameters = variable_scope.get_variable(
name='tree_parameters_%d' % self.layer_num,
shape=[params.num_nodes, params.num_features_per_node],
initializer=init_ops.truncated_normal_initializer(
mean=params.weight_init_mean, stddev=params.weight_init_std))
self.tree_thresholds = variable_scope.get_variable(
name='tree_thresholds_%d' % self.layer_num,
shape=[params.num_nodes],
initializer=init_ops.truncated_normal_initializer(
mean=params.weight_init_mean, stddev=params.weight_init_std))
def __init__(self, params, layer_num, device_assigner,
*args, **kwargs):
super(KFeatureDecisionsToDataLayer, self).__init__(
params, layer_num, device_assigner, *args, **kwargs)
self._training_ops = training_ops.Load()
# pylint: disable=unused-argument
def inference_graph(self, data):
with ops.device(self.device_assigner):
routing_probabilities = gen_training_ops.k_feature_routing_function(
data,
self.tree_parameters,
self.tree_thresholds,
max_nodes=self.params.num_nodes,
num_features_per_node=self.params.num_features_per_node,
layer_num=0,
random_seed=self.params.base_random_seed)
output = array_ops.slice(
routing_probabilities,
[0, self.params.num_nodes - self.params.num_leaves - 1],
[-1, self.params.num_leaves])
return output
class HardDecisionsToDataLayer(DecisionsToDataLayer):
"""A layer that learns a soft decision tree but treats it as hard at test."""
def _define_vars(self, params, **kwargs):
with ops.device(self.device_assigner):
self.tree_parameters = variable_scope.get_variable(
name='hard_tree_parameters_%d' % self.layer_num,
shape=[params.num_nodes, params.num_features],
initializer=variable_scope.truncated_normal_initializer(
mean=params.weight_init_mean, stddev=params.weight_init_std))
self.tree_thresholds = variable_scope.get_variable(
name='hard_tree_thresholds_%d' % self.layer_num,
shape=[params.num_nodes],
initializer=variable_scope.truncated_normal_initializer(
mean=params.weight_init_mean, stddev=params.weight_init_std))
def soft_inference_graph(self, data):
return super(HardDecisionsToDataLayer, self).inference_graph(data)
def inference_graph(self, data):
with ops.device(self.device_assigner):
path_probability, path = gen_training_ops.hard_routing_function(
data,
self.tree_parameters,
self.tree_thresholds,
max_nodes=self.params.num_nodes,
tree_depth=self.params.hybrid_tree_depth)
output = array_ops.slice(
gen_training_ops.unpack_path(path, path_probability),
[0, self.params.num_nodes - self.params.num_leaves - 1],
[-1, self.params.num_leaves])
return output
class StochasticHardDecisionsToDataLayer(HardDecisionsToDataLayer):
"""A layer that learns a soft decision tree by sampling paths."""
def _define_vars(self, params, **kwargs):
with ops.device(self.device_assigner):
self.tree_parameters = variable_scope.get_variable(
name='stochastic_hard_tree_parameters_%d' % self.layer_num,
shape=[params.num_nodes, params.num_features],
initializer=init_ops.truncated_normal_initializer(
mean=params.weight_init_mean, stddev=params.weight_init_std))
self.tree_thresholds = variable_scope.get_variable(
name='stochastic_hard_tree_thresholds_%d' % self.layer_num,
shape=[params.num_nodes],
initializer=init_ops.truncated_normal_initializer(
mean=params.weight_init_mean, stddev=params.weight_init_std))
def soft_inference_graph(self, data):
with ops.device(self.device_assigner):
path_probability, path = (
gen_training_ops.stochastic_hard_routing_function(
data,
self.tree_parameters,
self.tree_thresholds,
tree_depth=self.params.hybrid_tree_depth,
random_seed=self.params.base_random_seed))
output = array_ops.slice(
gen_training_ops.unpack_path(path, path_probability),
[0, self.params.num_nodes - self.params.num_leaves - 1],
[-1, self.params.num_leaves])
return output
def inference_graph(self, data):
with ops.device(self.device_assigner):
path_probability, path = gen_training_ops.hard_routing_function(
data,
self.tree_parameters,
self.tree_thresholds,
max_nodes=self.params.num_nodes,
tree_depth=self.params.hybrid_tree_depth)
output = array_ops.slice(
gen_training_ops.unpack_path(path, path_probability),
[0, self.params.num_nodes - self.params.num_leaves - 1],
[-1, self.params.num_leaves])
return output
class StochasticSoftDecisionsToDataLayer(StochasticHardDecisionsToDataLayer):
"""A layer that learns a soft decision tree by sampling paths."""
def _define_vars(self, params, **kwargs):
with ops.device(self.device_assigner):
self.tree_parameters = variable_scope.get_variable(
name='stochastic_soft_tree_parameters_%d' % self.layer_num,
shape=[params.num_nodes, params.num_features],
initializer=init_ops.truncated_normal_initializer(
mean=params.weight_init_mean, stddev=params.weight_init_std))
self.tree_thresholds = variable_scope.get_variable(
name='stochastic_soft_tree_thresholds_%d' % self.layer_num,
shape=[params.num_nodes],
initializer=init_ops.truncated_normal_initializer(
mean=params.weight_init_mean, stddev=params.weight_init_std))
def inference_graph(self, data):
with ops.device(self.device_assigner):
routes = gen_training_ops.routing_function(
data,
self.tree_parameters,
self.tree_thresholds,
max_nodes=self.params.num_nodes)
leaf_routes = array_ops.slice(
routes, [0, self.params.num_nodes - self.params.num_leaves - 1],
[-1, self.params.num_leaves])
return leaf_routes
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/hybrid/python/layers/decisions_to_data.py
|
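Every layer above slices the leaf entries out of the full routing tensor, so callers must pre-compute `params.num_nodes` and `params.num_leaves` from `hybrid_tree_depth`, exactly as the hybrid tests do. A sketch of that bookkeeping for a depth-4 tree:

```python
hybrid_tree_depth = 4

num_nodes = 2**hybrid_tree_depth - 1      # 15 nodes in the full binary tree
num_leaves = 2**(hybrid_tree_depth - 1)   # 8 of those nodes are leaves
slice_begin = num_nodes - num_leaves - 1  # column offset used by the slices above
print(num_nodes, num_leaves, slice_begin)  # 15 8 6
```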
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the routing function op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.ops import gen_training_ops
from tensorflow.contrib.tensor_forest.hybrid.python.ops import training_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class RoutingFunctionTest(test_util.TensorFlowTestCase):
def setUp(self):
self.input_data = [[-1., 0.], [-1., 2.],
[1., 0.], [1., -2.]]
self.input_labels = [0., 1., 2., 3.]
self.tree = [[1, 0], [-1, 0], [-1, 0]]
self.tree_weights = [[1.0, 0.0], [1.0, 0.0], [1.0, 0.0]]
self.tree_thresholds = [0., 0., 0.]
self.ops = training_ops.Load()
def testRoutingFunction(self):
with self.cached_session():
route_tensor = gen_training_ops.routing_function(
self.input_data, self.tree_weights, self.tree_thresholds, max_nodes=3)
route_tensor_shape = route_tensor.get_shape()
self.assertEquals(len(route_tensor_shape), 2)
self.assertEquals(route_tensor_shape[0], 4)
self.assertEquals(route_tensor_shape[1], 3)
routes = route_tensor.eval()
# Point 1
# Node 1 is a decision node => probability = 1.0
self.assertAlmostEquals(1.0, routes[0, 0])
# Probability left output = 1.0 / (1.0 + exp(1.0)) = 0.26894142
self.assertAlmostEquals(0.26894142, routes[0, 1])
# Probability right = 1 - 0.2689414 = 0.73105858
self.assertAlmostEquals(0.73105858, routes[0, 2])
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/hybrid/python/kernel_tests/routing_function_op_test.py
|
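The expected values asserted above come down to one logistic routing decision at the root: the first input is [-1., 0.], the root weights are [1.0, 0.0], and the threshold is 0, so the test expects sigmoid(-1) on the left branch. A quick check of that arithmetic in plain Python (the exact routing formula lives in the op kernel; this only reproduces the numbers the test asserts):

```python
import math


def sigmoid(x):
  return 1.0 / (1.0 + math.exp(-x))

dot = -1.0 * 1.0 + 0.0 * 0.0  # first input [-1, 0] against root weights [1, 0]
left = sigmoid(dot)           # 1 / (1 + exp(1)) = 0.26894142...
right = 1.0 - left            # 0.73105858...
print(round(left, 8), round(right, 8))
```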
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the routing function op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.ops import gen_training_ops
from tensorflow.contrib.tensor_forest.hybrid.python.ops import training_ops
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class KFeatureRoutingFunctionTest(test_util.TensorFlowTestCase):
def setUp(self):
self.input_data = [[-1., 0.], [-1., 2.],
[1., 0.], [1., -2.]]
self.input_labels = [0., 1., 2., 3.]
self.tree = [[1, 0], [-1, 0], [-1, 0]]
self.tree_weights = [[1.0, 0.0], [1.0, 0.0], [1.0, 0.0]]
self.tree_thresholds = [0., 0., 0.]
self.ops = training_ops.Load()
self.params = tensor_forest.ForestHParams(
num_features=2,
hybrid_tree_depth=2,
base_random_seed=10,
feature_bagging_fraction=1.0,
regularization_strength=0.01,
regularization="",
weight_init_mean=0.0,
weight_init_std=0.1)
self.params.num_nodes = 2**self.params.hybrid_tree_depth - 1
self.params.num_leaves = 2**(self.params.hybrid_tree_depth - 1)
self.params.num_features_per_node = (
self.params.feature_bagging_fraction * self.params.num_features)
self.params.regression = False
def testParams(self):
self.assertEquals(self.params.num_nodes, 3)
self.assertEquals(self.params.num_features, 2)
self.assertEquals(self.params.num_features_per_node, 2)
def testRoutingFunction(self):
with self.cached_session():
route_tensor = gen_training_ops.k_feature_routing_function(
self.input_data,
self.tree_weights,
self.tree_thresholds,
max_nodes=self.params.num_nodes,
num_features_per_node=self.params.num_features_per_node,
layer_num=0,
random_seed=self.params.base_random_seed)
route_tensor_shape = route_tensor.get_shape()
self.assertEquals(len(route_tensor_shape), 2)
self.assertEquals(route_tensor_shape[0], 4)
self.assertEquals(route_tensor_shape[1], 3)
routes = route_tensor.eval()
print(routes)
# Point 1
# Node 1 is a decision node => probability = 1.0
self.assertAlmostEquals(1.0, routes[0, 0])
# Probability left output = 1.0 / (1.0 + exp(1.0)) = 0.26894142
self.assertAlmostEquals(0.26894142, routes[0, 1])
# Probability right = 1 - 0.2689414 = 0.73105858
self.assertAlmostEquals(0.73105858, routes[0, 2])
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/hybrid/python/kernel_tests/k_feature_routing_function_op_test.py
|
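The k-feature test additionally derives `num_features_per_node` from the bagging fraction before building the op, matching the assertions in `testParams`. A sketch of that setup arithmetic:

```python
num_features = 2
hybrid_tree_depth = 2
feature_bagging_fraction = 1.0

num_nodes = 2**hybrid_tree_depth - 1                             # 3
num_leaves = 2**(hybrid_tree_depth - 1)                          # 2
num_features_per_node = feature_bagging_fraction * num_features  # 2.0
print(num_nodes, num_leaves, num_features_per_node)
```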
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A hybrid model that samples paths when training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python.layers import decisions_to_data
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.contrib.tensor_forest.hybrid.python.models import hard_decisions_to_data_then_nn
from tensorflow.python.training import adagrad
class StochasticSoftDecisionsToDataThenNN(
hard_decisions_to_data_then_nn.HardDecisionsToDataThenNN):
"""A hybrid model that samples paths when training."""
def __init__(self,
params,
device_assigner=None,
optimizer_class=adagrad.AdagradOptimizer,
**kwargs):
super(StochasticSoftDecisionsToDataThenNN, self).__init__(
params,
device_assigner=device_assigner,
optimizer_class=optimizer_class,
**kwargs)
self.layers = [decisions_to_data.StochasticSoftDecisionsToDataLayer(
params, 0, device_assigner),
fully_connected.FullyConnectedLayer(
params, 1, device_assigner=device_assigner)]
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/hybrid/python/models/stochastic_soft_decisions_to_data_then_nn.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple baseline feed-forward neural network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_model
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.python.training import adagrad
class NN(hybrid_model.HybridModel):
"""A simple baseline feed-forward neural network."""
def __init__(self,
params,
device_assigner=None,
optimizer_class=adagrad.AdagradOptimizer,
**kwargs):
super(NN, self).__init__(
params,
device_assigner=device_assigner,
optimizer_class=optimizer_class,
**kwargs)
self.layers = [fully_connected.FullyConnectedLayer(
params, 0, device_assigner=device_assigner)]
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/hybrid/python/models/nn.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A model that places a decision tree embedding before a neural net."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_model
from tensorflow.contrib.tensor_forest.hybrid.python.layers import decisions_to_data
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.python.training import adagrad
class DecisionsToDataThenNN(hybrid_model.HybridModel):
"""A model that places a decision tree embedding before a neural net."""
def __init__(self,
params,
device_assigner=None,
optimizer_class=adagrad.AdagradOptimizer,
**kwargs):
super(DecisionsToDataThenNN, self).__init__(
params,
device_assigner=device_assigner,
optimizer_class=optimizer_class,
**kwargs)
self.layers = [decisions_to_data.DecisionsToDataLayer(params,
0, device_assigner),
fully_connected.FullyConnectedLayer(
params, 1, device_assigner=device_assigner)]
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/hybrid/python/models/decisions_to_data_then_nn.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A model that places a hard decision tree embedding before a neural net."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_model
from tensorflow.contrib.tensor_forest.hybrid.python.layers import decisions_to_data
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.python.ops import nn_ops
from tensorflow.python.training import adagrad
class HardDecisionsToDataThenNN(hybrid_model.HybridModel):
"""A model that treats tree inference as hard at test."""
def __init__(self,
params,
device_assigner=None,
optimizer_class=adagrad.AdagradOptimizer,
**kwargs):
super(HardDecisionsToDataThenNN, self).__init__(
params,
device_assigner=device_assigner,
optimizer_class=optimizer_class,
**kwargs)
self.layers = [decisions_to_data.HardDecisionsToDataLayer(
params, 0, device_assigner),
fully_connected.FullyConnectedLayer(
params, 1, device_assigner=device_assigner)]
def _base_inference(self, data, data_spec=None, soft=False):
if soft:
inference_result = self.layers[0].soft_inference_graph(data)
else:
inference_result = self._do_layer_inference(self.layers[0], data)
for layer in self.layers[1:]:
inference_result = self._do_layer_inference(layer, inference_result)
output_size = 1 if self.is_regression else self.params.num_classes
output = layers.fully_connected(
inference_result, output_size, activation_fn=nn_ops.softmax)
return output
def inference_graph(self, data, data_spec=None):
"""Returns the op that performs inference on a batch of data."""
return nn_ops.softmax(
self._base_inference(
data, data_spec=data_spec, soft=True))
# pylint: disable=unused-argument
def training_inference_graph(self, data, data_spec=None):
return self._base_inference(data, data_spec=data_spec, soft=False)
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/hybrid/python/models/hard_decisions_to_data_then_nn.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Initialize tensor_forest/hybrid/python/models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/hybrid/python/models/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A hybrid model that samples paths when training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python.layers import decisions_to_data
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.contrib.tensor_forest.hybrid.python.models import hard_decisions_to_data_then_nn
from tensorflow.python.training import adagrad
class StochasticHardDecisionsToDataThenNN(
hard_decisions_to_data_then_nn.HardDecisionsToDataThenNN):
"""A hybrid model that samples paths when training."""
def __init__(self,
params,
device_assigner=None,
optimizer_class=adagrad.AdagradOptimizer,
**kwargs):
super(StochasticHardDecisionsToDataThenNN, self).__init__(
params,
device_assigner=device_assigner,
optimizer_class=optimizer_class,
**kwargs)
self.layers = [decisions_to_data.StochasticHardDecisionsToDataLayer(
params, 0, device_assigner),
fully_connected.FullyConnectedLayer(
params, 1, device_assigner=device_assigner)]
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/hybrid/python/models/stochastic_hard_decisions_to_data_then_nn.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A model that places a soft decision tree embedding before a neural net."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_model
from tensorflow.contrib.tensor_forest.hybrid.python.layers import decisions_to_data
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.python.training import adagrad
class KFeatureDecisionsToDataThenNN(hybrid_model.HybridModel):
"""A model that places a soft decision tree embedding before a neural net."""
def __init__(self,
params,
device_assigner=None,
optimizer_class=adagrad.AdagradOptimizer,
**kwargs):
super(KFeatureDecisionsToDataThenNN, self).__init__(
params,
device_assigner=device_assigner,
optimizer_class=optimizer_class,
**kwargs)
self.layers = [decisions_to_data.KFeatureDecisionsToDataLayer(
params, 0, device_assigner),
fully_connected.FullyConnectedLayer(
params, 1, device_assigner=device_assigner)]
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/hybrid/python/models/k_feature_decisions_to_data_then_nn.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the hybrid tensor forest model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.hybrid.python.models import forest_to_data_then_nn
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.framework.ops import Operation
from tensorflow.python.framework.ops import Tensor
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import googletest
class ForestToDataThenNNTest(test_util.TensorFlowTestCase):
def setUp(self):
self.params = tensor_forest.ForestHParams(
num_classes=2,
num_features=31,
layer_size=11,
num_layers=13,
num_trees=3,
connection_probability=0.1,
hybrid_tree_depth=4,
regularization_strength=0.01,
regularization="",
base_random_seed=10,
feature_bagging_fraction=1.0,
learning_rate=0.01,
weight_init_mean=0.0,
weight_init_std=0.1)
self.params.regression = False
self.params.num_nodes = 2**self.params.hybrid_tree_depth - 1
self.params.num_leaves = 2**(self.params.hybrid_tree_depth - 1)
self.params.num_features_per_node = (self.params.feature_bagging_fraction *
self.params.num_features)
def testInferenceConstruction(self):
# pylint: disable=W0612
data = constant_op.constant(
[[random.uniform(-1, 1) for i in range(self.params.num_features)]
for _ in range(100)])
with variable_scope.variable_scope(
"ForestToDataThenNNTest_testInferenceContruction"):
graph_builder = forest_to_data_then_nn.ForestToDataThenNN(self.params)
graph = graph_builder.inference_graph(data, None)
self.assertTrue(isinstance(graph, Tensor))
def testTrainingConstruction(self):
# pylint: disable=W0612
data = constant_op.constant(
[[random.uniform(-1, 1) for i in range(self.params.num_features)]
for _ in range(100)])
labels = [1 for _ in range(100)]
with variable_scope.variable_scope(
"ForestToDataThenNNTest.testTrainingContruction"):
graph_builder = forest_to_data_then_nn.ForestToDataThenNN(self.params)
graph = graph_builder.training_graph(data, labels, None)
self.assertTrue(isinstance(graph, Operation))
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/hybrid/python/models/forest_to_data_then_nn_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the hybrid tensor forest model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.hybrid.python.models import k_feature_decisions_to_data_then_nn
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.framework.ops import Operation
from tensorflow.python.framework.ops import Tensor
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import googletest
class KFeatureDecisionsToDataThenNNTest(test_util.TensorFlowTestCase):
def setUp(self):
self.params = tensor_forest.ForestHParams(
num_classes=2,
num_features=31,
layer_size=11,
num_layers=13,
num_trees=17,
connection_probability=0.1,
hybrid_tree_depth=4,
regularization_strength=0.01,
regularization="",
base_random_seed=10,
        feature_bagging_fraction=1.0,
learning_rate=0.01,
weight_init_mean=0.0,
weight_init_std=0.1)
self.params.regression = False
self.params.num_nodes = 2**self.params.hybrid_tree_depth - 1
self.params.num_leaves = 2**(self.params.hybrid_tree_depth - 1)
self.params.num_features_per_node = (self.params.feature_bagging_fraction *
self.params.num_features)
def testKFeatureInferenceConstruction(self):
# pylint: disable=W0612
data = constant_op.constant(
[[random.uniform(-1, 1) for i in range(self.params.num_features)]
for _ in range(100)])
with variable_scope.variable_scope(
"KFeatureDecisionsToDataThenNNTest.testKFeatureInferenceContruction"):
graph_builder = (
k_feature_decisions_to_data_then_nn.KFeatureDecisionsToDataThenNN(
self.params))
graph = graph_builder.inference_graph(data, None)
self.assertTrue(isinstance(graph, Tensor))
def testKFeatureTrainingConstruction(self):
# pylint: disable=W0612
data = constant_op.constant(
[[random.uniform(-1, 1) for i in range(self.params.num_features)]
for _ in range(100)])
labels = [1 for _ in range(100)]
with variable_scope.variable_scope(
"KFeatureDecisionsToDataThenNNTest.testKFeatureTrainingContruction"):
graph_builder = (
k_feature_decisions_to_data_then_nn.KFeatureDecisionsToDataThenNN(
self.params))
graph = graph_builder.training_graph(data, labels, None)
self.assertTrue(isinstance(graph, Operation))
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/hybrid/python/models/k_feature_decisions_to_data_then_nn_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A model that combines a decision forest embedding with a neural net."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_model
from tensorflow.contrib.tensor_forest.hybrid.python.layers import decisions_to_data
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.python.training import adagrad
class ForestToDataThenNN(hybrid_model.HybridModel):
"""A model that combines a decision forest embedding with a neural net."""
def __init__(self,
params,
device_assigner=None,
optimizer_class=adagrad.AdagradOptimizer,
**kwargs):
super(ForestToDataThenNN, self).__init__(
params,
device_assigner=device_assigner,
optimizer_class=optimizer_class,
**kwargs)
self.layers = [[decisions_to_data.KFeatureDecisionsToDataLayer(
params, i, device_assigner)
for i in range(self.params.num_trees)],
fully_connected.FullyConnectedLayer(
params,
self.params.num_trees,
device_assigner=device_assigner)]
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/hybrid/python/models/forest_to_data_then_nn.py
|
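`ForestToDataThenNN` makes its first entry in `self.layers` a Python list of tree layers, so `_do_layer_inference` averages the per-tree leaf probabilities before the fully connected layer sees them. A shape-level NumPy sketch of that composition (illustrative sizes only):

```python
import numpy as np

num_trees, batch, num_leaves = 3, 32, 8

# Each tree layer emits leaf probabilities of shape [batch, num_leaves];
# the list of trees is collapsed to its element-wise mean.
per_tree = [np.random.rand(batch, num_leaves) for _ in range(num_trees)]
forest_embedding = np.mean(np.stack(per_tree), axis=0)
assert forest_embedding.shape == (batch, num_leaves)
# A fully connected layer then maps [batch, num_leaves] -> [batch, layer_size].
```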
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the hybrid tensor forest model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.hybrid.python.models import decisions_to_data_then_nn
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.framework.ops import Operation
from tensorflow.python.framework.ops import Tensor
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import googletest
class DecisionsToDataThenNNTest(test_util.TensorFlowTestCase):
def setUp(self):
self.params = tensor_forest.ForestHParams(
num_classes=2,
num_features=31,
layer_size=11,
num_layers=13,
num_trees=17,
connection_probability=0.1,
hybrid_tree_depth=4,
regularization_strength=0.01,
learning_rate=0.01,
regularization="",
weight_init_mean=0.0,
weight_init_std=0.1)
self.params.regression = False
self.params.num_nodes = 2**self.params.hybrid_tree_depth - 1
self.params.num_leaves = 2**(self.params.hybrid_tree_depth - 1)
def testHParams(self):
    self.assertEqual(self.params.num_classes, 2)
    self.assertEqual(self.params.num_features, 31)
    self.assertEqual(self.params.layer_size, 11)
    self.assertEqual(self.params.num_layers, 13)
    self.assertEqual(self.params.num_trees, 17)
    self.assertEqual(self.params.hybrid_tree_depth, 4)
    self.assertEqual(self.params.connection_probability, 0.1)
# Building the graphs modifies the params.
with variable_scope.variable_scope("DecisionsToDataThenNNTest_testHParams"):
# pylint: disable=W0612
graph_builder = decisions_to_data_then_nn.DecisionsToDataThenNN(
self.params)
# Tree with depth 4 should have 2**0 + 2**1 + 2**2 + 2**3 = 15 nodes.
      self.assertEqual(self.params.num_nodes, 15)
def testConstructionPollution(self):
"""Ensure that graph building doesn't modify the params in a bad way."""
# pylint: disable=W0612
data = [[random.uniform(-1, 1) for i in range(self.params.num_features)]
for _ in range(100)]
self.assertTrue(isinstance(self.params, tensor_forest.ForestHParams))
self.assertFalse(
isinstance(self.params.num_trees, tensor_forest.ForestHParams))
with variable_scope.variable_scope(
"DecisionsToDataThenNNTest_testConstructionPollution"):
graph_builder = decisions_to_data_then_nn.DecisionsToDataThenNN(
self.params)
self.assertTrue(isinstance(self.params, tensor_forest.ForestHParams))
self.assertFalse(
isinstance(self.params.num_trees, tensor_forest.ForestHParams))
def testInferenceConstruction(self):
# pylint: disable=W0612
data = constant_op.constant(
[[random.uniform(-1, 1) for i in range(self.params.num_features)]
for _ in range(100)])
with variable_scope.variable_scope(
"DecisionsToDataThenNNTest_testInferenceConstruction"):
graph_builder = decisions_to_data_then_nn.DecisionsToDataThenNN(
self.params)
graph = graph_builder.inference_graph(data, None)
self.assertTrue(isinstance(graph, Tensor))
def testTrainingConstruction(self):
# pylint: disable=W0612
data = constant_op.constant(
[[random.uniform(-1, 1) for i in range(self.params.num_features)]
for _ in range(100)])
labels = [1 for _ in range(100)]
with variable_scope.variable_scope(
"DecisionsToDataThenNNTest_testTrainingConstruction"):
graph_builder = decisions_to_data_then_nn.DecisionsToDataThenNN(
self.params)
graph = graph_builder.training_graph(data, labels, None)
self.assertTrue(isinstance(graph, Operation))
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/hybrid/python/models/decisions_to_data_then_nn_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for hybrid model training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.contrib.tensor_forest.hybrid.ops import gen_training_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import tf_logging as logging
TRAINING_OPS_FILE = '_training_ops.so'
_training_ops = None
_ops_lock = threading.Lock()
# TODO(b/31222613): Some of these ops are probably differentiable, and
# there may be latent bugs here.
ops.NotDifferentiable('HardRoutingFunction')
ops.NotDifferentiable('RoutingGradient')
ops.NotDifferentiable('KFeatureDataGradient')
ops.NotDifferentiable('KFeatureRoutingGradient')
ops.NotDifferentiable('KFeatureWeightGradient')
ops.NotDifferentiable('UnpackPath')
@ops.RegisterGradient('RoutingFunction')
def _RoutingFunctionGradient(op, grad):
"""The gradient of RoutingFunction.
Args:
op: The RoutingFunction op.
grad: Gradient with respect to the output of the RoutingFunction op.
Returns:
Gradients with respect to the input of the RoutingFunction op.
"""
routing_gradient = gen_training_ops.routing_gradient
input_data_tensor = op.inputs[0]
tree_weights_tensor = op.inputs[1]
tree_thresholds_tensor = op.inputs[2]
routing_function_tensor = op.outputs[0]
# The derivatives below are each defined over one or two of three dimensions:
# (batch_size, num_nodes, num_features). We explicitly expand each derivative
# to three dimensions to ensure that they're broadcasted correctly.
# dl / du is the derivative of the loss with respect to the output of the
# routing function, which is provided by tensorflow.
#
# dl / du has dimension (batch_size, num_nodes), which we expand to
# (batch_size, num_nodes, 1).
dl_du = array_ops.expand_dims(grad, 2)
# du / df is the derivative of the output of the routing function with respect
# to the decision function at each node. It is computed by
# routing_gradient_op.cc.
#
# du / df has dimension (batch_size, num_nodes), which we expand to
# (batch_size, num_nodes, 1).
du_df = array_ops.expand_dims(
routing_gradient(
input_data_tensor,
tree_weights_tensor,
tree_thresholds_tensor,
routing_function_tensor,
max_nodes=op.get_attr('max_nodes')),
2)
# df / dx is the derivative of the decision function with respect to the input
# data. f_i(x) = (-t_i * x + b_i), so df_i / dx = -t_i.
#
# df / dx has dimension (num_nodes, num_features), which we expand to
# (1, num_nodes, num_features).
df_dx = -array_ops.expand_dims(tree_weights_tensor, 0)
# df / dt is the derivative of the decision function with respect to its
# parameters. f_i(x) = (-t_i * x + b_i), so df_i / d t_i = -x.
#
# df / dt has dimension (batch_size, num_features), which we expand to
# (batch_size, 1, num_features).
df_dt = -array_ops.expand_dims(input_data_tensor, 1)
  # df / db is the derivative of the decision function with respect to its
  # bias parameter. f_i(x) = (-t_i * x + b_i), so df_i / d b_i = 1.
#
# df / db has dimension (num_nodes), which we expand to
# (1, num_nodes, 1).
df_db = array_ops.expand_dims(
array_ops.expand_dims(array_ops.ones_like(tree_thresholds_tensor), 0), 2)
# Compute the derivatives of the loss with respect to the inputs using the
# chain rule (backpropagation).
dl_dx = math_ops.reduce_mean(dl_du * du_df * df_dx, 1)
dl_dt = math_ops.reduce_mean(dl_du * du_df * df_dt, 0)
dl_db = math_ops.reduce_mean(array_ops.squeeze(dl_du * du_df * df_db, [2]), 0)
input_gradients = [dl_dx, dl_dt, dl_db]
return input_gradients
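# A shape-only worked example (illustrative, not part of the original op
# library) of how the expanded derivatives above broadcast together. With
# batch_size=4, num_nodes=3 and num_features=5:
#   dl_du and du_df have shape (4, 3, 1); df_dx has shape (1, 3, 5);
#   df_dt has shape (4, 1, 5); df_db has shape (1, 3, 1).
#   dl_du * du_df * df_dx -> (4, 3, 5); reduce_mean over axis 1 -> dl_dx (4, 5)
#   dl_du * du_df * df_dt -> (4, 3, 5); reduce_mean over axis 0 -> dl_dt (3, 5)
#   dl_du * du_df * df_db -> (4, 3, 1); squeeze + reduce_mean axis 0 -> dl_db (3,)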
@ops.RegisterGradient('StochasticHardRoutingFunction')
def _StochasticHardRoutingFunctionGradient(op, routing_grad, unused_path_grad):
"""The gradient of RoutingFunction.
Args:
op: The RoutingFunction op.
routing_grad: Gradient with respect to the output of the RoutingFunction op.
Returns:
Gradients with respect to the input of the RoutingFunction op.
"""
gradient_op = gen_training_ops.stochastic_hard_routing_gradient
unpack_path_op = gen_training_ops.unpack_path
input_data_tensor = op.inputs[0]
tree_weights_tensor = op.inputs[1]
tree_thresholds_tensor = op.inputs[2]
path_probability_tensor = op.outputs[0]
path_tensor = op.outputs[1]
# The derivatives below are each defined over one or two of three dimensions:
# (batch_size, num_nodes, num_features). We explicitly expand each derivative
# to three dimensions to ensure that they're broadcasted correctly.
du_df_raw, df_dx_raw, df_dt_raw, df_db_raw = gradient_op(
input_data_tensor,
tree_weights_tensor,
tree_thresholds_tensor,
path_probability_tensor,
path_tensor,
tree_depth=op.get_attr('tree_depth'))
# dl / du is the derivative of the loss with respect to the output of the
# routing function, which is provided by tensorflow.
#
# dl / du has dimension (batch_size, num_nodes), which we expand to
# (batch_size, num_nodes, 1).
dl_du = array_ops.expand_dims(unpack_path_op(path_tensor, routing_grad), 2)
# du / df is the derivative of the output of the routing function with respect
# to the decision function at each node. It is computed by
# single_feature_routing_gradient_op.cc.
#
# du / df has dimension (batch_size, num_nodes), which we expand to
# (batch_size, num_nodes, 1).
du_df = array_ops.expand_dims(du_df_raw, 2)
# df / dx is the derivative of the decision function with respect to the input
# data. f(x) = (-t * x + b), so df / dx = -t for the selected features and
# zero elsewhere.
#
# df / dx has dimension (num_nodes, num_features), which we expand to
# (1, num_nodes, num_features).
df_dx = array_ops.expand_dims(df_dx_raw, 0)
# df / dt is the derivative of the decision function with respect to its
# parameters. f(x) = (-t * x + b), so df / dt = -x[feature].
#
# df / dt has dimension (batch_size, num_nodes, num_features).
df_dt = -df_dt_raw
  # df / db is the derivative of the decision function with respect to its
  # bias parameter. f(x) = (-t * x + b), so df / db = 1.
#
# df / db has dimension (num_nodes), which we expand to
# (1, num_nodes, 1).
df_db = array_ops.expand_dims(array_ops.expand_dims(df_db_raw, 0), 2)
# Compute the derivatives of the loss with respect to the inputs using the
# chain rule (backpropagation).
dl_dx = math_ops.reduce_mean(dl_du * du_df * df_dx, 1)
dl_dt = math_ops.reduce_mean(dl_du * du_df * df_dt, 0)
dl_db = math_ops.reduce_mean(array_ops.squeeze(dl_du * du_df * df_db, [2]), 0)
input_gradients = [dl_dx, dl_dt, dl_db]
return input_gradients
@ops.RegisterGradient('KFeatureRoutingFunction')
def _KFeatureRoutingFunctionGradient(op, grad):
"""The gradient of RoutingFunction.
Args:
op: The RoutingFunction op.
grad: Gradient with respect to the output of the RoutingFunction op.
Returns:
Gradients with respect to the input of the RoutingFunction op.
"""
gradient_op = gen_training_ops.k_feature_gradient
input_data_tensor = op.inputs[0]
tree_weights_tensor = op.inputs[1]
tree_thresholds_tensor = op.inputs[2]
routing_function_tensor = op.outputs[0]
# The derivatives below are each defined over one or two of three dimensions:
# (batch_size, num_nodes, num_features). We explicitly expand each derivative
# to three dimensions to ensure that they're broadcasted correctly.
du_df_raw, df_dx_raw, df_dt_raw = gradient_op(
input_data_tensor,
tree_weights_tensor,
tree_thresholds_tensor,
routing_function_tensor,
layer_num=op.get_attr('layer_num'),
random_seed=op.get_attr('random_seed'))
# dl / du is the derivative of the loss with respect to the output of the
# routing function, which is provided by tensorflow.
#
# dl / du has dimension (batch_size, num_nodes), which we expand to
# (batch_size, num_nodes, 1).
dl_du = array_ops.expand_dims(grad, 2)
# du / df is the derivative of the output of the routing function with respect
# to the decision function at each node. It is computed by
# single_feature_routing_gradient_op.cc.
#
# du / df has dimension (batch_size, num_nodes), which we expand to
# (batch_size, num_nodes, 1).
du_df = array_ops.expand_dims(du_df_raw, 2)
# df / dx is the derivative of the decision function with respect to the input
# data. f(x) = (-t * x + b), so df / dx = -t for the selected features and
# zero elsewhere.
#
# df / dx has dimension (num_nodes, num_features), which we expand to
# (1, num_nodes, num_features).
df_dx = array_ops.expand_dims(df_dx_raw, 0)
# df / dt is the derivative of the decision function with respect to its
# parameters. f(x) = (-t * x + b), so df / dt = -x[feature].
#
# df / dt has dimension (batch_size, num_nodes, num_features).
df_dt = -df_dt_raw
  # df / db is the derivative of the decision function with respect to its
  # bias parameter. f(x) = (-t * x + b), so df / db = 1.
#
# df / db has dimension (num_nodes), which we expand to
# (1, num_nodes, 1).
df_db = array_ops.expand_dims(
array_ops.expand_dims(array_ops.ones_like(tree_thresholds_tensor), 0), 2)
# Compute the derivatives of the loss with respect to the inputs using the
# chain rule (backpropagation).
dl_dx = math_ops.reduce_mean(dl_du * du_df * df_dx, 1)
dl_dt = math_ops.reduce_mean(dl_du * du_df * df_dt, 0)
dl_db = math_ops.reduce_mean(array_ops.squeeze(dl_du * du_df * df_db, [2]), 0)
input_gradients = [dl_dx, dl_dt, dl_db]
return input_gradients
# Workaround for the fact that importing tensorflow imports contrib
# (even if a user isn't using this or any other contrib op), while
# there is not yet any guarantee that the shared object exists. Without
# this lazy loading, "import tensorflow" would always crash, even for users
# who never use contrib.
def Load():
"""Load training ops library and return the loaded module."""
with _ops_lock:
global _training_ops
if not _training_ops:
ops_path = resource_loader.get_path_to_datafile(TRAINING_OPS_FILE)
logging.info('data path: %s', ops_path)
_training_ops = loader.load_op_library(ops_path)
assert _training_ops, 'Could not load _training_ops.so'
return _training_ops
|
tensorflow-master
|
tensorflow/contrib/tensor_forest/hybrid/python/ops/training_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for building input pipelines.
This module contains experimental `Dataset` sources and transformations that can
be used in conjunction with the `tf.data.Dataset` API. Note that the
`tf.contrib.data` API is not subject to the same backwards compatibility
guarantees as `tf.data`, but we will provide deprecation advice in advance of
removing existing functionality.
See [Importing Data](https://tensorflow.org/guide/datasets) for an overview.
@@Counter
@@CheckpointInputPipelineHook
@@CsvDataset
@@LMDBDataset
@@Optional
@@RandomDataset
@@Reducer
@@SqlDataset
@@TFRecordWriter
@@assert_element_shape
@@batch_and_drop_remainder
@@bucket_by_sequence_length
@@choose_from_datasets
@@copy_to_device
@@dense_to_sparse_batch
@@enumerate_dataset
@@get_next_as_optional
@@get_single_element
@@group_by_reducer
@@group_by_window
@@ignore_errors
@@latency_stats
@@make_batched_features_dataset
@@make_csv_dataset
@@make_saveable_from_iterator
@@map_and_batch
@@padded_batch_and_drop_remainder
@@parallel_interleave
@@parse_example_dataset
@@prefetch_to_device
@@read_batch_features
@@rejection_resample
@@reduce_dataset
@@sample_from_datasets
@@scan
@@set_stats_aggregator
@@shuffle_and_repeat
@@sliding_window_batch
@@sloppy_interleave
@@StatsAggregator
@@unbatch
@@unique
@@AUTOTUNE
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.data.python.ops.batching import assert_element_shape
from tensorflow.contrib.data.python.ops.batching import batch_and_drop_remainder
from tensorflow.contrib.data.python.ops.batching import dense_to_sparse_batch
from tensorflow.contrib.data.python.ops.batching import map_and_batch
from tensorflow.contrib.data.python.ops.batching import padded_batch_and_drop_remainder
from tensorflow.contrib.data.python.ops.batching import unbatch
from tensorflow.contrib.data.python.ops.counter import Counter
from tensorflow.contrib.data.python.ops.enumerate_ops import enumerate_dataset
from tensorflow.contrib.data.python.ops.error_ops import ignore_errors
from tensorflow.contrib.data.python.ops.get_single_element import get_single_element
from tensorflow.contrib.data.python.ops.get_single_element import reduce_dataset
from tensorflow.contrib.data.python.ops.grouping import bucket_by_sequence_length
from tensorflow.contrib.data.python.ops.grouping import group_by_reducer
from tensorflow.contrib.data.python.ops.grouping import group_by_window
from tensorflow.contrib.data.python.ops.grouping import Reducer
from tensorflow.contrib.data.python.ops.interleave_ops import choose_from_datasets
from tensorflow.contrib.data.python.ops.interleave_ops import parallel_interleave
from tensorflow.contrib.data.python.ops.interleave_ops import sample_from_datasets
from tensorflow.contrib.data.python.ops.interleave_ops import sloppy_interleave
from tensorflow.contrib.data.python.ops.iterator_ops import CheckpointInputPipelineHook
from tensorflow.contrib.data.python.ops.iterator_ops import make_saveable_from_iterator
from tensorflow.contrib.data.python.ops.parsing_ops import parse_example_dataset
from tensorflow.contrib.data.python.ops.prefetching_ops import copy_to_device
from tensorflow.contrib.data.python.ops.prefetching_ops import prefetch_to_device
from tensorflow.contrib.data.python.ops.random_ops import RandomDataset
from tensorflow.contrib.data.python.ops.readers import CsvDataset
from tensorflow.contrib.data.python.ops.readers import LMDBDataset
from tensorflow.contrib.data.python.ops.readers import make_batched_features_dataset
from tensorflow.contrib.data.python.ops.readers import make_csv_dataset
from tensorflow.contrib.data.python.ops.readers import read_batch_features
from tensorflow.contrib.data.python.ops.readers import SqlDataset
from tensorflow.contrib.data.python.ops.resampling import rejection_resample
from tensorflow.contrib.data.python.ops.scan_ops import scan
from tensorflow.contrib.data.python.ops.shuffle_ops import shuffle_and_repeat
from tensorflow.contrib.data.python.ops.sliding import sliding_window_batch
from tensorflow.contrib.data.python.ops.unique import unique
from tensorflow.contrib.data.python.ops.writers import TFRecordWriter
from tensorflow.python.data.ops.dataset_ops import AUTOTUNE
from tensorflow.python.data.ops.iterator_ops import get_next_as_optional
from tensorflow.python.data.ops.optional_ops import Optional
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
tensorflow-master
|
tensorflow/contrib/data/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LMDBDatasetOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from tensorflow.contrib.data.python.ops import readers
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.util import compat
prefix_path = "tensorflow/core/lib"
@test_util.run_v1_only("deprecated API, no eager or V2 test coverage")
class LMDBDatasetTest(test_base.DatasetTestBase):
def setUp(self):
super(LMDBDatasetTest, self).setUp()
# Copy database out because we need the path to be writable to use locks.
path = os.path.join(prefix_path, "lmdb", "testdata", "data.mdb")
self.db_path = os.path.join(self.get_temp_dir(), "data.mdb")
shutil.copy(path, self.db_path)
def testReadFromFile(self):
filename = self.db_path
filenames = constant_op.constant([filename], dtypes.string)
num_repeats = 2
dataset = readers.LMDBDataset(filenames).repeat(num_repeats)
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(num_repeats): # Dataset is repeated.
for i in range(10): # 10 records.
k = compat.as_bytes(str(i))
v = compat.as_bytes(str(chr(ord("a") + i)))
self.assertEqual((k, v), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/data/python/kernel_tests/lmdb_dataset_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.data.python.ops import batching
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import test
@test_util.run_v1_only("deprecated API, no eager or V2 test coverage")
class AssertElementShapeTest(test_base.DatasetTestBase):
def test_assert_element_shape(self):
def create_dataset(_):
return (array_ops.ones(2, dtype=dtypes.float32),
array_ops.zeros((3, 4), dtype=dtypes.int32))
dataset = dataset_ops.Dataset.range(5).map(create_dataset)
expected_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 4)))
self.assertEqual(expected_shapes,
dataset_ops.get_legacy_output_shapes(dataset))
result = dataset.apply(batching.assert_element_shape(expected_shapes))
self.assertEqual(expected_shapes,
dataset_ops.get_legacy_output_shapes(result))
iterator = dataset_ops.make_initializable_iterator(result)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(5):
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def test_assert_wrong_element_shape(self):
def create_dataset(_):
return (array_ops.ones(2, dtype=dtypes.float32),
array_ops.zeros((3, 4), dtype=dtypes.int32))
dataset = dataset_ops.Dataset.range(3).map(create_dataset)
wrong_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 10)))
with self.assertRaises(ValueError):
dataset.apply(batching.assert_element_shape(wrong_shapes))
def test_assert_element_shape_on_unknown_shape_dataset(self):
def create_unknown_shape_dataset(x):
return script_ops.py_func(
lambda _: ( # pylint: disable=g-long-lambda
np.ones(2, dtype=np.float32),
np.zeros((3, 4), dtype=np.int32)),
[x],
[dtypes.float32, dtypes.int32])
dataset = dataset_ops.Dataset.range(5).map(create_unknown_shape_dataset)
unknown_shapes = (tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None))
self.assertEqual(unknown_shapes,
dataset_ops.get_legacy_output_shapes(dataset))
expected_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 4)))
result = dataset.apply(batching.assert_element_shape(expected_shapes))
self.assertEqual(expected_shapes,
dataset_ops.get_legacy_output_shapes(result))
iterator = dataset_ops.make_initializable_iterator(result)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(5):
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def test_assert_wrong_element_shape_on_unknown_shape_dataset(self):
def create_unknown_shape_dataset(x):
return script_ops.py_func(
lambda _: ( # pylint: disable=g-long-lambda
np.ones(2, dtype=np.float32),
np.zeros((3, 4), dtype=np.int32)),
[x],
[dtypes.float32, dtypes.int32])
dataset = dataset_ops.Dataset.range(3).map(create_unknown_shape_dataset)
unknown_shapes = (tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None))
self.assertEqual(unknown_shapes,
dataset_ops.get_legacy_output_shapes(dataset))
wrong_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 10)))
iterator = dataset_ops.make_initializable_iterator(
dataset.apply(batching.assert_element_shape(wrong_shapes)))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
def test_assert_partial_element_shape(self):
def create_dataset(_):
return (array_ops.ones(2, dtype=dtypes.float32),
array_ops.zeros((3, 4), dtype=dtypes.int32))
dataset = dataset_ops.Dataset.range(5).map(create_dataset)
partial_expected_shape = (
tensor_shape.TensorShape(None), # Unknown shape
tensor_shape.TensorShape((None, 4))) # Partial shape
result = dataset.apply(
batching.assert_element_shape(partial_expected_shape))
# Partial shapes are merged with actual shapes:
actual_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 4)))
self.assertEqual(actual_shapes,
dataset_ops.get_legacy_output_shapes(result))
iterator = dataset_ops.make_initializable_iterator(result)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(5):
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def test_assert_wrong_partial_element_shape(self):
def create_dataset(_):
return (array_ops.ones(2, dtype=dtypes.float32),
array_ops.zeros((3, 4), dtype=dtypes.int32))
dataset = dataset_ops.Dataset.range(3).map(create_dataset)
wrong_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((None, 10)))
with self.assertRaises(ValueError):
dataset.apply(batching.assert_element_shape(wrong_shapes))
def test_assert_partial_element_shape_on_unknown_shape_dataset(self):
def create_unknown_shape_dataset(x):
return script_ops.py_func(
lambda _: ( # pylint: disable=g-long-lambda
np.ones(2, dtype=np.float32),
np.zeros((3, 4), dtype=np.int32)),
[x],
[dtypes.float32, dtypes.int32])
dataset = dataset_ops.Dataset.range(5).map(create_unknown_shape_dataset)
unknown_shapes = (tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None))
self.assertEqual(unknown_shapes,
dataset_ops.get_legacy_output_shapes(dataset))
expected_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((None, 4)))
result = dataset.apply(batching.assert_element_shape(expected_shapes))
self.assertEqual(expected_shapes,
dataset_ops.get_legacy_output_shapes(result))
iterator = dataset_ops.make_initializable_iterator(result)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(5):
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def test_assert_wrong_partial_element_shape_on_unknown_shape_dataset(self):
def create_unknown_shape_dataset(x):
return script_ops.py_func(
lambda _: ( # pylint: disable=g-long-lambda
np.ones(2, dtype=np.float32),
np.zeros((3, 4), dtype=np.int32)),
[x],
[dtypes.float32, dtypes.int32])
dataset = dataset_ops.Dataset.range(3).map(create_unknown_shape_dataset)
unknown_shapes = (tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None))
self.assertEqual(unknown_shapes,
dataset_ops.get_legacy_output_shapes(dataset))
wrong_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((None, 10)))
iterator = dataset_ops.make_initializable_iterator(
dataset.apply(batching.assert_element_shape(wrong_shapes)))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/data/python/kernel_tests/assert_element_shape_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.data.python.ops import get_single_element
from tensorflow.contrib.data.python.ops import grouping
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ReduceDatasetTest(test_base.DatasetTestBase, parameterized.TestCase):
@parameterized.named_parameters(
("SumZero", 0),
("SumOne", 1),
("SumFive", 5),
("SumTen", 10),
)
def testReduceDataset(self, stop):
def init_fn(_):
return np.int64(0)
def reduce_fn(state, value):
return state + value
def finalize_fn(state):
return state
sum_reducer = grouping.Reducer(init_fn, reduce_fn, finalize_fn)
dataset = dataset_ops.Dataset.range(stop)
element = get_single_element.reduce_dataset(dataset, sum_reducer)
self.assertEqual(stop * (stop - 1) / 2, self.evaluate(element))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/data/python/kernel_tests/reduce_dataset_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.data.python.ops import sliding
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
@test_util.run_v1_only("deprecated API, no eager or V2 test coverage")
class SlideDatasetTest(test_base.DatasetTestBase, parameterized.TestCase):
@parameterized.named_parameters(
("1", 20, 14, 7, 1),
("2", 20, 17, 9, 1),
("3", 20, 14, 14, 1),
("4", 20, 10, 14, 1),
("5", 20, 14, 19, 1),
("6", 20, 4, 1, 2),
("7", 20, 2, 1, 6),
("8", 20, 4, 7, 2),
("9", 20, 2, 7, 6),
("10", 1, 10, 4, 1),
("11", 0, 10, 4, 1),
)
def testSlideDataset(self, count, window_size, window_shift, window_stride):
"""Tests a dataset that slides a window its input elements."""
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count_t = array_ops.placeholder(dtypes.int64, shape=[])
window_size_t = array_ops.placeholder(dtypes.int64, shape=[])
window_shift_t = array_ops.placeholder(dtypes.int64, shape=[])
window_stride_t = array_ops.placeholder(dtypes.int64, shape=[])
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(count) ->
# _SlideDataset(window_size, window_shift, window_stride).
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(count).apply(
sliding.sliding_window_batch(
window_size=window_size_t,
window_shift=window_shift_t,
window_stride=window_stride_t)))
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([[None] + list(c.shape[1:]) for c in components],
[t.shape.as_list() for t in get_next])
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
count_t: count,
window_size_t: window_size,
window_shift_t: window_shift,
window_stride_t: window_stride
})
num_batches = (count * 7 - (
(window_size - 1) * window_stride + 1)) // window_shift + 1
for i in range(num_batches):
result = sess.run(get_next)
for component, result_component in zip(components, result):
for j in range(window_size):
self.assertAllEqual(
component[(i * window_shift + j * window_stride) % 7]**2,
result_component[j])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@parameterized.named_parameters(
("1", 20, 14, 7, 1),
("2", 20, 17, 9, 1),
("3", 20, 14, 14, 1),
("4", 20, 10, 14, 1),
("5", 20, 14, 19, 1),
("6", 20, 4, 1, 2),
("7", 20, 2, 1, 6),
("8", 20, 4, 7, 2),
("9", 20, 2, 7, 6),
("10", 1, 10, 4, 1),
("11", 0, 10, 4, 1),
)
def testSlideDatasetDeprecated(self, count, window_size, stride,
window_stride):
"""Tests a dataset that slides a window its input elements."""
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count_t = array_ops.placeholder(dtypes.int64, shape=[])
window_size_t = array_ops.placeholder(dtypes.int64, shape=[])
stride_t = array_ops.placeholder(dtypes.int64, shape=[])
window_stride_t = array_ops.placeholder(dtypes.int64, shape=[])
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(count) -> _SlideDataset(window_size, stride, window_stride).
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(count).apply(
sliding.sliding_window_batch(
window_size=window_size_t,
stride=stride_t,
window_stride=window_stride_t)))
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([[None] + list(c.shape[1:]) for c in components],
[t.shape.as_list() for t in get_next])
with self.cached_session() as sess:
sess.run(
init_op,
feed_dict={
count_t: count,
window_size_t: window_size,
stride_t: stride,
window_stride_t: window_stride
})
num_batches = (count * 7 - (
(window_size - 1) * window_stride + 1)) // stride + 1
for i in range(num_batches):
result = sess.run(get_next)
for component, result_component in zip(components, result):
for j in range(window_size):
self.assertAllEqual(
component[(i * stride + j * window_stride) % 7]**2,
result_component[j])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@parameterized.named_parameters(
("1", 14, 0, 3, 1),
("2", 14, 3, 0, 1),
("3", 14, 3, 3, 0),
)
def testSlideDatasetInvalid(self, count, window_size, window_shift,
window_stride):
count_t = array_ops.placeholder(dtypes.int64, shape=[])
window_size_t = array_ops.placeholder(dtypes.int64, shape=[])
window_shift_t = array_ops.placeholder(dtypes.int64, shape=[])
window_stride_t = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10).map(lambda x: x).repeat(count_t).apply(
sliding.sliding_window_batch(
window_size=window_size_t,
window_shift=window_shift_t,
window_stride=window_stride_t)))
init_op = iterator.initializer
with self.cached_session() as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(
init_op,
feed_dict={
count_t: count,
window_size_t: window_size,
window_shift_t: window_shift,
window_stride_t: window_stride
})
def testSlideDatasetValueError(self):
with self.assertRaises(ValueError):
dataset_ops.Dataset.range(10).map(lambda x: x).apply(
sliding.sliding_window_batch(
window_size=1, stride=1, window_shift=1, window_stride=1))
def testSlideSparse(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=[[0]], values=(i * [1]), dense_shape=[1])
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10).map(_sparse).apply(
sliding.sliding_window_batch(window_size=5, window_shift=3)))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
num_batches = (10 - 5) // 3 + 1
for i in range(num_batches):
actual = sess.run(get_next)
expected = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]],
values=[i * 3, i * 3 + 1, i * 3 + 2, i * 3 + 3, i * 3 + 4],
dense_shape=[5, 1])
self.assertTrue(sparse_tensor.is_sparse(actual))
self.assertSparseValuesEqual(actual, expected)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testSlideSparseWithDifferentDenseShapes(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=array_ops.expand_dims(
math_ops.range(i, dtype=dtypes.int64), 1),
values=array_ops.fill([math_ops.cast(i, dtypes.int32)], i),
dense_shape=[i])
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10).map(_sparse).apply(
sliding.sliding_window_batch(window_size=5, window_shift=3)))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
num_batches = (10 - 5) // 3 + 1
for i in range(num_batches):
actual = sess.run(get_next)
expected_indices = []
expected_values = []
for j in range(5):
for k in range(i * 3 + j):
expected_indices.append([j, k])
expected_values.append(i * 3 + j)
expected = sparse_tensor.SparseTensorValue(
indices=expected_indices,
values=expected_values,
dense_shape=[5, i * 3 + 5 - 1])
self.assertTrue(sparse_tensor.is_sparse(actual))
self.assertSparseValuesEqual(actual, expected)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testNestedSlideSparse(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=[[0]], values=(i * [1]), dense_shape=[1])
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10).map(_sparse).apply(
sliding.sliding_window_batch(window_size=4, window_shift=2)).apply(
sliding.sliding_window_batch(window_size=3, window_shift=1)))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
# Slide: 1st batch.
actual = sess.run(get_next)
expected = sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
[1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
[2, 2, 0], [2, 3, 0]],
values=[0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7],
dense_shape=[3, 4, 1])
self.assertTrue(sparse_tensor.is_sparse(actual))
self.assertSparseValuesEqual(actual, expected)
# Slide: 2nd batch.
actual = sess.run(get_next)
expected = sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
[1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
[2, 2, 0], [2, 3, 0]],
values=[2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9],
dense_shape=[3, 4, 1])
self.assertTrue(sparse_tensor.is_sparse(actual))
self.assertSparseValuesEqual(actual, expected)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testSlideShapeError(self):
def generator():
yield [1.0, 2.0, 3.0]
yield [4.0, 5.0, 6.0]
yield [7.0, 8.0, 9.0, 10.0]
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_generator(
generator, dtypes.float32, output_shapes=[None]).apply(
sliding.sliding_window_batch(window_size=3, window_shift=1)))
next_element = iterator.get_next()
with self.cached_session() as sess:
sess.run(iterator.initializer)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r"Cannot batch tensors with different shapes in component 0. "
r"First element had shape \[3\] and element 2 had shape \[4\]."):
sess.run(next_element)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/data/python/kernel_tests/slide_dataset_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the private `_RestructuredDataset` transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.data.python.ops import batching
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
# TODO(b/117581999): Add eager specific test.
class RestructuredDatasetTest(test_base.DatasetTestBase):
@test_util.run_deprecated_v1
def testRestructureDataset(self):
components = (array_ops.placeholder(dtypes.int32),
(array_ops.placeholder(dtypes.int32, shape=[None]),
array_ops.placeholder(dtypes.int32, shape=[20, 30])))
dataset = dataset_ops.Dataset.from_tensors(components)
i32 = dtypes.int32
test_cases = [((i32, i32, i32), None),
(((i32, i32), i32), None),
((i32, i32, i32), (None, None, None)),
((i32, i32, i32), ([17], [17], [20, 30]))]
for new_types, new_shape_lists in test_cases:
# pylint: disable=protected-access
new = batching._RestructuredDataset(dataset, new_types, new_shape_lists)
# pylint: enable=protected-access
self.assertEqual(new_types, dataset_ops.get_legacy_output_types(new))
if new_shape_lists is not None:
for expected_shape_list, shape in zip(
nest.flatten(new_shape_lists),
nest.flatten(dataset_ops.get_legacy_output_shapes(new))):
if expected_shape_list is None:
self.assertIs(None, shape.ndims)
else:
self.assertEqual(expected_shape_list, shape.as_list())
fail_cases = [((i32, dtypes.int64, i32), None),
((i32, i32, i32, i32), None),
((i32, i32, i32), ((None, None), None)),
((i32, i32, i32), (None, None, None, None)),
((i32, i32, i32), (None, [None], [21, 30]))]
for new_types, new_shape_lists in fail_cases:
with self.assertRaises(ValueError):
# pylint: disable=protected-access
new = batching._RestructuredDataset(dataset, new_types, new_shape_lists)
# pylint: enable=protected-access
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/data/python/kernel_tests/restructured_dataset_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sliding dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.util import deprecation
class _SlideDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that passes a sliding window over its input."""
def __init__(self, input_dataset, window_size, window_shift, window_stride):
"""See `sliding_window_batch` for details."""
self._input_dataset = input_dataset
    self._window_size = ops.convert_to_tensor(
        window_size, dtype=dtypes.int64, name="window_size")
self._window_stride = ops.convert_to_tensor(
window_stride, dtype=dtypes.int64, name="window_stride")
self._window_shift = ops.convert_to_tensor(
window_shift, dtype=dtypes.int64, name="window_shift")
input_structure = dataset_ops.get_structure(input_dataset)
self._structure = input_structure._batch(None) # pylint: disable=protected-access
variant_tensor = ged_ops.experimental_sliding_window_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
window_size=self._window_size,
window_shift=self._window_shift,
window_stride=self._window_stride,
**dataset_ops.flat_structure(self))
super(_SlideDataset, self).__init__(input_dataset, variant_tensor)
@property
def _element_structure(self):
return self._structure
@deprecation.deprecated_args(
None, "stride is deprecated, use window_shift instead", "stride")
@deprecation.deprecated(
None, "Use `tf.data.Dataset.window(size=window_size, shift=window_shift, "
"stride=window_stride).flat_map(lambda x: x.batch(window_size))` "
"instead.")
def sliding_window_batch(window_size,
stride=None,
window_shift=None,
window_stride=1):
"""A sliding window over a dataset.
This transformation passes a sliding window over this dataset. The window size
is `window_size`, the stride of the input elements is `window_stride`, and the
shift between consecutive windows is `window_shift`. If the remaining elements
cannot fill up the sliding window, this transformation will drop the final
smaller element. For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { [1], [2], [3], [4], [5], [6] }
a.apply(sliding_window_batch(window_size=3)) ==
{ [[1], [2], [3]], [[2], [3], [4]], [[3], [4], [5]], [[4], [5], [6]] }
a.apply(sliding_window_batch(window_size=3, window_shift=2)) ==
{ [[1], [2], [3]], [[3], [4], [5]] }
a.apply(sliding_window_batch(window_size=3, window_stride=2)) ==
{ [[1], [3], [5]], [[2], [4], [6]] }
```
Args:
window_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements in the sliding window. It must be positive.
stride: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
forward shift of the sliding window in each iteration. The default is `1`.
It must be positive. Deprecated alias for `window_shift`.
window_shift: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
forward shift of the sliding window in each iteration. The default is `1`.
It must be positive.
window_stride: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
stride of the input elements in the sliding window. The default is `1`.
It must be positive.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: if invalid arguments are provided.
"""
if stride is None and window_shift is None:
window_shift = 1
elif stride is not None and window_shift is None:
window_shift = stride
elif stride is not None and window_shift is not None:
raise ValueError("Cannot specify both `stride` and `window_shift`")
def _apply_fn(dataset):
return _SlideDataset(dataset, window_size, window_shift, window_stride)
return _apply_fn
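# A minimal usage sketch (illustrative only, assuming the graph-mode
# tf.contrib.data API exposed above):
#
#   dataset = tf.data.Dataset.range(10)
#   windows = dataset.apply(
#       tf.contrib.data.sliding_window_batch(window_size=5, window_shift=3))
#   # Yields [0, 1, 2, 3, 4] and [3, 4, 5, 6, 7]; the window starting at 6
#   # cannot be filled and is dropped, i.e.
#   # (10 - ((5 - 1) * 1 + 1)) // 3 + 1 = 2 windows in total.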
|
tensorflow-master
|
tensorflow/contrib/data/python/ops/sliding.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Batching dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework import with_shape
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import deprecation
@deprecation.deprecated(
None, "Use `tf.data.experimental.dense_to_sparse_batch(...)`.")
def dense_to_sparse_batch(batch_size, row_shape):
"""A transformation that batches ragged elements into `tf.SparseTensor`s.
Like `Dataset.padded_batch()`, this transformation combines multiple
consecutive elements of the dataset, which might have different
shapes, into a single element. The resulting element has three
components (`indices`, `values`, and `dense_shape`), which
comprise a `tf.SparseTensor` that represents the same data. The
`row_shape` represents the dense shape of each row in the
resulting `tf.SparseTensor`, to which the effective batch size is
prepended. For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] }
a.apply(tf.data.experimental.dense_to_sparse_batch(batch_size=2,
row_shape=[6])) ==
{
([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1]], # indices
['a', 'b', 'c', 'a', 'b'], # values
[2, 6]), # dense_shape
([[0, 0], [0, 1], [0, 2], [0, 3]],
['a', 'b', 'c', 'd'],
[1, 6])
}
```
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
row_shape: A `tf.TensorShape` or `tf.int64` vector tensor-like object
representing the equivalent dense shape of a row in the resulting
`tf.SparseTensor`. Each element of this dataset must have the same rank as
`row_shape`, and must have size less than or equal to `row_shape` in each
dimension.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return batching.dense_to_sparse_batch(batch_size, row_shape)
@deprecation.deprecated(None, "Use `tf.data.experimental.unbatch()`.")
def unbatch():
"""Splits elements of a dataset into multiple elements on the batch dimension.
For example, if elements of the dataset are shaped `[B, a0, a1, ...]`,
where `B` may vary for each input element, then for each element in the
dataset, the unbatched dataset will contain `B` consecutive elements
of shape `[a0, a1, ...]`.
```python
# NOTE: The following example uses `{ ... }` to represent the contents
# of a dataset.
a = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] }
a.apply(tf.data.experimental.unbatch()) == {
'a', 'b', 'c', 'a', 'b', 'a', 'b', 'c', 'd'}
```
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return batching.unbatch()
@deprecation.deprecated(
None, "Use `tf.data.Dataset.batch(..., drop_remainder=True)`.")
def batch_and_drop_remainder(batch_size):
"""A batching transformation that omits the final small batch (if present).
Like `tf.data.Dataset.batch`, this transformation combines
consecutive elements of this dataset into batches. However, if the batch
size does not evenly divide the input dataset size, this transformation will
drop the final smaller element.
The following example illustrates the difference between this
transformation and `Dataset.batch()`:
```python
dataset = tf.data.Dataset.range(200)
  batched = dataset.apply(tf.contrib.data.batch_and_drop_remainder(128))
print(batched.output_shapes) # ==> "(128,)" (the batch dimension is known)
```
By contrast, `dataset.batch(128)` would yield a two-element dataset with
shapes `(128,)` and `(72,)`, so the batch dimension would not be statically
known.
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
return dataset.batch(batch_size, drop_remainder=True)
return _apply_fn
@deprecation.deprecated(
None, "Use `tf.data.Dataset.padded_batch(..., drop_remainder=True)`.")
def padded_batch_and_drop_remainder(batch_size,
padded_shapes,
padding_values=None):
"""A batching and padding transformation that omits the final small batch.
Like `tf.data.Dataset.padded_batch`, this transformation combines
consecutive elements of this dataset into batches. However, if the batch
size does not evenly divide the input dataset size, this transformation will
drop the final smaller element.
See `tf.contrib.data.batch_and_drop_remainder` for more details.
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
padded_shapes: A nested structure of `tf.TensorShape` or `tf.int64` vector
tensor-like objects. See `tf.data.Dataset.padded_batch` for details.
padding_values: (Optional.) A nested structure of scalar-shaped `tf.Tensor`.
See `tf.data.Dataset.padded_batch` for details.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
return dataset.padded_batch(
batch_size, padded_shapes=padded_shapes, padding_values=padding_values,
drop_remainder=True)
return _apply_fn
# TODO(b/116817045): Move this to `tf.data.experimental` when the `with_shape()`
# function is available in the core.
def assert_element_shape(expected_shapes):
"""Assert the shape of this `Dataset`.
```python
shapes = [tf.TensorShape([16, 256]), tf.TensorShape([None, 2])]
  result = dataset.apply(tf.contrib.data.assert_element_shape(shapes))
print(result.output_shapes) # ==> "((16, 256), (<unknown>, 2))"
```
  If the dataset shapes and `expected_shapes` are fully defined, assert that
  they match. Otherwise, add an assert op that validates the shapes when the
  tensors are evaluated, and set the shapes on the tensors, respectively.
  Note that unknown dimensions in `expected_shapes` are ignored.
Args:
expected_shapes: A nested structure of `tf.TensorShape` objects.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`
"""
def _merge_output_shapes(original_shapes, expected_shapes):
flat_original_shapes = nest.flatten(original_shapes)
flat_new_shapes = nest.flatten_up_to(original_shapes, expected_shapes)
flat_merged_output_shapes = [
original_shape.merge_with(new_shape)
for original_shape, new_shape in zip(flat_original_shapes,
flat_new_shapes)]
return nest.pack_sequence_as(original_shapes, flat_merged_output_shapes)
def _check_shape(*elements):
flatten_tensors = nest.flatten(elements)
flatten_shapes = nest.flatten(expected_shapes)
checked_tensors = [
with_shape(shape, tensor) if shape else tensor # Ignore unknown shape
for shape, tensor in zip(flatten_shapes, flatten_tensors)
]
return nest.pack_sequence_as(elements, checked_tensors)
def _apply_fn(dataset):
output_shapes = _merge_output_shapes(
dataset_ops.get_legacy_output_shapes(dataset), expected_shapes)
# pylint: disable=protected-access
return _RestructuredDataset(
dataset.map(_check_shape),
dataset_ops.get_legacy_output_types(dataset),
output_shapes=output_shapes,
output_classes=dataset_ops.get_legacy_output_classes(dataset))
return _apply_fn
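# A hedged usage sketch (not part of the original module): declare the expected
# per-element shapes so partially known shapes are narrowed and checked at
# runtime. The tensors below are illustrative assumptions.
def _example_assert_element_shape():
  import tensorflow as tf
  dataset = tf.data.Dataset.from_tensors(
      (tf.zeros([16, 256]), tf.zeros([4, 2])))
  shapes = (tf.TensorShape([16, 256]), tf.TensorShape([None, 2]))
  return dataset.apply(assert_element_shape(shapes))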
@deprecation.deprecated(None, "Use `tf.data.experimental.map_and_batch(...)`.")
def map_and_batch(map_func,
batch_size,
num_parallel_batches=None,
drop_remainder=False,
num_parallel_calls=None):
"""Fused implementation of `map` and `batch`.
Maps `map_func` across `batch_size` consecutive elements of this dataset
and then combines them into a batch. Functionally, it is equivalent to `map`
followed by `batch`. However, by fusing the two transformations together, the
implementation can be more efficient. Surfacing this transformation in the API
is temporary. Once automatic input pipeline optimization is implemented,
the fusing of `map` and `batch` will happen automatically and this API will be
deprecated.
Args:
map_func: A function mapping a nested structure of tensors to another nested
structure of tensors.
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
num_parallel_batches: (Optional.) A `tf.int64` scalar `tf.Tensor`,
representing the number of batches to create in parallel. On one hand,
higher values can help mitigate the effect of stragglers. On the other
hand, higher values can increase contention if CPU is scarce.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in case its size is smaller than
desired; the default behavior is not to drop the smaller batch.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number of elements to process in parallel. If not
specified, `batch_size * num_parallel_batches` elements will be processed
in parallel.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: If both `num_parallel_batches` and `num_parallel_calls` are
specified.
"""
return batching.map_and_batch(map_func, batch_size, num_parallel_batches,
drop_remainder, num_parallel_calls)
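# A hedged sketch (not part of the original module) of the recommended
# `tf.data.experimental.map_and_batch(...)` replacement, using a simple
# element-wise map function.
def _example_map_and_batch():
  import tensorflow as tf
  dataset = tf.data.Dataset.range(1000)
  return dataset.apply(
      tf.data.experimental.map_and_batch(
          map_func=lambda x: x * 2,
          batch_size=32,
          num_parallel_calls=tf.data.experimental.AUTOTUNE))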
class _RestructuredDataset(dataset_ops.UnaryDataset):
"""An internal helper for changing the structure and shape of a dataset."""
def __init__(self,
dataset,
output_types,
output_shapes=None,
output_classes=None):
"""Creates a new dataset with the given output types and shapes.
The given `dataset` must have a structure that is convertible:
* `dataset.output_types` must be the same as `output_types` modulo nesting.
* Each shape in `dataset.output_shapes` must be compatible with each shape
in `output_shapes` (if given).
Note: This helper permits "unsafe casts" for shapes, equivalent to using
`tf.Tensor.set_shape()` where domain-specific knowledge is available.
Args:
dataset: A `Dataset` object.
output_types: A nested structure of `tf.DType` objects.
output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects.
If omitted, the shapes will be inherited from `dataset`.
output_classes: (Optional.) A nested structure of class types. If omitted,
the class types will be inherited from `dataset`.
Raises:
ValueError: If either `output_types` or `output_shapes` is not compatible
with the structure of `dataset`.
"""
self._input_dataset = dataset
input_types = dataset_ops.get_legacy_output_types(dataset)
# Validate that the types are compatible.
output_types = nest.map_structure(dtypes.as_dtype, output_types)
flat_original_types = nest.flatten(input_types)
flat_new_types = nest.flatten(output_types)
if flat_original_types != flat_new_types:
raise ValueError(
"Dataset with output types %r cannot be restructured to have "
"output types %r" %
(dataset_ops.get_legacy_output_types(dataset), output_types))
input_shapes = dataset_ops.get_legacy_output_shapes(dataset)
if output_shapes is None:
# Inherit shapes from the original `dataset`.
output_shapes = nest.pack_sequence_as(
output_types, nest.flatten(input_shapes))
else:
# Validate that the shapes are compatible.
nest.assert_same_structure(output_types, output_shapes)
flat_original_shapes = nest.flatten(input_shapes)
flat_new_shapes = nest.flatten_up_to(output_types, output_shapes)
for original_shape, new_shape in zip(flat_original_shapes,
flat_new_shapes):
if not original_shape.is_compatible_with(new_shape):
raise ValueError(
"Dataset with output shapes %r cannot be restructured to have "
"incompatible output shapes %r" % (input_shapes,
output_shapes))
output_shapes = nest.map_structure_up_to(
output_types, tensor_shape.as_shape, output_shapes)
input_classes = dataset_ops.get_legacy_output_classes(dataset)
if output_classes is None:
# Inherit class types from the original `dataset`.
output_classes = nest.pack_sequence_as(
output_types, nest.flatten(input_classes))
self._structure = structure.convert_legacy_structure(
output_types, output_shapes, output_classes)
variant_tensor = self._input_dataset._variant_tensor # pylint: disable=protected-access
super(_RestructuredDataset, self).__init__(dataset, variant_tensor)
@property
def _element_structure(self):
return self._structure
|
tensorflow-master
|
tensorflow/contrib/data/python/ops/batching.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for reader Datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.util import deprecation
@deprecation.deprecated(None,
"Use `tf.data.experimental.make_csv_dataset(...)`.")
def make_csv_dataset(
file_pattern,
batch_size,
column_names=None,
column_defaults=None,
label_name=None,
select_columns=None,
field_delim=",",
use_quote_delim=True,
na_value="",
header=True,
num_epochs=None,
shuffle=True,
shuffle_buffer_size=10000,
shuffle_seed=None,
prefetch_buffer_size=dataset_ops.AUTOTUNE,
num_parallel_reads=1,
sloppy=False,
num_rows_for_inference=100,
compression_type=None,
):
"""Reads CSV files into a dataset.
Reads CSV files into a dataset, where each element is a (features, labels)
tuple that corresponds to a batch of CSV rows. The features dictionary
maps feature column names to `Tensor`s containing the corresponding
feature data, and labels is a `Tensor` containing the batch's label data.
Args:
file_pattern: List of files or patterns of file paths containing CSV
records. See `tf.io.gfile.glob` for pattern rules.
batch_size: An int representing the number of records to combine
in a single batch.
column_names: An optional list of strings that corresponds to the CSV
columns, in order. One per column of the input record. If this is not
provided, infers the column names from the first row of the records.
These names will be the keys of the features dict of each dataset element.
column_defaults: An optional list of default values for the CSV fields. One
item per selected column of the input record. Each item in the list is
either a valid CSV dtype (float32, float64, int32, int64, or string), or a
`Tensor` with one of the aforementioned types. The tensor can either be
a scalar default value (if the column is optional), or an empty tensor (if
the column is required). If a dtype is provided instead of a tensor, the
column is also treated as required. If this list is not provided, tries
to infer types based on reading the first num_rows_for_inference rows of
files specified, and assumes all columns are optional, defaulting to `0`
for numeric values and `""` for string values. If both this and
`select_columns` are specified, these must have the same lengths, and
`column_defaults` is assumed to be sorted in order of increasing column
index.
label_name: An optional string corresponding to the label column. If
provided, the data for this column is returned as a separate `Tensor` from
the features dictionary, so that the dataset complies with the format
expected by a `tf.Estimator.train` or `tf.Estimator.evaluate` input
function.
select_columns: An optional list of integer indices or string column
names, that specifies a subset of columns of CSV data to select. If
column names are provided, these must correspond to names provided in
`column_names` or inferred from the file header lines. When this argument
is specified, only a subset of CSV columns will be parsed and returned,
corresponding to the columns specified. Using this results in faster
parsing and lower memory usage. If both this and `column_defaults` are
specified, these must have the same lengths, and `column_defaults` is
assumed to be sorted in order of increasing column index.
field_delim: An optional `string`. Defaults to `","`. Char delimiter to
separate fields in a record.
use_quote_delim: An optional bool. Defaults to `True`. If false, treats
double quotation marks as regular characters inside of the string fields.
na_value: Additional string to recognize as NA/NaN.
header: A bool that indicates whether the first rows of provided CSV files
correspond to header lines with column names, and should not be included
in the data.
num_epochs: An int specifying the number of times this dataset is repeated.
If None, cycles through the dataset forever.
shuffle: A bool that indicates whether the input should be shuffled.
shuffle_buffer_size: Buffer size to use for shuffling. A large buffer size
ensures better shuffling, but increases memory usage and startup time.
shuffle_seed: Randomization seed to use for shuffling.
prefetch_buffer_size: An int specifying the number of feature
batches to prefetch for performance improvement. Recommended value is the
number of batches consumed per training step. Defaults to auto-tune.
num_parallel_reads: Number of threads used to read CSV records from files.
If >1, the results will be interleaved.
sloppy: If `True`, reading performance will be improved at
the cost of non-deterministic ordering. If `False`, the order of elements
produced is deterministic prior to shuffling (elements are still
randomized if `shuffle=True`. Note that if the seed is set, then order
of elements after shuffling is deterministic). Defaults to `False`.
num_rows_for_inference: Number of rows of a file to use for type inference
if record_defaults is not provided. If None, reads all the rows of all
the files. Defaults to 100.
compression_type: (Optional.) A `tf.string` scalar evaluating to one of
`""` (no compression), `"ZLIB"`, or `"GZIP"`. Defaults to no compression.
Returns:
A dataset, where each element is a (features, labels) tuple that corresponds
to a batch of `batch_size` CSV rows. The features dictionary maps feature
column names to `Tensor`s containing the corresponding column data, and
labels is a `Tensor` containing the column data for the label column
specified by `label_name`.
Raises:
ValueError: If any of the arguments is malformed.
"""
return readers.make_csv_dataset(
file_pattern, batch_size, column_names, column_defaults, label_name,
select_columns, field_delim, use_quote_delim, na_value, header,
num_epochs, shuffle, shuffle_buffer_size, shuffle_seed,
prefetch_buffer_size, num_parallel_reads, sloppy, num_rows_for_inference,
compression_type)
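# A hedged usage sketch (not part of the original module); the file pattern and
# the "label" column name below are illustrative assumptions.
def _example_make_csv_dataset():
  import tensorflow as tf
  dataset = tf.data.experimental.make_csv_dataset(
      "/tmp/train-*.csv",   # hypothetical file pattern
      batch_size=32,
      label_name="label",   # hypothetical label column
      num_epochs=1)
  # Each element is a (features, labels) tuple; `features` maps column names
  # to batched tensors.
  return dataset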
class CsvDataset(readers.CsvDataset):
"""A Dataset comprising lines from one or more CSV files."""
@deprecation.deprecated(None, "Use `tf.data.experimental.CsvDataset(...)`.")
def __init__(self,
filenames,
record_defaults,
compression_type=None,
buffer_size=None,
header=False,
field_delim=",",
use_quote_delim=True,
na_value="",
select_cols=None):
super(CsvDataset, self).__init__(
filenames, record_defaults, compression_type, buffer_size, header,
field_delim, use_quote_delim, na_value, select_cols)
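# A minimal sketch (not part of the original module) of constructing the
# lower-level `CsvDataset` directly; the file name and column types are
# illustrative assumptions.
def _example_csv_dataset():
  import tensorflow as tf
  # Two required float columns followed by one required string column.
  record_defaults = [tf.float32, tf.float32, tf.string]
  return tf.data.experimental.CsvDataset(
      "/tmp/data.csv", record_defaults, header=True)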
@deprecation.deprecated(
None, "Use `tf.data.experimental.make_batched_features_dataset(...)`.")
def make_batched_features_dataset(file_pattern,
batch_size,
features,
reader=core_readers.TFRecordDataset,
label_key=None,
reader_args=None,
num_epochs=None,
shuffle=True,
shuffle_buffer_size=10000,
shuffle_seed=None,
prefetch_buffer_size=dataset_ops.AUTOTUNE,
reader_num_threads=1,
parser_num_threads=2,
sloppy_ordering=False,
drop_final_batch=False):
"""Returns a `Dataset` of feature dictionaries from `Example` protos.
If the `label_key` argument is provided, returns a `Dataset` of tuples
comprising feature dictionaries and labels.
Example:
```
serialized_examples = [
features {
feature { key: "age" value { int64_list { value: [ 0 ] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
feature { key: "kws" value { bytes_list { value: [ "code", "art" ] } } }
},
features {
feature { key: "age" value { int64_list { value: [] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
feature { key: "kws" value { bytes_list { value: [ "sports" ] } } }
}
]
```
We can use arguments:
```
features: {
"age": FixedLenFeature([], dtype=tf.int64, default_value=-1),
"gender": FixedLenFeature([], dtype=tf.string),
"kws": VarLenFeature(dtype=tf.string),
}
```
And the expected output is:
```python
{
"age": [[0], [-1]],
"gender": [["f"], ["f"]],
"kws": SparseTensor(
indices=[[0, 0], [0, 1], [1, 0]],
values=["code", "art", "sports"]
dense_shape=[2, 2]),
}
```
Args:
file_pattern: List of files or patterns of file paths containing
`Example` records. See `tf.io.gfile.glob` for pattern rules.
batch_size: An int representing the number of records to combine
in a single batch.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values. See `tf.io.parse_example`.
reader: A function or class that can be
called with a `filenames` tensor and (optional) `reader_args` and returns
a `Dataset` of `Example` tensors. Defaults to `tf.data.TFRecordDataset`.
label_key: (Optional) A string corresponding to the key under which labels are
stored in the `tf.Example` protos. If provided, it must be one of the
`features` keys; otherwise a `ValueError` is raised.
reader_args: Additional arguments to pass to the reader class.
num_epochs: Integer specifying the number of times to read through the
dataset. If None, cycles through the dataset forever. Defaults to `None`.
shuffle: A boolean, indicates whether the input should be shuffled. Defaults
to `True`.
shuffle_buffer_size: Buffer size of the ShuffleDataset. A large capacity
ensures better shuffling but would increase memory usage and startup time.
shuffle_seed: Randomization seed to use for shuffling.
prefetch_buffer_size: Number of feature batches to prefetch in order to
improve performance. Recommended value is the number of batches consumed
per training step. Defaults to auto-tune.
reader_num_threads: Number of threads used to read `Example` records. If >1,
the results will be interleaved.
parser_num_threads: Number of threads to use for parsing `Example` tensors
into a dictionary of `Feature` tensors.
sloppy_ordering: If `True`, reading performance will be improved at
the cost of non-deterministic ordering. If `False`, the order of elements
produced is deterministic prior to shuffling (elements are still
randomized if `shuffle=True`. Note that if the seed is set, then order
of elements after shuffling is deterministic). Defaults to `False`.
drop_final_batch: If `True`, and the batch size does not evenly divide the
input dataset size, the final smaller batch will be dropped. Defaults to
`False`.
Returns:
A dataset of `dict` elements, (or a tuple of `dict` elements and label).
Each `dict` maps feature keys to `Tensor` or `SparseTensor` objects.
Raises:
ValueError: If `label_key` is not one of the `features` keys.
"""
return readers.make_batched_features_dataset(
file_pattern, batch_size, features, reader, label_key, reader_args,
num_epochs, shuffle, shuffle_buffer_size, shuffle_seed,
prefetch_buffer_size, reader_num_threads, parser_num_threads,
sloppy_ordering, drop_final_batch)
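# A hedged sketch (not part of the original module) mirroring the docstring
# example above; the TFRecord file pattern is an illustrative assumption.
def _example_make_batched_features_dataset():
  import tensorflow as tf
  features = {
      "age": tf.io.FixedLenFeature([], dtype=tf.int64, default_value=-1),
      "gender": tf.io.FixedLenFeature([], dtype=tf.string),
      "kws": tf.io.VarLenFeature(dtype=tf.string),
  }
  return tf.data.experimental.make_batched_features_dataset(
      "/tmp/examples-*.tfrecord",  # hypothetical file pattern
      batch_size=2,
      features=features,
      label_key="age",
      num_epochs=1)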
@deprecation.deprecated(
None, "Use `tf.data.experimental.make_batched_features_dataset(...)`")
def read_batch_features(file_pattern,
batch_size,
features,
reader=core_readers.TFRecordDataset,
reader_args=None,
randomize_input=True,
num_epochs=None,
capacity=10000):
"""Reads batches of Examples.
Example:
```
serialized_examples = [
features {
feature { key: "age" value { int64_list { value: [ 0 ] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
feature { key: "kws" value { bytes_list { value: [ "code", "art" ] } } }
},
features {
feature { key: "age" value { int64_list { value: [] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
feature { key: "kws" value { bytes_list { value: [ "sports" ] } } }
}
]
```
We can use arguments:
```
features: {
"age": FixedLenFeature([], dtype=tf.int64, default_value=-1),
"gender": FixedLenFeature([], dtype=tf.string),
"kws": VarLenFeature(dtype=tf.string),
}
```
And the expected output is:
```python
{
"age": [[0], [-1]],
"gender": [["f"], ["f"]],
"kws": SparseTensor(
indices=[[0, 0], [0, 1], [1, 0]],
values=["code", "art", "sports"]
dense_shape=[2, 2]),
}
```
Args:
file_pattern: List of files or patterns of file paths containing
`Example` records. See `tf.io.gfile.glob` for pattern rules.
batch_size: An int representing the number of records to combine
in a single batch.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values. See `tf.io.parse_example`.
reader: A function or class that can be
called with a `filenames` tensor and (optional) `reader_args` and returns
a `Dataset` of `Example` tensors. Defaults to `tf.data.TFRecordDataset`.
reader_args: Additional arguments to pass to the reader class.
randomize_input: Whether the input should be randomized.
num_epochs: Integer specifying the number of times to read through the
dataset. If None, cycles through the dataset forever.
capacity: Buffer size of the ShuffleDataset. A large capacity ensures better
shuffling but would increase memory usage and startup time.
Returns:
A dict from keys in features to `Tensor` or `SparseTensor` objects.
"""
dataset = readers.make_batched_features_dataset(
file_pattern,
batch_size,
features,
reader=reader,
reader_args=reader_args,
shuffle=randomize_input,
num_epochs=num_epochs,
shuffle_buffer_size=capacity)
iterator = dataset_ops.make_one_shot_iterator(dataset)
outputs = iterator.get_next()
return outputs
class SqlDataset(readers.SqlDataset):
"""A `Dataset` consisting of the results from a SQL query."""
@deprecation.deprecated(None, "Use `tf.data.experimental.SqlDataset(...)`.")
def __init__(self, driver_name, data_source_name, query, output_types):
super(SqlDataset, self).__init__(
driver_name, data_source_name, query, output_types)
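# A hedged sketch (not part of the original module); the sqlite file, table,
# and column types are illustrative assumptions.
def _example_sql_dataset():
  import tensorflow as tf
  return tf.data.experimental.SqlDataset(
      "sqlite", "/tmp/users.db",
      "SELECT name, age FROM users",
      (tf.string, tf.int32))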
class LMDBDataset(dataset_ops.DatasetSource):
"""A LMDB Dataset that reads the lmdb file."""
def __init__(self, filenames):
"""Create a `LMDBDataset`.
`LMDBDataset` allows a user to read data from an mdb file as
(key, value) pairs sequentially.
For example:
```python
tf.compat.v1.enable_eager_execution()
dataset = tf.contrib.data.LMDBDataset("/foo/bar.mdb")
# Prints the (key, value) pairs inside a lmdb file.
for key, value in dataset:
print(key, value)
```
Args:
filenames: A `tf.string` tensor containing one or more filenames.
"""
self._filenames = ops.convert_to_tensor(
filenames, dtype=dtypes.string, name="filenames")
variant_tensor = gen_experimental_dataset_ops.experimental_lmdb_dataset(
self._filenames, **dataset_ops.flat_structure(self))
super(LMDBDataset, self).__init__(variant_tensor)
@property
def _element_structure(self):
return structure.NestedStructure(
(structure.TensorStructure(dtypes.string, []),
structure.TensorStructure(dtypes.string, [])))
|
tensorflow-master
|
tensorflow/contrib/data/python/ops/readers.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resampling dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import resampling
from tensorflow.python.util import deprecation
@deprecation.deprecated(None,
"Use `tf.data.experimental.rejection_resample(...)`.")
def rejection_resample(class_func, target_dist, initial_dist=None, seed=None):
"""A transformation that resamples a dataset to achieve a target distribution.
**NOTE** Resampling is performed via rejection sampling; some fraction
of the input values will be dropped.
Args:
class_func: A function mapping an element of the input dataset to a scalar
`tf.int32` tensor. Values should be in `[0, num_classes)`.
target_dist: A floating point type tensor, shaped `[num_classes]`.
initial_dist: (Optional.) A floating point type tensor, shaped
`[num_classes]`. If not provided, the true class distribution is
estimated live in a streaming fashion.
seed: (Optional.) Python integer seed for the resampler.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return resampling.rejection_resample(class_func, target_dist, initial_dist,
seed)
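# A hedged sketch (not part of the original module): resample a small labelled
# dataset towards a uniform class distribution; the (features, label) element
# structure is an assumption.
def _example_rejection_resample():
  import tensorflow as tf
  dataset = tf.data.Dataset.from_tensor_slices(
      ([1.0, 2.0, 3.0, 4.0], [0, 0, 0, 1]))
  resampled = dataset.apply(
      tf.data.experimental.rejection_resample(
          class_func=lambda features, label: label,
          target_dist=[0.5, 0.5]))
  # The transformation yields (class, element) pairs; keep only the element.
  return resampled.map(lambda extra_label, element: element)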
|
tensorflow-master
|
tensorflow/contrib/data/python/ops/resampling.py
|