# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.training.evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import time
import numpy as np
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.contrib.training.python.training import training
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary import summary as summary_lib
from tensorflow.python.summary import summary_iterator
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import saver as saver_lib
class CheckpointIteratorTest(test.TestCase):
def testReturnsEmptyIfNoCheckpointsFound(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), 'no_checkpoints_found')
num_found = 0
for _ in evaluation.checkpoints_iterator(checkpoint_dir, timeout=0):
num_found += 1
self.assertEqual(num_found, 0)
def testReturnsSingleCheckpointIfOneCheckpointFound(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), 'one_checkpoint_found')
if not gfile.Exists(checkpoint_dir):
gfile.MakeDirs(checkpoint_dir)
global_step = variables.get_or_create_global_step()
saver = saver_lib.Saver() # Saves the global step.
with self.cached_session() as session:
session.run(variables_lib.global_variables_initializer())
save_path = os.path.join(checkpoint_dir, 'model.ckpt')
saver.save(session, save_path, global_step=global_step)
num_found = 0
for _ in evaluation.checkpoints_iterator(checkpoint_dir, timeout=0):
num_found += 1
self.assertEqual(num_found, 1)
def testReturnsSingleCheckpointIfOneShardedCheckpoint(self):
checkpoint_dir = os.path.join(self.get_temp_dir(),
'one_checkpoint_found_sharded')
if not gfile.Exists(checkpoint_dir):
gfile.MakeDirs(checkpoint_dir)
global_step = variables.get_or_create_global_step()
# This will result in 3 different checkpoint shard files.
with ops.device('/cpu:0'):
variables_lib.Variable(10, name='v0')
with ops.device('/cpu:1'):
variables_lib.Variable(20, name='v1')
saver = saver_lib.Saver(sharded=True)
with session_lib.Session(
target='',
config=config_pb2.ConfigProto(device_count={'CPU': 2})) as session:
session.run(variables_lib.global_variables_initializer())
save_path = os.path.join(checkpoint_dir, 'model.ckpt')
saver.save(session, save_path, global_step=global_step)
num_found = 0
for _ in evaluation.checkpoints_iterator(checkpoint_dir, timeout=0):
num_found += 1
self.assertEqual(num_found, 1)
def testTimeoutFn(self):
timeout_fn_calls = [0]
def timeout_fn():
timeout_fn_calls[0] += 1
return timeout_fn_calls[0] > 3
results = list(
evaluation.checkpoints_iterator(
'/non-existent-dir', timeout=0.1, timeout_fn=timeout_fn))
self.assertEqual([], results)
self.assertEqual(4, timeout_fn_calls[0])
class WaitForNewCheckpointTest(test.TestCase):
def testReturnsNoneAfterTimeout(self):
start = time.time()
ret = evaluation.wait_for_new_checkpoint(
'/non-existent-dir', 'foo', timeout=1.0, seconds_to_sleep=0.5)
end = time.time()
self.assertIsNone(ret)
# We've waited at least one sleep interval (0.5 seconds).
self.assertGreater(end, start + 0.5)
# The timeout kicked in.
self.assertLess(end, start + 1.1)
def logistic_classifier(inputs):
return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)
class EvaluateOnceTest(test.TestCase):
def setUp(self):
super(EvaluateOnceTest, self).setUp()
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def _train_model(self, checkpoint_dir, num_steps):
"""Trains a simple classification model.
Note that the data has been configured such that after around 300 steps,
the model has memorized the dataset (i.e. we can expect 100% accuracy).
Args:
checkpoint_dir: The directory where the checkpoint is written to.
num_steps: The number of steps to train for.
"""
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
loss = loss_ops.log_loss(tf_predictions, tf_labels)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(loss, optimizer)
loss = training.train(
train_op,
checkpoint_dir,
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps)])
if num_steps >= 300:
assert loss < .015
def testEvaluatePerfectModel(self):
checkpoint_dir = os.path.join(self.get_temp_dir(),
'evaluate_perfect_model_once')
# Train a Model to completion:
self._train_model(checkpoint_dir, num_steps=300)
# Run
inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
labels = constant_op.constant(self._labels, dtype=dtypes.float32)
logits = logistic_classifier(inputs)
predictions = math_ops.round(logits)
accuracy, update_op = metrics.accuracy(
predictions=predictions, labels=labels)
checkpoint_path = evaluation.wait_for_new_checkpoint(checkpoint_dir)
final_ops_values = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
eval_ops=update_op,
final_ops={'accuracy': accuracy},
hooks=[
evaluation.StopAfterNEvalsHook(1),
])
self.assertTrue(final_ops_values['accuracy'] > .99)
def testEvalOpAndFinalOp(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), 'eval_ops_and_final_ops')
# Train a model for a single step to get a checkpoint.
self._train_model(checkpoint_dir, num_steps=1)
checkpoint_path = evaluation.wait_for_new_checkpoint(checkpoint_dir)
# Create the model so we have something to restore.
inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
logistic_classifier(inputs)
num_evals = 5
final_increment = 9.0
my_var = variables.local_variable(0.0, name='MyVar')
eval_ops = state_ops.assign_add(my_var, 1.0)
final_ops = array_ops.identity(my_var) + final_increment
final_ops_values = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
eval_ops=eval_ops,
final_ops={'value': final_ops},
hooks=[
evaluation.StopAfterNEvalsHook(num_evals),
])
self.assertEqual(final_ops_values['value'], num_evals + final_increment)
def testOnlyFinalOp(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), 'only_final_ops')
# Train a model for a single step to get a checkpoint.
self._train_model(checkpoint_dir, num_steps=1)
checkpoint_path = evaluation.wait_for_new_checkpoint(checkpoint_dir)
# Create the model so we have something to restore.
inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
logistic_classifier(inputs)
final_increment = 9.0
my_var = variables.local_variable(0.0, name='MyVar')
final_ops = array_ops.identity(my_var) + final_increment
final_ops_values = evaluation.evaluate_once(
checkpoint_path=checkpoint_path, final_ops={'value': final_ops})
self.assertEqual(final_ops_values['value'], final_increment)
class EvaluateRepeatedlyTest(test.TestCase):
def setUp(self):
super(EvaluateRepeatedlyTest, self).setUp()
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def _train_model(self, checkpoint_dir, num_steps):
"""Trains a simple classification model.
Note that the data has been configured such that after around 300 steps,
the model has memorized the dataset (i.e. we can expect 100% accuracy).
Args:
checkpoint_dir: The directory where the checkpoint is written to.
num_steps: The number of steps to train for.
"""
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
loss = loss_ops.log_loss(tf_predictions, tf_labels)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(loss, optimizer)
loss = training.train(
train_op,
checkpoint_dir,
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps)])
def testEvaluatePerfectModel(self):
checkpoint_dir = os.path.join(self.get_temp_dir(),
'evaluate_perfect_model_repeated')
# Train a Model to completion:
self._train_model(checkpoint_dir, num_steps=300)
# Run
inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
labels = constant_op.constant(self._labels, dtype=dtypes.float32)
logits = logistic_classifier(inputs)
predictions = math_ops.round(logits)
accuracy, update_op = metrics.accuracy(
predictions=predictions, labels=labels)
final_values = evaluation.evaluate_repeatedly(
checkpoint_dir=checkpoint_dir,
eval_ops=update_op,
final_ops={'accuracy': accuracy},
hooks=[
evaluation.StopAfterNEvalsHook(1),
],
max_number_of_evaluations=1)
self.assertTrue(final_values['accuracy'] > .99)
def testEvaluationLoopTimeout(self):
checkpoint_dir = os.path.join(self.get_temp_dir(),
'evaluation_loop_timeout')
if not gfile.Exists(checkpoint_dir):
gfile.MakeDirs(checkpoint_dir)
# We need a variable that the saver will try to restore.
variables.get_or_create_global_step()
# Run with placeholders. If we actually try to evaluate this, we'd fail
# since we're not using a feed_dict.
cant_run_op = array_ops.placeholder(dtype=dtypes.float32)
start = time.time()
final_values = evaluation.evaluate_repeatedly(
checkpoint_dir=checkpoint_dir,
eval_ops=cant_run_op,
hooks=[evaluation.StopAfterNEvalsHook(10)],
timeout=6)
end = time.time()
self.assertFalse(final_values)
# Assert that we've waited for the duration of the timeout (minus the sleep
# time).
self.assertGreater(end - start, 5.0)
# Then the timeout kicked in and stopped the loop.
self.assertLess(end - start, 7)
def testEvaluationLoopTimeoutWithTimeoutFn(self):
checkpoint_dir = os.path.join(self.get_temp_dir(),
'evaluation_loop_timeout_with_timeout_fn')
# Train a Model to completion:
self._train_model(checkpoint_dir, num_steps=300)
# Run
inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
labels = constant_op.constant(self._labels, dtype=dtypes.float32)
logits = logistic_classifier(inputs)
predictions = math_ops.round(logits)
accuracy, update_op = metrics.accuracy(
predictions=predictions, labels=labels)
timeout_fn_calls = [0]
def timeout_fn():
timeout_fn_calls[0] += 1
return timeout_fn_calls[0] > 3
final_values = evaluation.evaluate_repeatedly(
checkpoint_dir=checkpoint_dir,
eval_ops=update_op,
final_ops={'accuracy': accuracy},
hooks=[
evaluation.StopAfterNEvalsHook(1),
],
eval_interval_secs=1,
max_number_of_evaluations=2,
timeout=0.1,
timeout_fn=timeout_fn)
# We should have evaluated once.
self.assertTrue(final_values['accuracy'] > .99)
# And the timeout fn was called 4 times.
self.assertEqual(4, timeout_fn_calls[0])
def testEvaluateWithEvalFeedDict(self):
# Create a checkpoint.
checkpoint_dir = os.path.join(self.get_temp_dir(),
'evaluate_with_eval_feed_dict')
self._train_model(checkpoint_dir, num_steps=1)
# We need a variable that the saver will try to restore.
variables.get_or_create_global_step()
# Create a variable and an eval op that increments it with a placeholder.
my_var = variables.local_variable(0.0, name='my_var')
increment = array_ops.placeholder(dtype=dtypes.float32)
eval_ops = state_ops.assign_add(my_var, increment)
increment_value = 3
num_evals = 5
expected_value = increment_value * num_evals
final_values = evaluation.evaluate_repeatedly(
checkpoint_dir=checkpoint_dir,
eval_ops=eval_ops,
feed_dict={increment: 3},
final_ops={'my_var': array_ops.identity(my_var)},
hooks=[
evaluation.StopAfterNEvalsHook(num_evals),
],
max_number_of_evaluations=1)
self.assertEqual(final_values['my_var'], expected_value)
def _create_names_to_metrics(self, predictions, labels):
accuracy0, update_op0 = metrics.accuracy(labels, predictions)
accuracy1, update_op1 = metrics.accuracy(labels, predictions + 1)
names_to_values = {'Accuracy': accuracy0, 'Another_accuracy': accuracy1}
names_to_updates = {'Accuracy': update_op0, 'Another_accuracy': update_op1}
return names_to_values, names_to_updates
def _verify_events(self, output_dir, names_to_values):
"""Verifies that the given `names_to_values` are found in the summaries.
Also checks that a GraphDef was written out to the events file.
Args:
output_dir: An existing directory where summaries are found.
names_to_values: A dictionary of strings to values.
"""
# Check that the results were saved. The events file may have additional
# entries, e.g. the event version stamp, so we have to parse things a bit.
output_filepath = glob.glob(os.path.join(output_dir, '*'))
self.assertEqual(len(output_filepath), 1)
events = summary_iterator.summary_iterator(output_filepath[0])
summaries = []
graph_def = None
for event in events:
if event.summary.value:
summaries.append(event.summary)
elif event.graph_def:
graph_def = event.graph_def
values = []
for summary in summaries:
for value in summary.value:
values.append(value)
saved_results = {v.tag: v.simple_value for v in values}
for name in names_to_values:
self.assertAlmostEqual(names_to_values[name], saved_results[name], 5)
self.assertIsNotNone(graph_def)
def testSummariesAreFlushedToDisk(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), 'summaries_are_flushed')
logdir = os.path.join(self.get_temp_dir(), 'summaries_are_flushed_eval')
if gfile.Exists(logdir):
gfile.DeleteRecursively(logdir)
# Train a Model to completion:
self._train_model(checkpoint_dir, num_steps=300)
# Create the model (which can be restored).
inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
logistic_classifier(inputs)
names_to_values = {'bread': 3.4, 'cheese': 4.5, 'tomato': 2.0}
for k in names_to_values:
v = names_to_values[k]
summary_lib.scalar(k, v)
evaluation.evaluate_repeatedly(
checkpoint_dir=checkpoint_dir,
hooks=[
evaluation.SummaryAtEndHook(log_dir=logdir),
],
max_number_of_evaluations=1)
self._verify_events(logdir, names_to_values)
def testSummaryAtEndHookWithoutSummaries(self):
logdir = os.path.join(self.get_temp_dir(),
'summary_at_end_hook_without_summaries')
if gfile.Exists(logdir):
gfile.DeleteRecursively(logdir)
with ops.Graph().as_default():
# Purposefully don't add any summaries. The hook will just dump the
# GraphDef event.
hook = evaluation.SummaryAtEndHook(log_dir=logdir)
hook.begin()
with self.cached_session() as session:
hook.after_create_session(session, None)
hook.end(session)
self._verify_events(logdir, {})
if __name__ == '__main__':
test.main()
# repo: tensorflow-r1.15.5-nv23.03 | file: tensorflow/contrib/training/python/training/evaluation_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Tuner interface for hyper-parameters tuning."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.framework.python.framework import experimental
@six.add_metaclass(abc.ABCMeta)
class Tuner(object):
"""Tuner class is the interface for Experiment hyper-parameters tuning.
Example:
```
def _create_my_experiment(run_config, hparams):
hidden_units = [hparams.unit_per_layer] * hparams.num_hidden_layers
return tf.contrib.learn.Experiment(
estimator=DNNClassifier(config=run_config, hidden_units=hidden_units),
train_input_fn=my_train_input,
eval_input_fn=my_eval_input)
tuner = create_tuner(study_configuration, objective_key)
learn_runner.tune(experiment_fn=_create_my_experiment, tuner=tuner)
```
"""
@experimental
@abc.abstractmethod
def next_trial(self):
"""Switch to the next trial.
Ask the tuning service for a new trial for hyper-parameters tuning.
Returns:
A boolean indicating if a trial was assigned to the tuner.
Raises:
RuntimeError: If the tuner is not initialized correctly.
"""
raise NotImplementedError("Calling an abstract method.")
@experimental
@abc.abstractmethod
def run_experiment(self, experiment_fn):
"""Creates an Experiment by calling `experiment_fn` and executes it.
It creates a `RunConfig` that captures the current execution environment
configuration and retrieves the hyper-parameters for the current trial from
the tuning service. Both are passed to `experiment_fn`, which uses them to
create the Experiment for the current trial. When the trial finishes, the
measure is reported back to the tuning service.
If the `RunConfig` does not include a task type, an exception is raised. The
task type should be one of the types supported by the tuner. If the tuner
does not support the task type directly, it may delegate the task to the
Experiment (typically by calling one of the Experiment's methods); if neither
the tuner nor the Experiment supports the task type, an exception is raised.
Args:
experiment_fn: A function that creates an `Experiment`. It should accept
an argument `run_config` which should be used to create the `Estimator`
(passed as `config` to its constructor), and an argument `hparams`,
which should be used for hyper-parameters tuning. It must return an
`Experiment`.
"""
raise NotImplementedError("Calling an abstract method.")
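

# --- Illustrative sketch (not part of the original file) ---
# A minimal, hypothetical Tuner implementation that hands out trials from an
# in-memory list of hyper-parameter dicts. The class name `_FixedListTuner`,
# its `trials` argument, and the use of `HParams`/`RunConfig` below are
# assumptions made for illustration only; a real tuner would request trials
# from a tuning service and report the final measure back in `run_experiment`.
from tensorflow.contrib.learn.python.learn.estimators import run_config as run_config_lib
from tensorflow.contrib.training.python.training import hparam


class _FixedListTuner(Tuner):
  """Hands out trials from a fixed list of hyper-parameter dictionaries."""

  def __init__(self, trials):
    self._trials = list(trials)
    self._hparams = None

  def next_trial(self):
    # A new trial is available as long as the list is not exhausted.
    if not self._trials:
      return False
    self._hparams = hparam.HParams(**self._trials.pop(0))
    return True

  def run_experiment(self, experiment_fn):
    # Build the Experiment with a fresh RunConfig and the current trial's
    # hyper-parameters, then run it to completion.
    experiment = experiment_fn(run_config_lib.RunConfig(), self._hparams)
    experiment.train_and_evaluate()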
# repo: tensorflow-r1.15.5-nv23.03 | file: tensorflow/contrib/training/python/training/tuner.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.training.python.training import sampling_ops
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
class SamplingOpsThreadingTest(test.TestCase):
def testMultiThreadedEstimateDataDistribution(self):
num_classes = 10
# Set up graph.
random_seed.set_random_seed(1234)
label = math_ops.cast(
math_ops.round(random_ops.random_uniform([1]) * num_classes),
dtypes_lib.int32)
prob_estimate = sampling_ops._estimate_data_distribution( # pylint: disable=protected-access
label, num_classes)
# Check that prob_estimate is well-behaved in a multithreaded context.
_, _, [prob_estimate] = sampling_ops._verify_input( # pylint: disable=protected-access
[], label, [prob_estimate])
# Use queues to run multiple threads over the graph, each of which
# fetches `prob_estimate`.
queue = data_flow_ops.FIFOQueue(
capacity=25,
dtypes=[prob_estimate.dtype],
shapes=[prob_estimate.get_shape()])
enqueue_op = queue.enqueue([prob_estimate])
queue_runner_impl.add_queue_runner(
queue_runner_impl.QueueRunner(queue, [enqueue_op] * 25))
out_tensor = queue.dequeue()
# Run the multi-threaded session.
with self.cached_session() as sess:
# Need to initialize variables that keep running total of classes seen.
variables.global_variables_initializer().run()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
for _ in range(25):
sess.run([out_tensor])
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
test.main()
# repo: tensorflow-r1.15.5-nv23.03 | file: tensorflow/contrib/training/python/training/sampling_ops_threading_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for bucketing data into groups.
The classes and functions in this module are used to queue up data into
buckets conditional on side information (e.g. sequence length).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.summary import summary
from tensorflow.python.training import input as input_py
from tensorflow.python.training import queue_runner
# pylint: disable=protected-access
_as_original_type = input_py._as_original_type
_as_tensor_list = input_py._as_tensor_list
_restore_sparse_tensors = input_py._restore_sparse_tensors
_dtypes = input_py._dtypes
_store_sparse_tensors = input_py._store_sparse_tensors
_validate_keep_input = input_py._validate_keep_input
_shapes = input_py._shapes
_which_queue = input_py._which_queue
# pylint: enable=protected-access
def _validate_bucket(tensor_list):
tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
if not tensor_list:
raise ValueError("Expected at least one tensor in bucket().")
return tensor_list
def bucket(tensors,
which_bucket,
batch_size,
num_buckets,
num_threads=1,
capacity=32,
bucket_capacities=None,
shapes=None,
dynamic_pad=False,
allow_smaller_final_batch=False,
keep_input=True,
shared_name=None,
name=None):
"""Lazy bucketing of input tensors according to `which_bucket`.
The argument `tensors` can be a list or a dictionary of tensors.
The value returned by the function will be of the same type
as `tensors`.
The tensors entering this function are put into the bucket given by
`which_bucket`. Each bucket has its own queue. When a bucket contains
`batch_size` elements, this minibatch is pushed onto a top queue. The
tensors returned from this function are the result of dequeuing the
next minibatch from this top queue.
This function is implemented using several queues. A `QueueRunner` for the
queues is added to the current `Graph`'s `QUEUE_RUNNERS` collection.
As the returned tensors are the result of a dequeue operation, evaluating
them will throw a `tf.errors.OutOfRangeError` when the input queue is
exhausted. If these tensors are feeding another input queue, its queue runner
will catch this exception; however, if they are used in your main thread
you are responsible for catching this yourself.
*N.B.:* If `dynamic_pad` is `False`, you must ensure that either
(i) the `shapes` argument is passed, or (ii) all of the tensors in
`tensors` must have fully-defined shapes. `ValueError` will be
raised if neither of these conditions holds.
If `dynamic_pad` is `True`, it is sufficient that the *rank* of the
tensors is known, but individual dimensions may have shape `None`.
In this case, for each enqueue the dimensions with value `None`
may have a variable length; upon dequeue, the output tensors will be padded
on the right to the maximum shape of the tensors in the current minibatch.
For numbers, this padding takes value 0. For strings, this padding is
the empty string. See `PaddingFIFOQueue` for more info.
If `allow_smaller_final_batch` is `True`, a smaller batch value than
`batch_size` is returned when the queues are closed and there are not enough
elements to fill the batch; otherwise the pending elements are discarded.
In addition, all output tensors' static shapes, as accessed via the
`get_shape()` method, will have a 0th `Dimension` value of `None`, and
operations that depend on fixed batch_size would fail.
Args:
tensors: The list or dictionary of tensors, representing a single element,
to bucket. Nested lists are not supported.
which_bucket: An `int32` scalar Tensor taking a value in `[0, num_buckets)`.
batch_size: The new batch size pulled from the queue (all queues will have
the same size). If a list is passed in then each bucket will have a
different batch_size.
(python int, int32 scalar or iterable of integers of length num_buckets).
num_buckets: A python integer, the number of buckets.
num_threads: An integer. The number of threads enqueuing `tensors`.
capacity: An integer. The maximum number of minibatches in the top queue,
and also (by default) the maximum number of elements within each bucket.
bucket_capacities: (Optional) None or a list of integers, the capacities of
each bucket. If None, capacity is used (default). If specified, it must
be a list of integers of length num_buckets: the i-th element is used
as capacity for the i-th bucket queue.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensors`.
dynamic_pad: Boolean. Allow variable dimensions in input shapes.
The given dimensions are padded upon dequeue so that tensors within a
batch have the same shapes.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batches to be smaller if there are insufficient items left in the queues.
keep_input: A `bool` scalar Tensor. If provided, this tensor controls
whether the input is added to the queue or not. If it evaluates `True`,
then `tensors` are added to the bucket; otherwise they are dropped. This
tensor essentially acts as a filtering mechanism.
shared_name: (Optional). If set, the queues will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A tuple `(bucket, outputs)` where `bucket` is
a `int32` scalar tensor and `outputs` is a list or
dictionary of batched outputs corresponding to elements of `tensors`.
Every step will receive a new bucket of outputs.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensors` or if batch_size is a sequence
but its length != num_buckets. Also if bucket_capacities is not None but
its length != num_buckets.
"""
batch_size_per_bucket = False
if isinstance(batch_size, (list, tuple)):
batch_size_per_bucket = True
if len(batch_size) != num_buckets:
raise ValueError(
"If batch_size is a list it must have num_buckets elements")
else:
batch_size = [batch_size] * num_buckets
if bucket_capacities is None:
bucket_capacities = [capacity] * num_buckets
if len(bucket_capacities) != num_buckets:
raise ValueError(
"The list bucket_capacities (%s) must have exactly num_buckets (%d) "
"elements." % (str(bucket_capacities), num_buckets))
tensor_list = _as_tensor_list(tensors)
with ops.name_scope(name, "bucket", tensor_list) as name:
tensor_list = _validate_bucket(tensor_list)
keep_input = _validate_keep_input(keep_input, enqueue_many=False)
(tensor_list, sparse_info) = _store_sparse_tensors(
tensor_list, enqueue_many=False, keep_input=keep_input)
# Round-trip batch_size to a tensor, and possibly back
for i, bucket_batch_size in enumerate(batch_size):
bucket_batch_size = ops.convert_to_tensor(
bucket_batch_size, dtype=dtypes.int32, name="batch_size")
static_batch_size = tensor_util.constant_value(bucket_batch_size)
batch_size[i] = (static_batch_size if static_batch_size is not None else
bucket_batch_size)
types = _dtypes([tensor_list])
shapes = _shapes([tensor_list], shapes, enqueue_many=False)
which_bucket = ops.convert_to_tensor(
which_bucket, dtype=dtypes.int32, name="which_bucket")
queue_creator = _which_queue(dynamic_pad)
bucket_queues = []
for i in range(num_buckets):
shared_name_i = ("%s_%d" % (shared_name, i) if shared_name is not None
else None)
bucket_queues.append(
queue_creator(
capacity=bucket_capacities[i],
dtypes=types,
shapes=shapes,
shared_name=shared_name_i,
name="bucket_queue_%d" % i))
maybe_static_batch_size = (
None if (allow_smaller_final_batch or batch_size_per_bucket)
else static_batch_size)
bucket_shapes = [
tensor_shape.TensorShape([maybe_static_batch_size]).concatenate(s)
for s in bucket_queues[0].shapes
]
# top_queue is a PaddingFIFOQueue even if the bucket queues are regular FIFO
# queues because if we use allow_smaller_final_batch, shapes will
# contain Nones in their first entry; as a result, a regular
# FIFOQueue would die when being passed shapes that are not fully defined.
top_queue = data_flow_ops.PaddingFIFOQueue(
capacity=capacity,
dtypes=[dtypes.int32] + types,
shapes=[tensor_shape.TensorShape([])] + bucket_shapes,
shared_name=shared_name,
name="top_queue")
def enqueue_which():
"""Return an op that enqueues conditionally in one of the queues."""
def enqueue_single(i):
return bucket_queues[i].enqueue(tensor_list)
enqueues = [
control_flow_ops.cond(
math_ops.equal(which_bucket, i),
functools.partial(enqueue_single, i), control_flow_ops.no_op)
for i in range(num_buckets)
]
return control_flow_ops.group(*enqueues, name="group_enqueues")
maybe_enqueue = utils.smart_cond(
keep_input,
enqueue_which,
control_flow_ops.no_op)
bucket_enqueue_ops = [maybe_enqueue] * num_threads
if allow_smaller_final_batch:
which_dequeue = lambda q: q.dequeue_up_to
else:
which_dequeue = lambda q: q.dequeue_many
def make_list(t):
if isinstance(t, (list, tuple)):
return t
else:
return [t]
enqueues_to_top = [
top_queue.enqueue(
[constant_op.constant(i)] + make_list(which_dequeue(q)(
bs, name="read_bucket_%d" % i)),
name="enqueue_from_bucket_%d" % i)
for i, (q, bs) in enumerate(zip(bucket_queues, batch_size))
]
queue_runner.add_queue_runner(
queue_runner.QueueRunner(
bucket_queues[0], enqueues_to_top,
close_op=top_queue.close(),
cancel_op=top_queue.close(cancel_pending_enqueues=True),
queue_closed_exception_types=(errors.OutOfRangeError,
errors.CancelledError)))
queue_runner.add_queue_runner(
queue_runner.QueueRunner(
top_queue,
bucket_enqueue_ops,
close_op=control_flow_ops.group(
*[q.close() for q in bucket_queues]),
cancel_op=control_flow_ops.group(
*[q.close(cancel_pending_enqueues=True)
for q in bucket_queues]),
queue_closed_exception_types=(errors.OutOfRangeError,
errors.CancelledError)))
for q in bucket_queues:
summary.scalar("bucket/%s/size" % q.name,
math_ops.cast(q.size(), dtypes.float32))
summary.scalar("bucket/%s/fraction_of_%d_full" % (top_queue.name, capacity),
math_ops.cast(top_queue.size(), dtypes.float32) *
(1. / capacity))
dequeued = top_queue.dequeue(name="dequeue_top")
which_bucket_dequeued = dequeued[0]
dequeued = dequeued[1:]
if len(dequeued) == 1:
dequeued = dequeued[0]
dequeued = _restore_sparse_tensors(dequeued, sparse_info)
return (which_bucket_dequeued, _as_original_type(tensors, dequeued))
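

# --- Illustrative sketch (not part of the original file) ---
# A hedged example of wiring `bucket` into an input pipeline. The `length` and
# `feature` tensors are assumptions (e.g. produced by a reader or by
# tf.parse_single_example); the helper only shows how a scalar key is mapped to
# a bucket index and passed to `bucket`. The returned dequeue ops must be
# driven by queue runners (tf.compat.v1.train.start_queue_runners).
def _example_bucket_usage(length, feature):
  """Groups (length, feature) examples into one of 3 buckets by `length`."""
  # Map the scalar int32 `length` to a bucket index in [0, 3).
  which = math_ops.minimum(length // 10, 2)
  which_bucket, outputs = bucket(
      tensors=[feature],
      which_bucket=which,
      batch_size=32,
      num_buckets=3,
      dynamic_pad=True)
  return which_bucket, outputs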
def bucket_by_sequence_length(input_length,
tensors,
batch_size,
bucket_boundaries,
num_threads=1,
capacity=32,
bucket_capacities=None,
shapes=None,
dynamic_pad=False,
allow_smaller_final_batch=False,
keep_input=True,
shared_name=None,
name=None):
"""Lazy bucketing of inputs according to their length.
This method calls `tf.contrib.training.bucket` under the hood, after first
subdividing the bucket boundaries into separate buckets and identifying which
bucket the given `input_length` belongs to. See the documentation for
`bucket` for details of the other arguments.
Args:
input_length: `int32` scalar `Tensor`, the sequence length of tensors.
tensors: The list or dictionary of tensors, representing a single element,
to bucket. Nested lists are not supported.
batch_size: The new batch size pulled from the queue (all queues will have
the same size). If a list is passed in then each bucket will have a
different batch_size.
(python int, int32 scalar or iterable of integers of length num_buckets).
bucket_boundaries: int list, increasing non-negative numbers.
The edges of the buckets to use when bucketing tensors. Two extra buckets
are created, one for `input_length < bucket_boundaries[0]` and
one for `input_length >= bucket_boundaries[-1]`.
num_threads: An integer. The number of threads enqueuing `tensors`.
capacity: An integer. The maximum number of minibatches in the top queue,
and also the maximum number of elements within each bucket.
bucket_capacities: (Optional) None or a list of integers, the capacities of
each bucket. If None, capacity is used (default). If specified, it must
be a list of integers of length one larger than bucket_boundaries.
Its i-th element is used as capacity for the i-th bucket queue.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensors`.
dynamic_pad: Boolean. Allow variable dimensions in input shapes.
The given dimensions are padded upon dequeue so that tensors within a
batch have the same shapes.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batches to be smaller if there are insufficient items left in the queues.
keep_input: A `bool` scalar Tensor. If provided, this tensor controls
whether the input is added to the queue or not. If it evaluates `True`,
then `tensors` are added to the bucket; otherwise they are dropped. This
tensor essentially acts as a filtering mechanism.
shared_name: (Optional). If set, the queues will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A tuple `(sequence_length, outputs)` where `sequence_length` is
a 1-D `Tensor` of size `batch_size` and `outputs` is a list or dictionary
of batched, bucketed, outputs corresponding to elements of `tensors`.
Raises:
TypeError: if `bucket_boundaries` is not a list of python integers.
ValueError: if `bucket_boundaries` is empty or contains non-increasing
values, or if batch_size is a list and its length doesn't equal the number
of buckets.
"""
tensor_list = _as_tensor_list(tensors)
if not isinstance(bucket_boundaries, (list, tuple)):
raise TypeError(
"bucket_boundaries must be a list or tuple, but received: %s" %
bucket_boundaries)
if not bucket_boundaries:
raise ValueError("bucket_boundaries must not be empty")
for (s, e) in zip(bucket_boundaries[:-1], bucket_boundaries[1:]):
if not isinstance(s, int) or not isinstance(e, int):
raise TypeError("bucket boundaries must be integers, but saw: %s and %s" %
(s, e))
if s >= e:
raise ValueError(
"Buckets must contain sequential increasing lengths, but saw: "
"%d before %d" % (s, e))
with ops.name_scope(name, "bucket_by_sequence_length",
[input_length] + tensor_list) as name:
input_length = ops.convert_to_tensor(
input_length, dtype=dtypes.int32, name="input_length")
# Bucketing conditions are:
# l < b[0]
# b[0] <= l < b[1]
# b[1] <= l < b[2]
# ...
# b[N-2] <= l < b[N-1]
# b[N-1] <= l
# Equivalent to:
# [-inf, b[0], b[1], ..., b[N-1]] <= l < [b[0], b[1], ..., b[N-1], inf]
buckets_min = [np.iinfo(np.int32).min] + list(bucket_boundaries)
buckets_max = list(bucket_boundaries) + [np.iinfo(np.int32).max]
conditions_c = math_ops.logical_and(
math_ops.less_equal(buckets_min, input_length),
math_ops.less(input_length, buckets_max))
which_bucket = math_ops.reduce_min(array_ops.where_v2(conditions_c))
which_bucket = math_ops.cast(which_bucket, dtypes.int32)
if shapes is not None:
shapes = [tensor_shape.TensorShape([])] + shapes
_, dequeued = bucket(
tensors=[input_length] + tensor_list,
which_bucket=which_bucket,
batch_size=batch_size,
num_buckets=len(bucket_boundaries) + 1,
num_threads=num_threads,
capacity=capacity,
bucket_capacities=bucket_capacities,
shapes=shapes,
dynamic_pad=dynamic_pad,
allow_smaller_final_batch=allow_smaller_final_batch,
keep_input=keep_input,
shared_name=shared_name)
return (dequeued[0], _as_original_type(tensors, dequeued[1:]))
__all__ = ["bucket", "bucket_by_sequence_length"]
# repo: tensorflow-r1.15.5-nv23.03 | file: tensorflow/contrib/training/python/training/bucket_ops.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains various routines and helper functions for training models.
This script contains various functions for training models. These include
manipulating gradients, creating a `train_op` (an operation that computes the
loss and applies the gradients) and a training loop function. The training loop
allows the user to pass in the `train_op` and runs the optimization according
to user-specified arguments.
************************************
* A simple working training script *
************************************
# Load data and create the model:
images, labels = LoadData(...)
predictions = MyModel(images)
# Define the loss:
tf.contrib.losses.log_loss(predictions, labels)
total_loss = tf.contrib.losses.get_total_loss()
# Define the optimizer:
optimizer = tf.compat.v1.train.MomentumOptimizer(FLAGS.learning_rate,
FLAGS.momentum)
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
# Run training.
tf.contrib.training.train(train_op, my_log_dir)
*************************
* Creating the train_op *
*************************
In order to use the `train` function, one needs a train_op: an `Operation` that
(a) computes the loss, (b) applies the gradients to update the weights and
(c) returns the value of the loss. tf.contrib.training.create_train_op creates
such an `Operation`. This function also provides the ability to manipulate
the gradients using a few arguments:
# Create the train_op and clip the gradient norms:
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
transform_grads_fn=clip_gradient_norms_fn(3))
# Create the train_op and scale the gradients by providing a map from variable
# name (or variable) to a scaling coefficient:
def transform_grads_fn(grads):
gradient_multipliers = {
'conv0/weights': 1.2,
'fc8/weights': 3.4,
}
return tf.contrib.training.multiply_gradients(
grads, gradient_multipliers)
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
transform_grads_fn=transform_grads_fn)
****************************************************************
* Performing additional (non-gradient) updates during training *
****************************************************************
Many networks utilize modules, like BatchNorm, that require performing a series
of non-gradient updates during training. tf.contrib.training.create_train_op
allows a user to pass in a list of update_ops to call along with the gradient
updates.
train_op = tf.contrib.training.create_train_op(
total_loss, optimizer, update_ops)
By default, tf.contrib.training.create_train_op includes all update ops that are
part of the `tf.GraphKeys.UPDATE_OPS` collection. Additionally, the
tf.contrib.layers.batch_norm function adds the moving mean and moving variance
updates to this collection. Consequently, users who want to use
tf.contrib.layers.batch_norm will not need to take any additional steps in order
to have the moving mean and moving variance updates be computed.
However, users with additional, specialized updates can either override the
default update ops or simply add additional update ops to the
`tf.GraphKeys.UPDATE_OPS` collection:
# Force `create_train_op` to NOT use ANY update_ops:
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
update_ops=[])
# Use an alternative set of update ops:
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
update_ops=my_other_update_ops)
# Use a set of update ops in addition to the default updates:
tf.compat.v1.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update0)
tf.compat.v1.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update1)
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer)
# Which is the same as:
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
update_ops=tf.compat.v1.get_collection(tf.GraphKeys.UPDATE_OPS))
******************************************
* Initializing a model from a checkpoint *
******************************************
It is common to want to 'warm-start' a model from a pre-trained checkpoint.
One can use a tf.Scaffold and an initializing function to do so.
...
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
# Create the initial assignment op
checkpoint_path = '/path/to/old_model_checkpoint'
variables_to_restore = tf.contrib.framework.get_model_variables()
init_fn = tf.contrib.framework.assign_from_checkpoint_fn(
checkpoint_path, variables_to_restore)
# Run training.
scaffold = tf.Scaffold(init_fn=init_fn)
tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)
***************************************************************************
* Initializing a model from a checkpoint whose variable names don't match *
***************************************************************************
At times, a user may want to initialize a new model with values from a
checkpoint whose variable names do not match those of the current model. In this
case, one needs to create a mapping from the checkpoint variable names to the
current model variables. This requires only a small modification of the code
above:
...
# Creates a model with two variables, var0 and var1
predictions = MyModel(images)
...
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
checkpoint_path = '/path/to/old_model_checkpoint'
# Create the mapping:
variables_to_restore = {
'name_var_0_in_checkpoint':
tf.contrib.framework.get_unique_variable('var0'),
'name_var_1_in_checkpoint':
tf.contrib.framework.get_unique_variable('var1')
}
init_fn = tf.contrib.framework.assign_from_checkpoint_fn(
checkpoint_path, variables_to_restore)
scaffold = tf.Scaffold(init_fn=init_fn)
# Run training.
tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)
*************************************************
* Fine-Tuning Part of a model from a checkpoint *
*************************************************
Rather than initializing all of the weights of a given model, we sometimes
only want to restore some of the weights from a checkpoint. To do this, one
need only filter those variables to initialize as follows:
...
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
checkpoint_path = '/path/to/old_model_checkpoint'
# Specify the variables to restore via a list of inclusion or exclusion
# patterns:
variables_to_restore = tf.contrib.framework.get_variables_to_restore(
include=["conv"], exclude=["fc8", "fc9])
# or
variables_to_restore = tf.contrib.framework.get_variables_to_restore(
exclude=["conv"])
init_fn = tf.contrib.framework.assign_from_checkpoint_fn(
checkpoint_path, variables_to_restore)
scaffold = tf.Scaffold(init_fn=init_fn)
# Run training.
tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)
******************************************************
* Initializing model variables from values in memory *
******************************************************
One may want to initialize the weights of a model from values coming from an
arbitrary source (a text document, MATLAB file, etc.). While this is technically
feasible using assign operations, this strategy results in the values of your
weights being stored in the graph. For large models, this becomes prohibitively
large. However, it's possible to perform this initial assignment without having
to store the values of the initial model in the graph itself by using
placeholders and a feed dictionary:
...
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
# Create the mapping from variable names to values:
var0_initial_value = ReadFromDisk(...)
var1_initial_value = ReadFromDisk(...)
var_names_to_values = {
'var0': var0_initial_value,
'var1': var1_initial_value,
}
init_fn = tf.contrib.framework.assign_from_values_fn(var_names_to_values)
scaffold = tf.Scaffold(init_fn=init_fn)
# Run training.
tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import monitored_session
from tensorflow.python.training import optimizer as tf_optimizer
from tensorflow.python.training import training_util
# TODO(nsilberman): move add_gradients_summaries, clip_gradient_norms and
# multiply_gradients into contrib/summaries and contrib/optimizers.py
__all__ = [
'add_gradients_summaries',
'clip_gradient_norms',
'clip_gradient_norms_fn',
'create_train_op',
'multiply_gradients',
'train',
]
def add_gradients_summaries(grads_and_vars):
"""Add summaries to gradients.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
Returns:
The list of created summaries.
"""
summaries = []
for grad, var in grads_and_vars:
if grad is not None:
if isinstance(grad, ops.IndexedSlices):
grad_values = grad.values
else:
grad_values = grad
summaries.append(
summary.histogram(var.op.name + '_gradient', grad_values))
summaries.append(
summary.scalar(var.op.name + '_gradient_norm',
clip_ops.global_norm([grad_values])))
else:
logging.info('Var %s has no gradient', var.op.name)
return summaries
def clip_gradient_norms(gradients_to_variables, max_norm):
"""Clips the gradients by the given value.
Args:
gradients_to_variables: A list of gradient to variable pairs (tuples).
max_norm: the maximum norm value.
Returns:
A list of clipped gradient to variable pairs.
"""
clipped_grads_and_vars = []
for grad, var in gradients_to_variables:
if grad is not None:
if isinstance(grad, ops.IndexedSlices):
tmp = clip_ops.clip_by_norm(grad.values, max_norm)
grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
else:
grad = clip_ops.clip_by_norm(grad, max_norm)
clipped_grads_and_vars.append((grad, var))
return clipped_grads_and_vars
def clip_gradient_norms_fn(max_norm):
"""Returns a `transform_grads_fn` function for gradient clipping."""
def clip_norms(gradients_to_variables):
return clip_gradient_norms(gradients_to_variables, max_norm)
return clip_norms
def multiply_gradients(grads_and_vars, gradient_multipliers):
"""Multiply specified gradients.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
gradient_multipliers: A map from either `Variables` or `Variable` op names
to the coefficient by which the associated gradient should be scaled.
Returns:
The updated list of gradient to variable pairs.
Raises:
ValueError: If `grads_and_vars` is not a list or if `gradient_multipliers`
is empty or None or if `gradient_multipliers` is not a dictionary.
"""
if not isinstance(grads_and_vars, list):
raise ValueError('`grads_and_vars` must be a list.')
if not gradient_multipliers:
raise ValueError('`gradient_multipliers` is empty.')
if not isinstance(gradient_multipliers, dict):
raise ValueError('`gradient_multipliers` must be a dict.')
multiplied_grads_and_vars = []
for grad, var in grads_and_vars:
if var in gradient_multipliers or var.op.name in gradient_multipliers:
key = var if var in gradient_multipliers else var.op.name
if grad is None:
raise ValueError('Requested multiple of `None` gradient.')
if isinstance(grad, ops.IndexedSlices):
tmp = grad.values * ops.convert_to_tensor(
gradient_multipliers[key], dtype=grad.dtype)
grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
else:
grad *= ops.convert_to_tensor(
gradient_multipliers[key], dtype=grad.dtype)
multiplied_grads_and_vars.append((grad, var))
return multiplied_grads_and_vars
_USE_GLOBAL_STEP = 0
def create_train_op(total_loss,
optimizer,
global_step=_USE_GLOBAL_STEP,
update_ops=None,
variables_to_train=None,
transform_grads_fn=None,
summarize_gradients=False,
gate_gradients=tf_optimizer.Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
check_numerics=True):
"""Creates an `Operation` that evaluates the gradients and returns the loss.
Args:
total_loss: A `Tensor` representing the total loss.
optimizer: A tf.Optimizer to use for computing the gradients.
global_step: A `Tensor` representing the global step variable. If left as
`_USE_GLOBAL_STEP`, then `training_util.get_or_create_global_step()` is used.
update_ops: An optional list of updates to execute. If `update_ops` is
`None`, then the update ops are set to the contents of the
`tf.GraphKeys.UPDATE_OPS` collection. If `update_ops` is not `None`, but
it doesn't contain all of the update ops in `tf.GraphKeys.UPDATE_OPS`, a
warning will be displayed.
variables_to_train: an optional list of variables to train. If None, it will
default to all tf.compat.v1.trainable_variables().
transform_grads_fn: A function which takes a single argument, a list of
gradient to variable pairs (tuples), performs any requested gradient
updates, such as gradient clipping or multipliers, and returns the updated
list.
summarize_gradients: Whether or not to add summaries for each gradient.
gate_gradients: How to gate the computation of gradients. See tf.Optimizer.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: Whether or not to try colocating the gradients
with the ops that generated them.
check_numerics: Whether or not we apply check_numerics.
Returns:
A `Tensor` that, when evaluated, computes the gradients and returns the total
loss value.
"""
if global_step is _USE_GLOBAL_STEP:
global_step = training_util.get_or_create_global_step()
# Update ops use GraphKeys.UPDATE_OPS collection if update_ops is None.
global_update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
if update_ops is None:
update_ops = global_update_ops
else:
update_ops = set(update_ops)
if not global_update_ops.issubset(update_ops):
logging.warning('update_ops in create_train_op does not contain all the '
'update_ops in GraphKeys.UPDATE_OPS')
# Make sure update_ops are computed before total_loss.
if update_ops:
with ops.control_dependencies(update_ops):
barrier = control_flow_ops.no_op(name='update_barrier')
total_loss = control_flow_ops.with_dependencies([barrier], total_loss)
if variables_to_train is None:
# Default to tf.compat.v1.trainable_variables()
variables_to_train = tf_variables.trainable_variables()
else:
# Make sure that variables_to_train are in
# tf.compat.v1.trainable_variables()
for v in variables_to_train:
assert v.trainable or v in tf_variables.trainable_variables()
assert variables_to_train
# Create the gradients. Note that apply_gradients adds the gradient
# computation to the current graph.
grads = optimizer.compute_gradients(
total_loss,
variables_to_train,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops)
if transform_grads_fn:
grads = transform_grads_fn(grads)
# Summarize gradients.
if summarize_gradients:
with ops.name_scope('summarize_grads'):
add_gradients_summaries(grads)
# Create gradient updates.
grad_updates = optimizer.apply_gradients(grads, global_step=global_step)
with ops.name_scope('train_op'):
# Make sure total_loss is valid.
if check_numerics:
total_loss = array_ops.check_numerics(total_loss,
'LossTensor is inf or nan')
# Ensure the train_tensor computes grad_updates.
train_op = control_flow_ops.with_dependencies([grad_updates], total_loss)
# Add the operation used for training to the 'train_op' collection
train_ops = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
if train_op not in train_ops:
train_ops.append(train_op)
return train_op
def train(train_op,
logdir,
master='',
is_chief=True,
scaffold=None,
hooks=None,
chief_only_hooks=None,
save_checkpoint_secs=600,
save_summaries_steps=100,
config=None,
max_wait_secs=7200,
run_metadata=None):
"""Runs the training loop.
Args:
train_op: A `Tensor` that, when executed, will apply the gradients and
return the loss value.
logdir: The directory where the graph and checkpoints are saved.
master: The URL of the master.
is_chief: Specifies whether or not the training is being run by the primary
replica during replica training.
scaffold: A tf.compat.v1.train.Scaffold instance.
hooks: List of `tf.estimator.SessionRunHook` callbacks which are run inside
the training loop.
chief_only_hooks: List of `tf.estimator.SessionRunHook` instances which are
run inside the training loop for the chief trainer only.
save_checkpoint_secs: The frequency, in seconds, that a checkpoint is saved
using a default checkpoint saver. If `save_checkpoint_secs` is set to
`None`, then the default checkpoint saver isn't used.
save_summaries_steps: The frequency, in number of global steps, that the
summaries are written to disk using a default summary saver. If
`save_summaries_steps` is set to `None`, then the default summary saver
isn't used.
config: An instance of `tf.compat.v1.ConfigProto`.
max_wait_secs: Maximum time workers should wait for the session to become
available. This should be kept relatively short to help detect incorrect
code, but sometimes may need to be increased if the chief takes a while to
start up.
run_metadata: A [`RunMetadata`] protocol buffer.
Returns:
the value of the loss function after training.
Raises:
ValueError: if `logdir` is `None` and either `save_checkpoint_secs` or
`save_summaries_steps` is not `None`.
"""
if logdir is None and is_chief:
if save_summaries_steps:
raise ValueError(
'logdir cannot be None when save_summaries_steps is not None')
if save_checkpoint_secs:
raise ValueError(
'logdir cannot be None when save_checkpoint_secs is not None')
with monitored_session.MonitoredTrainingSession(
master=master,
is_chief=is_chief,
checkpoint_dir=logdir,
scaffold=scaffold,
hooks=hooks,
chief_only_hooks=chief_only_hooks,
save_checkpoint_secs=save_checkpoint_secs,
save_summaries_steps=save_summaries_steps,
config=config,
max_wait_secs=max_wait_secs) as session:
loss = None
while not session.should_stop():
loss = session.run(train_op, run_metadata=run_metadata)
return loss
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/training/python/training/training.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy
from tensorflow.contrib.training.python.training import resample
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class ResampleTest(test.TestCase):
"""Tests that resampling runs and outputs are close to expected values."""
def testRepeatRange(self):
cases = [
([], []),
([0], []),
([1], [0]),
([1, 0], [0]),
([0, 1], [1]),
([3], [0, 0, 0]),
([0, 1, 2, 3], [1, 2, 2, 3, 3, 3]),
]
with self.cached_session() as sess:
for inputs, expected in cases:
array_inputs = numpy.array(inputs, dtype=numpy.int32)
actual = sess.run(resample._repeat_range(array_inputs))
self.assertAllEqual(actual, expected)
def testRoundtrip(self, rate=0.25, count=5, n=500):
"""Tests `resample(x, weights)` and resample(resample(x, rate), 1/rate)`."""
foo = self.get_values(count)
bar = self.get_values(count)
weights = self.get_weights(count)
resampled_in, rates = resample.weighted_resample(
[foo, bar], constant_op.constant(weights), rate, seed=123)
resampled_back_out = resample.resample_at_rate(
resampled_in, 1.0 / rates, seed=456)
init = control_flow_ops.group(variables.local_variables_initializer(),
variables.global_variables_initializer())
with self.cached_session() as s:
s.run(init) # initialize
# outputs
counts_resampled = collections.Counter()
counts_reresampled = collections.Counter()
for _ in range(n):
resampled_vs, reresampled_vs = s.run([resampled_in, resampled_back_out])
self.assertAllEqual(resampled_vs[0], resampled_vs[1])
self.assertAllEqual(reresampled_vs[0], reresampled_vs[1])
for v in resampled_vs[0]:
counts_resampled[v] += 1
for v in reresampled_vs[0]:
counts_reresampled[v] += 1
# assert that resampling worked as expected
self.assert_expected(weights, rate, counts_resampled, n)
# and that re-resampling gives the approx identity.
self.assert_expected(
[1.0 for _ in weights],
1.0,
counts_reresampled,
n,
abs_delta=0.1 * n * count)
def testCorrectRates(self, rate=0.25, count=10, n=500, rtol=0.1):
"""Tests that the rates returned by weighted_resample are correct."""
# The approach here is to verify that:
# - sum(1/rate) approximates the size of the original collection
# - sum(1/rate * value) approximates the sum of the original inputs,
# - sum(1/rate * value)/sum(1/rate) approximates the mean.
vals = self.get_values(count)
weights = self.get_weights(count)
resampled, rates = resample.weighted_resample([vals],
constant_op.constant(weights),
rate)
invrates = 1.0 / rates
init = control_flow_ops.group(variables.local_variables_initializer(),
variables.global_variables_initializer())
expected_sum_op = math_ops.reduce_sum(vals)
with self.cached_session() as s:
s.run(init)
expected_sum = n * s.run(expected_sum_op)
weight_sum = 0.0
weighted_value_sum = 0.0
for _ in range(n):
val, inv_rate = s.run([resampled[0], invrates])
weight_sum += sum(inv_rate)
weighted_value_sum += sum(val * inv_rate)
      # sum(inv_rate) ~= n * count:
expected_count = count * n
self.assertAlmostEqual(
expected_count, weight_sum, delta=(rtol * expected_count))
      # sum(vals) * n ~= weighted_sum(resampled, 1.0/rates)
self.assertAlmostEqual(
expected_sum, weighted_value_sum, delta=(rtol * expected_sum))
# Mean ~= weighted mean:
expected_mean = expected_sum / float(n * count)
self.assertAlmostEqual(
expected_mean,
weighted_value_sum / weight_sum,
delta=(rtol * expected_mean))
def testZeroRateUnknownShapes(self, count=10):
"""Tests that resampling runs with completely runtime shapes."""
# Use placeholcers without shape set:
vals = array_ops.placeholder(dtype=dtypes.int32)
rates = array_ops.placeholder(dtype=dtypes.float32)
resampled = resample.resample_at_rate([vals], rates)
with self.cached_session() as s:
rs, = s.run(resampled, {
vals: list(range(count)),
rates: numpy.zeros(
shape=[count], dtype=numpy.float32)
})
self.assertEqual(rs.shape, (0,))
def testDtypes(self, count=10):
"""Test that we can define the ops with float64 weights."""
vals = self.get_values(count)
weights = math_ops.cast(self.get_weights(count), dtypes.float64)
# should not error:
resample.resample_at_rate([vals], weights)
resample.weighted_resample(
[vals], weights, overall_rate=math_ops.cast(1.0, dtypes.float64))
def get_weights(self, n, mean=10.0, stddev=5):
"""Returns random positive weight values."""
assert mean > 0, 'Weights have to be positive.'
results = []
while len(results) < n:
v = numpy.random.normal(mean, stddev)
if v > 0:
results.append(v)
return results
def get_values(self, n):
return constant_op.constant(list(range(n)))
def assert_expected(self,
weights,
overall_rate,
counts,
n,
tol=2.0,
abs_delta=0):
    # Overall, we expect sum(counts) to be `overall_rate` * n * len(weights),
    # with a stddev on that expectation equivalent to performing
    # (n * len(weights)) Bernoulli trials, each with probability
    # `overall_rate`.
expected_overall_count = len(weights) * n * overall_rate
actual_overall_count = sum(counts.values())
stddev = math.sqrt(len(weights) * n * overall_rate * (1 - overall_rate))
self.assertAlmostEqual(
expected_overall_count,
actual_overall_count,
delta=(stddev * tol + abs_delta))
    # We can form a similar expectation for each item: it should appear in
    # the results a number of times proportional to its weight, which is
    # similar to performing `expected_overall_count` trials, each with a
    # probability of weight / weight_sum.
weight_sum = sum(weights)
fractions = [w / weight_sum for w in weights]
expected_counts = [expected_overall_count * f for f in fractions]
stddevs = [
math.sqrt(expected_overall_count * f * (1 - f)) for f in fractions
]
for i in range(len(expected_counts)):
expected_count = expected_counts[i]
actual_count = counts[i]
self.assertAlmostEqual(
expected_count, actual_count, delta=(stddevs[i] * tol + abs_delta))
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/training/python/training/resample_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module that implements sparsemax and sparsemax loss, see [1].
[1]: https://arxiv.org/abs/1602.02068
## Sparsemax
@@sparsemax
@@sparsemax_loss
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.sparsemax.python.ops.sparsemax import sparsemax
from tensorflow.contrib.sparsemax.python.ops.sparsemax_loss \
import sparsemax_loss
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['sparsemax', 'sparsemax_loss']
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/sparsemax/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparsemaxLossOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.sparsemax import sparsemax, sparsemax_loss
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
test_obs = 10
class SparsemaxLossTest(test.TestCase):
def _np_sparsemax(self, z):
z = z - np.mean(z, axis=1)[:, np.newaxis]
# sort z
z_sorted = np.sort(z, axis=1)[:, ::-1]
# calculate k(z)
z_cumsum = np.cumsum(z_sorted, axis=1)
k = np.arange(1, z.shape[1] + 1)
z_check = 1 + k * z_sorted > z_cumsum
    # Use argmax to get the index per row, as .nonzero() doesn't take an
    # axis argument. np.argmax returns the first matching index, but the
    # last one is required here, so reverse each row (the flip) and use
    # `z.shape[1]` to compensate for the flip afterwards.
k_z = z.shape[1] - np.argmax(z_check[:, ::-1], axis=1)
# calculate tau(z)
tau_sum = z_cumsum[np.arange(0, z.shape[0]), k_z - 1]
tau_z = ((tau_sum - 1) / k_z).reshape(-1, 1)
# calculate p
return np.maximum(0, z - tau_z)
def _np_sparsemax_loss(self, z, q):
z = z - np.mean(z, axis=1)[:, np.newaxis]
# Calculate q^T * z
z_k = np.sum(q * z, axis=1)
# calculate sum over S(z)
p = self._np_sparsemax(z)
s = p > 0
# z_i^2 - tau(z)^2 = p_i (2 * z_i - p_i) for i \in S(z)
S_sum = np.sum(s * p * (2 * z - p), axis=1)
# because q is binary, sum([q_1^2, q_2^2, ...]) is just sum(q)
q_norm = np.sum(q, axis=1)
return -z_k + 0.5 * S_sum + 0.5 * q_norm
def _np_sparsemax_loss_grad(self, z, q):
# chain rule
grad = 1
return grad * (-q + self._np_sparsemax(z))
def _tf_sparsemax(self, z, dtype, use_gpu):
with self.test_session(use_gpu=use_gpu):
tf_sparsemax_op = sparsemax(z.astype(dtype))
tf_sparsemax_out = tf_sparsemax_op.eval()
return tf_sparsemax_op, tf_sparsemax_out
def _tf_sparsemax_loss(self, z, q, dtype, use_gpu):
z = z.astype(dtype)
q = q.astype(dtype)
with self.test_session(use_gpu=use_gpu):
tf_sparsemax_op = sparsemax(z)
tf_loss_op = sparsemax_loss(z, tf_sparsemax_op, q)
tf_loss_out = tf_loss_op.eval()
return tf_loss_op, tf_loss_out
def _test_sparsemax_loss_against_numpy(self, dtype, random, use_gpu):
"""check sparsemax-loss kernel against numpy"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
q = np.zeros((test_obs, 10))
q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1
tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)
np_loss = self._np_sparsemax_loss(z, q).astype(dtype)
self.assertAllCloseAccordingToType(
np_loss, tf_loss_out, half_atol=1e-2, half_rtol=5e-3)
self.assertShapeEqual(np_loss, tf_loss_op)
def _test_sparsemax_loss_of_nan(self, dtype, random, use_gpu):
"""check sparsemax-loss transfers nan"""
q = np.asarray([[0, 0, 1], [0, 0, 1], [0, 0, 1]])
z_nan = np.asarray([[0, np.nan, 0], [0, np.nan, np.nan],
[np.nan, np.nan, np.nan]]).astype(dtype)
_, tf_loss_nan = self._tf_sparsemax_loss(z_nan, q, dtype, use_gpu)
self.assertAllCloseAccordingToType([np.nan, np.nan, np.nan], tf_loss_nan)
def _test_sparsemax_loss_of_inf(self, dtype, random, use_gpu):
"""check sparsemax-loss is infinity safe"""
q = np.asarray([[0, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1]])
z_neg = np.asarray([
[0, -np.inf, 0],
[0, -np.inf, -np.inf],
[-np.inf, -np.inf, 0],
[-np.inf, -np.inf, -np.inf],
]).astype(dtype)
z_pos = np.asarray([[0, np.inf, 0], [0, np.inf,
np.inf], [np.inf, np.inf, 0],
[np.inf, np.inf, np.inf]]).astype(dtype)
z_mix = np.asarray([[0, np.inf, 0], [0, np.inf, -np.inf],
[-np.inf, np.inf, 0], [-np.inf, np.inf,
-np.inf]]).astype(dtype)
_, tf_loss_neg = self._tf_sparsemax_loss(z_neg, q, dtype, use_gpu)
self.assertAllCloseAccordingToType([0.25, np.inf, 0, np.nan], tf_loss_neg)
_, tf_loss_pos = self._tf_sparsemax_loss(z_pos, q, dtype, use_gpu)
self.assertAllCloseAccordingToType([np.nan, np.nan, np.nan, np.nan],
tf_loss_pos)
_, tf_loss_mix = self._tf_sparsemax_loss(z_mix, q, dtype, use_gpu)
self.assertAllCloseAccordingToType([np.nan, np.nan, np.nan, np.nan],
tf_loss_mix)
def _test_constant_add(self, dtype, random, use_gpu):
"""check sparsemax-loss proposition 3"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
c = random.uniform(low=-3, high=3, size=(test_obs, 1))
q = np.zeros((test_obs, 10))
q[np.arange(0, test_obs), np.random.randint(0, 10, size=test_obs)] = 1
_, tf_loss_zpc = self._tf_sparsemax_loss(z + c, q, dtype, use_gpu)
_, tf_loss_z = self._tf_sparsemax_loss(z, q, dtype, use_gpu)
self.assertAllCloseAccordingToType(
tf_loss_zpc,
tf_loss_z,
float_atol=5e-6,
float_rtol=5e-6,
half_atol=1e-2,
half_rtol=1e-2)
def _test_sparsemax_loss_positive(self, dtype, random, use_gpu):
"""check sparsemax-loss proposition 4"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
q = np.zeros((test_obs, 10))
q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1
tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)
self.assertAllCloseAccordingToType(np.abs(tf_loss_out), tf_loss_out)
self.assertShapeEqual(np.zeros(test_obs), tf_loss_op)
def _test_sparsemax_loss_zero(self, dtype, random, use_gpu):
"""check sparsemax-loss proposition 5"""
    # construct z and q such that z_k >= 1 + max_{j!=k} z_j holds for
    # delta_0 = 1.
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
z[:, 0] = np.max(z, axis=1) + 1.05
q = np.zeros((test_obs, 10))
q[:, 0] = 1
tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)
tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype, use_gpu)
self.assertAllCloseAccordingToType(np.zeros(test_obs), tf_loss_out)
self.assertShapeEqual(np.zeros(test_obs), tf_loss_op)
self.assertAllCloseAccordingToType(q, tf_sparsemax_out)
self.assertShapeEqual(q, tf_sparsemax_op)
def _test_gradient_against_estimate(self, dtype, random, use_gpu):
"""check sparsemax-loss Rop, against estimated-loss Rop"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
q = np.zeros((test_obs, 10)).astype(dtype)
q[np.arange(0, test_obs), np.random.randint(0, 10, size=test_obs)] = 1
logits = array_ops.placeholder(dtype, name='z')
sparsemax_op = sparsemax(logits)
loss_op = sparsemax_loss(logits, sparsemax_op, q)
with self.test_session(use_gpu=use_gpu):
err = gradient_checker.compute_gradient_error(
logits, z.shape, loss_op, (test_obs,), x_init_value=z, delta=1e-9)
self.assertLess(err, 1e-4)
def _test_gradient_against_numpy(self, dtype, random, use_gpu):
"""check sparsemax-loss Rop, against numpy Rop"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
q = np.zeros((test_obs, 10))
q[np.arange(0, test_obs), np.random.randint(0, 10, size=test_obs)] = 1
logits = constant_op.constant(z.astype(dtype), name='z')
sparsemax_op = sparsemax(logits)
loss_op = sparsemax_loss(logits, sparsemax_op, q.astype(dtype))
loss_grad_op = gradients_impl.gradients(loss_op, [logits])[0]
with self.test_session(use_gpu=use_gpu):
tf_grad = loss_grad_op.eval()
np_grad = self._np_sparsemax_loss_grad(z, q).astype(dtype)
self.assertAllCloseAccordingToType(
np_grad, tf_grad, half_atol=1e-2, half_rtol=5e-3)
self.assertShapeEqual(np_grad, loss_grad_op)
def _test_dtype(self, dtype):
random = np.random.RandomState(1)
self._test_sparsemax_loss_against_numpy(dtype, random, use_gpu=False)
self._test_sparsemax_loss_of_nan(dtype, random, use_gpu=False)
self._test_sparsemax_loss_of_inf(dtype, random, use_gpu=False)
self._test_constant_add(dtype, random, use_gpu=False)
self._test_sparsemax_loss_positive(dtype, random, use_gpu=False)
self._test_sparsemax_loss_zero(dtype, random, use_gpu=False)
    # sparsemax is not a smooth function, so gradient estimation is only
    # possible for float64.
if dtype == 'float64':
self._test_gradient_against_estimate(dtype, random, use_gpu=False)
self._test_gradient_against_numpy(dtype, random, use_gpu=False)
def testFloat(self):
self._test_dtype('float32')
def testDouble(self):
self._test_dtype('float64')
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/sparsemax/python/kernel_tests/sparsemax_loss_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparsemaxOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.sparsemax import sparsemax
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
test_obs = 10
class SparsemaxTest(test.TestCase):
def _np_sparsemax(self, z):
z = z - np.mean(z, axis=1)[:, np.newaxis]
# sort z
z_sorted = np.sort(z, axis=1)[:, ::-1]
# calculate k(z)
z_cumsum = np.cumsum(z_sorted, axis=1)
k = np.arange(1, z.shape[1] + 1)
z_check = 1 + k * z_sorted > z_cumsum
    # Use argmax to get the index per row, as .nonzero() doesn't take an
    # axis argument. np.argmax returns the first matching index, but the
    # last one is required here, so reverse each row (the flip) and use
    # `z.shape[1]` to compensate for the flip afterwards.
k_z = z.shape[1] - np.argmax(z_check[:, ::-1], axis=1)
# calculate tau(z)
tau_sum = z_cumsum[np.arange(0, z.shape[0]), k_z - 1]
tau_z = ((tau_sum - 1) / k_z).reshape(-1, 1)
# calculate p
return np.maximum(0, z - tau_z)
def _np_sparsemax_grad(self, z):
# chain rule
grad = np.ones_like(z)
# Construct S(z)
probability = self._np_sparsemax(z)
support = probability > 0
# Calculate \hat{v}, which will be a vector (scalar for each z)
v_hat = np.sum(grad * support, axis=1) / np.sum(support, axis=1)
# Calculates J(z) * v
return support * (grad - v_hat[:, np.newaxis])
def _tf_sparsemax(self, z, dtype, use_gpu):
with self.test_session(use_gpu=use_gpu):
tf_sparsemax_op = sparsemax(z.astype(dtype))
tf_sparsemax_out = tf_sparsemax_op.eval()
return tf_sparsemax_op, tf_sparsemax_out
def _test_sparsemax_against_numpy(self, dtype, random, use_gpu):
"""check sparsemax kernel against numpy"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype, use_gpu)
p_sparemax = self._np_sparsemax(z).astype(dtype)
self.assertAllCloseAccordingToType(
p_sparemax, tf_sparsemax_out, half_atol=5e-3)
self.assertShapeEqual(p_sparemax, tf_sparsemax_op)
def _test_sparsemax_of_nan(self, dtype, random, use_gpu):
"""check sparsemax transfers nan"""
z_nan = np.asarray([
[0, np.nan, 0],
[0, np.nan, np.nan],
[np.nan, np.nan, np.nan],
]).astype(dtype)
_, tf_sparsemax_nan = self._tf_sparsemax(z_nan, dtype, use_gpu)
self.assertAllCloseAccordingToType(
[[np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]], tf_sparsemax_nan)
def _test_sparsemax_of_inf(self, dtype, random, use_gpu):
"""check sparsemax is infinity safe"""
z_neg = np.asarray([
[0, -np.inf, 0],
[0, -np.inf, -np.inf],
[-np.inf, -np.inf, -np.inf],
]).astype(dtype)
z_pos = np.asarray([[0, np.inf, 0], [0, np.inf, np.inf],
[np.inf, np.inf, np.inf]]).astype(dtype)
z_mix = np.asarray([[0, np.inf, 0], [0, np.inf, -np.inf],
[-np.inf, np.inf, -np.inf]]).astype(dtype)
_, tf_sparsemax_neg = self._tf_sparsemax(z_neg, dtype, use_gpu)
self.assertAllCloseAccordingToType(
[[0.5, 0, 0.5], [1, 0, 0], [np.nan, np.nan, np.nan]], tf_sparsemax_neg)
_, tf_sparsemax_pos = self._tf_sparsemax(z_pos, dtype, use_gpu)
self.assertAllCloseAccordingToType(
[[np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]], tf_sparsemax_pos)
_, tf_sparsemax_mix = self._tf_sparsemax(z_mix, dtype, use_gpu)
self.assertAllCloseAccordingToType(
[[np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]], tf_sparsemax_mix)
def _test_sparsemax_of_zero(self, dtype, random, use_gpu):
"""check sparsemax proposition 1, part 1"""
z = np.zeros((1, 10))
tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype, use_gpu)
p_sparemax = np.ones_like(z, dtype=dtype) / z.size
self.assertAllCloseAccordingToType(p_sparemax, tf_sparsemax_out)
self.assertShapeEqual(p_sparemax, tf_sparsemax_op)
def _test_sparsemax_of_to_inf(self, dtype, random, use_gpu):
"""check sparsemax proposition 1, part 2"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
    # assume |A(z)| = 1, as z is a continuous random variable
z_sort_arg = np.argsort(z, axis=1)[:, ::-1]
z_sort = np.sort(z, axis=-1)[:, ::-1]
gamma_z = z_sort[:, 0] - z_sort[:, 1]
epsilon = (0.99 * gamma_z * 1).reshape(-1, 1)
# construct the expected 1_A(z) array
p_expected = np.zeros((test_obs, 10), dtype=dtype)
p_expected[np.arange(0, test_obs), z_sort_arg[:, 0]] = 1
tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax((1 / epsilon) * z,
dtype, use_gpu)
self.assertAllCloseAccordingToType(p_expected, tf_sparsemax_out)
self.assertShapeEqual(p_expected, tf_sparsemax_op)
def _test_constant_add(self, dtype, random, use_gpu):
"""check sparsemax proposition 2"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
c = random.uniform(low=-3, high=3, size=(test_obs, 1)).astype(dtype)
_, tf_sparsemax_zpc = self._tf_sparsemax(z + c, dtype, use_gpu)
_, tf_sparsemax_z = self._tf_sparsemax(z, dtype, use_gpu)
self.assertAllCloseAccordingToType(
tf_sparsemax_zpc, tf_sparsemax_z, half_atol=5e-3)
def _test_permutation(self, dtype, random, use_gpu):
"""check sparsemax proposition 3"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
_, p = self._tf_sparsemax(z, dtype, use_gpu)
for i in range(test_obs):
per = random.permutation(10)
tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(
z[i, per].reshape(1, -1), dtype, use_gpu)
p_expected = p[i, per].reshape(1, -1)
self.assertAllCloseAccordingToType(
p_expected, tf_sparsemax_out, half_atol=5e-3)
self.assertShapeEqual(p_expected, tf_sparsemax_op)
def _test_diffrence(self, dtype, random, use_gpu):
"""check sparsemax proposition 4"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
_, p = self._tf_sparsemax(z, dtype, use_gpu)
etol = {'float16': 1e-2, 'float32': 1e-6, 'float64': 1e-9}[dtype]
for val in range(0, test_obs):
for i in range(0, 10):
for j in range(0, 10):
          # check the condition; the opposite pair will be checked anyway
if z[val, i] > z[val, j]:
continue
self.assertTrue(
0 <= p[val, j] - p[val, i] <= z[val, j] - z[val, i] + etol,
'0 <= %.10f <= %.10f' % (p[val, j] - p[val, i],
z[val, j] - z[val, i] + etol))
def _test_two_dimentional(self, dtype, random, use_gpu):
"""check two dimentation sparsemax case"""
t = np.linspace(-2, 2, test_obs, dtype=dtype)
z = np.vstack([t, np.zeros(test_obs, dtype=dtype)]).T
tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype, use_gpu)
p0_expected = np.select([t < -1, t <= 1, t > 1], [0, (t + 1) / 2, 1])
self.assertAllCloseAccordingToType(p0_expected, tf_sparsemax_out[:, 0])
self.assertAllCloseAccordingToType(1 - p0_expected, tf_sparsemax_out[:, 1])
self.assertShapeEqual(z, tf_sparsemax_op)
def _test_gradient_against_estimate(self, dtype, random, use_gpu):
"""check sparsemax Rop, against estimated Rop"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
logits = array_ops.placeholder(dtype, name='z')
sparsemax_op = sparsemax(logits)
with self.test_session(use_gpu=use_gpu):
err = gradient_checker.compute_gradient_error(
logits, z.shape, sparsemax_op, z.shape, x_init_value=z, delta=1e-9)
self.assertLess(err, 1e-4)
def _test_gradient_against_numpy(self, dtype, random, use_gpu):
"""check sparsemax Rop, against numpy Rop"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
logits = constant_op.constant(z, name='z')
sparsemax_op = sparsemax(logits)
sparsemax_grad_op = gradients_impl.gradients(sparsemax_op, [logits])[0]
with self.test_session(use_gpu=use_gpu):
tf_grad = sparsemax_grad_op.eval()
np_grad = self._np_sparsemax_grad(z)
self.assertAllCloseAccordingToType(np_grad, tf_grad)
self.assertShapeEqual(np_grad, sparsemax_grad_op)
def _test_dtype(self, dtype):
random = np.random.RandomState(1)
self._test_sparsemax_against_numpy(dtype, random, use_gpu=False)
self._test_sparsemax_of_nan(dtype, random, use_gpu=False)
self._test_sparsemax_of_inf(dtype, random, use_gpu=False)
self._test_sparsemax_of_zero(dtype, random, use_gpu=False)
self._test_sparsemax_of_to_inf(dtype, random, use_gpu=False)
self._test_constant_add(dtype, random, use_gpu=False)
self._test_permutation(dtype, random, use_gpu=False)
self._test_diffrence(dtype, random, use_gpu=False)
self._test_two_dimentional(dtype, random, use_gpu=False)
    # sparsemax is not a smooth function, so gradient estimation is only
    # possible for float64.
if dtype == 'float64':
self._test_gradient_against_estimate(dtype, random, use_gpu=False)
self._test_gradient_against_numpy(dtype, random, use_gpu=False)
def testFloat(self):
self._test_dtype('float32')
def testDouble(self):
self._test_dtype('float64')
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/sparsemax/python/kernel_tests/sparsemax_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sparsemax op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
__all__ = ["sparsemax"]
def sparsemax(logits, name=None):
"""Computes sparsemax activations [1].
For each batch `i` and class `j` we have
$$sparsemax[i, j] = max(logits[i, j] - tau(logits[i, :]), 0)$$
[1]: https://arxiv.org/abs/1602.02068
Args:
logits: A `Tensor`. Must be one of the following types: `half`, `float32`,
`float64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `logits`.
"""
with ops.name_scope(name, "sparsemax", [logits]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
obs = array_ops.shape(logits)[0]
dims = array_ops.shape(logits)[1]
# In the paper, they call the logits z.
    # The mean(logits) can be subtracted from logits to make the algorithm
    # more numerically stable. The instability in this algorithm comes mostly
    # from z_cumsum, and subtracting the mean keeps z_cumsum close to zero.
    # However, in practice the numerical instability issues are very minor,
    # and subtracting the mean causes extra issues with inf and nan input.
z = logits
# sort z
z_sorted, _ = nn.top_k(z, k=dims)
# calculate k(z)
z_cumsum = math_ops.cumsum(z_sorted, axis=1)
k = math_ops.range(
1, math_ops.cast(dims, logits.dtype) + 1, dtype=logits.dtype)
z_check = 1 + k * z_sorted > z_cumsum
    # Because the z_check vector is always [1,1,...,1,0,0,...,0], finding the
    # (index + 1) of the last `1` is the same as counting the number of 1s.
k_z = math_ops.reduce_sum(math_ops.cast(z_check, dtypes.int32), axis=1)
# calculate tau(z)
    # If there are inf values or all values are -inf, k_z will be zero; this
    # is mathematically invalid and would also cause the gather_nd to fail.
    # Prevent this for now by setting k_z = 1 when k_z = 0; it is fixed later
    # (see p_safe) by returning p = nan, which matches the behavior of
    # softmax.
k_z_safe = math_ops.maximum(k_z, 1)
indices = array_ops.stack([math_ops.range(0, obs), k_z_safe - 1], axis=1)
tau_sum = array_ops.gather_nd(z_cumsum, indices)
tau_z = (tau_sum - 1) / math_ops.cast(k_z, logits.dtype)
# calculate p
p = math_ops.maximum(
math_ops.cast(0, logits.dtype), z - tau_z[:, array_ops.newaxis])
# If k_z = 0 or if z = nan, then the input is invalid
p_safe = array_ops.where(
math_ops.logical_or(
math_ops.equal(k_z, 0), math_ops.is_nan(z_cumsum[:, -1])),
array_ops.fill([obs, dims], math_ops.cast(float("nan"), logits.dtype)),
p)
return p_safe
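

# Illustrative usage sketch (not part of the original module): a tiny graph
# that applies sparsemax to a small batch of logits and compares it with
# softmax. The constant values are made up for illustration.
def _example_sparsemax_usage():
  from tensorflow.python.client import session as session_lib
  from tensorflow.python.framework import constant_op
  logits = constant_op.constant([[1.0, 2.0, 3.0],
                                 [0.1, 0.1, 0.1]])
  probs = sparsemax(logits)  # rows sum to 1 and may contain exact zeros
  soft = nn.softmax(logits)  # dense counterpart, for comparison
  with session_lib.Session() as sess:
    return sess.run([probs, soft])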
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/sparsemax/python/ops/sparsemax.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sparsemax Loss op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
__all__ = ["sparsemax_loss"]
def sparsemax_loss(logits, sparsemax, labels, name=None):
"""Computes sparsemax loss function [1].
[1]: https://arxiv.org/abs/1602.02068
Args:
logits: A `Tensor`. Must be one of the following types: `half`, `float32`,
`float64`.
sparsemax: A `Tensor`. Must have the same type as `logits`.
labels: A `Tensor`. Must have the same type as `logits`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `logits`.
"""
with ops.name_scope(name, "sparsemax_loss",
[logits, sparsemax, labels]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
sparsemax = ops.convert_to_tensor(sparsemax, name="sparsemax")
labels = ops.convert_to_tensor(labels, name="labels")
# In the paper, they call the logits z.
    # A constant can be subtracted from logits to make the algorithm more
    # numerically stable in theory. However, there is really no major source
    # of numerical instability in this algorithm.
z = logits
# sum over support
# Use a conditional where instead of a multiplication to support z = -inf.
# If z = -inf, and there is no support (sparsemax = 0), a multiplication
# would cause 0 * -inf = nan, which is not correct in this case.
sum_s = array_ops.where(
math_ops.logical_or(sparsemax > 0, math_ops.is_nan(sparsemax)),
sparsemax * (z - 0.5 * sparsemax), array_ops.zeros_like(sparsemax))
# - z_k + ||q||^2
q_part = labels * (0.5 * labels - z)
    # Fix the case where labels = 0 and z = -inf, where q_part would
    # otherwise be 0 * -inf = nan. But since labels = 0, no cost for
    # z = -inf should be considered.
    # The code below also covers the case where z = inf. However, in this
    # case the sparsemax will be nan, which means sum_s will also be nan,
    # so this case doesn't need additional special treatment.
q_part_safe = array_ops.where(
math_ops.logical_and(math_ops.equal(labels, 0), math_ops.is_inf(z)),
array_ops.zeros_like(z), q_part)
return math_ops.reduce_sum(sum_s + q_part_safe, axis=1)
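

# Illustrative usage sketch (not part of the original module): computes the
# sparsemax loss for one-hot labels. `sparsemax` is imported from the sibling
# op in this package; the constant values are made up for illustration.
def _example_sparsemax_loss_usage():
  from tensorflow.contrib.sparsemax.python.ops.sparsemax import sparsemax
  from tensorflow.python.client import session as session_lib
  from tensorflow.python.framework import constant_op
  logits = constant_op.constant([[1.0, 2.0, 3.0],
                                 [2.0, 0.0, 0.0]])
  labels = constant_op.constant([[0.0, 0.0, 1.0],
                                 [1.0, 0.0, 0.0]])  # one-hot targets
  probs = sparsemax(logits)
  loss = sparsemax_loss(logits, probs, labels)  # shape [batch_size]
  with session_lib.Session() as sess:
    return sess.run(loss)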
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/sparsemax/python/ops/sparsemax_loss.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental utilities for tf.feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,wildcard-import
from tensorflow.contrib.feature_column.python.feature_column.sequence_feature_column import *
from tensorflow.python.util.all_util import remove_undocumented
# pylint: enable=unused-import,line-too-long,wildcard-import
_allowed_symbols = [
'sequence_categorical_column_with_hash_bucket',
'sequence_categorical_column_with_identity',
'sequence_categorical_column_with_vocabulary_list',
'sequence_categorical_column_with_vocabulary_file',
'sequence_input_layer',
'sequence_numeric_column',
]
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
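

# Illustrative usage sketch (not part of the original module): wires one of
# the exported sequence columns into `sequence_input_layer`. The sparse id
# values are made up for illustration, and the indicator wrapper comes from
# the core feature_column module.
def _example_sequence_input_layer():
  from tensorflow.python.feature_column import feature_column as fc
  from tensorflow.python.framework import sparse_tensor
  ids = sparse_tensor.SparseTensorValue(
      indices=((0, 0), (1, 0), (1, 1)),  # example 0: 1 step, example 1: 2
      values=(2, 0, 1),
      dense_shape=(2, 2))
  id_column = sequence_categorical_column_with_identity('ids', num_buckets=3)
  # Categorical columns need a dense representation (indicator or embedding)
  # before they can be fed to sequence_input_layer.
  indicator = fc._indicator_column(id_column)
  input_layer, sequence_length = sequence_input_layer(
      features={'ids': ids}, feature_columns=[indicator])
  return input_layer, sequence_length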
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/feature_column/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration test for sequence feature columns with SequenceExamples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import string
import tempfile
from google.protobuf import text_format
from tensorflow.contrib.feature_column.python.feature_column import sequence_feature_column as sfc
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.feature_column import feature_column as fc
from tensorflow.python.keras.layers import recurrent
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class SequenceFeatureColumnIntegrationTest(test.TestCase):
def _make_sequence_example(self):
example = example_pb2.SequenceExample()
example.context.feature['int_ctx'].int64_list.value.extend([5])
example.context.feature['float_ctx'].float_list.value.extend([123.6])
for val in range(0, 10, 2):
feat = feature_pb2.Feature()
feat.int64_list.value.extend([val] * val)
example.feature_lists.feature_list['int_list'].feature.extend([feat])
for val in range(1, 11, 2):
feat = feature_pb2.Feature()
feat.bytes_list.value.extend([compat.as_bytes(str(val))] * val)
example.feature_lists.feature_list['str_list'].feature.extend([feat])
return example
def _build_feature_columns(self):
col = fc._categorical_column_with_identity('int_ctx', num_buckets=100)
ctx_cols = [
fc._embedding_column(col, dimension=10),
fc._numeric_column('float_ctx')
]
identity_col = sfc.sequence_categorical_column_with_identity(
'int_list', num_buckets=10)
bucket_col = sfc.sequence_categorical_column_with_hash_bucket(
'bytes_list', hash_bucket_size=100)
seq_cols = [
fc._embedding_column(identity_col, dimension=10),
fc._embedding_column(bucket_col, dimension=20)
]
return ctx_cols, seq_cols
def test_sequence_example_into_input_layer(self):
examples = [_make_sequence_example().SerializeToString()] * 100
ctx_cols, seq_cols = self._build_feature_columns()
def _parse_example(example):
ctx, seq = parsing_ops.parse_single_sequence_example(
example,
context_features=fc.make_parse_example_spec(ctx_cols),
sequence_features=fc.make_parse_example_spec(seq_cols))
ctx.update(seq)
return ctx
ds = dataset_ops.Dataset.from_tensor_slices(examples)
ds = ds.map(_parse_example)
ds = ds.batch(20)
# Test on a single batch
features = ds.make_one_shot_iterator().get_next()
# Tile the context features across the sequence features
seq_layer, _ = sfc.sequence_input_layer(features, seq_cols)
ctx_layer = fc.input_layer(features, ctx_cols)
input_layer = sfc.concatenate_context_input(ctx_layer, seq_layer)
rnn_layer = recurrent.RNN(recurrent.SimpleRNNCell(10))
output = rnn_layer(input_layer)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
features_r = sess.run(features)
self.assertAllEqual(features_r['int_list'].dense_shape, [20, 3, 6])
output_r = sess.run(output)
self.assertAllEqual(output_r.shape, [20, 10])
class SequenceExampleParsingTest(test.TestCase):
def test_seq_ex_in_sequence_categorical_column_with_identity(self):
self._test_parsed_sequence_example(
'int_list', sfc.sequence_categorical_column_with_identity,
10, [3, 6], [2, 4, 6])
def test_seq_ex_in_sequence_categorical_column_with_hash_bucket(self):
self._test_parsed_sequence_example(
'bytes_list', sfc.sequence_categorical_column_with_hash_bucket,
10, [3, 4], [compat.as_bytes(x) for x in 'acg'])
def test_seq_ex_in_sequence_categorical_column_with_vocabulary_list(self):
self._test_parsed_sequence_example(
'bytes_list', sfc.sequence_categorical_column_with_vocabulary_list,
list(string.ascii_lowercase), [3, 4],
[compat.as_bytes(x) for x in 'acg'])
def test_seq_ex_in_sequence_categorical_column_with_vocabulary_file(self):
_, fname = tempfile.mkstemp()
with open(fname, 'w') as f:
f.write(string.ascii_lowercase)
self._test_parsed_sequence_example(
'bytes_list', sfc.sequence_categorical_column_with_vocabulary_file,
fname, [3, 4], [compat.as_bytes(x) for x in 'acg'])
def _test_parsed_sequence_example(
self, col_name, col_fn, col_arg, shape, values):
"""Helper function to check that each FeatureColumn parses correctly.
Args:
col_name: string, name to give to the feature column. Should match
the name that the column will parse out of the features dict.
col_fn: function used to create the feature column. For example,
sequence_numeric_column.
col_arg: second arg that the target feature column is expecting.
shape: the expected dense_shape of the feature after parsing into
a SparseTensor.
values: the expected values at index [0, 2, 6] of the feature
after parsing into a SparseTensor.
"""
example = _make_sequence_example()
columns = [
fc._categorical_column_with_identity('int_ctx', num_buckets=100),
fc._numeric_column('float_ctx'),
col_fn(col_name, col_arg)
]
context, seq_features = parsing_ops.parse_single_sequence_example(
example.SerializeToString(),
context_features=fc.make_parse_example_spec(columns[:2]),
sequence_features=fc.make_parse_example_spec(columns[2:]))
with self.cached_session() as sess:
ctx_result, seq_result = sess.run([context, seq_features])
self.assertEqual(list(seq_result[col_name].dense_shape), shape)
self.assertEqual(
list(seq_result[col_name].values[[0, 2, 6]]), values)
self.assertEqual(list(ctx_result['int_ctx'].dense_shape), [1])
self.assertEqual(ctx_result['int_ctx'].values[0], 5)
self.assertEqual(list(ctx_result['float_ctx'].shape), [1])
self.assertAlmostEqual(ctx_result['float_ctx'][0], 123.6, places=1)
_SEQ_EX_PROTO = """
context {
feature {
key: "float_ctx"
value {
float_list {
value: 123.6
}
}
}
feature {
key: "int_ctx"
value {
int64_list {
value: 5
}
}
}
}
feature_lists {
feature_list {
key: "bytes_list"
value {
feature {
bytes_list {
value: "a"
}
}
feature {
bytes_list {
value: "b"
value: "c"
}
}
feature {
bytes_list {
value: "d"
value: "e"
value: "f"
value: "g"
}
}
}
}
feature_list {
key: "float_list"
value {
feature {
float_list {
value: 1.0
}
}
feature {
float_list {
value: 3.0
value: 3.0
value: 3.0
}
}
feature {
float_list {
value: 5.0
value: 5.0
value: 5.0
value: 5.0
value: 5.0
}
}
}
}
feature_list {
key: "int_list"
value {
feature {
int64_list {
value: 2
value: 2
}
}
feature {
int64_list {
value: 4
value: 4
value: 4
value: 4
}
}
feature {
int64_list {
value: 6
value: 6
value: 6
value: 6
value: 6
value: 6
}
}
}
}
}
"""
def _make_sequence_example():
example = example_pb2.SequenceExample()
return text_format.Parse(_SEQ_EX_PROTO, example)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/feature_column/python/feature_column/sequence_feature_column_integration_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sequential_feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.feature_column.python.feature_column import sequence_feature_column as sfc
from tensorflow.python.feature_column import feature_column as fc
from tensorflow.python.feature_column import feature_column_lib as fc_lib
from tensorflow.python.feature_column.feature_column import _LazyBuilder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
from tensorflow.python.training import monitored_session
class SequenceInputLayerTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args_a': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2, 0, 1),
'dense_shape': (2, 2)},
'sparse_input_args_b': {
# example 0, ids [1]
# example 1, ids [2, 0]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (1, 2, 0),
'dense_shape': (2, 2)},
'expected_input_layer': [
# example 0, ids_a [2], ids_b [1]
[[5., 6., 14., 15., 16.], [0., 0., 0., 0., 0.]],
# example 1, ids_a [0, 1], ids_b [2, 0]
[[1., 2., 17., 18., 19.], [3., 4., 11., 12., 13.]],],
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
'sparse_input_args_a': {
# feature 0, ids [[2], [0, 1]]
# feature 1, ids [[0, 0], [1]]
'indices': (
(0, 0, 0), (0, 1, 0), (0, 1, 1),
(1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2, 0, 1, 0, 0, 1),
'dense_shape': (2, 2, 2)},
'sparse_input_args_b': {
# feature 0, ids [[1, 1], [1]]
# feature 1, ids [[2], [0]]
'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
'values': (1, 1, 1, 2, 0),
'dense_shape': (2, 2, 2)},
'expected_input_layer': [
# feature 0, [a: 2, -, b: 1, 1], [a: 0, 1, b: 1, -]
[[5., 6., 14., 15., 16.], [2., 3., 14., 15., 16.]],
# feature 1, [a: 0, 0, b: 2, -], [a: 1, -, b: 0, -]
[[1., 2., 17., 18., 19.], [3., 4., 11., 12., 13.]]],
'expected_sequence_length': [2, 2]},
)
def test_embedding_column(
self, sparse_input_args_a, sparse_input_args_b, expected_input_layer,
expected_sequence_length):
sparse_input_a = sparse_tensor.SparseTensorValue(**sparse_input_args_a)
sparse_input_b = sparse_tensor.SparseTensorValue(**sparse_input_args_b)
vocabulary_size = 3
embedding_dimension_a = 2
embedding_values_a = (
(1., 2.), # id 0
(3., 4.), # id 1
(5., 6.) # id 2
)
embedding_dimension_b = 3
embedding_values_b = (
(11., 12., 13.), # id 0
(14., 15., 16.), # id 1
(17., 18., 19.) # id 2
)
def _get_initializer(embedding_dimension, embedding_values):
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
return _initializer
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column_a = fc._embedding_column(
categorical_column_a,
dimension=embedding_dimension_a,
initializer=_get_initializer(embedding_dimension_a, embedding_values_a))
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_column_b = fc._embedding_column(
categorical_column_b,
dimension=embedding_dimension_b,
initializer=_get_initializer(embedding_dimension_b, embedding_values_b))
input_layer, sequence_length = sfc.sequence_input_layer(
features={
'aaa': sparse_input_a,
'bbb': sparse_input_b,
},
# Test that columns are reordered alphabetically.
feature_columns=[embedding_column_b, embedding_column_a])
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(
('sequence_input_layer/aaa_embedding/embedding_weights:0',
'sequence_input_layer/bbb_embedding/embedding_weights:0'),
tuple([v.name for v in global_vars]))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(embedding_values_a, global_vars[0].eval(session=sess))
self.assertAllEqual(embedding_values_b, global_vars[1].eval(session=sess))
self.assertAllEqual(expected_input_layer, input_layer.eval(session=sess))
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
def test_embedding_column_with_non_sequence_categorical(self):
"""Tests that error is raised for non-sequence embedding column."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = fc._categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column_a = fc._embedding_column(categorical_column_a, dimension=2)
with self.assertRaisesRegexp(
ValueError,
r'In embedding_column: aaa_embedding\. categorical_column must be of '
r'type _SequenceCategoricalColumn to use sequence_input_layer\.'):
_, _ = sfc.sequence_input_layer(
features={'aaa': sparse_input},
feature_columns=[embedding_column_a])
def test_shared_embedding_column(self):
vocabulary_size = 3
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [1]
# example 1, ids [2, 0]
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 0),
dense_shape=(2, 2))
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 4.), # id 1
(5., 6.) # id 2
)
def _get_initializer(embedding_dimension, embedding_values):
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
return _initializer
expected_input_layer = [
# example 0, ids_a [2], ids_b [1]
[[5., 6., 3., 4.], [0., 0., 0., 0.]],
# example 1, ids_a [0, 1], ids_b [2, 0]
[[1., 2., 5., 6.], [3., 4., 1., 2.]],
]
expected_sequence_length = [1, 2]
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
# Test that columns are reordered alphabetically.
shared_embedding_columns = fc_lib.shared_embedding_columns(
[categorical_column_b, categorical_column_a],
dimension=embedding_dimension,
initializer=_get_initializer(embedding_dimension, embedding_values))
input_layer, sequence_length = sfc.sequence_input_layer(
features={
'aaa': sparse_input_a,
'bbb': sparse_input_b,
},
feature_columns=shared_embedding_columns)
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(
('sequence_input_layer/aaa_bbb_shared_embedding/embedding_weights:0',),
tuple([v.name for v in global_vars]))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(embedding_values, global_vars[0].eval(session=sess))
self.assertAllEqual(expected_input_layer, input_layer.eval(session=sess))
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
def test_shared_embedding_column_with_non_sequence_categorical(self):
"""Tests that error is raised for non-sequence shared embedding column."""
vocabulary_size = 3
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = fc._categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc._categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
shared_embedding_columns = fc_lib.shared_embedding_columns(
[categorical_column_a, categorical_column_b], dimension=2)
with self.assertRaisesRegexp(
ValueError,
r'In embedding_column: aaa_shared_embedding\. categorical_column must '
r'be of type _SequenceCategoricalColumn to use sequence_input_layer\.'):
_, _ = sfc.sequence_input_layer(
features={
'aaa': sparse_input_a,
'bbb': sparse_input_b
},
feature_columns=shared_embedding_columns)
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args_a': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2, 0, 1),
'dense_shape': (2, 2)},
'sparse_input_args_b': {
# example 0, ids [1]
# example 1, ids [1, 0]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (1, 1, 0),
'dense_shape': (2, 2)},
'expected_input_layer': [
# example 0, ids_a [2], ids_b [1]
[[0., 0., 1., 0., 1.], [0., 0., 0., 0., 0.]],
# example 1, ids_a [0, 1], ids_b [1, 0]
[[1., 0., 0., 0., 1.], [0., 1., 0., 1., 0.]]],
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
'sparse_input_args_a': {
# feature 0, ids [[2], [0, 1]]
# feature 1, ids [[0, 0], [1]]
'indices': (
(0, 0, 0), (0, 1, 0), (0, 1, 1),
(1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2, 0, 1, 0, 0, 1),
'dense_shape': (2, 2, 2)},
'sparse_input_args_b': {
# feature 0, ids [[1, 1], [1]]
# feature 1, ids [[1], [0]]
'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
'values': (1, 1, 1, 1, 0),
'dense_shape': (2, 2, 2)},
'expected_input_layer': [
# feature 0, [a: 2, -, b: 1, 1], [a: 0, 1, b: 1, -]
[[0., 0., 1., 0., 2.], [1., 1., 0., 0., 1.]],
# feature 1, [a: 0, 0, b: 1, -], [a: 1, -, b: 0, -]
[[2., 0., 0., 0., 1.], [0., 1., 0., 1., 0.]]],
'expected_sequence_length': [2, 2]},
)
def test_indicator_column(
self, sparse_input_args_a, sparse_input_args_b, expected_input_layer,
expected_sequence_length):
sparse_input_a = sparse_tensor.SparseTensorValue(**sparse_input_args_a)
sparse_input_b = sparse_tensor.SparseTensorValue(**sparse_input_args_b)
vocabulary_size_a = 3
vocabulary_size_b = 2
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size_a)
indicator_column_a = fc._indicator_column(categorical_column_a)
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size_b)
indicator_column_b = fc._indicator_column(categorical_column_b)
input_layer, sequence_length = sfc.sequence_input_layer(
features={
'aaa': sparse_input_a,
'bbb': sparse_input_b,
},
# Test that columns are reordered alphabetically.
feature_columns=[indicator_column_b, indicator_column_a])
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(expected_input_layer, input_layer.eval(session=sess))
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
def test_indicator_column_with_non_sequence_categorical(self):
"""Tests that error is raised for non-sequence categorical column."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = fc._categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column_a = fc._indicator_column(categorical_column_a)
with self.assertRaisesRegexp(
ValueError,
r'In indicator_column: aaa_indicator\. categorical_column must be of '
r'type _SequenceCategoricalColumn to use sequence_input_layer\.'):
_, _ = sfc.sequence_input_layer(
features={'aaa': sparse_input},
feature_columns=[indicator_column_a])
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, values [0., 1]
# example 1, [10.]
'indices': ((0, 0), (0, 1), (1, 0)),
'values': (0., 1., 10.),
'dense_shape': (2, 2)},
'expected_input_layer': [
[[0.], [1.]],
[[10.], [0.]]],
'expected_sequence_length': [2, 1]},
{'testcase_name': '3D',
'sparse_input_args': {
# feature 0, ids [[20, 3], [5]]
# feature 1, ids [[3], [8]]
'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
'values': (20, 3, 5., 3., 8.),
'dense_shape': (2, 2, 2)},
'expected_input_layer': [
[[20.], [3.], [5.], [0.]],
[[3.], [0.], [8.], [0.]]],
'expected_sequence_length': [2, 2]},
)
def test_numeric_column(
self, sparse_input_args, expected_input_layer, expected_sequence_length):
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
numeric_column = sfc.sequence_numeric_column('aaa')
input_layer, sequence_length = sfc.sequence_input_layer(
features={'aaa': sparse_input},
feature_columns=[numeric_column])
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(expected_input_layer, input_layer.eval(session=sess))
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, values [0., 1., 2., 3., 4., 5., 6., 7.]
# example 1, [10., 11., 12., 13.]
'indices': ((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6),
(0, 7), (1, 0), (1, 1), (1, 2), (1, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 8)},
'expected_input_layer': [
# The output of numeric_column._get_dense_tensor should be flattened.
[[0., 1., 2., 3.], [4., 5., 6., 7.]],
[[10., 11., 12., 13.], [0., 0., 0., 0.]]],
'expected_sequence_length': [2, 1]},
{'testcase_name': '3D',
'sparse_input_args': {
# example 0, values [[0., 1., 2., 3.]], [[4., 5., 6., 7.]]
# example 1, [[10., 11., 12., 13.], []]
'indices': ((0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 0, 3),
(0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 1, 3),
(1, 0, 0), (1, 0, 1), (1, 0, 2), (1, 0, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 2, 4)},
'expected_input_layer': [
# The output of numeric_column._get_dense_tensor should be flattened.
[[0., 1., 2., 3.], [4., 5., 6., 7.]],
[[10., 11., 12., 13.], [0., 0., 0., 0.]]],
'expected_sequence_length': [2, 1]},
)
def test_numeric_column_multi_dim(
self, sparse_input_args, expected_input_layer, expected_sequence_length):
"""Tests sequence_input_layer for multi-dimensional numeric_column."""
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
numeric_column = sfc.sequence_numeric_column('aaa', shape=(2, 2))
input_layer, sequence_length = sfc.sequence_input_layer(
features={'aaa': sparse_input},
feature_columns=[numeric_column])
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(expected_input_layer, input_layer.eval(session=sess))
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
def test_sequence_length_not_equal(self):
"""Tests that an error is raised when sequence lengths are not equal."""
# Input a with sequence_length = [2, 1]
sparse_input_a = sparse_tensor.SparseTensorValue(
indices=((0, 0), (0, 1), (1, 0)),
values=(0., 1., 10.),
dense_shape=(2, 2))
# Input b with sequence_length = [1, 1]
sparse_input_b = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0)),
values=(1., 10.),
dense_shape=(2, 2))
numeric_column_a = sfc.sequence_numeric_column('aaa')
numeric_column_b = sfc.sequence_numeric_column('bbb')
_, sequence_length = sfc.sequence_input_layer(
features={
'aaa': sparse_input_a,
'bbb': sparse_input_b,
},
feature_columns=[numeric_column_a, numeric_column_b])
with monitored_session.MonitoredSession() as sess:
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[Condition x == y did not hold element-wise:\] '
r'\[x \(sequence_input_layer/aaa/sequence_length:0\) = \] \[2 1\] '
r'\[y \(sequence_input_layer/bbb/sequence_length:0\) = \] \[1 1\]'):
sess.run(sequence_length)
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, values [[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]]
# example 1, [[[10., 11.], [12., 13.]]]
'indices': ((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6),
(0, 7), (1, 0), (1, 1), (1, 2), (1, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 8)},
'expected_shape': [2, 2, 4]},
{'testcase_name': '3D',
'sparse_input_args': {
# example 0, values [[0., 1., 2., 3.]], [[4., 5., 6., 7.]]
# example 1, [[10., 11., 12., 13.], []]
'indices': ((0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 0, 3),
                       (0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 1, 3),
(1, 0, 0), (1, 0, 1), (1, 0, 2), (1, 0, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 2, 4)},
'expected_shape': [2, 2, 4]},
)
def test_static_shape_from_tensors_numeric(
self, sparse_input_args, expected_shape):
"""Tests that we return a known static shape when we have one."""
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
numeric_column = sfc.sequence_numeric_column('aaa', shape=(2, 2))
input_layer, _ = sfc.sequence_input_layer(
features={'aaa': sparse_input},
feature_columns=[numeric_column])
shape = input_layer.get_shape()
self.assertEqual(shape, expected_shape)
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
'indices': ((0, 0), (1, 0), (1, 1), (3, 0)),
'values': (2, 0, 1, 1),
'dense_shape': (4, 2)},
'expected_shape': [4, 2, 3]},
{'testcase_name': '3D',
'sparse_input_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
# example 2, ids []
# example 3, ids [[1], [0, 2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0),
(3, 0, 0), (3, 1, 0), (3, 1, 1)),
'values': (2, 0, 1, 2, 1, 0, 2),
'dense_shape': (4, 2, 2)},
'expected_shape': [4, 2, 3]}
)
def test_static_shape_from_tensors_indicator(
self, sparse_input_args, expected_shape):
"""Tests that we return a known static shape when we have one."""
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=3)
indicator_column = fc._indicator_column(categorical_column)
input_layer, _ = sfc.sequence_input_layer(
features={'aaa': sparse_input}, feature_columns=[indicator_column])
shape = input_layer.get_shape()
self.assertEqual(shape, expected_shape)
class ConcatenateContextInputTest(test.TestCase, parameterized.TestCase):
"""Tests the utility fn concatenate_context_input."""
def test_concatenate_context_input(self):
seq_input = ops.convert_to_tensor(np.arange(12).reshape(2, 3, 2))
context_input = ops.convert_to_tensor(np.arange(10).reshape(2, 5))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
input_layer = sfc.concatenate_context_input(context_input, seq_input)
expected = np.array([
[[0, 1, 0, 1, 2, 3, 4], [2, 3, 0, 1, 2, 3, 4], [4, 5, 0, 1, 2, 3, 4]],
[[6, 7, 5, 6, 7, 8, 9], [8, 9, 5, 6, 7, 8, 9], [10, 11, 5, 6, 7, 8, 9]]
], dtype=np.float32)
with monitored_session.MonitoredSession() as sess:
output = sess.run(input_layer)
self.assertAllEqual(expected, output)
@parameterized.named_parameters(
{'testcase_name': 'rank_lt_3',
'seq_input_arg': np.arange(100).reshape(10, 10)},
{'testcase_name': 'rank_gt_3',
'seq_input_arg': np.arange(100).reshape(5, 5, 2, 2)}
)
def test_sequence_input_throws_error(self, seq_input_arg):
seq_input = ops.convert_to_tensor(seq_input_arg)
context_input = ops.convert_to_tensor(np.arange(100).reshape(10, 10))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError, 'sequence_input must have rank 3'):
sfc.concatenate_context_input(context_input, seq_input)
@parameterized.named_parameters(
{'testcase_name': 'rank_lt_2',
'context_input_arg': np.arange(100)},
{'testcase_name': 'rank_gt_2',
'context_input_arg': np.arange(100).reshape(5, 5, 4)}
)
def test_context_input_throws_error(self, context_input_arg):
context_input = ops.convert_to_tensor(context_input_arg)
seq_input = ops.convert_to_tensor(np.arange(100).reshape(5, 5, 4))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError, 'context_input must have rank 2'):
sfc.concatenate_context_input(context_input, seq_input)
def test_integer_seq_input_throws_error(self):
seq_input = ops.convert_to_tensor(np.arange(100).reshape(5, 5, 4))
context_input = ops.convert_to_tensor(np.arange(100).reshape(10, 10))
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
with self.assertRaisesRegexp(
TypeError, 'sequence_input must have dtype float32'):
sfc.concatenate_context_input(context_input, seq_input)
def test_integer_context_input_throws_error(self):
seq_input = ops.convert_to_tensor(np.arange(100).reshape(5, 5, 4))
context_input = ops.convert_to_tensor(np.arange(100).reshape(10, 10))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
with self.assertRaisesRegexp(
TypeError, 'context_input must have dtype float32'):
sfc.concatenate_context_input(context_input, seq_input)
class InputLayerTest(test.TestCase):
"""Tests input_layer with sequence feature columns."""
def test_embedding_column(self):
"""Tests that error is raised for sequence embedding column."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column_a = fc._embedding_column(categorical_column_a, dimension=2)
with self.assertRaisesRegexp(
ValueError,
r'In embedding_column: aaa_embedding\. categorical_column must not be '
r'of type _SequenceCategoricalColumn\.'):
_ = fc.input_layer(
features={'aaa': sparse_input},
feature_columns=[embedding_column_a])
def test_indicator_column(self):
"""Tests that error is raised for sequence indicator column."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column_a = fc._indicator_column(categorical_column_a)
with self.assertRaisesRegexp(
ValueError,
r'In indicator_column: aaa_indicator\. categorical_column must not be '
r'of type _SequenceCategoricalColumn\.'):
_ = fc.input_layer(
features={'aaa': sparse_input},
feature_columns=[indicator_column_a])
def _assert_sparse_tensor_value(test_case, expected, actual):
_assert_sparse_tensor_indices_shape(test_case, expected, actual)
test_case.assertEqual(
np.array(expected.values).dtype, np.array(actual.values).dtype)
test_case.assertAllEqual(expected.values, actual.values)
def _assert_sparse_tensor_indices_shape(test_case, expected, actual):
test_case.assertEqual(np.int64, np.array(actual.indices).dtype)
test_case.assertAllEqual(expected.indices, actual.indices)
test_case.assertEqual(np.int64, np.array(actual.dense_shape).dtype)
test_case.assertAllEqual(expected.dense_shape, actual.dense_shape)
class SequenceCategoricalColumnWithIdentityTest(
test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (1, 2, 0),
'dense_shape': (2, 2)},
'expected_args': {
'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
'values': np.array((1, 2, 0), dtype=np.int64),
'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
'inputs_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': (6, 7, 8),
'dense_shape': (2, 2, 2)},
'expected_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': (6, 7, 8),
'dense_shape': (2, 2, 2)}}
)
def test_get_sparse_tensors(self, inputs_args, expected_args):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_identity('aaa', num_buckets=9)
id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
self.assertIsNone(id_weight_pair.weight_tensor)
with monitored_session.MonitoredSession() as sess:
_assert_sparse_tensor_value(
self, expected, id_weight_pair.id_tensor.eval(session=sess))
class SequenceCategoricalColumnWithHashBucketTest(
test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
'indices': ((0, 0), (1, 0), (1, 1)),
'values': ('omar', 'stringer', 'marlo'),
'dense_shape': (2, 2)},
'expected_args': {
'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
# Ignored to avoid hash dependence in test.
'values': np.array((0, 0, 0), dtype=np.int64),
'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
'inputs_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': ('omar', 'stringer', 'marlo'),
'dense_shape': (2, 2, 2)},
'expected_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
# Ignored to avoid hash dependence in test.
'values': np.array((0, 0, 0), dtype=np.int64),
'dense_shape': (2, 2, 2)}}
)
def test_get_sparse_tensors(self, inputs_args, expected_args):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_hash_bucket(
'aaa', hash_bucket_size=10)
id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
self.assertIsNone(id_weight_pair.weight_tensor)
with monitored_session.MonitoredSession() as sess:
_assert_sparse_tensor_indices_shape(
self, expected, id_weight_pair.id_tensor.eval(session=sess))
class SequenceCategoricalColumnWithVocabularyFileTest(
test.TestCase, parameterized.TestCase):
def _write_vocab(self, vocab_strings, file_name):
vocab_file = os.path.join(self.get_temp_dir(), file_name)
with open(vocab_file, 'w') as f:
f.write('\n'.join(vocab_strings))
return vocab_file
def setUp(self):
super(SequenceCategoricalColumnWithVocabularyFileTest, self).setUp()
vocab_strings = ['omar', 'stringer', 'marlo']
self._wire_vocabulary_file_name = self._write_vocab(vocab_strings,
'wire_vocabulary.txt')
self._wire_vocabulary_size = 3
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
'indices': ((0, 0), (1, 0), (1, 1)),
'values': ('marlo', 'skywalker', 'omar'),
'dense_shape': (2, 2)},
'expected_args': {
'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
'values': np.array((2, -1, 0), dtype=np.int64),
'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
'inputs_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': ('omar', 'skywalker', 'marlo'),
'dense_shape': (2, 2, 2)},
'expected_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': np.array((0, -1, 2), dtype=np.int64),
'dense_shape': (2, 2, 2)}}
)
def test_get_sparse_tensors(self, inputs_args, expected_args):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size)
id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
self.assertIsNone(id_weight_pair.weight_tensor)
with monitored_session.MonitoredSession() as sess:
_assert_sparse_tensor_value(
self, expected, id_weight_pair.id_tensor.eval(session=sess))
def test_get_sparse_tensors_dynamic_zero_length(self):
"""Tests _get_sparse_tensors with a dynamic sequence length."""
inputs = sparse_tensor.SparseTensorValue(
indices=np.zeros((0, 2)), values=[], dense_shape=(2, 0))
expected = sparse_tensor.SparseTensorValue(
indices=np.zeros((0, 3)),
values=np.array((), dtype=np.int64),
dense_shape=(2, 0, 1))
column = sfc.sequence_categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size)
input_placeholder_shape = list(inputs.dense_shape)
# Make second dimension (sequence length) dynamic.
input_placeholder_shape[1] = None
input_placeholder = array_ops.sparse_placeholder(
dtypes.string, shape=input_placeholder_shape)
id_weight_pair = column._get_sparse_tensors(
_LazyBuilder({'aaa': input_placeholder}))
self.assertIsNone(id_weight_pair.weight_tensor)
with monitored_session.MonitoredSession() as sess:
result = id_weight_pair.id_tensor.eval(
session=sess, feed_dict={input_placeholder: inputs})
_assert_sparse_tensor_value(
self, expected, result)
class SequenceCategoricalColumnWithVocabularyListTest(
test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
'indices': ((0, 0), (1, 0), (1, 1)),
'values': ('marlo', 'skywalker', 'omar'),
'dense_shape': (2, 2)},
'expected_args': {
'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
'values': np.array((2, -1, 0), dtype=np.int64),
'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
'inputs_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': ('omar', 'skywalker', 'marlo'),
'dense_shape': (2, 2, 2)},
'expected_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': np.array((0, -1, 2), dtype=np.int64),
'dense_shape': (2, 2, 2)}}
)
def test_get_sparse_tensors(self, inputs_args, expected_args):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'))
id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
self.assertIsNone(id_weight_pair.weight_tensor)
with monitored_session.MonitoredSession() as sess:
_assert_sparse_tensor_value(
self, expected, id_weight_pair.id_tensor.eval(session=sess))
class SequenceEmbeddingColumnTest(
test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
'indices': ((0, 0), (1, 0), (1, 1), (3, 0)),
'values': (2, 0, 1, 1),
'dense_shape': (4, 2)},
'expected': [
# example 0, ids [2]
[[7., 11.], [0., 0.]],
# example 1, ids [0, 1]
[[1., 2.], [3., 5.]],
# example 2, ids []
[[0., 0.], [0., 0.]],
# example 3, ids [1]
[[3., 5.], [0., 0.]]]},
{'testcase_name': '3D',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
# example 2, ids []
# example 3, ids [[1], [0, 2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0),
(3, 0, 0), (3, 1, 0), (3, 1, 1)),
'values': (2, 0, 1, 2, 1, 0, 2),
'dense_shape': (4, 2, 2)},
'expected': [
# example 0, ids [[2]]
[[7., 11.], [0., 0.]],
# example 1, ids [[0, 1], [2]]
[[2, 3.5], [7., 11.]],
# example 2, ids []
[[0., 0.], [0., 0.]],
# example 3, ids [[1], [0, 2]]
[[3., 5.], [4., 6.5]]]}
)
def test_get_sequence_dense_tensor(self, inputs_args, expected):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
vocabulary_size = 3
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc._embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer)
embedding_lookup, _ = embedding_column._get_sequence_dense_tensor(
_LazyBuilder({'aaa': inputs}))
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(
('embedding_weights:0',), tuple([v.name for v in global_vars]))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(embedding_values, global_vars[0].eval(session=sess))
self.assertAllEqual(expected, embedding_lookup.eval(session=sess))
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2, 0, 1),
'dense_shape': (2, 2)},
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2, 0, 1, 2),
'dense_shape': (2, 2, 2)},
'expected_sequence_length': [1, 2]}
)
def test_sequence_length(self, inputs_args, expected_sequence_length):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
vocabulary_size = 3
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc._embedding_column(categorical_column, dimension=2)
_, sequence_length = embedding_column._get_sequence_dense_tensor(
_LazyBuilder({'aaa': inputs}))
with monitored_session.MonitoredSession() as sess:
sequence_length = sess.run(sequence_length)
self.assertAllEqual(expected_sequence_length, sequence_length)
self.assertEqual(np.int64, sequence_length.dtype)
def test_sequence_length_with_empty_rows(self):
"""Tests _sequence_length when some examples do not have ids."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids []
# example 1, ids [2]
# example 2, ids [0, 1]
# example 3, ids []
# example 4, ids [1]
# example 5, ids []
indices=((1, 0), (2, 0), (2, 1), (4, 0)),
values=(2, 0, 1, 1),
dense_shape=(6, 2))
expected_sequence_length = [0, 1, 2, 0, 1, 0]
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc._embedding_column(categorical_column, dimension=2)
_, sequence_length = embedding_column._get_sequence_dense_tensor(
_LazyBuilder({'aaa': sparse_input}))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
class SequenceSharedEmbeddingColumnTest(test.TestCase):
def test_get_sequence_dense_tensor(self):
vocabulary_size = 3
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 1), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 2))
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [1]
# example 1, ids [0, 2]
# example 2, ids [0]
# example 3, ids []
indices=((0, 0), (1, 0), (1, 1), (2, 0)),
values=(1, 0, 2, 0),
dense_shape=(4, 2))
expected_lookups_a = [
# example 0, ids [2]
[[7., 11.], [0., 0.]],
# example 1, ids [0, 1]
[[1., 2.], [3., 5.]],
# example 2, ids []
[[0., 0.], [0., 0.]],
# example 3, ids [1]
[[3., 5.], [0., 0.]],
]
expected_lookups_b = [
# example 0, ids [1]
[[3., 5.], [0., 0.]],
# example 1, ids [0, 2]
[[1., 2.], [7., 11.]],
# example 2, ids [0]
[[1., 2.], [0., 0.]],
# example 3, ids []
[[0., 0.], [0., 0.]],
]
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
shared_embedding_columns = fc_lib.shared_embedding_columns(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
initializer=_initializer)
embedding_lookup_a = shared_embedding_columns[0]._get_sequence_dense_tensor(
_LazyBuilder({
'aaa': sparse_input_a
}))[0]
embedding_lookup_b = shared_embedding_columns[1]._get_sequence_dense_tensor(
_LazyBuilder({
'bbb': sparse_input_b
}))[0]
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(('embedding_weights:0',),
tuple([v.name for v in global_vars]))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(embedding_values, global_vars[0].eval(session=sess))
self.assertAllEqual(
expected_lookups_a, embedding_lookup_a.eval(session=sess))
self.assertAllEqual(
expected_lookups_b, embedding_lookup_b.eval(session=sess))
def test_sequence_length(self):
vocabulary_size = 3
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
expected_sequence_length_a = [1, 2]
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [0, 2]
# example 1, ids [1]
indices=((0, 0), (0, 1), (1, 0)),
values=(0, 2, 1),
dense_shape=(2, 2))
expected_sequence_length_b = [2, 1]
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
shared_embedding_columns = fc_lib.shared_embedding_columns(
[categorical_column_a, categorical_column_b], dimension=2)
sequence_length_a = shared_embedding_columns[0]._get_sequence_dense_tensor(
_LazyBuilder({
'aaa': sparse_input_a
}))[1]
sequence_length_b = shared_embedding_columns[1]._get_sequence_dense_tensor(
_LazyBuilder({
'bbb': sparse_input_b
}))[1]
with monitored_session.MonitoredSession() as sess:
sequence_length_a = sess.run(sequence_length_a)
self.assertAllEqual(expected_sequence_length_a, sequence_length_a)
self.assertEqual(np.int64, sequence_length_a.dtype)
sequence_length_b = sess.run(sequence_length_b)
self.assertAllEqual(expected_sequence_length_b, sequence_length_b)
self.assertEqual(np.int64, sequence_length_b.dtype)
def test_sequence_length_with_empty_rows(self):
"""Tests _sequence_length when some examples do not have ids."""
vocabulary_size = 3
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids []
# example 1, ids [2]
# example 2, ids [0, 1]
# example 3, ids []
# example 4, ids [1]
# example 5, ids []
indices=((1, 0), (2, 0), (2, 1), (4, 0)),
values=(2, 0, 1, 1),
dense_shape=(6, 2))
expected_sequence_length_a = [0, 1, 2, 0, 1, 0]
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids []
# example 2, ids []
# example 3, ids []
# example 4, ids [1]
# example 5, ids [0, 1]
indices=((0, 0), (4, 0), (5, 0), (5, 1)),
values=(2, 1, 0, 1),
dense_shape=(6, 2))
expected_sequence_length_b = [1, 0, 0, 0, 1, 2]
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
shared_embedding_columns = fc_lib.shared_embedding_columns(
[categorical_column_a, categorical_column_b], dimension=2)
sequence_length_a = shared_embedding_columns[0]._get_sequence_dense_tensor(
_LazyBuilder({
'aaa': sparse_input_a
}))[1]
sequence_length_b = shared_embedding_columns[1]._get_sequence_dense_tensor(
_LazyBuilder({
'bbb': sparse_input_b
}))[1]
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(
expected_sequence_length_a, sequence_length_a.eval(session=sess))
self.assertAllEqual(
expected_sequence_length_b, sequence_length_b.eval(session=sess))
class SequenceIndicatorColumnTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
'indices': ((0, 0), (1, 0), (1, 1), (3, 0)),
'values': (2, 0, 1, 1),
'dense_shape': (4, 2)},
'expected': [
# example 0, ids [2]
[[0., 0., 1.], [0., 0., 0.]],
# example 1, ids [0, 1]
[[1., 0., 0.], [0., 1., 0.]],
# example 2, ids []
[[0., 0., 0.], [0., 0., 0.]],
# example 3, ids [1]
[[0., 1., 0.], [0., 0., 0.]]]},
{'testcase_name': '3D',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
# example 2, ids []
# example 3, ids [[1], [2, 2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0),
(3, 0, 0), (3, 1, 0), (3, 1, 1)),
'values': (2, 0, 1, 2, 1, 2, 2),
'dense_shape': (4, 2, 2)},
'expected': [
# example 0, ids [[2]]
[[0., 0., 1.], [0., 0., 0.]],
# example 1, ids [[0, 1], [2]]
[[1., 1., 0.], [0., 0., 1.]],
# example 2, ids []
[[0., 0., 0.], [0., 0., 0.]],
# example 3, ids [[1], [2, 2]]
[[0., 1., 0.], [0., 0., 2.]]]}
)
def test_get_sequence_dense_tensor(self, inputs_args, expected):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
vocabulary_size = 3
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column = fc._indicator_column(categorical_column)
indicator_tensor, _ = indicator_column._get_sequence_dense_tensor(
_LazyBuilder({'aaa': inputs}))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(expected, indicator_tensor.eval(session=sess))
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2, 0, 1),
'dense_shape': (2, 2)},
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2, 0, 1, 2),
'dense_shape': (2, 2, 2)},
'expected_sequence_length': [1, 2]}
)
def test_sequence_length(self, inputs_args, expected_sequence_length):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
vocabulary_size = 3
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column = fc._indicator_column(categorical_column)
_, sequence_length = indicator_column._get_sequence_dense_tensor(
_LazyBuilder({'aaa': inputs}))
with monitored_session.MonitoredSession() as sess:
sequence_length = sess.run(sequence_length)
self.assertAllEqual(expected_sequence_length, sequence_length)
self.assertEqual(np.int64, sequence_length.dtype)
def test_sequence_length_with_empty_rows(self):
"""Tests _sequence_length when some examples do not have ids."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids []
# example 1, ids [2]
# example 2, ids [0, 1]
# example 3, ids []
# example 4, ids [1]
# example 5, ids []
indices=((1, 0), (2, 0), (2, 1), (4, 0)),
values=(2, 0, 1, 1),
dense_shape=(6, 2))
expected_sequence_length = [0, 1, 2, 0, 1, 0]
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column = fc._indicator_column(categorical_column)
_, sequence_length = indicator_column._get_sequence_dense_tensor(
_LazyBuilder({'aaa': sparse_input}))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
class SequenceNumericColumnTest(test.TestCase, parameterized.TestCase):
def test_defaults(self):
a = sfc.sequence_numeric_column('aaa')
self.assertEqual('aaa', a.key)
self.assertEqual('aaa', a.name)
self.assertEqual('aaa', a._var_scope_name)
self.assertEqual((1,), a.shape)
self.assertEqual(0., a.default_value)
self.assertEqual(dtypes.float32, a.dtype)
self.assertIsNone(a.normalizer_fn)
def test_shape_saved_as_tuple(self):
a = sfc.sequence_numeric_column('aaa', shape=[1, 2])
self.assertEqual((1, 2), a.shape)
def test_shape_must_be_positive_integer(self):
with self.assertRaisesRegexp(TypeError, 'shape dimensions must be integer'):
sfc.sequence_numeric_column('aaa', shape=[1.0])
with self.assertRaisesRegexp(
ValueError, 'shape dimensions must be greater than 0'):
sfc.sequence_numeric_column('aaa', shape=[0])
def test_dtype_is_convertible_to_float(self):
with self.assertRaisesRegexp(
ValueError, 'dtype must be convertible to float'):
sfc.sequence_numeric_column('aaa', dtype=dtypes.string)
def test_normalizer_fn_must_be_callable(self):
with self.assertRaisesRegexp(TypeError, 'must be a callable'):
sfc.sequence_numeric_column('aaa', normalizer_fn='NotACallable')
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, values [0., 1]
# example 1, [10.]
'indices': ((0, 0), (0, 1), (1, 0)),
'values': (0., 1., 10.),
'dense_shape': (2, 2)},
'expected': [
[[0.], [1.]],
[[10.], [0.]]]},
{'testcase_name': '3D',
'inputs_args': {
# feature 0, ids [[20, 3], [5]]
# feature 1, ids [[3], [8]]
'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
'values': (20, 3, 5., 3., 8.),
'dense_shape': (2, 2, 2)},
'expected': [
[[20.], [3.], [5.], [0.]],
[[3.], [0.], [8.], [0.]]]},
)
def test_get_sequence_dense_tensor(self, inputs_args, expected):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
numeric_column = sfc.sequence_numeric_column('aaa')
dense_tensor, _ = numeric_column._get_sequence_dense_tensor(
_LazyBuilder({'aaa': inputs}))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(expected, dense_tensor.eval(session=sess))
def test_get_sequence_dense_tensor_with_normalizer_fn(self):
def _increment_two(input_sparse_tensor):
return sparse_ops.sparse_add(
input_sparse_tensor,
sparse_tensor.SparseTensor(((0, 0), (1, 1)), (2.0, 2.0), (2, 2))
)
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, values [[0.], [1]]
# example 1, [[10.]]
indices=((0, 0), (0, 1), (1, 0)),
values=(0., 1., 10.),
dense_shape=(2, 2))
# Before _increment_two:
# [[0.], [1.]],
# [[10.], [0.]],
# After _increment_two:
# [[2.], [1.]],
# [[10.], [2.]],
expected_dense_tensor = [
[[2.], [1.]],
[[10.], [2.]],
]
numeric_column = sfc.sequence_numeric_column(
'aaa', normalizer_fn=_increment_two)
dense_tensor, _ = numeric_column._get_sequence_dense_tensor(
_LazyBuilder({'aaa': sparse_input}))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(
expected_dense_tensor, dense_tensor.eval(session=sess))
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, values [[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]]
# example 1, [[[10., 11.], [12., 13.]]]
'indices': ((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6),
(0, 7), (1, 0), (1, 1), (1, 2), (1, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 8)},
'expected_dense_tensor': [
[[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]],
[[[10., 11.], [12., 13.]], [[0., 0.], [0., 0.]]]]},
{'testcase_name': '3D',
'sparse_input_args': {
'indices': ((0, 0, 0), (0, 0, 2), (0, 0, 4), (0, 0, 6),
(0, 1, 0), (0, 1, 2), (0, 1, 4), (0, 1, 6),
(1, 0, 0), (1, 0, 2), (1, 0, 4), (1, 0, 6)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 2, 8)},
'expected_dense_tensor': [
[[[0., 0.], [1., 0.]], [[2., 0.], [3., 0.]],
[[4., 0.], [5., 0.]], [[6., 0.], [7., 0.]]],
[[[10., 0.], [11., 0.]], [[12., 0.], [13., 0.]],
[[0., 0.], [0., 0.]], [[0., 0.], [0., 0.]]]]},
)
def test_get_dense_tensor_multi_dim(
self, sparse_input_args, expected_dense_tensor):
"""Tests get_sequence_dense_tensor for multi-dim numeric_column."""
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
numeric_column = sfc.sequence_numeric_column('aaa', shape=(2, 2))
dense_tensor, _ = numeric_column._get_sequence_dense_tensor(
_LazyBuilder({'aaa': sparse_input}))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(
expected_dense_tensor, dense_tensor.eval(session=sess))
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2., 0., 1.),
'dense_shape': (2, 2)},
'expected_sequence_length': [1, 2],
'shape': (1,)},
{'testcase_name': '3D',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2., 0., 1., 2.),
'dense_shape': (2, 2, 2)},
'expected_sequence_length': [1, 2],
'shape': (1,)},
{'testcase_name': '2D_with_shape',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2., 0., 1.),
'dense_shape': (2, 2)},
'expected_sequence_length': [1, 1],
'shape': (2,)},
{'testcase_name': '3D_with_shape',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2., 0., 1., 2.),
'dense_shape': (2, 2, 2)},
'expected_sequence_length': [1, 2],
'shape': (2,)},
)
def test_sequence_length(self, inputs_args, expected_sequence_length, shape):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
numeric_column = sfc.sequence_numeric_column('aaa', shape=shape)
_, sequence_length = numeric_column._get_sequence_dense_tensor(
_LazyBuilder({'aaa': inputs}))
with monitored_session.MonitoredSession() as sess:
sequence_length = sess.run(sequence_length)
self.assertAllEqual(expected_sequence_length, sequence_length)
self.assertEqual(np.int64, sequence_length.dtype)
def test_sequence_length_with_empty_rows(self):
"""Tests _sequence_length when some examples do not have ids."""
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, values []
# example 1, values [[0.], [1.]]
# example 2, [[2.]]
# example 3, values []
# example 4, [[3.]]
# example 5, values []
indices=((1, 0), (1, 1), (2, 0), (4, 0)),
values=(0., 1., 2., 3.),
dense_shape=(6, 2))
expected_sequence_length = [0, 2, 1, 0, 1, 0]
numeric_column = sfc.sequence_numeric_column('aaa')
_, sequence_length = numeric_column._get_sequence_dense_tensor(
_LazyBuilder({'aaa': sparse_input}))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/feature_column/python/feature_column/sequence_feature_column_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental methods for tf.feature_column sequence input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.feature_column import feature_column as fc
from tensorflow.python.feature_column import utils as fc_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
# pylint: disable=protected-access
def sequence_input_layer(
features,
feature_columns,
weight_collections=None,
trainable=True):
""""Builds input layer for sequence input.
All `feature_columns` must be sequence dense columns with the same
`sequence_length`. The output of this method can be fed into sequence
networks, such as RNN.
The output of this method is a 3D `Tensor` of shape `[batch_size, T, D]`.
`T` is the maximum sequence length for this batch, which could differ from
batch to batch.
If multiple `feature_columns` are given with `Di` `num_elements` each, their
outputs are concatenated. So, the final `Tensor` has shape
`[batch_size, T, D0 + D1 + ... + Dn]`.
Example:
```python
rating = sequence_numeric_column('rating')
watches = sequence_categorical_column_with_identity(
'watches', num_buckets=1000)
watches_embedding = embedding_column(watches, dimension=10)
  columns = [rating, watches_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
input_layer, sequence_length = sequence_input_layer(features, columns)
rnn_cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(hidden_size)
outputs, state = tf.compat.v1.nn.dynamic_rnn(
rnn_cell, inputs=input_layer, sequence_length=sequence_length)
```
Args:
features: A dict mapping keys to tensors.
    feature_columns: An iterable of dense sequence columns. Valid columns are
      - `embedding_column` or `indicator_column` that wraps a
        `sequence_categorical_column_with_*`
      - `sequence_numeric_column`.
    weight_collections: A list of collection names to which the Variable will
      be added. Note that variables will also be added to collections
      `tf.GraphKeys.GLOBAL_VARIABLES` and `tf.GraphKeys.MODEL_VARIABLES`.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES`.
Returns:
An `(input_layer, sequence_length)` tuple where:
- input_layer: A float `Tensor` of shape `[batch_size, T, D]`.
`T` is the maximum sequence length for this batch, which could differ
from batch to batch. `D` is the sum of `num_elements` for all
`feature_columns`.
- sequence_length: An int `Tensor` of shape `[batch_size]`. The sequence
length for each example.
Raises:
ValueError: If any of the `feature_columns` is the wrong type.
"""
feature_columns = fc._normalize_feature_columns(feature_columns)
for c in feature_columns:
if not isinstance(c, fc._SequenceDenseColumn):
raise ValueError(
'All feature_columns must be of type _SequenceDenseColumn. '
'You can wrap a sequence_categorical_column with an embedding_column '
'or indicator_column. '
'Given (type {}): {}'.format(type(c), c))
with variable_scope.variable_scope(
None, default_name='sequence_input_layer', values=features.values()):
builder = fc._LazyBuilder(features)
output_tensors = []
sequence_lengths = []
ordered_columns = []
for column in sorted(feature_columns, key=lambda x: x.name):
ordered_columns.append(column)
with variable_scope.variable_scope(
None, default_name=column._var_scope_name):
dense_tensor, sequence_length = column._get_sequence_dense_tensor(
builder,
weight_collections=weight_collections,
trainable=trainable)
# Flattens the final dimension to produce a 3D Tensor.
num_elements = column._variable_shape.num_elements()
shape = array_ops.shape(dense_tensor)
target_shape = [shape[0], shape[1], num_elements]
output_tensors.append(
array_ops.reshape(dense_tensor, shape=target_shape))
sequence_lengths.append(sequence_length)
fc._verify_static_batch_size_equality(output_tensors, ordered_columns)
fc._verify_static_batch_size_equality(sequence_lengths, ordered_columns)
sequence_length = _assert_all_equal_and_return(sequence_lengths)
return array_ops.concat(output_tensors, -1), sequence_length
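# Clarifying note with a hypothetical snippet (column names `col_aaa`/`col_bbb`
# are made up for illustration): because the loop above iterates over
# `sorted(feature_columns, key=lambda x: x.name)`, the output is concatenated
# in alphabetical column order regardless of the order in which the columns
# were passed, e.g.
#
#   input_layer, _ = sequence_input_layer(features, [col_bbb, col_aaa])
#   # The leading slice of the last dimension corresponds to `col_aaa`.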
def concatenate_context_input(context_input, sequence_input):
"""Replicates `context_input` across all timesteps of `sequence_input`.
  Expands dimension 1 of `context_input`, then tiles it `padded_length` times.
This value is appended to `sequence_input` on dimension 2 and the result is
returned.
Args:
context_input: A `Tensor` of dtype `float32` and shape `[batch_size, d1]`.
sequence_input: A `Tensor` of dtype `float32` and shape `[batch_size,
padded_length, d0]`.
Returns:
A `Tensor` of dtype `float32` and shape `[batch_size, padded_length,
d0 + d1]`.
Raises:
ValueError: If `sequence_input` does not have rank 3 or `context_input` does
not have rank 2.
"""
seq_rank_check = check_ops.assert_rank(
sequence_input,
3,
message='sequence_input must have rank 3',
data=[array_ops.shape(sequence_input)])
seq_type_check = check_ops.assert_type(
sequence_input,
dtypes.float32,
message='sequence_input must have dtype float32; got {}.'.format(
sequence_input.dtype))
ctx_rank_check = check_ops.assert_rank(
context_input,
2,
message='context_input must have rank 2',
data=[array_ops.shape(context_input)])
ctx_type_check = check_ops.assert_type(
context_input,
dtypes.float32,
message='context_input must have dtype float32; got {}.'.format(
context_input.dtype))
with ops.control_dependencies(
[seq_rank_check, seq_type_check, ctx_rank_check, ctx_type_check]):
padded_length = array_ops.shape(sequence_input)[1]
tiled_context_input = array_ops.tile(
array_ops.expand_dims(context_input, 1),
array_ops.concat([[1], [padded_length], [1]], 0))
return array_ops.concat([sequence_input, tiled_context_input], 2)
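# Minimal usage sketch (hypothetical tensors, assuming graph-mode usage like
# the rest of this module):
#
#   seq = array_ops.ones([2, 3, 2])                 # [batch, padded_length, d0]
#   ctx = array_ops.zeros([2, 4])                   # [batch, d1]
#   combined = concatenate_context_input(ctx, seq)  # shape [2, 3, 2 + 4]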
def sequence_categorical_column_with_identity(
key, num_buckets, default_value=None):
"""Returns a feature column that represents sequences of integers.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
watches = sequence_categorical_column_with_identity(
'watches', num_buckets=1000)
watches_embedding = embedding_column(watches, dimension=10)
columns = [watches_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
input_layer, sequence_length = sequence_input_layer(features, columns)
rnn_cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(hidden_size)
outputs, state = tf.compat.v1.nn.dynamic_rnn(
rnn_cell, inputs=input_layer, sequence_length=sequence_length)
```
Args:
key: A unique string identifying the input feature.
num_buckets: Range of inputs. Namely, inputs are expected to be in the
range `[0, num_buckets)`.
default_value: If `None`, this column's graph operations will fail for
out-of-range inputs. Otherwise, this value must be in the range
`[0, num_buckets)`, and will replace out-of-range inputs.
Returns:
A `_SequenceCategoricalColumn`.
Raises:
ValueError: if `num_buckets` is less than one.
ValueError: if `default_value` is not in range `[0, num_buckets)`.
"""
return fc._SequenceCategoricalColumn(
fc._categorical_column_with_identity(
key=key, num_buckets=num_buckets, default_value=default_value))
def sequence_categorical_column_with_hash_bucket(
key, hash_bucket_size, dtype=dtypes.string):
"""A sequence of categorical terms where ids are set by hashing.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
tokens = sequence_categorical_column_with_hash_bucket(
'tokens', hash_bucket_size=1000)
tokens_embedding = embedding_column(tokens, dimension=10)
columns = [tokens_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
input_layer, sequence_length = sequence_input_layer(features, columns)
rnn_cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(hidden_size)
outputs, state = tf.compat.v1.nn.dynamic_rnn(
rnn_cell, inputs=input_layer, sequence_length=sequence_length)
```
Args:
key: A unique string identifying the input feature.
hash_bucket_size: An int > 1. The number of buckets.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `_SequenceCategoricalColumn`.
Raises:
ValueError: `hash_bucket_size` is not greater than 1.
ValueError: `dtype` is neither string nor integer.
"""
return fc._SequenceCategoricalColumn(
fc._categorical_column_with_hash_bucket(
key=key, hash_bucket_size=hash_bucket_size, dtype=dtype))
def sequence_categorical_column_with_vocabulary_file(
key, vocabulary_file, vocabulary_size=None, num_oov_buckets=0,
default_value=None, dtype=dtypes.string):
"""A sequence of categorical terms where ids use a vocabulary file.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
states = sequence_categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
num_oov_buckets=5)
states_embedding = embedding_column(states, dimension=10)
columns = [states_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
input_layer, sequence_length = sequence_input_layer(features, columns)
rnn_cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(hidden_size)
outputs, state = tf.compat.v1.nn.dynamic_rnn(
rnn_cell, inputs=input_layer, sequence_length=sequence_length)
```
Args:
key: A unique string identifying the input feature.
vocabulary_file: The vocabulary file name.
    vocabulary_size: Number of elements in the vocabulary. This must be no
      greater than the length of `vocabulary_file`; if it is less, later values
      are ignored. If `None`, it is set to the length of `vocabulary_file`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
the input value. A positive `num_oov_buckets` can not be specified with
`default_value`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `_SequenceCategoricalColumn`.
Raises:
ValueError: `vocabulary_file` is missing or cannot be opened.
ValueError: `vocabulary_size` is missing or < 1.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: `dtype` is neither string nor integer.
"""
return fc._SequenceCategoricalColumn(
fc._categorical_column_with_vocabulary_file(
key=key,
vocabulary_file=vocabulary_file,
vocabulary_size=vocabulary_size,
num_oov_buckets=num_oov_buckets,
default_value=default_value,
dtype=dtype))
def sequence_categorical_column_with_vocabulary_list(
key, vocabulary_list, dtype=None, default_value=-1, num_oov_buckets=0):
"""A sequence of categorical terms where ids use an in-memory list.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
colors = sequence_categorical_column_with_vocabulary_list(
key='colors', vocabulary_list=('R', 'G', 'B', 'Y'),
num_oov_buckets=2)
colors_embedding = embedding_column(colors, dimension=3)
columns = [colors_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
input_layer, sequence_length = sequence_input_layer(features, columns)
rnn_cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(hidden_size)
outputs, state = tf.compat.v1.nn.dynamic_rnn(
rnn_cell, inputs=input_layer, sequence_length=sequence_length)
```
Args:
key: A unique string identifying the input feature.
vocabulary_list: An ordered iterable defining the vocabulary. Each feature
is mapped to the index of its value (if present) in `vocabulary_list`.
Must be castable to `dtype`.
dtype: The type of features. Only string and integer types are supported.
If `None`, it will be inferred from `vocabulary_list`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on a
hash of the input value. A positive `num_oov_buckets` can not be specified
with `default_value`.
Returns:
A `_SequenceCategoricalColumn`.
Raises:
ValueError: if `vocabulary_list` is empty, or contains duplicate keys.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: if `dtype` is not integer or string.
"""
return fc._SequenceCategoricalColumn(
fc._categorical_column_with_vocabulary_list(
key=key,
vocabulary_list=vocabulary_list,
dtype=dtype,
default_value=default_value,
num_oov_buckets=num_oov_buckets))
def sequence_numeric_column(
key,
shape=(1,),
default_value=0.,
dtype=dtypes.float32,
normalizer_fn=None):
"""Returns a feature column that represents sequences of numeric data.
Example:
```python
temperature = sequence_numeric_column('temperature')
columns = [temperature]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
input_layer, sequence_length = sequence_input_layer(features, columns)
rnn_cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(hidden_size)
outputs, state = tf.compat.v1.nn.dynamic_rnn(
rnn_cell, inputs=input_layer, sequence_length=sequence_length)
```
Args:
key: A unique string identifying the input features.
shape: The shape of the input data per sequence id. E.g. if `shape=(2,)`,
each example must contain `2 * sequence_length` values.
default_value: A single value compatible with `dtype` that is used for
padding the sparse data into a dense `Tensor`.
dtype: The type of values.
normalizer_fn: If not `None`, a function that can be used to normalize the
value of the tensor after `default_value` is applied for parsing.
      Normalizer function takes the input `Tensor` as its argument, and returns
      the output `Tensor` (e.g. lambda x: (x - 3.0) / 4.2). Please note that
      even though the most common use case of this function is normalization,
      it can be used for any kind of TensorFlow transformation.
Returns:
A `_SequenceNumericColumn`.
Raises:
TypeError: if any dimension in shape is not an int.
ValueError: if any dimension in shape is not a positive integer.
ValueError: if `dtype` is not convertible to `tf.float32`.
"""
shape = fc._check_shape(shape=shape, key=key)
if not (dtype.is_integer or dtype.is_floating):
raise ValueError('dtype must be convertible to float. '
'dtype: {}, key: {}'.format(dtype, key))
if normalizer_fn is not None and not callable(normalizer_fn):
raise TypeError(
'normalizer_fn must be a callable. Given: {}'.format(normalizer_fn))
return _SequenceNumericColumn(
key,
shape=shape,
default_value=default_value,
dtype=dtype,
normalizer_fn=normalizer_fn)
def _assert_all_equal_and_return(tensors, name=None):
"""Asserts that all tensors are equal and returns the first one."""
with ops.name_scope(name, 'assert_all_equal', values=tensors):
if len(tensors) == 1:
return tensors[0]
assert_equal_ops = []
for t in tensors[1:]:
assert_equal_ops.append(check_ops.assert_equal(tensors[0], t))
with ops.control_dependencies(assert_equal_ops):
return array_ops.identity(tensors[0])
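# Illustrative sketch (hypothetical names `len_a`, `len_b`): given per-column
# sequence length tensors, `_assert_all_equal_and_return([len_a, len_b])`
# returns `len_a` wrapped in control dependencies that raise
# InvalidArgumentError at run time if `len_a != len_b` element-wise.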
class _SequenceNumericColumn(
fc._SequenceDenseColumn,
collections.namedtuple(
'_SequenceNumericColumn',
['key', 'shape', 'default_value', 'dtype', 'normalizer_fn'])):
"""Represents sequences of numeric data."""
@property
def name(self):
return self.key
@property
def _parse_example_spec(self):
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
def _transform_feature(self, inputs):
input_tensor = inputs.get(self.key)
if self.normalizer_fn is not None:
input_tensor = self.normalizer_fn(input_tensor)
return input_tensor
@property
def _variable_shape(self):
return tensor_shape.TensorShape(self.shape)
def _get_sequence_dense_tensor(
self, inputs, weight_collections=None, trainable=None):
# Do nothing with weight_collections and trainable since no variables are
# created in this function.
del weight_collections
del trainable
sp_tensor = inputs.get(self)
dense_tensor = sparse_ops.sparse_tensor_to_dense(
sp_tensor, default_value=self.default_value)
# Reshape into [batch_size, T, variable_shape].
dense_shape = array_ops.concat(
[array_ops.shape(dense_tensor)[:1], [-1], self._variable_shape],
axis=0)
dense_tensor = array_ops.reshape(dense_tensor, shape=dense_shape)
# Get the number of timesteps per example
# For the 2D case, the raw values are grouped according to num_elements;
# for the 3D case, the grouping happens in the third dimension, and
# sequence length is not affected.
num_elements = (self._variable_shape.num_elements()
if sp_tensor.shape.ndims == 2 else 1)
seq_length = fc_utils.sequence_length_from_sparse_tensor(
sp_tensor, num_elements=num_elements)
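    # Worked illustration (added clarification, assuming a 2D input and
    # shape=(2,)): a row with four raw values [1., 2., 3., 4.] densifies to
    # [[1., 2.], [3., 4.]] after the reshape above, i.e. T = 2 timesteps of
    # num_elements = 2 values each, so seq_length for that row is 4 / 2 = 2.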
return fc._SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=seq_length)
# pylint: enable=protected-access
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/feature_column/python/feature_column/sequence_feature_column.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""estimator python module.
Importing from tensorflow.python.estimator
is unsupported and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Importing from tensorflow.python.estimator
# is unsupported and will soon break!
from tensorflow_estimator.contrib import estimator
# Fixes remove_undocumented not working as intended.
#
# Problem is that when the below import happens (for first time,
# Python only imports things once), Python sets attribute named
# 'python' to this package. If this first import happens
# after the call to remove_undocumented, then the 'python'
# attribute won't be removed.
import tensorflow.contrib.estimator.python
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
estimator.__all__ = [s for s in dir(estimator) if not s.startswith('__')]
from tensorflow_estimator.contrib.estimator import *
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'add_metrics',
'binary_classification_head',
'clip_gradients_by_norm',
'forward_features',
'InMemoryEvaluatorHook',
'make_stop_at_checkpoint_step_hook',
'logistic_regression_head',
'multi_class_head',
'multi_head',
'multi_label_head',
'poisson_regression_head',
'regression_head',
'boosted_trees_classifier_train_in_memory',
'boosted_trees_regressor_train_in_memory',
'call_logit_fn',
'dnn_logit_fn_builder',
'linear_logit_fn_builder',
'replicate_model_fn',
'TowerOptimizer',
'RNNClassifier',
'RNNEstimator',
'export_saved_model_for_mode',
'export_all_saved_models',
'make_early_stopping_hook',
'read_eval_metrics',
'stop_if_lower_hook',
'stop_if_higher_hook',
'stop_if_no_increase_hook',
'stop_if_no_decrease_hook',
'build_raw_supervised_input_receiver_fn',
'build_supervised_input_receiver_fn_from_input_fn',
'SavedModelEstimator',
'DNNClassifierWithLayerAnnotations',
'DNNRegressorWithLayerAnnotations',
]
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/estimator/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""dnn_with_layer_annotations python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import dnn_with_layer_annotations
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
dnn_with_layer_annotations.__all__ = [
s for s in dir(dnn_with_layer_annotations) if not s.startswith('__')
]
from tensorflow_estimator.contrib.estimator.python.estimator.dnn_with_layer_annotations import *
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/estimator/python/estimator/dnn_with_layer_annotations.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""hooks python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import hooks
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
hooks.__all__ = [s for s in dir(hooks) if not s.startswith('__')]
from tensorflow_estimator.contrib.estimator.python.estimator.hooks import *
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/estimator/python/estimator/hooks.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""logit_fns python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import logit_fns
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
logit_fns.__all__ = [s for s in dir(logit_fns) if not s.startswith('__')]
from tensorflow_estimator.contrib.estimator.python.estimator.logit_fns import *
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/estimator/python/estimator/logit_fns.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""exporter python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import exporter
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
exporter.__all__ = [s for s in dir(exporter) if not s.startswith('__')]
from tensorflow_estimator.contrib.estimator.python.estimator.exporter import *
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/estimator/python/estimator/exporter.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""export python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import export
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
export.__all__ = [s for s in dir(export) if not s.startswith('__')]
from tensorflow_estimator.contrib.estimator.python.estimator.export import *
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/estimator/python/estimator/export.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""boosted_trees python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import boosted_trees
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
boosted_trees.__all__ = [
s for s in dir(boosted_trees) if not s.startswith('__')
]
from tensorflow_estimator.contrib.estimator.python.estimator.boosted_trees import *
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/estimator/python/estimator/boosted_trees.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""multi_head python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import multi_head
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
multi_head.__all__ = [s for s in dir(multi_head) if not s.startswith('__')]
from tensorflow_estimator.contrib.estimator.python.estimator.multi_head import *
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/estimator/python/estimator/multi_head.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""saved_model_estimator python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import saved_model_estimator
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
saved_model_estimator.__all__ = [
s for s in dir(saved_model_estimator) if not s.startswith('__')
]
from tensorflow_estimator.contrib.estimator.python.estimator.saved_model_estimator import *
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/estimator/python/estimator/saved_model_estimator.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""head python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import head
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
head.__all__ = [s for s in dir(head) if not s.startswith('__')]
from tensorflow_estimator.contrib.estimator.python.estimator.head import *
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/estimator/python/estimator/head.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""extenders python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import extenders
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
extenders.__all__ = [s for s in dir(extenders) if not s.startswith('__')]
from tensorflow_estimator.contrib.estimator.python.estimator.extenders import *
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/estimator/python/estimator/extenders.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""replicate_model_fn python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import replicate_model_fn
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
replicate_model_fn.__all__ = [
s for s in dir(replicate_model_fn) if not s.startswith('__')
]
from tensorflow_estimator.contrib.estimator.python.estimator.replicate_model_fn import *
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/estimator/python/estimator/replicate_model_fn.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""early_stopping python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.python.estimator import early_stopping
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
early_stopping.__all__ = [
s for s in dir(early_stopping) if not s.startswith('__')
]
from tensorflow_estimator.python.estimator.early_stopping import *
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/estimator/python/estimator/early_stopping.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""rnn python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import rnn
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
rnn.__all__ = [s for s in dir(rnn) if not s.startswith('__')]
from tensorflow_estimator.contrib.estimator.python.estimator.rnn import *
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/estimator/python/estimator/rnn.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""reduce by slice
@@reduce_slice_sum
@@reduce_slice_prod
@@reduce_slice_min
@@reduce_slice_max
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.reduce_slice_ops.python.ops import *
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/reduce_slice_ops/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.reduce_slice_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.reduce_slice_ops.python.ops import reduce_slice_ops
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.platform import googletest
class ReduceSliceTest(TensorFlowTestCase):
def testReduceSliceSum1D(self):
x = np.array([1, 40, 700], dtype=np.int32)
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.array([1, 741, 40, 740, 41], dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceSum2D(self):
x = np.array([[1, 2, 3], [40, 50, 60], [700, 800, 900]], dtype=np.int32)
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.array([[1, 2, 3], [741, 852, 963], [40, 50, 60],
[740, 850, 960], [41, 52, 63]], dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceSum3D(self):
x = np.array([[[1, 2], [3, 4]], [[50, 60], [70, 80]],
[[600, 700], [800, 900]]], dtype=np.int32)
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.array([[[1, 2], [3, 4]],
[[651, 762], [873, 984]],
[[50, 60], [70, 80]],
[[650, 760], [870, 980]],
[[51, 62], [73, 84]]], dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceSumAxis1(self):
x = np.transpose(np.array([[1, 2, 3], [40, 50, 60],
[700, 800, 900]], dtype=np.int32))
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.transpose(np.array([[1, 2, 3],
[741, 852, 963],
[40, 50, 60],
[740, 850, 960],
[41, 52, 63]], dtype=np.int32))
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 1).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceSum1DIndices(self):
x = np.array([[1, 2, 3], [40, 50, 60], [700, 800, 900],
[1000, 2000, 3000], [40000, 50000, 60000]], dtype=np.int32)
indices = np.array([0, 0, 2, 5], dtype=np.int32)
result = np.array([[0, 0, 0], [41, 52, 63],
[41700, 52800, 63900]], dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceProd(self):
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.array([[1, 2, 3], [28, 80, 162], [4, 5, 6],
[28, 40, 54], [4, 10, 18]], dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_prod(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceMax(self):
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.array([[1, 2, 3], [7, 8, 9], [4, 5, 6],
[7, 8, 9], [4, 5, 6]], dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_max(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceMin(self):
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.array([[1, 2, 3], [1, 2, 3], [4, 5, 6],
[4, 5, 6], [1, 2, 3]], dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_min(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceEmptyDataRows(self):
x = np.empty((0, 1, 2, 3, 4, 5, 6), dtype=np.int32)
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.zeros((5, 1, 2, 3, 4, 5, 6), dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceEmptyDataCols(self):
x = np.empty((100, 0, 2, 3, 4, 5, 6), dtype=np.int32)
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.empty((5, 0, 2, 3, 4, 5, 6), dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceEmptyIndicesRows(self):
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
indices = np.empty((0, 2), dtype=np.int32)
result = np.empty((0, 3), dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceEmpty0Indices1D(self):
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
indices = np.empty((0,), dtype=np.int32)
result = np.empty((0, 3), dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceEmpty1Indices1D(self):
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
indices = np.array([0], dtype=np.int32)
result = np.empty((0, 3), dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/reduce_slice_ops/python/kernel_tests/reduce_slice_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for the reduce slice operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.reduce_slice_ops.ops import gen_reduce_slice_ops
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
_reduce_slice_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_reduce_slice_ops.so"))
reduce_slice_sum = gen_reduce_slice_ops.reduce_slice_sum
reduce_slice_prod = gen_reduce_slice_ops.reduce_slice_prod
reduce_slice_max = gen_reduce_slice_ops.reduce_slice_max
reduce_slice_min = gen_reduce_slice_ops.reduce_slice_min
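# --- Editorial sketch, not part of the original file. A hedged NumPy reference
# for what reduce_slice_sum computes along axis 0, matching the kernel tests:
# output row i is the sum of data[indices[i, 0]:indices[i, 1]], and an empty
# range yields zeros. The helper name is hypothetical and never called here.
def _reduce_slice_sum_reference(data, indices):
  import numpy as np
  out = np.zeros((len(indices),) + data.shape[1:], dtype=data.dtype)
  for i, (start, end) in enumerate(indices):
    # Sum the half-open slice [start, end) along the leading axis.
    out[i] = data[start:end].sum(axis=0)
  return out

# Example (mirrors testReduceSliceSum1D in the kernel tests above):
#   data = [1, 40, 700], indices = [[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]]
#   result = [1, 741, 40, 740, 41]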
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/reduce_slice_ops/python/ops/reduce_slice_ops.py
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for mixed precision training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.mixed_precision.python.loss_scale_manager import *
from tensorflow.contrib.mixed_precision.python.loss_scale_optimizer import *
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
"LossScaleManager",
"FixedLossScaleManager",
"ExponentialUpdateLossScaleManager",
"LossScaleOptimizer",
]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/mixed_precision/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LossScaleManager classes.."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.mixed_precision.python import loss_scale_manager as lsm_lib
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _GetExampleIter(inputs):
dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
return dataset_ops.make_one_shot_iterator(dataset)
class FixedLossScaleManagerTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_basic(self):
itr = _GetExampleIter([True] * 10 + [False] * 10)
loss_scale = 1000
lsm = lsm_lib.FixedLossScaleManager(loss_scale)
update_fn = lambda: lsm.update_loss_scale(itr.get_next())
self.evaluate(variables.global_variables_initializer())
if not context.executing_eagerly():
update_op = update_fn()
for _ in range(10):
if context.executing_eagerly():
update_fn()
else:
self.evaluate(update_op)
self.assertEqual(loss_scale, self.evaluate(lsm.get_loss_scale()))
class ExponentialUpdateLossScaleManagerTest(test.TestCase):
def _test_helper(self,
inputs,
expected_outputs,
init_loss_scale=1,
incr_every_n_step=2,
decr_every_n_nan_or_inf=2):
ratio = 2
lsm = lsm_lib.ExponentialUpdateLossScaleManager(
init_loss_scale=init_loss_scale,
incr_every_n_steps=incr_every_n_step,
decr_every_n_nan_or_inf=decr_every_n_nan_or_inf,
incr_ratio=ratio,
decr_ratio=1. / ratio)
itr = _GetExampleIter(inputs)
update_fn = lambda: lsm.update_loss_scale(itr.get_next())
self.evaluate(variables.global_variables_initializer())
actual_outputs = []
if not context.executing_eagerly():
update_op = update_fn()
for _ in range(len(inputs)):
if context.executing_eagerly():
update_fn()
else:
self.evaluate(update_op)
actual_outputs.append(self.evaluate(lsm.get_loss_scale()))
self.assertEqual(actual_outputs, expected_outputs)
@test_util.run_in_graph_and_eager_modes
def test_increase_every_n_steps(self):
inputs = [True] * 6
expected_outputs = [1, 2, 2, 4, 4, 8]
self._test_helper(inputs, expected_outputs)
@test_util.run_in_graph_and_eager_modes
def test_keep_increasing_until_capped(self):
init_loss_scale = np.finfo(np.float32).max / 4 + 10
max_float = np.finfo(np.float32).max
inputs = [True] * 6
# Output is capped the 2nd time it doubles.
expected_outputs = [
init_loss_scale, init_loss_scale * 2, init_loss_scale * 2, max_float,
max_float, max_float
]
self._test_helper(inputs, expected_outputs, init_loss_scale)
@test_util.run_in_graph_and_eager_modes
def test_decrease_every_n_steps(self):
inputs = [False] * 6
init_loss_scale = 1024
expected_outputs = [1024, 512, 512, 256, 256, 128]
self._test_helper(inputs, expected_outputs, init_loss_scale)
@test_util.run_in_graph_and_eager_modes
def test_keep_decreasing_until_one(self):
inputs = [False] * 10
init_loss_scale = 16
expected_outputs = [16, 8, 8, 4, 4, 2, 2, 1, 1, 1]
self._test_helper(inputs, expected_outputs, init_loss_scale)
@test_util.run_in_graph_and_eager_modes
def test_incr_bad_step_clear_good_step(self):
inputs = [True, True, True, False, True]
expected_outputs = [1, 2, 2, 2, 2]
self._test_helper(inputs, expected_outputs)
@test_util.run_in_graph_and_eager_modes
def test_incr_good_step_does_not_clear_bad_step(self):
inputs = [True, True, True, False, True, False]
expected_outputs = [1, 2, 2, 2, 2, 1]
self._test_helper(inputs, expected_outputs)
@test_util.run_in_graph_and_eager_modes
def test_trigger_loss_scale_update_each_step(self):
"""Test when incr_every_n_step and decr_every_n_nan_or_inf is 1."""
init_loss_scale = 1
incr_every_n_step = 1
decr_every_n_nan_or_inf = 1
inputs = [True] * 3 + [False, True, True]
expected_outputs = [2, 4, 8, 4, 8, 16]
self._test_helper(inputs, expected_outputs, init_loss_scale,
incr_every_n_step, decr_every_n_nan_or_inf)
@test_util.run_in_graph_and_eager_modes
def test_alternating_good_and_bad_gradients_trigger_each_step(self):
init_loss_scale = 1
incr_every_n_step = 1
decr_every_n_nan_or_inf = 1
inputs = [True, False] * 4 + [True]
expected_outputs = [2, 1, 2, 1, 2, 1, 2, 1, 2]
self._test_helper(inputs, expected_outputs, init_loss_scale,
incr_every_n_step, decr_every_n_nan_or_inf)
@test_util.run_in_graph_and_eager_modes
def test_alternating_good_and_bad_gradients_trigger_incr_every_2steps(self):
init_loss_scale = 32
incr_every_n_step = 2
decr_every_n_nan_or_inf = 1
inputs = [True, False] * 3 + [True]
expected_outputs = [32, 16, 16, 8, 8, 4, 4]
self._test_helper(inputs, expected_outputs, init_loss_scale,
incr_every_n_step, decr_every_n_nan_or_inf)
@test_util.run_in_graph_and_eager_modes
def test_random_mix_good_and_bad_gradients(self):
init_loss_scale = 4
inputs = [
False, False, True, True, True, False, True, False, True, True, True,
False
]
expected_outputs = [4, 2, 2, 4, 4, 4, 4, 2, 2, 4, 4, 4]
self._test_helper(inputs, expected_outputs, init_loss_scale)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/mixed_precision/python/loss_scale_manager_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LossScaleManager classes for mixed precision training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
@six.add_metaclass(abc.ABCMeta)
class LossScaleManager(object):
"""Abstract loss scale manager class.
Loss scale managers with a different strategy should subclass this class.
Loss scaling is a process that:
1) Applies a multiplier on the loss before computing gradients, and
2) Applies the reciprocal of the multiplier on the gradients before they are
applied on variables.
This class is used together with
`tf.contrib.mixed_precision.LossScaleOptimizer` for mixed precision training
  (float32 variables and float16 ops) on Nvidia GPUs in order to achieve the
  same model quality as single-precision training, with the benefit of
  potentially higher throughput.
See `tf.contrib.mixed_precision.LossScaleOptimizer` for more details.
"""
@abc.abstractmethod
def get_loss_scale(self):
"""Returns the loss scale as a scalar `float32` tensor."""
pass
@abc.abstractmethod
def update_loss_scale(self, finite_grads):
"""Updates loss scale based on if gradients are finite in current step.
Args:
finite_grads: bool scalar tensor indicating if all gradients are
finite (i.e., not inf or nan).
Returns:
An op, when executed updates the loss scale. If eager execution is
enabled, does not return anything.
"""
del finite_grads
return
class FixedLossScaleManager(LossScaleManager):
"""Loss scale manager with a fixed loss scale.
The loss scale is not updated for the lifetime of the class.
"""
def __init__(self, loss_scale):
"""Creates the fixed loss scale manager.
Args:
      loss_scale: A Python float. Its ideal value varies depending on the model
        being run. A loss_scale that is too small might affect model quality; one
        that is too big might cause inf or nan. There is no single right
        loss_scale to apply. There is no harm in choosing a relatively large
        value as long as no nan or inf is encountered in training.
Raises:
ValueError: If loss_scale is less than 1.
"""
if loss_scale < 1:
raise ValueError("loss scale must be at least 1.")
self._loss_scale = ops.convert_to_tensor(loss_scale, dtype=dtypes.float32)
def get_loss_scale(self):
return self._loss_scale
def update_loss_scale(self, finite_grads):
del finite_grads
return gen_control_flow_ops.no_op()
class ExponentialUpdateLossScaleManager(LossScaleManager):
"""Loss scale manager uses an exponential update strategy.
  In general, the strategy increases the loss scale by a greater-than-one factor
  after encountering a consecutive series of steps with finite gradients;
  similarly, it decreases the loss scale by a factor when the accumulated number
  of steps with non-finite (nan or inf) gradients reaches a threshold. An update
  is not applied if its result is less than 1 or overflows the float32 dynamic
  range. The counts of finite and non-finite steps are cleared every time the
  loss scale is changed. The condition for decreasing the loss scale is looser
  than the one for increasing it, since the former does not require the steps
  to be consecutive.
"""
def __init__(self,
init_loss_scale,
incr_every_n_steps,
decr_every_n_nan_or_inf=2,
incr_ratio=2,
decr_ratio=0.8):
"""Constructor of exponential-update loss scale manager.
Args:
init_loss_scale: A Python float. The loss scale to use at the beginning.
incr_every_n_steps: Increases loss scale every n consecutive steps with
finite gradients.
decr_every_n_nan_or_inf: Decreases loss scale every n accumulated steps
with nan or inf gradients.
incr_ratio: The multiplier to use when increasing the loss scale.
      decr_ratio: The less-than-one multiplier to use when decreasing the loss
        scale.
"""
self._incr_every_n_steps = incr_every_n_steps
self._decr_every_n_nan_or_inf = decr_every_n_nan_or_inf
self._incr_ratio = incr_ratio
self._decr_ratio = decr_ratio
self._loss_scale = variable_scope.variable(
name="loss_scale",
initial_value=ops.convert_to_tensor(init_loss_scale, dtypes.float32),
dtype=dtypes.float32,
trainable=False)
self._num_good_steps = variable_scope.variable(
name="good_steps", initial_value=0, dtype=dtypes.int32, trainable=False)
self._num_bad_steps = variable_scope.variable(
name="bad_steps", initial_value=0, dtype=dtypes.int32, trainable=False)
def _reset_stats(self):
return control_flow_ops.group(
state_ops.assign(self._num_good_steps, 0),
state_ops.assign(self._num_bad_steps, 0))
def get_loss_scale(self):
"""Returns the loss scale."""
return self._loss_scale
def update_loss_scale(self, finite_grads):
"""Updates loss scale based on if gradients are finite in current step."""
def update_if_finite_grads():
"""Branch function when grads are all finite."""
def incr_loss_scale():
new_loss_scale = control_flow_ops.cond(
gen_math_ops.is_finite(self._loss_scale * self._incr_ratio),
lambda: self._loss_scale * self._incr_ratio,
lambda: self._loss_scale)
update_op = state_ops.assign(self._loss_scale, new_loss_scale)
# When loss_scale is updated, both good and bad steps are reset.
return control_flow_ops.group(update_op, self._reset_stats())
return control_flow_ops.cond(
self._num_good_steps + 1 >= self._incr_every_n_steps,
incr_loss_scale,
lambda: state_ops.assign_add(self._num_good_steps, 1).op)
def update_if_not_finite_grads():
"""Branch function when any grad is not finite."""
def decr_loss_scale():
update_op = state_ops.assign(
self._loss_scale,
gen_math_ops.maximum(1., self._loss_scale * self._decr_ratio))
# When loss_scale is updated, both good and bad steps are reset.
return control_flow_ops.group(update_op, self._reset_stats())
def just_update_steps():
# When bad_steps is incremented, good_step is reset.
return control_flow_ops.group(
state_ops.assign_add(self._num_bad_steps, 1),
state_ops.assign(self._num_good_steps, 0))
return control_flow_ops.cond(
self._num_bad_steps + 1 >= self._decr_every_n_nan_or_inf,
decr_loss_scale, just_update_steps)
return control_flow_ops.cond(finite_grads, update_if_finite_grads,
update_if_not_finite_grads)
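# --- Editorial sketch, not part of the original file. A minimal, hedged usage
# example of the managers defined above, assuming TF 1.x graph mode. In user
# code these classes are exposed as tf.contrib.mixed_precision.* (see the
# package __init__); the function below is illustrative only and never called.
def _example_usage():
  manager = ExponentialUpdateLossScaleManager(
      init_loss_scale=2. ** 15,      # start high; overflow backs it off
      incr_every_n_steps=2000,       # double after 2000 consecutive finite steps
      decr_every_n_nan_or_inf=2,     # halve after 2 accumulated bad steps
      incr_ratio=2,
      decr_ratio=0.5)
  scale = manager.get_loss_scale()   # scalar float32 tensor that scales the loss
  # Each step, the wrapping LossScaleOptimizer reports whether all gradients
  # were finite; the manager adjusts the scale according to its counters.
  update_op = manager.update_loss_scale(ops.convert_to_tensor(True))
  return scale, update_op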
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/mixed_precision/python/loss_scale_manager.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LossScaleOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.mixed_precision.python import loss_scale_manager as lsm_lib
from tensorflow.contrib.mixed_precision.python import loss_scale_optimizer as lso
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent as gd
class LossScaleOptimizerTest(test.TestCase):
def _build_graph(self, lr, init_val, loss_scale_opt_fn=None):
x = variable_scope.get_variable(
"x", initializer=init_val, dtype=dtypes.float32)
c1 = constant_op.constant(1e4, dtype=dtypes.float16)
c2 = constant_op.constant(1e-4, dtype=dtypes.float16)
c3 = constant_op.constant(1e-4, dtype=dtypes.float16)
if context.executing_eagerly():
loss = lambda: math_ops.cast(x, dtypes.float16) * c1 * c2 * c3
else:
loss = math_ops.cast(x, dtypes.float16) * c1 * c2 * c3
opt = gd.GradientDescentOptimizer(lr)
if loss_scale_opt_fn:
opt = loss_scale_opt_fn(opt)
return x, loss, opt
@test_util.run_in_graph_and_eager_modes
def test_float16_underflow_without_loss_scale(self):
lr = 1
init_val = 1.
x, loss, opt = self._build_graph(lr, init_val)
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt.minimize(loss, var_list=[x]))
# Symbolic grad is c1 * c2 * c3 = 1e-4 and actual grad is 0, since in
# backprop, c2 * c3 underflows in fp16 range. So variable isn't updated.
expected_update = 0
symbolic_update = 1e-4 * lr
self.assertAllClose(
init_val - expected_update,
self.evaluate(x),
rtol=0,
atol=min(symbolic_update, 1e-6))
@test_util.run_in_graph_and_eager_modes
def test_float16_with_loss_scale(self):
lr = 1.
init_val = 1.
def loss_scale_opt_fn(opt):
return lso.LossScaleOptimizer(opt, lsm_lib.FixedLossScaleManager(1e4))
x, loss, opt = self._build_graph(lr, init_val, loss_scale_opt_fn)
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt.minimize(loss, var_list=[x]))
# Symbolic grad is c1 * c2 * c3 = 1e-4 and actual grad is the same, due to
# up-scaled loss before backprop starts.
expected_update = 1.e-4 * lr
self.assertAllClose(
init_val - expected_update,
self.evaluate(x),
rtol=0,
atol=min(expected_update, 1e-6))
@test_util.run_in_graph_and_eager_modes
def test_compute_gradients_with_loss_scale(self):
lr = 1
init_val = 1.
def loss_scale_opt_fn(opt):
return lso.LossScaleOptimizer(opt, lsm_lib.FixedLossScaleManager(1e4))
x, loss, opt = self._build_graph(lr, init_val, loss_scale_opt_fn)
grads_and_vars = opt.compute_gradients(loss, var_list=[x])
self.assertEqual(len(grads_and_vars), 1)
self.evaluate(variables.global_variables_initializer())
g_v = self.evaluate(grads_and_vars[0][0])
self.assertAllClose(g_v, 1e-4)
self.assertIs(grads_and_vars[0][1], x)
# Gradients aren't applied.
self.assertAllClose(init_val, self.evaluate(x), rtol=0, atol=1e-6)
@test_util.run_in_graph_and_eager_modes
def test_compute_gradients_without_loss_scale(self):
lr = 1
init_val = 1.
x, loss, opt = self._build_graph(lr, init_val)
grads_and_vars = opt.compute_gradients(loss, var_list=[x])
self.assertEqual(len(grads_and_vars), 1)
self.evaluate(variables.global_variables_initializer())
g_v = self.evaluate(grads_and_vars[0][0])
self.assertAllClose(g_v, 0)
@test_util.run_in_graph_and_eager_modes
def test_apply_gradients(self):
x = variable_scope.get_variable("x", initializer=1., dtype=dtypes.float32)
dataset = dataset_ops.Dataset.from_tensor_slices([np.nan, np.inf, 0.1])
itr = dataset_ops.make_one_shot_iterator(dataset)
lr = 1
opt = gd.GradientDescentOptimizer(lr)
lsm = lsm_lib.FixedLossScaleManager(1.e4)
opt = lso.LossScaleOptimizer(opt, lsm)
train_fn = lambda: opt.apply_gradients([(itr.get_next(), x)])
if not context.executing_eagerly():
train_op = train_fn()
expected_output = [1, 1, 1 - 0.1]
actual_output = []
self.evaluate(variables.global_variables_initializer())
for _ in range(3):
# nan or inf is not applied.
if context.executing_eagerly():
train_fn()
else:
self.evaluate(train_op)
actual_output.append(self.evaluate(x))
self.assertAllClose(expected_output, actual_output)
@test_util.run_in_graph_and_eager_modes
def test_apply_gradients_loss_scale_is_updated(self):
class SimpleLossScaleManager(lsm_lib.LossScaleManager):
"""A simple loss scale manager for easier testing.
      It increments the loss scale by 1 if grads are finite, and decreases the
      loss scale by 1 otherwise.
"""
def __init__(self, loss_scale):
self._loss_scale = variable_scope.variable(
name="loss_scale",
initial_value=loss_scale,
dtype=dtypes.float32,
trainable=False)
def get_loss_scale(self):
return self._loss_scale
def update_loss_scale(self, if_finite_grads):
return control_flow_ops.cond(
if_finite_grads, lambda: state_ops.assign_add(self._loss_scale, 1),
lambda: state_ops.assign_sub(self._loss_scale, 1))
x = variable_scope.get_variable("x", initializer=1., dtype=dtypes.float32)
dataset = dataset_ops.Dataset.from_tensor_slices([np.nan, np.inf, 0.1])
itr = dataset_ops.make_one_shot_iterator(dataset)
lr = 1
init_loss_scale = 8
opt = gd.GradientDescentOptimizer(lr)
lsm = SimpleLossScaleManager(init_loss_scale)
opt = lso.LossScaleOptimizer(opt, lsm)
train_fn = lambda: opt.apply_gradients([(itr.get_next(), x)])
if not context.executing_eagerly():
train_op = train_fn()
self.evaluate(variables.global_variables_initializer())
expected_loss_scale = [
init_loss_scale - 1, init_loss_scale - 2, init_loss_scale - 2 + 1
]
expected_output = [1, 1, 1 - 0.1]
actual_output = []
for i in range(3):
# nan or inf is not applied.
if context.executing_eagerly():
train_fn()
else:
self.evaluate(train_op)
actual_output.append(self.evaluate(x))
self.assertAllClose(expected_loss_scale[i],
self.evaluate(lsm._loss_scale))
self.assertAllClose(expected_output, actual_output)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/mixed_precision/python/loss_scale_optimizer_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loss scaling optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
class LossScaleOptimizer(optimizer.Optimizer):
# TODO(jamesqin): move mixed precision training explanation to __init__
# docstring.
"""An optimizer that applies loss scaling in backprop.
This class is useful for "mixed precision training" on GPUs (or other
potential accelerators), an approach to improve compute throughput without
compromising model quality.
The canonical way to perform mixed precision training is the following:
* Model variables are kept in high precision (e.g. float32).
  * Computations are done in lower precision (e.g. float16), which enjoys a
    performance speedup by virtue of hardware support. Variables are cast to
    the lower precision before they are used.
  * Final gradients are cast back to the high-precision dtype, then used to
    update the variables.
  The side effect of performing computation in lower precision is that it comes
  with a smaller numerical range. During backpropagation, small gradients might
  underflow in the reduced numerical range, causing the model to converge at a
  suboptimal level.
  To prevent underflow, this optimizer multiplies the loss by a factor before
  backprop starts. Consequently, the gradients are linearly scaled up by the
  same factor, keeping them out of the underflow zone. Afterwards, to preserve
  the correctness of backprop, the gradients are down-scaled by the same factor,
  cast to the (higher) variable precision, and then applied to the variables.
See [Nvidia's manual on mixed precision training](
https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html)
for more details.
  To use the loss scale optimizer, one only needs to choose a loss scale
  strategy and wrap a regular optimizer. See the examples below.
```
loss = loss_fn()
  opt = tf.train.AdamOptimizer(learning_rate=...)
# Choose a loss scale manager which decides how to pick the right loss scale
# throughout the training process.
loss_scale_manager = tf.contrib.mixed_precision.FixedLossScaleManager(5000)
# Wraps the original optimizer in a LossScaleOptimizer.
loss_scale_optimizer =
tf.contrib.mixed_precision.LossScaleOptimizer(opt, loss_scale_manager)
# Call minimize() on the loss scale optimizer.
train_op = loss_scale_optimizer.minimize(loss)
```
  If gradient clipping is applied, one can call
  `optimizer.compute_gradients()` and `optimizer.apply_gradients()`
  separately.
  Note that the following way of using LossScaleOptimizer is not intended.
  Always use `loss_scale_optimizer.compute_gradients()` to compute gradients
  instead of `tf.gradients()` when doing mixed precision training.
```
# The following is a wrong way to use LossScaleOptimizer along with
# tf.gradients().
# Always use loss_scale_optimizer.compute_gradients() to compute grads, or
# loss scale is not correctly applied.
grads = tf.gradients(loss, ...)
# Do some custom grad clipping.
grads = clip_grads(grads, ...)
  loss_scale_optimizer.apply_gradients(grads_and_vars)
```
"""
def __init__(self, opt, loss_scale_manager):
"""Construct a loss scaling optimizer.
Args:
opt: The actual optimizer that will be used to compute and apply the
gradients. Must be an implementation of the
`tf.compat.v1.train.Optimizer` interface.
loss_scale_manager: A LossScaleManager object.
"""
self._opt = opt
self._loss_scale_manager = loss_scale_manager
def compute_gradients(self,
loss,
var_list=None,
gate_gradients=optimizer.Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
grad_loss=None):
"""Compute gradients. See base class `tf.compat.v1.train.Optimizer`."""
loss_scale = self._loss_scale_manager.get_loss_scale()
if context.executing_eagerly():
def scaled_loss():
loss_val = loss()
return loss_val * math_ops.cast(loss_scale, loss_val.dtype.base_dtype)
else:
if callable(loss):
loss_val = loss()
else:
loss_val = loss
scaled_loss = loss_val * math_ops.cast(loss_scale,
loss_val.dtype.base_dtype)
grads_and_vars = self._opt.compute_gradients(
scaled_loss,
var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss)
return self._down_scale(grads_and_vars, loss_scale)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients. See base class `tf.compat.v1.train.Optimizer`."""
grads = [g for (g, _) in grads_and_vars]
is_finite_grad = []
for g in grads:
is_finite_grad.append(math_ops.reduce_all(gen_math_ops.is_finite(g)))
is_overall_finite = math_ops.reduce_all(is_finite_grad)
# Only update gradients when all grads are finite.
def true_apply_gradients_fn():
return self._opt.apply_gradients(grads_and_vars, global_step, name)
update_vars = control_flow_ops.cond(is_overall_finite,
true_apply_gradients_fn,
gen_control_flow_ops.no_op)
    # Update the loss scale based on whether all gradients were finite this step.
return control_flow_ops.group(
update_vars,
self._loss_scale_manager.update_loss_scale(is_overall_finite))
def _down_scale(self, grads_vars, loss_scale):
# Down scale grads by the loss_scale.
gv = []
inv_loss_scale = gen_math_ops.reciprocal(loss_scale)
for g, v in grads_vars:
if g is not None:
gv.append((g * math_ops.cast(inv_loss_scale, g.dtype.base_dtype), v))
else:
gv.append((g, v))
return gv
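# --- Editorial sketch, not part of the original file. A small, hedged numeric
# illustration of why the up-scale/down-scale performed by this optimizer
# preserves gradients that would otherwise underflow in float16; the numbers
# are illustrative only and the function is never called here.
def _underflow_demo():
  import numpy as np
  true_grad = np.float32(1e-8)                        # below float16's subnormal range
  naive_fp16 = np.float16(true_grad)                  # rounds to 0.0: the gradient is lost
  loss_scale = np.float32(1e4)
  scaled_fp16 = np.float16(true_grad * loss_scale)    # 1e-4 survives in float16
  recovered = np.float32(scaled_fp16) / loss_scale    # what _down_scale computes, ~1e-8
  return naive_fp16, recovered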
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/mixed_precision/python/loss_scale_optimizer.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""All-reduce implementations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,wildcard-import
from tensorflow.contrib.all_reduce.python.all_reduce import *
from tensorflow.python.util.all_util import remove_undocumented
# pylint: enable=unused-import,line-too-long,wildcard-import
_allowed_symbols = [
'build_ring_all_reduce',
'build_recursive_hd_all_reduce',
'build_shuffle_all_reduce',
'build_nccl_all_reduce',
'build_nccl_then_ring',
'build_nccl_then_recursive_hd',
'build_nccl_then_shuffle',
'build_shuffle_then_ring',
'build_shuffle_then_shuffle'
]
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/all_reduce/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to construct a TF subgraph implementing distributed All-Reduce."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.python.distribute.all_reduce import *
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/all_reduce/python/all_reduce.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for representing Bayesian computation.
## This package provides classes for Bayesian computation with TensorFlow.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,line-too-long
from tensorflow.contrib.solvers.python.ops import lanczos
from tensorflow.contrib.solvers.python.ops import least_squares
from tensorflow.contrib.solvers.python.ops import linear_equations
from tensorflow.contrib.solvers.python.ops import util
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/solvers/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ops module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/solvers/python/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import tf2
from tensorflow.contrib.solvers.python.ops import lanczos
from tensorflow.contrib.solvers.python.ops import util
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test as test_lib
def _add_test(test, test_name, fn):
test_name = "_".join(["test", test_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, fn)
class LanczosBidiagTest(test_lib.TestCase):
pass # Filled in below.
def _get_lanczos_tests(dtype_, use_static_shape_, shape_, orthogonalize_,
steps_):
def test_lanczos_bidiag(self):
np.random.seed(1)
a_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
tol = 1e-12 if dtype_ == np.float64 else 1e-5
with self.cached_session() as sess:
if use_static_shape_:
a = constant_op.constant(a_np)
else:
a = array_ops.placeholder(dtype_)
operator = util.create_operator(a)
lbd = lanczos.lanczos_bidiag(
operator, steps_, orthogonalize=orthogonalize_)
# The computed factorization should satisfy the equations
# A * V = U * B
# A' * U[:, :-1] = V * B[:-1, :]'
av = math_ops.matmul(a, lbd.v)
ub = lanczos.bidiag_matmul(lbd.u, lbd.alpha, lbd.beta, adjoint_b=False)
atu = math_ops.matmul(a, lbd.u[:, :-1], adjoint_a=True)
vbt = lanczos.bidiag_matmul(lbd.v, lbd.alpha, lbd.beta, adjoint_b=True)
if use_static_shape_:
av_val, ub_val, atu_val, vbt_val = sess.run([av, ub, atu, vbt])
else:
av_val, ub_val, atu_val, vbt_val = sess.run([av, ub, atu, vbt],
feed_dict={a: a_np})
self.assertAllClose(av_val, ub_val, atol=tol, rtol=tol)
self.assertAllClose(atu_val, vbt_val, atol=tol, rtol=tol)
return [test_lanczos_bidiag]
if __name__ == "__main__":
for dtype in np.float32, np.float64:
for shape in [[4, 4], [7, 4], [5, 8]]:
for orthogonalize in True, False:
for steps in range(1, min(shape) + 1):
# TF2 does not support placeholders so we skip it
for use_static_shape in set([True, tf2.enabled()]):
arg_string = "%s_%s_%s_%s_staticshape_%s" % (
dtype.__name__, "_".join(map(str, shape)), orthogonalize, steps,
use_static_shape)
for test_fn in _get_lanczos_tests(dtype, use_static_shape, shape,
orthogonalize, steps):
name = "_".join(["Lanczos", test_fn.__name__, arg_string])
_add_test(LanczosBidiagTest, name, test_fn)
test_lib.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/solvers/python/kernel_tests/lanczos_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import tf2
from tensorflow.contrib.solvers.python.ops import linear_equations
from tensorflow.contrib.solvers.python.ops import util
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test as test_lib
def _add_test(test, test_name, fn):
test_name = "_".join(["test", test_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, fn)
class LinearEquationsTest(test_lib.TestCase):
pass # Filled in below.
def _get_linear_equations_tests(dtype_, use_static_shape_, shape_):
def test_conjugate_gradient(self):
np.random.seed(1)
a_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
# Make a selfadjoint, positive definite.
a_np = np.dot(a_np.T, a_np)
# jacobi preconditioner
jacobi_np = np.zeros_like(a_np)
jacobi_np[range(a_np.shape[0]), range(a_np.shape[1])] = (
1.0 / a_np.diagonal())
rhs_np = np.random.uniform(
low=-1.0, high=1.0, size=shape_[0]).astype(dtype_)
x_np = np.zeros_like(rhs_np)
tol = 1e-6 if dtype_ == np.float64 else 1e-3
max_iter = 20
with self.cached_session() as sess:
if use_static_shape_:
a = constant_op.constant(a_np)
rhs = constant_op.constant(rhs_np)
x = constant_op.constant(x_np)
jacobi = constant_op.constant(jacobi_np)
else:
a = array_ops.placeholder(dtype_)
rhs = array_ops.placeholder(dtype_)
x = array_ops.placeholder(dtype_)
jacobi = array_ops.placeholder(dtype_)
operator = util.create_operator(a)
preconditioners = [
None, util.identity_operator(a),
util.create_operator(jacobi)
]
cg_results = []
for preconditioner in preconditioners:
cg_graph = linear_equations.conjugate_gradient(
operator,
rhs,
preconditioner=preconditioner,
x=x,
tol=tol,
max_iter=max_iter)
if use_static_shape_:
cg_val = sess.run(cg_graph)
else:
cg_val = sess.run(
cg_graph,
feed_dict={
a: a_np,
rhs: rhs_np,
x: x_np,
jacobi: jacobi_np
})
norm_r0 = np.linalg.norm(rhs_np)
norm_r = np.linalg.norm(cg_val.r)
self.assertLessEqual(norm_r, tol * norm_r0)
# Validate that we get an equally small residual norm with numpy
# using the computed solution.
r_np = rhs_np - np.dot(a_np, cg_val.x)
norm_r_np = np.linalg.norm(r_np)
self.assertLessEqual(norm_r_np, tol * norm_r0)
cg_results.append(cg_val)
# Validate that we get same results using identity_preconditioner
# and None
self.assertEqual(cg_results[0].i, cg_results[1].i)
self.assertAlmostEqual(cg_results[0].gamma, cg_results[1].gamma)
self.assertAllClose(cg_results[0].r, cg_results[1].r, rtol=tol)
self.assertAllClose(cg_results[0].x, cg_results[1].x, rtol=tol)
self.assertAllClose(cg_results[0].p, cg_results[1].p, rtol=tol)
return [test_conjugate_gradient]
if __name__ == "__main__":
for dtype in np.float32, np.float64:
for size in 1, 4, 10:
# TF2 does not support placeholders under eager so we skip it
for use_static_shape in set([True, tf2.enabled()]):
shape = [size, size]
arg_string = "%s_%s_staticshape_%s" % (dtype.__name__, size,
use_static_shape)
for test_fn in _get_linear_equations_tests(dtype, use_static_shape,
shape):
name = "_".join(["LinearEquations", test_fn.__name__, arg_string])
_add_test(LinearEquationsTest, name, test_fn)
test_lib.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/solvers/python/kernel_tests/linear_equations_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.solvers.python.ops import util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class UtilTest(test.TestCase):
def _testCreateOperator(self, use_static_shape_):
for dtype in np.float32, np.float64:
a_np = np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=dtype)
x_np = np.array([[2.], [-3.]], dtype=dtype)
y_np = np.array([[2], [-3.], [5.]], dtype=dtype)
with self.cached_session() as sess:
if use_static_shape_:
a = constant_op.constant(a_np, dtype=dtype)
x = constant_op.constant(x_np, dtype=dtype)
y = constant_op.constant(y_np, dtype=dtype)
else:
a = array_ops.placeholder(dtype)
x = array_ops.placeholder(dtype)
y = array_ops.placeholder(dtype)
op = util.create_operator(a)
ax = op.apply(x)
aty = op.apply_adjoint(y)
op_shape = ops.convert_to_tensor(op.shape)
if use_static_shape_:
op_shape_val, ax_val, aty_val = sess.run([op_shape, ax, aty])
else:
op_shape_val, ax_val, aty_val = sess.run(
[op_shape, ax, aty], feed_dict={a: a_np,
x: x_np,
y: y_np})
self.assertAllEqual(op_shape_val, [3, 2])
self.assertAllClose(ax_val, np.dot(a_np, x_np))
self.assertAllClose(aty_val, np.dot(a_np.T, y_np))
def testCreateOperator(self):
self._testCreateOperator(True)
def testCreateOperatorUnknownShape(self):
self._testCreateOperator(False)
def _testIdentityOperator(self, use_static_shape_):
for dtype in np.float32, np.float64:
a_np = np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=dtype)
x_np = np.array([[2.], [-3.]], dtype=dtype)
y_np = np.array([[2], [-3.], [5.]], dtype=dtype)
with self.cached_session() as sess:
if use_static_shape_:
a = constant_op.constant(a_np, dtype=dtype)
x = constant_op.constant(x_np, dtype=dtype)
y = constant_op.constant(y_np, dtype=dtype)
else:
a = array_ops.placeholder(dtype)
x = array_ops.placeholder(dtype)
y = array_ops.placeholder(dtype)
id_op = util.identity_operator(a)
ax = id_op.apply(x)
aty = id_op.apply_adjoint(y)
op_shape = ops.convert_to_tensor(id_op.shape)
if use_static_shape_:
op_shape_val, ax_val, aty_val = sess.run([op_shape, ax, aty])
else:
op_shape_val, ax_val, aty_val = sess.run(
[op_shape, ax, aty], feed_dict={
a: a_np,
x: x_np,
y: y_np
})
self.assertAllEqual(op_shape_val, [3, 2])
self.assertAllClose(ax_val, x_np)
self.assertAllClose(aty_val, y_np)
def testIdentityOperator(self):
self._testIdentityOperator(True)
def testIdentityOperatorUnknownShape(self):
self._testIdentityOperator(False)
def testL2Norm(self):
with self.cached_session():
x_np = np.array([[2], [-3.], [5.]])
x_norm_np = np.linalg.norm(x_np)
x_normalized_np = x_np / x_norm_np
x = constant_op.constant(x_np)
l2norm = util.l2norm(x)
l2norm_squared = util.l2norm_squared(x)
x_normalized, x_norm = util.l2normalize(x)
self.assertAllClose(l2norm.eval(), x_norm_np)
self.assertAllClose(l2norm_squared.eval(), np.square(x_norm_np))
self.assertAllClose(x_norm.eval(), x_norm_np)
self.assertAllClose(x_normalized.eval(), x_normalized_np)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/solvers/python/kernel_tests/util_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import tf2
from tensorflow.contrib.solvers.python.ops import least_squares
from tensorflow.contrib.solvers.python.ops import util
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test as test_lib
def _add_test(test, test_name, fn):
test_name = "_".join(["test", test_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, fn)
class LeastSquaresTest(test_lib.TestCase):
pass # Filled in below.
def _get_least_squares_tests(dtype_, use_static_shape_, shape_):
def test_cgls(self):
np.random.seed(1)
a_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
rhs_np = np.random.uniform(
low=-1.0, high=1.0, size=shape_[0]).astype(dtype_)
tol = 1e-12 if dtype_ == np.float64 else 1e-6
max_iter = 20
with self.cached_session() as sess:
if use_static_shape_:
a = constant_op.constant(a_np)
rhs = constant_op.constant(rhs_np)
else:
a = array_ops.placeholder(dtype_)
rhs = array_ops.placeholder(dtype_)
operator = util.create_operator(a)
cgls_graph = least_squares.cgls(operator, rhs, tol=tol, max_iter=max_iter)
if use_static_shape_:
cgls_val = sess.run(cgls_graph)
else:
cgls_val = sess.run(cgls_graph, feed_dict={a: a_np, rhs: rhs_np})
# Below we use s = A^* (rhs - A x), s0 = A^* rhs
norm_s0 = np.linalg.norm(np.dot(a_np.T, rhs_np))
norm_s = np.sqrt(cgls_val.gamma)
self.assertLessEqual(norm_s, tol * norm_s0)
# Validate that we get an equally small residual norm with numpy
# using the computed solution.
r_np = rhs_np - np.dot(a_np, cgls_val.x)
norm_s_np = np.linalg.norm(np.dot(a_np.T, r_np))
self.assertLessEqual(norm_s_np, tol * norm_s0)
return [test_cgls]
if __name__ == "__main__":
for dtype in np.float32, np.float64:
for shape in [[4, 4], [8, 5], [3, 7]]:
# TF2 does not support placeholders under eager so we skip it
for use_static_shape in set([True, tf2.enabled()]):
arg_string = "%s_%s_staticshape_%s" % (dtype.__name__,
"_".join(map(str, shape)),
use_static_shape)
for test_fn in _get_least_squares_tests(dtype, use_static_shape, shape):
name = "_".join(["LeastSquares", test_fn.__name__, arg_string])
_add_test(LeastSquaresTest, name, test_fn)
test_lib.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/solvers/python/kernel_tests/least_squares_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Solvers for linear equations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.solvers.python.ops import util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
def conjugate_gradient(operator,
rhs,
preconditioner=None,
x=None,
tol=1e-4,
max_iter=20,
name="conjugate_gradient"):
r"""Conjugate gradient solver.
Solves a linear system of equations `A*x = rhs` for selfadjoint, positive
definite matrix `A` and right-hand side vector `rhs`, using an iterative,
matrix-free algorithm where the action of the matrix A is represented by
`operator`. The iteration terminates when either the number of iterations
exceeds `max_iter` or when the residual norm has been reduced to `tol`
times its initial value, i.e. \\(||rhs - A x_k|| <= tol ||rhs||\\).
Args:
operator: An object representing a linear operator with attributes:
      - shape: Either a list of integers or a 1-D `Tensor` of type `int32` of
        length 2. `shape[0]` is the dimension of the co-domain (range) of the
        operator and `shape[1]` is the dimension of its domain. In other words,
        if `operator` represents an N x N matrix `A`, `shape` must contain
        `[N, N]`.
- dtype: The datatype of input to and output from `apply`.
- apply: Callable object taking a vector `x` as input and returning a
vector with the result of applying the operator to `x`, i.e. if
`operator` represents matrix `A`, `apply` should return `A * x`.
    rhs: A rank-1 `Tensor` of shape `[N]` containing the right-hand side vector.
preconditioner: An object representing a linear operator, see `operator`
for detail. The preconditioner should approximate the inverse of `A`.
An efficient preconditioner could dramatically improve the rate of
      convergence. If `preconditioner` represents matrix `M` (where `M`
      approximates `A^{-1}`), the algorithm uses `preconditioner.apply(x)` to
      estimate `A^{-1}x`. For this to be useful, the cost of applying `M`
      should be much lower than computing `A^{-1}` directly.
x: A rank-1 `Tensor` of shape `[N]` containing the initial guess for the
solution.
tol: A float scalar convergence tolerance.
max_iter: An integer giving the maximum number of iterations.
name: A name scope for the operation.
Returns:
output: A namedtuple representing the final state with fields:
- i: A scalar `int32` `Tensor`. Number of iterations executed.
- x: A rank-1 `Tensor` of shape `[N]` containing the computed solution.
      - r: A rank-1 `Tensor` of shape `[N]` containing the residual vector.
- p: A rank-1 `Tensor` of shape `[N]`. `A`-conjugate basis vector.
- gamma: \\(r \dot M \dot r\\), equivalent to \\(||r||_2^2\\) when
`preconditioner=None`.
"""
# ephemeral class holding CG state.
cg_state = collections.namedtuple("CGState", ["i", "x", "r", "p", "gamma"])
def stopping_criterion(i, state):
return math_ops.logical_and(i < max_iter, linalg_ops.norm(state.r) > tol)
def cg_step(i, state): # pylint: disable=missing-docstring
z = operator.apply(state.p)
alpha = state.gamma / util.dot(state.p, z)
x = state.x + alpha * state.p
r = state.r - alpha * z
if preconditioner is None:
gamma = util.dot(r, r)
beta = gamma / state.gamma
p = r + beta * state.p
else:
q = preconditioner.apply(r)
gamma = util.dot(r, q)
beta = gamma / state.gamma
p = q + beta * state.p
return i + 1, cg_state(i + 1, x, r, p, gamma)
with ops.name_scope(name):
n = operator.shape[1:]
rhs = array_ops.expand_dims(rhs, -1)
if x is None:
x = array_ops.expand_dims(
array_ops.zeros(n, dtype=rhs.dtype.base_dtype), -1)
r0 = rhs
else:
x = array_ops.expand_dims(x, -1)
r0 = rhs - operator.apply(x)
if preconditioner is None:
p0 = r0
else:
p0 = preconditioner.apply(r0)
gamma0 = util.dot(r0, p0)
tol *= linalg_ops.norm(r0)
i = constant_op.constant(0, dtype=dtypes.int32)
state = cg_state(i=i, x=x, r=r0, p=p0, gamma=gamma0)
_, state = control_flow_ops.while_loop(stopping_criterion, cg_step,
[i, state])
return cg_state(
state.i,
x=array_ops.squeeze(state.x),
r=array_ops.squeeze(state.r),
p=array_ops.squeeze(state.p),
gamma=state.gamma)
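# --- Editor's note: hedged usage sketch, not part of the upstream file. ---
# A minimal end-to-end example of the solver above: build a dense symmetric
# positive definite matrix, wrap it with util.create_operator, and check that
# the returned residual is small. Graph-mode snippet, matching the rest of
# this contrib package; the helper name below is illustrative only.
def _conjugate_gradient_example():  # pragma: no cover - illustrative only
  import numpy as np
  from tensorflow.python.client import session as session_lib
  a_np = np.random.uniform(-1.0, 1.0, size=(8, 8)).astype(np.float64)
  a_np = np.dot(a_np.T, a_np) + 8.0 * np.eye(8)  # self-adjoint, positive definite
  rhs_np = np.random.uniform(-1.0, 1.0, size=8).astype(np.float64)
  operator = util.create_operator(constant_op.constant(a_np))
  cg = conjugate_gradient(operator, constant_op.constant(rhs_np),
                          tol=1e-10, max_iter=50)
  with session_lib.Session() as sess:
    result = sess.run(cg)
  # Residual norm of the original system should satisfy the tolerance.
  return result.x, np.linalg.norm(rhs_np - np.dot(a_np, result.x))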
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/solvers/python/ops/linear_equations.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Solvers for linear least-squares."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.solvers.python.ops import util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
def cgls(operator, rhs, tol=1e-6, max_iter=20, name="cgls"):
r"""Conjugate gradient least squares solver.
Solves a linear least squares problem \\(||A x - rhs||_2\\) for a single
right-hand side, using an iterative, matrix-free algorithm where the action of
the matrix A is represented by `operator`. The CGLS algorithm implicitly
applies the symmetric conjugate gradient algorithm to the normal equations
\\(A^* A x = A^* rhs\\). The iteration terminates when either
the number of iterations exceeds `max_iter` or when the norm of the conjugate
  residual (the residual of the normal equations) has been reduced to `tol`
  times its initial value, i.e.
\\(||A^* (rhs - A x_k)|| <= tol ||A^* rhs||\\).
Args:
operator: An object representing a linear operator with attributes:
      - shape: Either a list of integers or a 1-D `Tensor` of type `int32` of
        length 2. `shape[0]` is the dimension of the co-domain (range) of the
        operator and `shape[1]` is the dimension of its domain. In other words,
        if `operator` represents an M x N matrix `A`, `shape` must contain
        `[M, N]`.
- dtype: The datatype of input to and output from `apply` and
`apply_adjoint`.
- apply: Callable object taking a vector `x` as input and returning a
vector with the result of applying the operator to `x`, i.e. if
`operator` represents matrix `A`, `apply` should return `A * x`.
- apply_adjoint: Callable object taking a vector `x` as input and
returning a vector with the result of applying the adjoint operator
to `x`, i.e. if `operator` represents matrix `A`, `apply_adjoint` should
return `conj(transpose(A)) * x`.
    rhs: A rank-1 `Tensor` of shape `[M]` containing the right-hand side vector.
tol: A float scalar convergence tolerance.
max_iter: An integer giving the maximum number of iterations.
name: A name scope for the operation.
Returns:
output: A namedtuple representing the final state with fields:
- i: A scalar `int32` `Tensor`. Number of iterations executed.
- x: A rank-1 `Tensor` of shape `[N]` containing the computed solution.
- r: A rank-1 `Tensor` of shape `[M]` containing the residual vector.
- p: A rank-1 `Tensor` of shape `[N]`. The next descent direction.
- gamma: \\(||A^* r||_2^2\\)
"""
# ephemeral class holding CGLS state.
cgls_state = collections.namedtuple("CGLSState",
["i", "x", "r", "p", "gamma"])
def stopping_criterion(i, state):
return math_ops.logical_and(i < max_iter, state.gamma > tol)
# TODO(rmlarsen): add preconditioning
def cgls_step(i, state):
q = operator.apply(state.p)
alpha = state.gamma / util.l2norm_squared(q)
x = state.x + alpha * state.p
r = state.r - alpha * q
s = operator.apply_adjoint(r)
gamma = util.l2norm_squared(s)
beta = gamma / state.gamma
p = s + beta * state.p
return i + 1, cgls_state(i + 1, x, r, p, gamma)
with ops.name_scope(name):
n = operator.shape[1:]
rhs = array_ops.expand_dims(rhs, -1)
s0 = operator.apply_adjoint(rhs)
gamma0 = util.l2norm_squared(s0)
tol = tol * tol * gamma0
x = array_ops.expand_dims(
array_ops.zeros(
n, dtype=rhs.dtype.base_dtype), -1)
i = constant_op.constant(0, dtype=dtypes.int32)
state = cgls_state(i=i, x=x, r=rhs, p=s0, gamma=gamma0)
_, state = control_flow_ops.while_loop(stopping_criterion, cgls_step,
[i, state])
return cgls_state(
state.i,
x=array_ops.squeeze(state.x),
r=array_ops.squeeze(state.r),
p=array_ops.squeeze(state.p),
gamma=state.gamma)
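# --- Editor's note: hedged usage sketch, not part of the upstream file. ---
# A minimal example of the CGLS solver above on an overdetermined system,
# mirroring the check in the kernel tests: the residual of the normal
# equations A^T (rhs - A x) should be driven below the tolerance. Graph-mode
# snippet; the helper name below is illustrative only.
def _cgls_example():  # pragma: no cover - illustrative only
  import numpy as np
  from tensorflow.python.client import session as session_lib
  a_np = np.random.uniform(-1.0, 1.0, size=(10, 4)).astype(np.float64)
  rhs_np = np.random.uniform(-1.0, 1.0, size=10).astype(np.float64)
  operator = util.create_operator(constant_op.constant(a_np))
  solve = cgls(operator, constant_op.constant(rhs_np), tol=1e-10, max_iter=50)
  with session_lib.Session() as sess:
    result = sess.run(solve)
  normal_residual = np.dot(a_np.T, rhs_np - np.dot(a_np, result.x))
  return result.x, np.linalg.norm(normal_residual)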
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/solvers/python/ops/least_squares.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for solvers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
def create_operator(matrix):
"""Creates a linear operator from a rank-2 tensor."""
linear_operator = collections.namedtuple(
"LinearOperator", ["shape", "dtype", "apply", "apply_adjoint"])
# TODO(rmlarsen): Handle SparseTensor.
shape = matrix.get_shape()
if shape.is_fully_defined():
shape = shape.as_list()
else:
shape = array_ops.shape(matrix)
return linear_operator(
shape=shape,
dtype=matrix.dtype,
apply=lambda v: math_ops.matmul(matrix, v, adjoint_a=False),
apply_adjoint=lambda v: math_ops.matmul(matrix, v, adjoint_a=True))
def identity_operator(matrix):
"""Creates a linear operator from a rank-2 identity tensor."""
linear_operator = collections.namedtuple(
"LinearOperator", ["shape", "dtype", "apply", "apply_adjoint"])
shape = matrix.get_shape()
if shape.is_fully_defined():
shape = shape.as_list()
else:
shape = array_ops.shape(matrix)
return linear_operator(
shape=shape,
dtype=matrix.dtype,
apply=lambda v: v,
apply_adjoint=lambda v: v)
# TODO(rmlarsen): Measure if we should just call matmul.
def dot(x, y):
return math_ops.reduce_sum(math_ops.conj(x) * y)
# TODO(rmlarsen): Implement matrix/vector norm op in C++ in core.
# We need 1-norm, inf-norm, and Frobenius norm.
def l2norm_squared(v):
return constant_op.constant(2, dtype=v.dtype.base_dtype) * nn_ops.l2_loss(v)
def l2norm(v):
return math_ops.sqrt(l2norm_squared(v))
def l2normalize(v):
norm = l2norm(v)
return v / norm, norm
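# --- Editor's note: hedged usage sketch, not part of the upstream file. ---
# Shows how the helpers above compose: wrap a dense matrix as a matrix-free
# operator, apply it and its adjoint, and l2-normalize a vector. Graph-mode
# snippet; the helper name below is illustrative only.
def _util_example():  # pragma: no cover - illustrative only
  import numpy as np
  from tensorflow.python.client import session as session_lib
  a = constant_op.constant(np.array([[1., 2.], [3., 4.], [5., 6.]]))
  x = constant_op.constant(np.array([[2.], [-3.]]))
  op = create_operator(a)
  ax = op.apply(x)            # equivalent to matmul(a, x)
  aty = op.apply_adjoint(ax)  # equivalent to matmul(a, ax, adjoint_a=True)
  x_normalized, x_norm = l2normalize(x)
  with session_lib.Session() as sess:
    return sess.run([ax, aty, x_normalized, x_norm])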
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/solvers/python/ops/util.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lanczos algorithms."""
# TODO(rmlarsen): Add implementation of symmetric Lanczos algorithm.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.solvers.python.ops import util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
def lanczos_bidiag(operator,
k,
orthogonalize=True,
starting_vector=None,
name="lanczos_bidiag"):
"""Computes a Lanczos bidiagonalization for a linear operator.
Computes matrices `U` of shape `[m, k+1]`, `V` of shape `[n, k]` and lower
bidiagonal matrix `B` of shape `[k+1, k]`, that satisfy the equations
`A * V = U * B` and `A' * U[:, :-1] = V * B[:-1, :]'`.
The columns of `U` are orthonormal and form a basis for the Krylov subspace
`K(A*A', U[:,0])`.
The columns of `V` are orthonormal and form a basis for the Krylov subspace
`K(A'*A, A' U[:,0])`.
Args:
operator: An object representing a linear operator with attributes:
      - shape: Either a list of integers or a 1-D `Tensor` of type `int32` of
        length 2. `shape[0]` is the dimension of the co-domain (range) of the
        operator and `shape[1]` is the dimension of its domain. In other words,
        if `operator` represents an M x N matrix `A`, `shape` must contain
        `[M, N]`.
- dtype: The datatype of input to and output from `apply` and
`apply_adjoint`.
- apply: Callable object taking a vector `x` as input and returning a
vector with the result of applying the operator to `x`, i.e. if
`operator` represents matrix `A`, `apply` should return `A * x`.
- apply_adjoint: Callable object taking a vector `x` as input and
returning a vector with the result of applying the adjoint operator
to `x`, i.e. if `operator` represents matrix `A`, `apply_adjoint` should
return `conj(transpose(A)) * x`.
k: An integer or a scalar Tensor of type `int32`. Determines the maximum
number of steps to run. If an invariant subspace is found, the algorithm
may terminate before `k` steps have been run.
orthogonalize: If `True`, perform full orthogonalization. If `False` no
orthogonalization is performed.
    starting_vector: If not None, must be a `Tensor` of shape `[m]`, i.e. of
      length `operator.shape[0]`.
name: A name scope for the operation.
Returns:
output: A namedtuple representing a Lanczos bidiagonalization of
`operator` with attributes:
u: A rank-2 `Tensor` of type `operator.dtype` and shape
`[operator.shape[0], k_actual+1]`, where `k_actual` is the number of
steps run.
v: A rank-2 `Tensor` of type `operator.dtype` and shape
`[operator.shape[1], k_actual]`, where `k_actual` is the number of steps
run.
alpha: A rank-1 `Tensor` of type `operator.dtype` and shape `[k]`.
beta: A rank-1 `Tensor` of type `operator.dtype` and shape `[k]`.
"""
def tarray(size, dtype, name):
return tensor_array_ops.TensorArray(
dtype=dtype, size=size, tensor_array_name=name, clear_after_read=False)
# Reads a row-vector at location i in tarray and returns it as a
# column-vector.
def read_colvec(tarray, i):
return array_ops.expand_dims(tarray.read(i), -1)
  # Writes a column-vector as a row-vector at location i in tarray.
def write_colvec(tarray, colvec, i):
return tarray.write(i, array_ops.squeeze(colvec))
# Ephemeral class holding Lanczos bidiagonalization state:
# u = left Lanczos vectors
# v = right Lanczos vectors
# alpha = diagonal of B_k.
# beta = subdiagonal of B_k.
# Notice that we store the left and right Lanczos vectors as the _rows_
# of u and v. This is done because tensors are stored row-major and
# TensorArray only supports packing along dimension 0.
lanzcos_bidiag_state = collections.namedtuple("LanczosBidiagState",
["u", "v", "alpha", "beta"])
def update_state(old, i, u, v, alpha, beta):
return lanzcos_bidiag_state(
write_colvec(old.u, u, i + 1),
write_colvec(old.v, v, i),
old.alpha.write(i, alpha), old.beta.write(i, beta))
def gram_schmidt_step(j, basis, v):
"""Makes v orthogonal to the j'th vector in basis."""
v_shape = v.get_shape()
basis_vec = read_colvec(basis, j)
v -= math_ops.matmul(basis_vec, v, adjoint_a=True) * basis_vec
v.set_shape(v_shape)
return j + 1, basis, v
def orthogonalize_once(i, basis, v):
j = constant_op.constant(0, dtype=dtypes.int32)
_, _, v = control_flow_ops.while_loop(lambda j, basis, v: j < i,
gram_schmidt_step, [j, basis, v])
return util.l2normalize(v)
# Iterated modified Gram-Schmidt orthogonalization adapted from PROPACK.
# TODO(rmlarsen): This is possibly the slowest implementation of
# iterated Gram-Schmidt orthogonalization since the abacus. Move to C++.
def orthogonalize_(i, basis, v):
v_norm = util.l2norm(v)
v_new, v_new_norm = orthogonalize_once(i, basis, v)
# If the norm decreases more than 1/sqrt(2), run a second
# round of MGS. See proof in:
# B. N. Parlett, ``The Symmetric Eigenvalue Problem'',
# Prentice-Hall, Englewood Cliffs, NJ, 1980. pp. 105-109
return control_flow_ops.cond(v_new_norm < 0.7071 * v_norm,
lambda: orthogonalize_once(i, basis, v),
lambda: (v_new, v_new_norm))
def stopping_criterion(i, _):
# TODO(rmlarsen): Stop if an invariant subspace is detected.
return i < k
def lanczos_bidiag_step(i, ls):
"""Extends the Lanczos bidiagonalization ls by one step."""
u = read_colvec(ls.u, i)
r = operator.apply_adjoint(u)
# The shape inference doesn't work across cond, save and reapply the shape.
r_shape = r.get_shape()
r = control_flow_ops.cond(
i > 0, lambda: r - ls.beta.read(i - 1) * read_colvec(ls.v, i - 1),
lambda: r)
r.set_shape(r_shape)
if orthogonalize:
v, alpha = orthogonalize_(i - 1, ls.v, r)
else:
v, alpha = util.l2normalize(r)
p = operator.apply(v) - alpha * u
if orthogonalize:
u, beta = orthogonalize_(i, ls.u, p)
else:
u, beta = util.l2normalize(p)
return i + 1, update_state(ls, i, u, v, alpha, beta)
with ops.name_scope(name):
dtype = operator.dtype
if starting_vector is None:
starting_vector = random_ops.random_uniform(
operator.shape[:1], -1, 1, dtype=dtype)
u0, _ = util.l2normalize(starting_vector)
ls = lanzcos_bidiag_state(
u=write_colvec(tarray(k + 1, dtype, "u"), u0, 0),
v=tarray(k, dtype, "v"),
alpha=tarray(k, dtype, "alpha"),
beta=tarray(k, dtype, "beta"))
i = constant_op.constant(0, dtype=dtypes.int32)
_, ls = control_flow_ops.while_loop(stopping_criterion, lanczos_bidiag_step,
[i, ls])
return lanzcos_bidiag_state(
array_ops.matrix_transpose(ls.u.stack()),
array_ops.matrix_transpose(ls.v.stack()),
ls.alpha.stack(), ls.beta.stack())
# TODO(rmlarsen): Implement C++ ops for handling bidiagonal matrices
# efficiently. Such a module should provide
# - multiplication,
# - linear system solution by back-substitution,
# - QR factorization,
# - SVD.
def bidiag_matmul(matrix, alpha, beta, adjoint_b=False, name="bidiag_matmul"):
"""Multiplies a matrix by a bidiagonal matrix.
  alpha and beta are length-k vectors representing the diagonal and first
  lower subdiagonal of the (k+1) x k matrix B.
If adjoint_b is False, computes A * B as follows:
A * B = A[:, :-1] * diag(alpha) + A[:, 1:] * diag(beta)
If adjoint_b is True, computes A * B[:-1, :]' as follows
A * B[:-1, :]' =
A * diag(alpha) + [zeros(m,1), A[:, :-1] * diag(beta[:-1])]
Args:
matrix: A rank-2 `Tensor` representing matrix A.
alpha: A rank-1 `Tensor` representing the diagonal of B.
beta: A rank-1 `Tensor` representing the lower subdiagonal diagonal of B.
adjoint_b: `bool` determining what to compute.
name: A name scope for the operation.
Returns:
    If `adjoint_b` is False, `A * B` is returned.
    If `adjoint_b` is True, `A * B[:-1, :]'` is returned.
"""
with ops.name_scope(name):
alpha = array_ops.expand_dims(alpha, 0)
if adjoint_b is False:
beta = array_ops.expand_dims(beta, 0)
return matrix[:, :-1] * alpha + matrix[:, 1:] * beta
else:
beta = array_ops.expand_dims(beta[:-1], 0)
shape = array_ops.shape(matrix)
zero_column = array_ops.expand_dims(
array_ops.zeros(
shape[:1], dtype=matrix.dtype), 1)
return matrix * alpha + array_ops.concat(
[zero_column, matrix[:, :-1] * beta], 1)
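# --- Editor's note: hedged usage sketch, not part of the upstream file. ---
# Runs a few Lanczos bidiagonalization steps on a random matrix and verifies
# the factorization identity A * V = U * B using bidiag_matmul, mirroring the
# check in the kernel tests. Graph-mode snippet; the helper name below is
# illustrative only.
def _lanczos_bidiag_example():  # pragma: no cover - illustrative only
  import numpy as np
  from tensorflow.python.client import session as session_lib
  a_np = np.random.uniform(-1.0, 1.0, size=(7, 4)).astype(np.float64)
  operator = util.create_operator(constant_op.constant(a_np))
  lbd = lanczos_bidiag(operator, k=4, orthogonalize=True)
  av = math_ops.matmul(constant_op.constant(a_np), lbd.v)
  ub = bidiag_matmul(lbd.u, lbd.alpha, lbd.beta, adjoint_b=False)
  with session_lib.Session() as sess:
    av_val, ub_val = sess.run([av, ub])
  # Maximum elementwise error of A * V - U * B; should be near machine epsilon.
  return np.max(np.abs(av_val - ub_val))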
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/solvers/python/ops/lanczos.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lookup table operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_lookup_ops
from tensorflow.python.ops import lookup_ops
# pylint: disable=unused-import
from tensorflow.python.ops.lookup_ops import FastHashSpec
from tensorflow.python.ops.lookup_ops import HasherSpec
from tensorflow.python.ops.lookup_ops import IdTableWithHashBuckets
from tensorflow.python.ops.lookup_ops import index_table_from_file
from tensorflow.python.ops.lookup_ops import index_to_string_table_from_file
from tensorflow.python.ops.lookup_ops import InitializableLookupTableBase
from tensorflow.python.ops.lookup_ops import InitializableLookupTableBaseV1
from tensorflow.python.ops.lookup_ops import KeyValueTensorInitializer
from tensorflow.python.ops.lookup_ops import LookupInterface
from tensorflow.python.ops.lookup_ops import StrongHashSpec
from tensorflow.python.ops.lookup_ops import TableInitializerBase
from tensorflow.python.ops.lookup_ops import TextFileIdTableInitializer
from tensorflow.python.ops.lookup_ops import TextFileIndex
from tensorflow.python.ops.lookup_ops import TextFileInitializer
from tensorflow.python.ops.lookup_ops import TextFileStringTableInitializer
# pylint: enable=unused-import
from tensorflow.python.util.deprecation import deprecated
@deprecated("2017-04-10", "Use `index_table_from_file`.")
def string_to_index_table_from_file(vocabulary_file=None,
num_oov_buckets=0,
vocab_size=None,
default_value=-1,
hasher_spec=FastHashSpec,
name=None):
return index_table_from_file(
vocabulary_file,
num_oov_buckets,
vocab_size,
default_value,
hasher_spec,
key_dtype=dtypes.string,
name=name)
@deprecated("2017-04-10", "Use `index_table_from_tensor`.")
def string_to_index_table_from_tensor(mapping,
num_oov_buckets=0,
default_value=-1,
hasher_spec=FastHashSpec,
name=None):
with ops.name_scope(name, "string_to_index") as scope:
mapping = ops.convert_to_tensor(mapping)
if dtypes.string != mapping.dtype.base_dtype:
raise ValueError("string_to_index_table_from_tensor requires string.")
return index_table_from_tensor(
mapping, num_oov_buckets, default_value, hasher_spec, name=scope)
def index_table_from_tensor(mapping,
num_oov_buckets=0,
default_value=-1,
hasher_spec=FastHashSpec,
dtype=dtypes.string,
name=None):
"""Returns a lookup table that converts a string tensor into int64 IDs.
This operation constructs a lookup table to convert tensor of strings into
int64 IDs. The mapping can be initialized from a string `mapping` 1-D tensor
where each element is a key and corresponding index within the tensor is the
value.
Any lookup of an out-of-vocabulary token will return a bucket ID based on its
hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the
`default_value`.
The bucket ID range is `[mapping size, mapping size + num_oov_buckets - 1]`.
The underlying table must be initialized by calling
`session.run(tf.compat.v1.tables_initializer)` or `session.run(table.init)`
once.
Elements in `mapping` cannot have duplicates, otherwise when executing the
table initializer op, it will throw a `FailedPreconditionError`.
Sample Usages:
```python
mapping_strings = tf.constant(["emerson", "lake", "palmer"])
table = tf.contrib.lookup.index_table_from_tensor(
mapping=mapping_strings, num_oov_buckets=1, default_value=-1)
features = tf.constant(["emerson", "lake", "and", "palmer"])
ids = table.lookup(features)
...
tf.compat.v1.tables_initializer().run()
ids.eval() ==> [0, 1, 3, 2]
```
Args:
mapping: A 1-D `Tensor` that specifies the mapping of keys to indices. The
type of this object must be castable to `dtype`.
num_oov_buckets: The number of out-of-vocabulary buckets.
default_value: The value to use for out-of-vocabulary feature values.
Defaults to -1.
hasher_spec: A `HasherSpec` to specify the hash function to use for
assignment of out-of-vocabulary buckets.
dtype: The type of values passed to `lookup`. Only string and integers are
supported.
name: A name for this op (optional).
Returns:
    The lookup table that maps an input `Tensor` to an `int64` index `Tensor`.
Raises:
ValueError: If `mapping` is invalid.
ValueError: If `num_oov_buckets` is negative.
"""
if mapping is None:
raise ValueError("mapping must be specified.")
return lookup_ops.index_table_from_tensor(
vocabulary_list=mapping,
num_oov_buckets=num_oov_buckets,
default_value=default_value,
hasher_spec=hasher_spec,
dtype=dtype,
name=name)
@deprecated("2017-01-07", "This op will be removed after the deprecation date. "
"Please switch to index_table_from_tensor and call the lookup "
"method of the returned table.")
def string_to_index(tensor, mapping, default_value=-1, name=None):
"""Maps `tensor` of strings into `int64` indices based on `mapping`.
This operation converts `tensor` of strings into `int64` indices.
The mapping is initialized from a string `mapping` tensor where each element
is a key and corresponding index within the tensor is the value.
  Any entry in the input which does not have a corresponding entry in `mapping`
  (an out-of-vocabulary entry) is assigned the `default_value`.
Elements in `mapping` cannot be duplicated, otherwise the initialization
will throw a FailedPreconditionError.
The underlying table must be initialized by calling
`session.run(tf.compat.v1.tables_initializer)` once.
For example:
```python
mapping_strings = tf.constant(["emerson", "lake", "palmer"])
feats = tf.constant(["emerson", "lake", "and", "palmer"])
ids = tf.contrib.lookup.string_to_index(
feats, mapping=mapping_strings, default_value=-1)
...
tf.compat.v1.tables_initializer().run()
ids.eval() ==> [0, 1, -1, 2]
```
Args:
tensor: A 1-D input `Tensor` with the strings to map to indices.
mapping: A 1-D string `Tensor` that specifies the mapping of strings to
indices.
default_value: The `int64` value to use for out-of-vocabulary strings.
Defaults to -1.
name: A name for this op (optional).
Returns:
The mapped indices. It has the same shape and tensor type (dense or sparse)
as `tensor`.
"""
table = index_table_from_tensor(
mapping=mapping, default_value=default_value, name=name)
return table.lookup(tensor)
def index_to_string_table_from_tensor(mapping, default_value="UNK", name=None):
"""Returns a lookup table that maps a `Tensor` of indices into strings.
This operation constructs a lookup table to map int64 indices into string
values. The mapping is initialized from a string `mapping` 1-D `Tensor` where
each element is a value and the corresponding index within the tensor is the
key.
  Any input which does not have a corresponding index in `mapping`
  (an out-of-vocabulary entry) is assigned the `default_value`.
The underlying table must be initialized by calling
`session.run(tf.compat.v1.tables_initializer)` or `session.run(table.init)`
once.
Elements in `mapping` cannot have duplicates, otherwise when executing the
table initializer op, it will throw a `FailedPreconditionError`.
Sample Usages:
```python
mapping_string = tf.constant(["emerson", "lake", "palmer"])
indices = tf.constant([1, 5], tf.int64)
table = tf.contrib.lookup.index_to_string_table_from_tensor(
mapping_string, default_value="UNKNOWN")
values = table.lookup(indices)
...
tf.compat.v1.tables_initializer().run()
values.eval() ==> ["lake", "UNKNOWN"]
```
Args:
mapping: A 1-D string `Tensor` that specifies the strings to map from
indices.
default_value: The value to use for out-of-vocabulary indices.
name: A name for this op (optional).
Returns:
    The lookup table that maps `int64` index `Tensor`s to their associated
    string values.
Raises:
ValueError: when `mapping` is not set.
"""
if mapping is None:
raise ValueError("mapping must be specified.")
return lookup_ops.index_to_string_table_from_tensor(
vocabulary_list=mapping, default_value=default_value, name=name)
@deprecated(
"2017-01-07", "This op will be removed after the deprecation date. "
"Please switch to index_to_string_table_from_tensor and call the lookup "
"method of the returned table.")
def index_to_string(tensor, mapping, default_value="UNK", name=None):
"""Maps `tensor` of indices into string values based on `mapping`.
This operation converts `int64` indices into string values. The mapping is
initialized from a string `mapping` tensor where each element is a value and
the corresponding index within the tensor is the key.
  Any input which does not have a corresponding index in `mapping`
  (an out-of-vocabulary entry) is assigned the `default_value`.
The underlying table must be initialized by calling
`session.run(tf.compat.v1.tables_initializer)` once.
For example:
```python
mapping_string = tf.constant(["emerson", "lake", "palmer"])
indices = tf.constant([1, 5], tf.int64)
values = tf.contrib.lookup.index_to_string(
indices, mapping=mapping_string, default_value="UNKNOWN")
...
tf.compat.v1.tables_initializer().run()
values.eval() ==> ["lake", "UNKNOWN"]
```
Args:
tensor: A `int64` `Tensor` with the indices to map to strings.
mapping: A 1-D string `Tensor` that specifies the strings to map from
indices.
default_value: The string value to use for out-of-vocabulary indices.
name: A name for this op (optional).
Returns:
    The string values associated with the indices. The resulting dense
    feature value tensor has the same shape as the corresponding `indices`.
"""
table = index_to_string_table_from_tensor(
mapping=mapping, default_value=default_value, name=name)
return table.lookup(tensor)
class HashTable(InitializableLookupTableBaseV1):
"""A generic hash table implementation.
Example usage:
```python
  table = tf.contrib.lookup.HashTable(
      tf.contrib.lookup.KeyValueTensorInitializer(keys, values), -1)
out = table.lookup(input_tensor)
table.init.run()
print(out.eval())
```
"""
def __init__(self, initializer, default_value, shared_name=None, name=None):
"""Creates a non-initialized `HashTable` object.
Creates a table, the type of its keys and values are specified by the
initializer.
Before using the table you will have to initialize it. After initialization
the table will be immutable.
Args:
initializer: The table initializer to use. See `HashTable` kernel for
supported key and value types.
default_value: The value to use if a key is missing in the table.
shared_name: If non-empty, this table will be shared under the given name
across multiple sessions.
name: A name for the operation (optional).
Returns:
A `HashTable` object.
"""
self._initializer = initializer
self._default_value = default_value
self._shared_name = shared_name
self._name = name or "hash_table"
self._table_name = None
super(HashTable, self).__init__(default_value, initializer)
self._value_shape = self._default_value.get_shape()
def _create_resource(self):
table_ref = gen_lookup_ops.hash_table_v2(
shared_name=self._shared_name,
key_dtype=self._initializer.key_dtype,
value_dtype=self._initializer.value_dtype,
name=self._name)
if context.executing_eagerly():
self._table_name = None
else:
self._table_name = table_ref.op.name.split("/")[-1]
return table_ref
@property
def init(self):
return self.initializer
@property
def name(self):
return self._table_name
def export(self, name=None):
"""Returns tensors of all keys and values in the table.
Args:
name: A name for the operation (optional).
Returns:
      A pair of tensors with the first tensor containing all keys and the
      second tensor containing all values in the table.
"""
with ops.name_scope(name, "%s_Export" % self.name,
[self.resource_handle]) as name:
exported_keys, exported_values = gen_lookup_ops.lookup_table_export_v2(
self.resource_handle, self._key_dtype, self._value_dtype, name=name)
exported_values.set_shape(exported_keys.get_shape().concatenate(
self._value_shape))
return exported_keys, exported_values
MutableHashTable = lookup_ops.MutableHashTable
MutableDenseHashTable = lookup_ops.DenseHashTable
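# --- Editor's note: hedged usage sketch, not part of the upstream file. ---
# Round-trips strings through the two table constructors defined above:
# strings -> int64 ids with index_table_from_tensor, and ids -> strings with
# index_to_string_table_from_tensor. Graph-mode snippet; the helper name below
# is illustrative only.
def _lookup_round_trip_example():  # pragma: no cover - illustrative only
  from tensorflow.python.client import session as session_lib
  vocab = ops.convert_to_tensor(["emerson", "lake", "palmer"])
  to_ids = index_table_from_tensor(vocab, num_oov_buckets=1)
  to_strings = index_to_string_table_from_tensor(vocab, default_value="UNK")
  ids = to_ids.lookup(ops.convert_to_tensor(["emerson", "and", "palmer"]))
  words = to_strings.lookup(ids)
  with session_lib.Session() as sess:
    sess.run(lookup_ops.tables_initializer())
    # "and" falls into the single OOV bucket, so it maps back to "UNK".
    return sess.run([ids, words])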
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/lookup/lookup_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for lookup operations.
@@string_to_index
@@string_to_index_table_from_file
@@string_to_index_table_from_tensor
@@index_table_from_file
@@index_table_from_tensor
@@index_to_string
@@index_to_string_table_from_file
@@index_to_string_table_from_tensor
@@LookupInterface
@@InitializableLookupTableBase
@@IdTableWithHashBuckets
@@HashTable
@@MutableHashTable
@@MutableDenseHashTable
@@TableInitializerBase
@@KeyValueTensorInitializer
@@TextFileIndex
@@TextFileInitializer
@@TextFileIdTableInitializer
@@TextFileStringTableInitializer
@@HasherSpec
@@StrongHashSpec
@@FastHashSpec
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.lookup.lookup_ops import *
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/lookup/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.lookup.lookup."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.contrib import lookup
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class HashTableOpTest(test.TestCase):
def testHashTable(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
exported_keys_tensor, exported_values_tensor = table.export()
self.assertItemsEqual([b"brain", b"salad", b"surgery"],
exported_keys_tensor.eval())
self.assertItemsEqual([0, 1, 2], exported_values_tensor.eval())
def testHashTableFindHighRank(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(
[["brain", "salad"], ["tank", "tarkus"]])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([[0, 1], [-1, -1]], result)
def testHashTableInitWithPythonArrays(self):
with self.cached_session():
default_val = -1
keys = ["brain", "salad", "surgery"]
values = [0, 1, 2]
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(
keys, values, value_dtype=dtypes.int64),
default_val)
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testHashTableInitWithNumPyArrays(self):
with self.cached_session():
default_val = -1
keys = np.array(["brain", "salad", "surgery"], dtype=np.str)
values = np.array([0, 1, 2], dtype=np.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testMultipleHashTables(self):
with self.cached_session() as sess:
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table1 = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table2 = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table3 = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
lookup_ops.tables_initializer().run()
self.assertAllEqual(3, table1.size().eval())
self.assertAllEqual(3, table2.size().eval())
self.assertAllEqual(3, table3.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output1 = table1.lookup(input_string)
output2 = table2.lookup(input_string)
output3 = table3.lookup(input_string)
out1, out2, out3 = sess.run([output1, output2, output3])
self.assertAllEqual([0, 1, -1], out1)
self.assertAllEqual([0, 1, -1], out2)
self.assertAllEqual([0, 1, -1], out3)
def testHashTableWithTensorDefault(self):
with self.cached_session():
default_val = constant_op.constant(-1, dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testHashTableWithSparseTensorInput(self):
with self.cached_session() as sess:
default_val = constant_op.constant(-1, dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
sp_indices = [[0, 0], [0, 1], [1, 0]]
sp_shape = [2, 2]
input_tensor = sparse_tensor.SparseTensor(
constant_op.constant(sp_indices, dtypes.int64),
constant_op.constant(["brain", "salad", "tank"]),
constant_op.constant(sp_shape, dtypes.int64))
output = table.lookup(input_tensor)
out_indices, out_values, out_shape = sess.run(output)
self.assertAllEqual([0, 1, -1], out_values)
self.assertAllEqual(sp_indices, out_indices)
self.assertAllEqual(sp_shape, out_shape)
def testSignatureMismatch(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
# Ref types do not produce a lookup signature mismatch.
input_string_ref = variables.Variable("brain")
variables.global_variables_initializer().run()
self.assertEqual(0, table.lookup(input_string_ref).eval())
input_string = constant_op.constant([1, 2, 3], dtypes.int64)
with self.assertRaises(TypeError):
table.lookup(input_string)
with self.assertRaises(TypeError):
lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), "UNK")
def testDTypes(self):
with self.cached_session():
default_val = -1
with self.assertRaises(TypeError):
lookup.HashTable(
lookup.KeyValueTensorInitializer(["a"], [1], [dtypes.string],
dtypes.int64), default_val)
def testNotInitialized(self):
with self.cached_session():
default_val = -1
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(
["a"], [1], value_dtype=dtypes.int64),
default_val)
input_string = constant_op.constant(["brain", "salad", "surgery"])
output = table.lookup(input_string)
with self.assertRaisesOpError("Table not initialized"):
output.eval()
def testInitializeTwice(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
# Re-initializing should not throw an error.
table.initializer.run()
def testInitializationWithInvalidDimensions(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2, 3, 4], dtypes.int64)
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
def testMultipleSessions(self):
# Start a server
server = server_lib.Server(
{
"local0": ["localhost:0"]
}, protocol="grpc", start=True)
# Create two sessions sharing the same state
session1 = session.Session(server.target)
session2 = session.Session(server.target)
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values),
default_val,
name="t1")
# Init the table in the first session.
with session1:
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
# Init the table in the second session and verify that we do not get a
# "Table already initialized" error.
with session2:
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
def testHashTableInt32String(self):
with self.cached_session():
default_val = "n/a"
keys = constant_op.constant([0, 1, 2], dtypes.int32)
values = constant_op.constant(["brain", "salad", "surgery"])
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
input_tensor = constant_op.constant([0, 1, -1])
output = table.lookup(input_tensor)
result = output.eval()
self.assertAllEqual([b"brain", b"salad", b"n/a"], result)
class IndexTableFromFile(test.TestCase):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
def test_string_index_table_from_file(self):
vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_string_index_table_from_file_tensor_filename(self):
vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
with self.cached_session():
vocabulary_file = constant_op.constant(vocabulary_file)
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
self.assertEqual(1,
len(ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))
def test_string_index_table_from_file_placeholder_filename(self):
vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
with self.cached_session():
vocabulary_placeholder = array_ops.placeholder(dtypes.string, [])
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_placeholder, num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
feed_dict = {vocabulary_placeholder.name: vocabulary_file}
lookup_ops.tables_initializer().run(feed_dict=feed_dict)
self.assertAllEqual((1, 2, 3), ids.eval())
self.assertEqual(0,
len(ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))
def test_int32_index_table_from_file(self):
vocabulary_file = self._createVocabFile(
"f2i_vocab2.txt", values=("42", "1", "-1000"))
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1,
key_dtype=dtypes.int32)
ids = table.lookup(
constant_op.constant((1, -1000, 11), dtype=dtypes.int32))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_int64_index_table_from_file(self):
vocabulary_file = self._createVocabFile(
"f2i_vocab3.txt", values=("42", "1", "-1000"))
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1,
key_dtype=dtypes.int64)
ids = table.lookup(
constant_op.constant((1, -1000, 11), dtype=dtypes.int64))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_index_table_from_file_with_default_value(self):
default_value = -42
vocabulary_file = self._createVocabFile("f2i_vocab4.txt")
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, default_value=default_value)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, default_value), ids.eval())
def test_index_table_from_file_with_oov_buckets(self):
vocabulary_file = self._createVocabFile("f2i_vocab5.txt")
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1000)
ids = table.lookup(
constant_op.constant(["salad", "surgery", "tarkus", "toccata"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual(
(
1, # From vocabulary file.
2, # From vocabulary file.
              867,  # 3 + fingerprint("tarkus") mod 1000.
              860),  # 3 + fingerprint("toccata") mod 1000.
ids.eval())
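  # A hedged illustration of the OOV assignment exercised above (added for
  # clarity, not part of the original test): in-vocabulary keys map to their
  # line numbers, while an out-of-vocabulary key maps to
  #   vocab_size + fingerprint64(key) % num_oov_buckets
  # so with vocab_size=3 and num_oov_buckets=1000, "tarkus" lands in bucket
  # 864 and receives id 3 + 864 = 867, matching the expected value above.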
def test_index_table_from_file_fails_with_empty_vocabulary_file_name(self):
self.assertRaises(
ValueError,
lookup.index_table_from_file,
vocabulary_file="")
def test_index_table_from_file_fails_with_empty_vocabulary(self):
self.assertRaises(
ValueError,
lookup.index_table_from_file,
vocabulary_file=None)
def test_index_table_from_file_with_vocab_size_too_small(self):
vocabulary_file = self._createVocabFile("f2i_vocab6.txt")
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=2)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, -1, -1), ids.eval())
self.assertEqual(2, table.size().eval())
def test_index_table_from_file_with_vocab_size_too_large(self):
vocabulary_file = self._createVocabFile("f2i_vocab7.txt")
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=4)
self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Invalid vocab_size", table.initializer.run)
def test_index_table_from_file_with_vocab_size(self):
vocabulary_file = self._createVocabFile("f2i_vocab8.txt")
self.assertRaises(
ValueError,
lookup.index_table_from_file,
vocabulary_file=vocabulary_file,
vocab_size=0)
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=3)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, -1), ids.eval())
self.assertEqual(3, table.size().eval())
def test_index_table_from_file_with_invalid_hashers(self):
vocabulary_file = self._createVocabFile("invalid_hasher.txt")
with self.cached_session():
with self.assertRaises(TypeError):
lookup.index_table_from_file(
vocabulary_file=vocabulary_file,
vocab_size=3,
num_oov_buckets=1,
hasher_spec=1)
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file,
vocab_size=3,
num_oov_buckets=1,
hasher_spec=lookup.HasherSpec("my-awesome-hash", None))
self.assertRaises(ValueError, table.lookup,
constant_op.constant(["salad", "surgery", "tarkus"]))
class KeyValueTensorInitializerTest(test.TestCase):
def test_string(self):
with ops.Graph().as_default(), self.cached_session():
init = lookup.KeyValueTensorInitializer(
("brain", "salad", "surgery"), (0, 1, 2), dtypes.string, dtypes.int64)
table = lookup.HashTable(init, default_value=-1)
table.initializer.run()
def test_int64(self):
with ops.Graph().as_default(), self.cached_session():
init = lookup.KeyValueTensorInitializer(
(42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64)
table = lookup.HashTable(init, default_value=-1)
table.initializer.run()
def test_int32(self):
with ops.Graph().as_default(), self.cached_session():
init = lookup.KeyValueTensorInitializer(
(42, 1, -1000), (0, 1, 2), dtypes.int32, dtypes.int64)
table = lookup.HashTable(init, default_value=-1)
with self.assertRaisesRegexp(
errors_impl.OpError, "No OpKernel was registered"):
table.initializer.run()
class IndexTableFromTensor(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_index_table_from_tensor_with_tensor_init(self):
table = lookup.index_table_from_tensor(
mapping=("brain", "salad", "surgery"), num_oov_buckets=1)
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(table.lookup(
constant_op.constant(("salad", "surgery", "tarkus"))))
else:
# Reinitializing a table in eager should work.
table = lookup.index_table_from_tensor(
mapping=("brain", "salad", "surgery"), num_oov_buckets=1)
self.evaluate(lookup_ops.tables_initializer())
ids = table.lookup(constant_op.constant(("salad", "surgery", "tarkus")))
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
def test_int32_index_table_from_tensor_with_tensor_init(self):
with self.cached_session():
table = lookup.index_table_from_tensor(
mapping=(42, 1, -1000), num_oov_buckets=1, dtype=dtypes.int32)
ids = table.lookup(
constant_op.constant((1, -1000, 11), dtype=dtypes.int32))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_int64_index_table_from_tensor_with_tensor_init(self):
with self.cached_session():
table = lookup.index_table_from_tensor(
mapping=(42, 1, -1000), num_oov_buckets=1, dtype=dtypes.int64)
ids = table.lookup(
constant_op.constant((1, -1000, 11), dtype=dtypes.int64))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_index_table_from_tensor_with_default_value(self):
default_value = -42
with self.cached_session():
table = lookup.index_table_from_tensor(
mapping=["brain", "salad", "surgery"], default_value=default_value)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, default_value), ids.eval())
def test_index_table_from_tensor_missing_mapping(self):
with self.cached_session():
with self.assertRaisesRegexp(ValueError, "mapping must be specified"):
lookup.index_table_from_tensor(mapping=None, num_oov_buckets=1)
def test_index_table_from_tensor_empty_mapping(self):
with self.cached_session():
table = lookup.index_table_from_tensor(
mapping=np.array([], dtype=np.str_), num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "brain"]))
self.assertRaises(errors_impl.OpError, ids.eval)
with self.assertRaisesRegexp(
errors_impl.OpError, "keys and values cannot be empty"):
lookup_ops.tables_initializer().run()
def test_index_table_from_tensor_with_invalid_hashers(self):
with self.cached_session():
with self.assertRaises(TypeError):
lookup.index_table_from_tensor(
mapping=["brain", "salad", "surgery"],
num_oov_buckets=1,
hasher_spec=1)
table = lookup.index_table_from_tensor(
mapping=["brain", "salad", "surgery"],
num_oov_buckets=1,
hasher_spec=lookup.HasherSpec("my-awesome-hash", None))
self.assertRaises(ValueError, table.lookup,
constant_op.constant(["salad", "surgery", "tarkus"]))
class StringToIndexTest(test.TestCase):
def test_string_to_index(self):
with self.cached_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
feats = constant_op.constant(["salad", "surgery", "tarkus"])
indices = lookup.string_to_index(feats, mapping=mapping_strings)
self.assertRaises(errors_impl.OpError, indices.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, -1), indices.eval())
def test_duplicate_entries(self):
with self.cached_session():
mapping_strings = constant_op.constant(["hello", "hello"])
feats = constant_op.constant(["hello", "hola"])
_ = lookup.string_to_index(feats, mapping=mapping_strings)
self.assertRaises(errors_impl.OpError,
lookup_ops.tables_initializer().run)
def test_string_to_index_with_default_value(self):
default_value = -42
with self.cached_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
feats = constant_op.constant(["salad", "surgery", "tarkus"])
indices = lookup.string_to_index(
feats, mapping=mapping_strings, default_value=default_value)
self.assertRaises(errors_impl.OpError, indices.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, default_value), indices.eval())
class IndexToStringTableFromFileTest(test.TestCase):
def _createVocabFile(self, basename):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(["brain", "salad", "surgery"]) + "\n")
return vocabulary_file
def test_index_to_string_table(self):
vocabulary_file = self._createVocabFile("i2f_vocab1.txt")
with self.cached_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file)
features = table.lookup(constant_op.constant([0, 1, 2, 3], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
features.eval())
def test_index_to_string_table_with_default_value(self):
default_value = b"NONE"
vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
with self.cached_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file, default_value=default_value)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", b"surgery", default_value),
features.eval())
def test_index_to_string_table_with_vocab_size_too_small(self):
default_value = b"NONE"
vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
with self.cached_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file,
vocab_size=2,
default_value=default_value)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", default_value, default_value),
features.eval())
def test_index_to_string_table_with_vocab_size_too_large(self):
vocabulary_file = self._createVocabFile("f2i_vocab6.txt")
with self.cached_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=4)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
init = lookup_ops.tables_initializer()
self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Invalid vocab_size", init.run)
def test_index_to_string_table_with_vocab_size(self):
vocabulary_file = self._createVocabFile("f2i_vocab7.txt")
with self.cached_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=3)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", b"surgery", b"UNK"), features.eval())
class IndexToStringTableFromTensorTest(test.TestCase):
def test_index_to_string_table_from_tensor(self):
with self.cached_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
table = lookup.index_to_string_table_from_tensor(
mapping=mapping_strings)
indices = constant_op.constant([0, 1, 2, 3], dtypes.int64)
features = table.lookup(indices)
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
features.eval())
def test_duplicate_entries(self):
with self.cached_session():
mapping_strings = constant_op.constant(["hello", "hello"])
table = lookup.index_to_string_table_from_tensor(
mapping=mapping_strings)
indices = constant_op.constant([0, 1, 4], dtypes.int64)
features = table.lookup(indices)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"hello", b"hello", b"UNK"), features.eval())
def test_index_to_string_with_default_value(self):
default_value = b"NONE"
with self.cached_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
table = lookup.index_to_string_table_from_tensor(
mapping=mapping_strings, default_value=default_value)
indices = constant_op.constant([1, 2, 4], dtypes.int64)
features = table.lookup(indices)
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", b"surgery", default_value),
features.eval())
class IndexToStringTest(test.TestCase):
def test_index_to_string(self):
with self.cached_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
indices = constant_op.constant([0, 1, 2, 3], dtypes.int64)
feats = lookup.index_to_string(indices, mapping=mapping_strings)
self.assertRaises(errors_impl.OpError, feats.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
feats.eval())
def test_duplicate_entries(self):
with self.cached_session():
mapping_strings = constant_op.constant(["hello", "hello"])
indices = constant_op.constant([0, 1, 4], dtypes.int64)
feats = lookup.index_to_string(indices, mapping=mapping_strings)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"hello", b"hello", b"UNK"), feats.eval())
def test_index_to_string_with_default_value(self):
default_value = b"NONE"
with self.cached_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
indices = constant_op.constant([1, 2, 4], dtypes.int64)
feats = lookup.index_to_string(
indices, mapping=mapping_strings, default_value=default_value)
self.assertRaises(errors_impl.OpError, feats.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", b"surgery", default_value), feats.eval())
class InitializeTableFromFileOpTest(test.TestCase):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
@test_util.run_in_graph_and_eager_modes
def testInitializeStringTable(self):
vocabulary_file = self._createVocabFile("one_column_1.txt")
default_value = -1
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value)
self.evaluate(table.initializer)
output = table.lookup(constant_op.constant(["brain", "salad", "tank"]))
result = self.evaluate(output)
self.assertAllEqual([0, 1, -1], result)
def testInitializeInt64Table(self):
vocabulary_file = self._createVocabFile(
"one_column_int64.txt", values=("42", "1", "-1000"))
with self.cached_session():
default_value = -1
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.int64,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value)
table.initializer.run()
output = table.lookup(
constant_op.constant((42, 1, 11), dtype=dtypes.int64))
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testInitializeIndexTable(self):
vocabulary_file = self._createVocabFile("one_column_2.txt")
with self.cached_session():
default_value = "UNK"
key_index = lookup.TextFileIndex.LINE_NUMBER
value_index = lookup.TextFileIndex.WHOLE_LINE
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.int64,
key_index, dtypes.string, value_index),
default_value)
table.initializer.run()
input_values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
output = table.lookup(input_values)
result = output.eval()
self.assertAllEqual([b"brain", b"salad", b"surgery", b"UNK"], result)
def testMultiColumn(self):
vocabulary_file = os.path.join(self.get_temp_dir(), "three_columns.txt")
with open(vocabulary_file, "w") as f:
f.write("\n".join(["0\tbrain\t1", "1\tsalad\t5", "2\tsurgery\t6"]) + "\n")
with self.cached_session():
default_value = -1
key_index = 1
value_index = 2
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
key_index, dtypes.int64, value_index),
default_value)
table.initializer.run()
input_string = constant_op.constant(["brain", "salad", "surgery"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([1, 5, 6], result)
def testInvalidDataTypeInMultiColumn(self):
vocabulary_file = os.path.join(self.get_temp_dir(), "three_columns.txt")
with open(vocabulary_file, "w") as f:
f.write("\n".join(["0\tbrain\t1", "1\tsalad\t5", "2\tsurgery\t6"]) + "\n")
with self.cached_session():
default_value = -1
key_index = 2
value_index = 1
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
key_index, dtypes.int64, value_index),
default_value)
with self.assertRaisesOpError("is not a valid"):
table.initializer.run()
def testInvalidDataType(self):
vocabulary_file = self._createVocabFile("one_column_3.txt")
with self.cached_session():
default_value = "UNK"
key_index = lookup.TextFileIndex.WHOLE_LINE
value_index = lookup.TextFileIndex.LINE_NUMBER
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.int64,
key_index, dtypes.string,
value_index), default_value)
def testInvalidIndex(self):
vocabulary_file = self._createVocabFile("one_column_4.txt")
with self.cached_session():
default_value = -1
key_index = 1 # second column of the line
value_index = lookup.TextFileIndex.LINE_NUMBER
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
key_index, dtypes.int64, value_index),
default_value)
with self.assertRaisesOpError("Invalid number of columns"):
table.initializer.run()
def testInitializeSameTableWithMultipleNodes(self):
vocabulary_file = self._createVocabFile("one_column_5.txt")
with self.cached_session() as sess:
shared_name = "shared-one-columm"
default_value = -1
table1 = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value,
shared_name=shared_name)
table2 = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value,
shared_name=shared_name)
table3 = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value,
shared_name=shared_name)
lookup_ops.tables_initializer().run()
input_string = constant_op.constant(["brain", "salad", "tank"])
output1 = table1.lookup(input_string)
output2 = table2.lookup(input_string)
output3 = table3.lookup(input_string)
out1, out2, out3 = sess.run([output1, output2, output3])
self.assertAllEqual([0, 1, -1], out1)
self.assertAllEqual([0, 1, -1], out2)
self.assertAllEqual([0, 1, -1], out3)
def testInitializeTableWithNoFilename(self):
with self.cached_session():
default_value = -1
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.TextFileInitializer(
"", dtypes.string, lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup.TextFileIndex.LINE_NUMBER),
default_value)
def testInitializeWithVocabSize(self):
with self.cached_session():
default_value = -1
vocab_size = 3
vocabulary_file1 = self._createVocabFile("one_column6.txt")
table1 = lookup.HashTable(
lookup.TextFileInitializer(
vocabulary_file1,
dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER,
vocab_size=vocab_size),
default_value)
# Initialize from file.
table1.initializer.run()
self.assertEquals(vocab_size, table1.size().eval())
vocabulary_file2 = self._createVocabFile("one_column7.txt")
vocab_size = 5
table2 = lookup.HashTable(
lookup.TextFileInitializer(
vocabulary_file2,
dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER,
vocab_size=vocab_size),
default_value)
with self.assertRaisesOpError("Invalid vocab_size"):
table2.initializer.run()
vocab_size = 1
vocabulary_file3 = self._createVocabFile("one_column3.txt")
table3 = lookup.HashTable(
lookup.TextFileInitializer(
vocabulary_file3,
dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER,
vocab_size=vocab_size),
default_value)
# Smaller vocab size reads only vocab_size records.
table3.initializer.run()
self.assertEquals(vocab_size, table3.size().eval())
def testFeedVocabularyName(self):
vocabulary_file = self._createVocabFile("feed_vocabulary.txt")
with self.cached_session():
default_value = -1
table = lookup.HashTable(
lookup.TextFileInitializer("old_file.txt", dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value)
      # Initializing with a non-existent file (old_file.txt) should fail.
# TODO(yleon): Update message, which might change per FileSystem.
with self.assertRaisesOpError("old_file.txt"):
table.initializer.run()
# Initialize the model feeding the vocabulary file.
filenames = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
table.initializer.run(feed_dict={filenames[0]: vocabulary_file})
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testInvalidFilenames(self):
vocabulary_file = self._createVocabFile("filename_shape.txt")
with self.cached_session():
default_value = -1
# Invalid data type
other_type = constant_op.constant(1)
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.TextFileInitializer(
other_type, dtypes.string, lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup.TextFileIndex.LINE_NUMBER),
default_value)
# Non-scalar filename
filenames = constant_op.constant([vocabulary_file, vocabulary_file])
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.TextFileInitializer(
filenames, dtypes.string, lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup.TextFileIndex.LINE_NUMBER),
default_value)
def testIdToStringTable(self):
vocab_file = self._createVocabFile("feat_to_id_1.txt")
with self.cached_session():
default_value = "UNK"
vocab_size = 3
table = lookup.HashTable(
lookup.TextFileStringTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value)
table.initializer.run()
input_values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
out = table.lookup(input_values)
self.assertAllEqual([b"brain", b"salad", b"surgery", b"UNK"], out.eval())
self.assertEquals(vocab_size, table.size().eval())
def testStringToIdTable(self):
vocab_file = self._createVocabFile("feat_to_id_2.txt")
with self.cached_session():
default_value = -1
vocab_size = 3
table = lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value)
table.initializer.run()
input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
out = table.lookup(input_string)
self.assertAllEqual([0, 1, 2, -1], out.eval())
self.assertEquals(vocab_size, table.size().eval())
def testInt64ToIdTable(self):
vocab_file = self._createVocabFile(
"feat_to_id_3.txt", values=("42", "1", "-1000"))
with self.cached_session():
default_value = -1
vocab_size = 3
table = lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
default_value)
table.initializer.run()
out = table.lookup(
constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64))
self.assertAllEqual((0, 1, 2, -1), out.eval())
self.assertEquals(vocab_size, table.size().eval())
class IdTableWithHashBucketsTest(test.TestCase):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
def testStringIdTableWithHashBuckets(self):
vocab_file = self._createVocabFile("feat_to_id_1.txt")
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value),
oov_buckets)
table.initializer.run()
input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
out = table.lookup(input_string)
self.assertAllEqual([0, 1, 2, 3], out.eval())
self.assertEquals(vocab_size + oov_buckets, table.size().eval())
def testInt32IdTableWithHashBuckets(self):
vocab_file = self._createVocabFile("feat_to_id_2.txt", ("42", "1", "-1000"))
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
default_value),
oov_buckets,
key_dtype=dtypes.int32)
table.initializer.run()
values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int32)
out = table.lookup(values)
self.assertAllEqual([0, 1, 2, 3], out.eval())
self.assertEquals(vocab_size + oov_buckets, table.size().eval())
def testInt64IdTableWithHashBuckets(self):
vocab_file = self._createVocabFile("feat_to_id_3.txt", ("42", "1", "-1000"))
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
default_value),
oov_buckets)
table.initializer.run()
values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64)
out = table.lookup(values)
self.assertAllEqual([0, 1, 2, 3], out.eval())
self.assertEquals(vocab_size + oov_buckets, table.size().eval())
def testStringIdTableWithOnlyHashBucket(self):
with self.cached_session():
oov_buckets = 5
      # Set up a table that only uses hash buckets; for each input value it
      # returns an id calculated as fingerprint("input") mod oov_buckets.
table = lookup.IdTableWithHashBuckets(None, oov_buckets)
table.initializer.run()
values = constant_op.constant(("brain", "salad", "surgery"))
out = table.lookup(values)
self.assertAllEqual(
[
3, # fingerprint("brain") mod 5.
1, # fingerprint("salad") mod 5.
4 # fingerprint("surgery") mod 5
],
out.eval())
self.assertEquals(oov_buckets, table.size().eval())
def testInt32IdTableWithOnlyHashBucket(self):
with self.cached_session():
oov_buckets = 5
      # Set up a table that only uses hash buckets; for each input value it
      # returns an id calculated as fingerprint("input") mod oov_buckets.
table = lookup.IdTableWithHashBuckets(
None, oov_buckets, key_dtype=dtypes.int32)
table.initializer.run()
input_string = constant_op.constant([42, 1, -1000], dtype=dtypes.int32)
out = table.lookup(input_string)
self.assertAllEqual(
[
1, # fingerprint("42") mod 5.
4, # fingerprint("1") mod 5.
2 # fingerprint("-1000") mod 5
],
out.eval())
self.assertEquals(oov_buckets, table.size().eval())
def testFloat64IdTableWithOnlyHashBucket(self):
with self.cached_session():
with self.assertRaisesRegexp(TypeError, "Invalid key_dtype"):
lookup.IdTableWithHashBuckets(
None, num_oov_buckets=5, key_dtype=dtypes.float64)
def testBoolIdTableWithOnlyHashBucket(self):
with self.cached_session():
with self.assertRaisesRegexp(TypeError, "Invalid key_dtype"):
lookup.IdTableWithHashBuckets(
None, num_oov_buckets=5, key_dtype=dtypes.bool)
def testIdTableWithHashBucketsWithMultipleInitializers(self):
vocab_file = self._createVocabFile("feat_to_id_4.txt")
with self.cached_session() as sess:
default_value = -1
vocab_size = 3
oov_buckets = 3
vocab_table = lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value)
table1 = lookup.IdTableWithHashBuckets(
vocab_table,
oov_buckets,
hasher_spec=lookup.FastHashSpec,
name="table1")
table2 = lookup.IdTableWithHashBuckets(
vocab_table,
oov_buckets,
hasher_spec=lookup.StrongHashSpec((1, 2)),
name="table2")
lookup_ops.tables_initializer().run()
input_string = constant_op.constant(
["fruit", "brain", "salad", "surgery", "UNK"])
out1 = table1.lookup(input_string)
out2 = table2.lookup(input_string)
out1, out2 = sess.run([out1, out2])
self.assertAllEqual([5, 0, 1, 2, 5], out1)
self.assertAllEqual([5, 0, 1, 2, 3], out2)
self.assertEquals(vocab_size + oov_buckets, table1.size().eval())
self.assertEquals(vocab_size + oov_buckets, table2.size().eval())
test_util.assert_ops_in_graph({
"table1_Lookup/hash_bucket": "StringToHashBucketFast",
"table2_Lookup/hash_bucket": "StringToHashBucketStrong",
}, sess.graph)
def testIdTableWithHashBucketsInitializationAcrossSessions(self):
vocab_file = self._createVocabFile("feat_to_id_5.txt")
shared_name = "across-sessions"
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table1 = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value,
shared_name=shared_name),
oov_buckets)
table1.initializer.run()
input_string_1 = constant_op.constant(
["brain", "salad", "surgery", "UNK"])
out1 = table1.lookup(input_string_1)
self.assertAllEqual([0, 1, 2, 3], out1.eval())
self.assertEquals(vocab_size + oov_buckets, table1.size().eval())
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
# Underlying lookup table already initialized in previous session.
# No need to call table2.initializer.run()
table2 = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value,
shared_name=shared_name),
oov_buckets)
input_string_2 = constant_op.constant(["fruit", "salad", "UNK"])
out2 = table2.lookup(input_string_2)
self.assertAllEqual([3, 1, 3], out2.eval())
self.assertEquals(vocab_size + oov_buckets, table2.size().eval())
def testIdTableWithHashBucketsWithMultipleInitializersDifferentDefault(self):
vocab_file = self._createVocabFile("feat_to_id_6.txt")
with self.cached_session() as sess:
default_value1 = -1
vocab_size = 3
oov_buckets = 0
table1 = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value1),
oov_buckets)
default_value2 = -2
table2 = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value2),
oov_buckets)
lookup_ops.tables_initializer().run()
input_string_1 = constant_op.constant(
["brain", "salad", "surgery", "UNK"])
input_string_2 = constant_op.constant(["fruit", "salad", "UNK"])
out1 = table1.lookup(input_string_1)
out2 = table2.lookup(input_string_2)
out1, out2 = sess.run([out1, out2])
self.assertAllEqual([0, 1, 2, -1], out1)
self.assertAllEqual([-2, 1, -2], out2)
self.assertEquals(vocab_size + oov_buckets, table1.size().eval())
self.assertEquals(vocab_size + oov_buckets, table2.size().eval())
def testSparseTensor(self):
vocab_file = self._createVocabFile("feat_to_id_7.txt")
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
with self.cached_session() as sess:
sp_features = sparse_tensor.SparseTensor(
constant_op.constant(input_indices, dtypes.int64),
constant_op.constant(["brain", "salad", "brain", "surgery", "tarkus"],
dtypes.string),
constant_op.constant(input_shape, dtypes.int64))
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=3),
-1),
1)
table.initializer.run()
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
def testInt32SparseTensor(self):
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
with self.cached_session() as sess:
sp_features = sparse_tensor.SparseTensor(
constant_op.constant(input_indices, dtypes.int64),
constant_op.constant([42, 1, 42, -1000, 11], dtypes.int32),
constant_op.constant(input_shape, dtypes.int64))
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.KeyValueTensorInitializer(
(42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64),
-1),
1,
key_dtype=dtypes.int32)
table.initializer.run()
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
def testInt64SparseTensor(self):
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
with self.cached_session() as sess:
sp_features = sparse_tensor.SparseTensor(
constant_op.constant(input_indices, dtypes.int64),
constant_op.constant([42, 1, 42, -1000, 11], dtypes.int64),
constant_op.constant(input_shape, dtypes.int64))
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.KeyValueTensorInitializer(
(42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64),
-1),
1,
key_dtype=dtypes.int64)
table.initializer.run()
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
def testIdTableWithHashBucketsWithInvalidHashers(self):
vocab_file = self._createVocabFile("feat_to_id_4.txt")
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
lookup_table = lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value)
with self.assertRaises(TypeError):
lookup.IdTableWithHashBuckets(
lookup_table, oov_buckets, hasher_spec=1)
table = lookup.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=lookup.HasherSpec("my-awesome-hash", None))
input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
with self.assertRaises(ValueError):
table.lookup(input_string)
with self.assertRaises(ValueError):
table = lookup.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=lookup.StrongHashSpec([]))
with self.assertRaises(ValueError):
table = lookup.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=lookup.StrongHashSpec([1, 2, 3]))
with self.assertRaises(TypeError):
table = lookup.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=lookup.StrongHashSpec([None, 2]))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/lookup/lookup_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops and modules related to resampler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.resampler.python.ops.resampler_ops import *
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__, ["resampler"])
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/resampler/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for resampler ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.contrib import resampler
from tensorflow.contrib.resampler.ops import gen_resampler_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ResamplerOpsTest(xla_test.XLATestCase):
def _assertForwardOpMatchesExpected(self, image_np, warp_np, expected):
with self.session() as sess, self.test_scope():
input_image = array_ops.placeholder(image_np.dtype)
warp = array_ops.placeholder(warp_np.dtype)
resampled = resampler.resampler(input_image, warp, name='resampler')
out = sess.run(resampled, {input_image: image_np, warp: warp_np})
self.assertAllCloseAccordingToType(
expected, out, rtol=5e-3, half_rtol=1e-2, bfloat16_rtol=3e-2)
def _assertBackwardOpMatchesExpected(self, input_np, warp_np, grad_output_np,
expected_grad_data, expected_grad_warp):
with self.session() as sess, self.test_scope():
input_image = array_ops.placeholder(input_np.dtype)
warp = array_ops.placeholder(warp_np.dtype)
grad_output = array_ops.placeholder(grad_output_np.dtype)
grad_data, grad_warp = gen_resampler_ops.resampler_grad(
input_image, warp, grad_output)
grad_data_tf, grad_warp_tf = sess.run([grad_data, grad_warp], {
input_image: input_np,
warp: warp_np,
grad_output: grad_output_np
})
self.assertAllCloseAccordingToType(
expected_grad_warp, grad_warp_tf, half_rtol=1e-2, bfloat16_rtol=3e-2)
self.assertAllCloseAccordingToType(
expected_grad_data, grad_data_tf, half_rtol=1e-2, bfloat16_rtol=3e-2)
def testSimple(self):
for dtype in self.float_types:
input_shape = [1, 2, 2, 1]
input_data = [0, 5, 13, 54]
input_np = np.array(input_data, dtype=dtype).reshape(input_shape)
warp_shape = [1, 2]
warp_data = [0.7, 0.6]
warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
expected = [[26.42]]
self._assertForwardOpMatchesExpected(input_np, warp_np, expected)
grad_output = np.ones([1, 1], dtype=dtype)
expected_grad_data = [[[[0.12], [0.27999997]], [[0.18000001],
[0.42000002]]]]
expected_grad_warp = [[26.60000038, 38.20000076]]
self._assertBackwardOpMatchesExpected(input_np, warp_np, grad_output,
expected_grad_data,
expected_grad_warp)
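  # A hedged note on where the expected forward value above comes from (a
  # derivation added for illustration, not present in the original test):
  # with warp (x, y) = (0.7, 0.6) on the 2x2 image [[0, 5], [13, 54]],
  # bilinear interpolation gives
  #   0.3*0.4*0 + 0.7*0.4*5 + 0.3*0.6*13 + 0.7*0.6*54
  #   = 0 + 1.4 + 2.34 + 22.68 = 26.42
  # which matches `expected`, and the four weights (0.12, 0.28, 0.18, 0.42)
  # reappear in `expected_grad_data`.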
def testMultiChannel(self):
for dtype in self.float_types:
input_shape = [1, 2, 2, 3]
input_rgb_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
input_np = np.array(input_rgb_data, dtype=dtype).reshape(input_shape)
warp_shape = [1, 2]
warp_data = [0.7, 0.6]
warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
expected = [[59.58000183, 146.94000244, 107.37999725]]
self._assertForwardOpMatchesExpected(input_np, warp_np, expected)
grad_output = np.ones([1, 3], dtype=dtype)
expected_grad_data = [[[[0.12, 0.12, 0.12],
[0.27999997, 0.27999997, 0.27999997]],
[[0.18000001, 0.18000001, 0.18000001],
[0.42000002, 0.42000002, 0.42000002]]]]
expected_grad_warp = [[199, 30]]
self._assertBackwardOpMatchesExpected(input_np, warp_np, grad_output,
expected_grad_data,
expected_grad_warp)
def testBatch2Height3byWidth3RGB(self):
for dtype in self.float_types:
input_shape = [2, 3, 3, 3]
input_rgb_data = [
0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1, 30, 105, 2, 40, 115,
3, 50, 125, 4, 60, 135, 5, 70, 145, 6, 0, 5, 13, 54, 135, 226, 37, 8,
234, 90, 255, 1, 30, 105, 2, 40, 115, 3, 50, 125, 4, 60, 135, 5, 70,
145, 6
]
input_np = np.array(input_rgb_data, dtype=dtype).reshape(input_shape)
# 2 batches and 2 samples for each batch.
warp_shape = [2, 2, 2]
warp_data = [0.7, 0.6, 1, 0.7, 0.9, 1.2, 1.3, 1.6]
warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
expected_forward = [[[43.92, 128.4, 65.86], [37.2, 114., 69.2]],
[[40.6, 122.8, 2.5], [51., 126, 4.1]]]
self._assertForwardOpMatchesExpected(input_np, warp_np, expected_forward)
expected_grad_data = [[[[0.12, 0.12, 0.12],
[0.57999998, 0.57999998, 0.57999998],
[0., 0., 0.]],
[[0.18000001, 0.18000001, 0.18000001],
[1.12, 1.12, 1.12], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0.08000001, 0.08000001, 0.08000001],
[0.99999988, 0.99999988, 0.99999988],
[0.11999997, 0.11999997, 0.11999997]],
[[0.02000001, 0.02000001, 0.02000001],
[0.60000008, 0.60000008, 0.60000008],
[0.17999998, 0.17999998, 0.17999998]]]]
expected_grad_warp = [[[33.39999008, -96.20000458], [-26.10000229,
-278.]],
[[-162.99998474, 39.99999619], [21., 63.]]]
grad_output = np.ones([2, 2, 3], dtype=dtype)
self._assertBackwardOpMatchesExpected(input_np, warp_np, grad_output,
expected_grad_data,
expected_grad_warp)
def testOutOfBoundWarps(self):
# (x, y) are both less than 0.
for dtype in self.float_types:
input_shape = [1, 2, 2, 1]
input_data = [10, 5, 13, 54]
input_np = np.array(input_data, dtype=dtype).reshape(input_shape)
warp_shape = [1, 2, 2]
warp_data = [-1, -1, 0.7, 0.6]
warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
expected = [[[0.0], [27.62]]]
self._assertForwardOpMatchesExpected(input_np, warp_np, expected)
expected_grad_data = [[[[0.12], [0.27999997]], [[0.18000001],
[0.42000002]]]]
expected_grad_warp = [[[0., 0.], [22.60000038, 35.20000076]]]
grad_output = np.ones([1, 2, 1], dtype=dtype)
self._assertBackwardOpMatchesExpected(input_np, warp_np, grad_output,
expected_grad_data,
expected_grad_warp)
# One of (x, y) is less than 0.
for dtype in self.float_types:
input_shape = [1, 2, 2, 1]
input_data = [10, 5, 13, 54]
input_np = np.array(input_data, dtype=dtype).reshape(input_shape)
warp_shape = [1, 2, 2]
# -1 is out of bound for grad_warp.
warp_data = [-1, 0.1, 0.7, 0.6]
warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
expected = [[[0.0], [27.62]]]
self._assertForwardOpMatchesExpected(input_np, warp_np, expected)
expected_grad_data = [[[[0.12], [0.27999997]], [[0.18000001],
[0.42000002]]]]
expected_grad_warp = [[[0., 0.], [22.60000038, 35.20000076]]]
grad_output = np.ones([1, 2, 1], dtype=dtype)
self._assertBackwardOpMatchesExpected(input_np, warp_np, grad_output,
expected_grad_data,
expected_grad_warp)
# Both of (x, y) are greater than image size.
for dtype in self.float_types:
input_shape = [1, 2, 2, 1]
input_data = [10, 5, 13, 54]
input_np = np.array(input_data, dtype=dtype).reshape(input_shape)
warp_shape = [1, 2, 2]
# -0.1 is *inbound* for grad_warp and grad_data, 2.1 is out of bound.
warp_data = [-0.1, 0.1, 1.2, 2.1]
warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
expected = [[[0.0], [0.0]]]
self._assertForwardOpMatchesExpected(input_np, warp_np, expected)
expected_grad_data = [[[[0.81], [0.0]], [[0.09], [0.0]]]]
expected_grad_warp = [[[10.30, 2.7], [0.0, 0.0]]]
grad_output = np.ones([1, 2, 1], dtype=dtype)
self._assertBackwardOpMatchesExpected(input_np, warp_np, grad_output,
expected_grad_data,
expected_grad_warp)
# One of (x, y) is greater than image size.
for dtype in self.float_types:
input_shape = [1, 2, 2, 1]
input_data = [10, 5, 13, 54]
input_np = np.array(input_data, dtype=dtype).reshape(input_shape)
warp_shape = [1, 2, 2]
warp_data = [0.1, -0.1, 1.2, 0.1]
warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
expected = [[[0.0], [0.0]]]
self._assertForwardOpMatchesExpected(input_np, warp_np, expected)
expected_grad_data = [[[[0.81], [0.81]], [[0.0], [0.08]]]]
expected_grad_warp = [[[-4.5, 9.5], [-9.9, 39.20]]]
grad_output = np.ones([1, 2, 1], dtype=dtype)
self._assertBackwardOpMatchesExpected(input_np, warp_np, grad_output,
expected_grad_data,
expected_grad_warp)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/resampler/xla/resampler_ops_xla_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ops module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/resampler/python/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tensorflow op performing differentiable resampling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.resampler.ops import gen_resampler_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
_resampler_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_resampler_ops.so"))
def resampler(data, warp, name="resampler"):
"""Resamples input data at user defined coordinates.
The resampler currently only supports bilinear interpolation of 2D data.
Args:
data: Tensor of shape `[batch_size, data_height, data_width,
data_num_channels]` containing 2D data that will be resampled.
warp: Tensor of minimum rank 2 containing the coordinates at which
resampling will be performed. Since only bilinear interpolation is
currently supported, the last dimension of the `warp` tensor must be 2,
representing the (x, y) coordinate where x is the index for width and y is
the index for height.
name: Optional name of the op.
Returns:
Tensor of resampled values from `data`. The output tensor shape is
determined by the shape of the warp tensor. For example, if `data` is of
shape `[batch_size, data_height, data_width, data_num_channels]` and warp of
shape `[batch_size, dim_0, ... , dim_n, 2]` the output will be of shape
`[batch_size, dim_0, ... , dim_n, data_num_channels]`.
Raises:
ImportError: if the wrapper generated during compilation is not present when
the function is called.
"""
with ops.name_scope(name, "resampler", [data, warp]):
data_tensor = ops.convert_to_tensor(data, name="data")
warp_tensor = ops.convert_to_tensor(warp, name="warp")
return gen_resampler_ops.resampler(data_tensor, warp_tensor)
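# A minimal usage sketch of `resampler` (illustration only, not part of the
# original module; shapes and values below are assumptions). `data` is a
# [batch, height, width, channels] tensor and `warp` holds (x, y) sampling
# coordinates, so a warp of shape [1, 2, 2] yields an output of shape
# [1, 2, channels]:
#
#   import numpy as np
#   data_np = np.random.rand(1, 4, 4, 3).astype(np.float32)
#   warp_np = np.array([[[1.5, 2.0], [0.0, 3.0]]], dtype=np.float32)
#   output = resampler(data_np, warp_np)  # Tensor of shape [1, 2, 3].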
@ops.RegisterGradient("Resampler")
def _resampler_grad(op, grad_output):
data, warp = op.inputs
grad_output_tensor = ops.convert_to_tensor(grad_output, name="grad_output")
return gen_resampler_ops.resampler_grad(data, warp, grad_output_tensor)
ops.NotDifferentiable("ResamplerGrad")
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/resampler/python/ops/resampler_ops.py
|
# pylint: disable=g-bad-file-header
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for contrib.resampler.python.ops.resampler_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import resampler
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
def _bilinearly_interpolate(data, x, y):
"""Performs bilinenar interpolation of grid data at user defined coordinates.
This interpolation function:
a) implicitly pads the input data with 0s.
b) returns 0 when sampling outside the (padded) image.
The effect is that the sampled signal smoothly goes to 0 outside the original
input domain, rather than producing a jump discontinuity at the image
boundaries.
Args:
data: numpy array of shape `[data_height, data_width]` containing data
samples assumed to be defined at the corresponding pixel coordinates.
x: numpy array of shape `[warp_height, warp_width]` containing x coordinates
at which interpolation will be performed.
y: numpy array of shape `[warp_height, warp_width]` containing y coordinates
at which interpolation will be performed.
Returns:
Numpy array of shape `[warp_height, warp_width]` containing interpolated
values.
"""
shape = x.shape
x = np.asarray(x) + 1
y = np.asarray(y) + 1
data = np.lib.pad(data, 1, "constant", constant_values=0)
x_0 = np.floor(x).astype(int)
x_1 = x_0 + 1
y_0 = np.floor(y).astype(int)
y_1 = y_0 + 1
x_0 = np.clip(x_0, 0, data.shape[1] - 1)
x_1 = np.clip(x_1, 0, data.shape[1] - 1)
y_0 = np.clip(y_0, 0, data.shape[0] - 1)
y_1 = np.clip(y_1, 0, data.shape[0] - 1)
i_a = data[y_0, x_0]
i_b = data[y_1, x_0]
i_c = data[y_0, x_1]
i_d = data[y_1, x_1]
w_a = (x_1 - x) * (y_1 - y)
w_b = (x_1 - x) * (y - y_0)
w_c = (x - x_0) * (y_1 - y)
w_d = (x - x_0) * (y - y_0)
samples = (w_a * i_a + w_b * i_b + w_c * i_c + w_d * i_d)
samples = samples.reshape(shape)
return samples
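# Worked example (illustrative, not part of the original test): for
# data = np.array([[1., 2.], [3., 4.]]) and query arrays x = y = np.array([[0.5]]),
# the four neighbouring samples 1, 2, 3 and 4 all receive weight 0.25, so
# _bilinearly_interpolate(data, x, y) evaluates to [[2.5]].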
def _make_warp(batch_size, warp_height, warp_width, dtype):
"""Creates batch of warping coordinates."""
x, y = np.meshgrid(np.linspace(0, warp_width - 1, warp_width),
np.linspace(0, warp_height - 1, warp_height))
warp = np.concatenate((x.reshape([warp_height, warp_width, 1]),
y.reshape([warp_height, warp_width, 1])), 2)
warp = np.tile(warp.reshape([1, warp_height, warp_width, 2]),
[batch_size, 1, 1, 1])
warp += np.random.randn(*warp.shape)
return warp.astype(dtype)
class ResamplerTest(test.TestCase):
def test_op_forward_pass_gpu_float32(self):
self._test_op_forward_pass(True, dtypes.float32, 1e-4)
def test_op_forward_pass_gpu_float64(self):
self._test_op_forward_pass(True, dtypes.float64, 1e-5)
def test_op_forward_pass_cpu_float16(self):
self._test_op_forward_pass(False, dtypes.float16, 1e-2)
def test_op_forward_pass_cpu_float32(self):
self._test_op_forward_pass(False, dtypes.float32, 1e-4)
def test_op_forward_pass_cpu_float64(self):
self._test_op_forward_pass(False, dtypes.float64, 1e-5)
def test_op_backward_pass_gpu_float32(self):
self._test_op_backward_pass(True, dtypes.float32, 1e-3)
def test_op_backward_pass_cpu_float16(self):
self._test_op_backward_pass(False, dtypes.float16, 1e-3)
def test_op_backward_pass_cpu_float32(self):
self._test_op_backward_pass(False, dtypes.float32, 1e-4)
def test_op_backward_pass_cpu_float64(self):
self._test_op_backward_pass(False, dtypes.float64, 1e-6)
def _test_op_forward_pass(self, on_gpu, dtype, tol):
np.random.seed(0)
data_width = 7
data_height = 9
data_channels = 5
warp_width = 4
warp_height = 8
batch_size = 10
warp = _make_warp(batch_size, warp_height, warp_width, dtype.as_numpy_dtype)
data_shape = (batch_size, data_height, data_width, data_channels)
data = np.random.rand(*data_shape).astype(dtype.as_numpy_dtype)
with self.test_session(use_gpu=on_gpu, force_gpu=False) as sess:
data_ph = array_ops.placeholder(dtype, shape=(None,) + data.shape[1:])
warp_ph = array_ops.placeholder(dtype, shape=(None,) + warp.shape[1:])
outputs = resampler.resampler(data=data_ph, warp=warp_ph)
self.assertEqual(outputs.get_shape().as_list(),
[None, warp_height, warp_width, data_channels])
out = sess.run(outputs, feed_dict={data_ph: data, warp_ph: warp})
# Generate reference output via bilinear interpolation in numpy
reference_output = np.zeros_like(out)
for batch in xrange(batch_size):
for c in xrange(data_channels):
reference_output[batch, :, :, c] = _bilinearly_interpolate(
data[batch, :, :, c],
warp[batch, :, :, 0],
warp[batch, :, :, 1])
self.assertAllClose(out, reference_output, rtol=tol, atol=tol)
def _test_op_backward_pass(self, on_gpu, dtype, tol):
np.random.seed(13)
data_width = 5
data_height = 4
data_channels = 3
warp_width = 2
warp_height = 6
batch_size = 3
warp = _make_warp(batch_size, warp_height, warp_width, dtype.as_numpy_dtype)
data_shape = (batch_size, data_height, data_width, data_channels)
data = np.random.rand(*data_shape).astype(dtype.as_numpy_dtype)
with self.test_session(use_gpu=on_gpu, force_gpu=False):
data_tensor = constant_op.constant(data)
warp_tensor = constant_op.constant(warp)
output_tensor = resampler.resampler(data=data_tensor, warp=warp_tensor)
grads = test.compute_gradient([data_tensor, warp_tensor], [
data_tensor.get_shape().as_list(),
warp_tensor.get_shape().as_list()
], output_tensor, output_tensor.get_shape().as_list(), [data, warp])
if not on_gpu:
# On CPU we perform numerical differentiation at the best available
# precision, and compare against that. This is necessary for test to
# pass for float16.
data_tensor_64 = constant_op.constant(data, dtype=dtypes.float64)
warp_tensor_64 = constant_op.constant(warp, dtype=dtypes.float64)
output_tensor_64 = resampler.resampler(data=data_tensor_64,
warp=warp_tensor_64)
grads_64 = test.compute_gradient([data_tensor_64, warp_tensor_64], [
data_tensor.get_shape().as_list(),
warp_tensor.get_shape().as_list()
], output_tensor_64, output_tensor.get_shape().as_list(), [data, warp])
for g, g_64 in zip(grads, grads_64):
self.assertLess(np.fabs(g[0] - g_64[1]).max(), tol)
else:
for g in grads:
self.assertLess(np.fabs(g[0] - g[1]).max(), tol)
def test_op_errors(self):
data_width = 7
data_height = 9
data_depth = 3
data_channels = 5
warp_width = 4
warp_height = 8
batch_size = 10
# Input data shape is not defined over a 2D grid, i.e. its shape is not like
# (batch_size, data_height, data_width, data_channels).
with self.cached_session() as sess:
data_shape = (batch_size, data_height, data_width, data_depth,
data_channels)
data = np.zeros(data_shape)
warp_shape = (batch_size, warp_height, warp_width, 2)
warp = np.zeros(warp_shape)
outputs = resampler.resampler(constant_op.constant(data),
constant_op.constant(warp))
with self.assertRaisesRegexp(errors_impl.UnimplementedError,
"Only bilinear interpolation is currently "
"supported."):
sess.run(outputs)
# Warp tensor must be at least a matrix, with shape [batch_size, 2].
with self.cached_session() as sess:
data_shape = (batch_size, data_height, data_width, data_channels)
data = np.zeros(data_shape)
warp_shape = (batch_size,)
warp = np.zeros(warp_shape)
outputs = resampler.resampler(constant_op.constant(data),
constant_op.constant(warp))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"warp should be at least a matrix"):
sess.run(outputs)
# The batch size of the data and warp tensors must be the same.
with self.cached_session() as sess:
data_shape = (batch_size, data_height, data_width, data_channels)
data = np.zeros(data_shape)
warp_shape = (batch_size+1, warp_height, warp_width, 2)
warp = np.zeros(warp_shape)
outputs = resampler.resampler(constant_op.constant(data),
constant_op.constant(warp))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Batch size of data and warp tensor"):
sess.run(outputs)
# The warp tensor must contain 2D coordinates, i.e. its shape last dimension
# must be 2.
with self.cached_session() as sess:
data_shape = (batch_size, data_height, data_width, data_channels)
data = np.zeros(data_shape)
warp_shape = (batch_size, warp_height, warp_width, 3)
warp = np.zeros(warp_shape)
outputs = resampler.resampler(constant_op.constant(data),
constant_op.constant(warp))
with self.assertRaisesRegexp(errors_impl.UnimplementedError,
"Only bilinear interpolation is supported, "
"warping"):
sess.run(outputs)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/resampler/python/ops/resampler_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for memory statistics.
@@BytesInUse
@@BytesLimit
@@MaxBytesInUse
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.memory_stats.python.ops.memory_stats_ops import BytesInUse
from tensorflow.contrib.memory_stats.python.ops.memory_stats_ops import BytesLimit
from tensorflow.contrib.memory_stats.python.ops.memory_stats_ops import MaxBytesInUse
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/memory_stats/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for memory statistics ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.memory_stats.python.ops import memory_stats_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
class MemoryStatsOpsTest(test_util.TensorFlowTestCase):
def testBytesLimit(self):
# AllocatorStats.bytes_limit is set to zero for CPU allocators, so we skip
# the check.
if not test.is_gpu_available():
return
with self.test_session(use_gpu=True) as sess:
bytes_limit = sess.run(memory_stats_ops.BytesLimit())
self.assertLess(0, bytes_limit)
# Tests the peak memory usage of the following computation.
#   a   b
#    \ / |
#     c  |
#      \ |
#       d
# The memory for matrix "a" can be reused for matrix "d". Therefore, this
# computation needs space for only three matrices plus some small overhead.
def testChainOfMatmul(self):
# MaxBytesInUse is registered on GPU only. See kernels/memory_stats_ops.cc.
if not test.is_gpu_available():
return
with self.test_session(use_gpu=True) as sess:
matrix_size = 64
matrix_shape = tensor_shape.TensorShape([matrix_size, matrix_size])
dtype = dtypes.float32
matrix_size_in_bytes = matrix_shape.num_elements() * dtype.size
a = random_ops.random_uniform(matrix_shape, dtype=dtype)
b = random_ops.random_uniform(matrix_shape, dtype=dtype)
c = math_ops.matmul(a, b)
d = math_ops.matmul(c, b)
sess.run(d)
max_bytes_in_use_op = memory_stats_ops.MaxBytesInUse()
max_bytes_in_use = sess.run(max_bytes_in_use_op)
self.assertGreaterEqual(max_bytes_in_use, matrix_size_in_bytes * 3)
self.assertLess(max_bytes_in_use, matrix_size_in_bytes * 4)
# Run a chain with 2 ops and make sure BytesInUse captures the intermediate
# memory usage.
a = random_ops.random_uniform(matrix_shape, dtype=dtype)
with ops.control_dependencies([a]):
bytes_in_use_op = memory_stats_ops.BytesInUse()
with ops.control_dependencies([bytes_in_use_op]):
b = random_ops.random_uniform(matrix_shape, dtype=dtype)
c = math_ops.matmul(a, b)
_, bytes_in_use, max_bytes_in_use = sess.run([c, bytes_in_use_op,
max_bytes_in_use_op])
# intermediate result allocates 1 matrix, max usage is at least 2
self.assertGreaterEqual(bytes_in_use, matrix_size_in_bytes * 1)
self.assertLess(bytes_in_use, matrix_size_in_bytes * 2)
# max usage is still 3 because it reflects the maximum from the previous .run call
self.assertGreaterEqual(max_bytes_in_use, matrix_size_in_bytes * 3)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/memory_stats/python/kernel_tests/memory_stats_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for memory statistics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.memory_stats.ops import gen_memory_stats_ops
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
_memory_stats_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_memory_stats_ops.so"))
def BytesInUse():
"""Generates an op that computes the current memory of a device."""
return gen_memory_stats_ops.bytes_in_use()
def BytesLimit():
"""Generates an op that measures the total memory (in bytes) of a device."""
return gen_memory_stats_ops.bytes_limit()
def MaxBytesInUse():
"""Generates an op that computes the peak memory of a device."""
return gen_memory_stats_ops.max_bytes_in_use()
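# The following helper is an illustrative usage sketch added for this write-up
# and is not part of the original module. These are regular ops, so they must
# be run inside a session; on machines without a GPU some of the kernels may be
# unavailable or report zero (the tests elsewhere in this tree skip their
# checks when no GPU is present).
def _example_memory_stats_usage():  # Hypothetical helper, for illustration.
  """Returns (bytes_in_use, bytes_limit, max_bytes_in_use) for one device."""
  from tensorflow.python.client import session as session_lib
  with session_lib.Session() as sess:
    return sess.run([BytesInUse(), BytesLimit(), MaxBytesInUse()])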
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/memory_stats/python/ops/memory_stats_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Recurrent computations library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.recurrent.python.ops.functional_rnn import bidirectional_functional_rnn
from tensorflow.contrib.recurrent.python.ops.functional_rnn import functional_rnn
from tensorflow.contrib.recurrent.python.ops.recurrent import Recurrent
# pylint: enable=unused-import
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/recurrent/python/recurrent_api.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Recurrent ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.recurrent.python.ops import recurrent
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test as test_lib
from tensorflow.python.platform import tf_logging as logging
_ElmanState = collections.namedtuple('ElmanState', ('h'))
_ElmanTheta = collections.namedtuple('ElmanTheta', ('w', 'b'))
_ElmanInputs = collections.namedtuple('ElmanInputs', ('x'))
# TODO(drpng): add test for max length computation.
class RecurrentTest(test_util.TensorFlowTestCase):
def testBasic(self):
# pylint:disable=invalid-name
_PolyState = collections.namedtuple('PolyState', ('value', 'x_power'))
_PolyTheta = collections.namedtuple('PolyTheta', ('x'))
_PolyInputs = collections.namedtuple('PolyInputs', ('coeff'))
# pylint:enable=invalid-name
def Poly(theta, state, inputs):
next_state = _PolyState(
value=state.value + inputs.coeff * state.x_power,
x_power=state.x_power * theta.x)
return next_state, []
with self.cached_session() as sess:
theta = _PolyTheta(x=array_ops.constant(2.0))
state = _PolyState(
value=array_ops.constant(0.0),
x_power=array_ops.constant(1.0))
inputs = _PolyInputs(coeff=array_ops.constant([1., 2., 3.]))
# x = 2
# 1 + 2*x + 3*x^2
ret = recurrent.Recurrent(theta, state, inputs, Poly)
acc, state = sess.run(ret)
self.assertAllClose(acc.value, [1., 5., 17.])
self.assertAllClose(acc.x_power, [2., 4., 8.])
self.assertAllClose(state.value, 17.)
self.assertAllClose(state.x_power, 8.)
y = ret[1].value
dx, d_coeff = gradients_impl.gradients(ys=[y], xs=[theta.x, inputs.coeff])
dx_val, d_coeff_val = sess.run([dx, d_coeff])
# 2 + 6*x
self.assertAllClose(dx_val, 14.)
self.assertAllClose(d_coeff_val, [1., 2., 4.])
# acc = [1, 1+2x, 1+2x+3x^2]
# sum(acc) = 3 + 4x + 3x^2
acc = ret[0].value
dx, d_coeff = gradients_impl.gradients(
ys=[math_ops.reduce_sum(acc)], xs=[theta.x, inputs.coeff])
dx_val, d_coeff_val = sess.run([dx, d_coeff])
# 4 + 6*x
self.assertAllClose(dx_val, 16.)
self.assertAllClose(d_coeff_val, [3., 4., 4.])
@staticmethod
def Rand(shape):
return random_ops.random_uniform(
shape, minval=-0.2, maxval=0.2, dtype=dtypes.float64)
@staticmethod
def Elman(theta, state0, inputs):
h0, w, b, x = state0.h, theta.w, theta.b, inputs.x
xw = math_ops.matmul(array_ops.concat([x, h0], axis=1), w)
h1 = math_ops.sigmoid(xw + b)
state1 = _ElmanState(h=h1)
return (state1, state1)
@staticmethod
def ElmanGrad(theta, state0, inputs, extras, dstate1):
@function.Defun()
def Grad(h0, w, b, x, h1, dh1):
del b
# We hand-roll the gradient for the 2nd half of the cell as a demo.
dxwb = (dh1 * (1 - h1) * h1)
dxw, db = dxwb, math_ops.reduce_sum(dxwb, axis=0)
# Uses tf.gradients for the 1st half of the cell as a demo.
xw = math_ops.matmul(array_ops.concat([x, h0], axis=1), w)
dh0, dx, dw = gradients_impl.gradients(
ys=[xw], xs=[h0, x, w], grad_ys=[dxw])
return dh0, dx, dw, db
dh0, dx, dw, db = Grad(state0.h, theta.w, theta.b, inputs.x,
extras.h, dstate1.h)
dstate0 = _ElmanState(h=dh0)
dinputs = _ElmanInputs(x=dx)
return (_ElmanTheta(w=dw, b=db), dstate0, dinputs)
@staticmethod
def ElmanOut(state1):
return _ElmanState(x=state1.h)
@staticmethod
def ElmanOutGrad(dout):
return _ElmanState(h=dout.x)
def testElman(self):
for seqlen, use_grad in [(1, False), (1, True), (7, False), (7, True)]:
logging.info('== Elman: seqlen=%s, use_grad=%s', seqlen, use_grad)
self._ParameterizedTestElman(seqlen, use_grad)
def _ParameterizedTestElman(self, seqlen, use_grad):
with self.cached_session() as sess:
random_seed.set_random_seed(342462)
batch = 3
dims = 4
theta = _ElmanTheta(w=RecurrentTest.Rand([2 * dims, dims]),
b=RecurrentTest.Rand([dims]))
state0 = _ElmanState(h=RecurrentTest.Rand([batch, dims]))
inputs = _ElmanInputs(x=RecurrentTest.Rand([seqlen, batch, dims]))
# Statically unrolled.
s = state0
out = []
for i in xrange(seqlen):
inp = _ElmanInputs(x=inputs.x[i, :])
s, _ = RecurrentTest.Elman(theta, s, inp)
out += [s.h]
acc0, final0 = array_ops.stack(out), s.h
loss0 = math_ops.reduce_sum(acc0) + math_ops.reduce_sum(final0)
(dw0, db0, dh0, di0) = gradients_impl.gradients(
loss0, [theta.w, theta.b, state0.h, inputs.x])
acc1, final1 = recurrent.Recurrent(
theta=theta,
state0=state0,
inputs=inputs,
cell_fn=RecurrentTest.Elman,
cell_grad=RecurrentTest.ElmanGrad if use_grad else None)
assert isinstance(acc1, _ElmanState)
assert isinstance(final1, _ElmanState)
acc1, final1 = acc1.h, final1.h
loss1 = math_ops.reduce_sum(acc1) + math_ops.reduce_sum(final1)
(dw1, db1, dh1, di1) = gradients_impl.gradients(
loss1, [theta.w, theta.b, state0.h, inputs.x])
# Fetches a few values and compare them.
(acc0, acc1, final0, final1, dw0, dw1, db0, db1, dh0, dh1, di0,
di1) = sess.run(
[acc0, acc1, final0, final1, dw0, dw1, db0, db1, dh0, dh1, di0, di1])
self.assertAllClose(acc0, acc1)
self.assertAllClose(final0, final1)
self.assertAllClose(dw0, dw1)
self.assertAllClose(db0, db1)
self.assertAllClose(dh0, dh1)
self.assertAllClose(di0, di1)
if __name__ == '__main__':
test_lib.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/recurrent/python/kernel_tests/recurrent_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Functional RNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.recurrent.python.ops import functional_rnn
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import rnn as rnn_lib
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import test as test_lib
from tensorflow.python.platform import tf_logging as logging
def _CreateStackedLstmCell(*cell_sizes):
subcells = [rnn_cell_impl.LSTMCell(cell_size) for cell_size in cell_sizes]
return rnn_cell_impl.MultiRNNCell(subcells)
class FunctionalRnnTest(test_util.TensorFlowTestCase):
_BATCH_SIZE = 3
_TOTAL_TIME = 5
_INPUT_SIZE = 11
_NUM_UNITS = 7
# Set this to a file path to have testRunLstm dump the LSTM GraphDef there.
_LSTM_GRAPH_DEF_FILEPATH = None
_CELLDEFS = {
'gru': (rnn_cell_impl.GRUCell, [_NUM_UNITS]),
'lstm': (rnn_cell_impl.LSTMCell, [_NUM_UNITS]),
'stacked_lstm': (_CreateStackedLstmCell, [_NUM_UNITS] * 3)
}
def _CreateCell(self, celldef_name):
func, args = self._CELLDEFS[celldef_name]
return func(*args)
def _CreateInputs(self, time_major=False):
if time_major:
inputs = np.random.random([
FunctionalRnnTest._TOTAL_TIME, FunctionalRnnTest._BATCH_SIZE,
FunctionalRnnTest._INPUT_SIZE
])
else:
inputs = np.random.random([
FunctionalRnnTest._BATCH_SIZE, FunctionalRnnTest._TOTAL_TIME,
FunctionalRnnTest._INPUT_SIZE
])
# Always leave one time slot empty, to check max_length behavior.
sequence_length = np.random.randint(
0, high=FunctionalRnnTest._TOTAL_TIME - 1,
size=FunctionalRnnTest._BATCH_SIZE,
dtype=np.int)
return (inputs, sequence_length)
def _CreateSymmetricInputs(self):
# total time = batch size
inputs = np.zeros(
(FunctionalRnnTest._BATCH_SIZE, FunctionalRnnTest._BATCH_SIZE,
FunctionalRnnTest._INPUT_SIZE))
for i in range(FunctionalRnnTest._BATCH_SIZE):
for j in range(i, FunctionalRnnTest._BATCH_SIZE):
inputs[i][j] = np.random.random([FunctionalRnnTest._INPUT_SIZE])
inputs[j][i] = inputs[i][j]
# Always leave one time slot empty, to check max_length behavior.
sequence_length = np.random.randint(
0,
high=FunctionalRnnTest._BATCH_SIZE - 1,
size=FunctionalRnnTest._BATCH_SIZE,
dtype=np.int)
return (inputs, sequence_length)
def _CreateRnnGraph(self,
create_rnn_computation_func,
cell,
tf_inputs,
tf_sequence_length,
is_bidirectional,
initial_state=None,
time_major=None,
scope=None):
if is_bidirectional:
tf_result = create_rnn_computation_func(
cell_fw=cell,
cell_bw=cell,
inputs=tf_inputs,
sequence_length=tf_sequence_length,
dtype=dtypes.float32,
time_major=time_major,
scope=scope)
else:
tf_result = create_rnn_computation_func(
cell=cell,
inputs=tf_inputs,
sequence_length=tf_sequence_length,
initial_state=initial_state,
dtype=dtypes.float32,
time_major=time_major,
scope=scope)
grad = gradients_impl.gradients(tf_result, variables.trainable_variables())
return {'inference': tf_result, 'grad': grad}
def _MaybeResetVariables(self, variable_cache, sess, var_list):
"""Possibly resets the variables to a previously seen value."""
reset_ops = []
fetches = []
for var in var_list:
if var.name in variable_cache:
reset_ops += [var.assign(variable_cache[var.name])]
else:
fetches += [(var.name, var)]
if reset_ops:
sess.run(reset_ops)
if fetches:
val = sess.run(dict(fetches))
for n, v in val.items():
assert n not in variable_cache
variable_cache[n] = v
def _RunRnn(self, numpy_inputs, numpy_slen, cell_name, variable_cache,
is_dynamic, time_major=None, is_bidirectional=False):
with ops.Graph().as_default() as graph:
tf_inputs = array_ops.placeholder(
dtypes.float32, shape=numpy_inputs.shape)
tf_slen = array_ops.placeholder(dtypes.int32)
feeds = {tf_inputs: numpy_inputs, tf_slen: numpy_slen}
cell = self._CreateCell(cell_name)
if is_dynamic:
if is_bidirectional:
fn = rnn_lib.bidirectional_dynamic_rnn
else:
fn = rnn_lib.dynamic_rnn
else:
if is_bidirectional:
fn = functional_rnn.bidirectional_functional_rnn
else:
fn = functional_rnn.functional_rnn
fetches = self._CreateRnnGraph(
fn, cell, tf_inputs, tf_slen, is_bidirectional, time_major=time_major)
with self.session(graph=graph) as sess:
sess.run(variables.global_variables_initializer())
# Note that cell.trainable_variables is not always set.
self._MaybeResetVariables(variable_cache, sess,
variables.trainable_variables())
val = sess.run(fetches, feed_dict=feeds)
graph_def = graph.as_graph_def()
return graph_def, val
def testRunLstm(self):
"""Runs a simple LSTM. Does not check output."""
np_inputs, np_slen = self._CreateInputs()
var_cache = {}
graphdef, _ = self._RunRnn(np_inputs, np_slen, 'lstm', var_cache, False)
logging.info('graphdef: %s', graphdef)
if self._LSTM_GRAPH_DEF_FILEPATH:
with open(self._LSTM_GRAPH_DEF_FILEPATH, 'w') as f:
f.write(str(graphdef))
def testLstm(self):
"""Checks an LSTM against the reference implementation."""
np_inputs, np_slen = self._CreateInputs()
var_cache = {}
_, func_rnn = self._RunRnn(np_inputs, np_slen, 'lstm', var_cache, False)
_, dyn_rnn = self._RunRnn(np_inputs, np_slen, 'lstm', var_cache, True)
self.assertAllClose(dyn_rnn['inference'], func_rnn['inference'])
self.assertAllClose(dyn_rnn['grad'], func_rnn['grad'])
def testGru(self):
"""Checks a GRU cell against the reference implementation."""
np_inputs, np_slen = self._CreateInputs()
var_cache = {}
_, func_rnn = self._RunRnn(np_inputs, np_slen, 'gru', var_cache, False)
_, dyn_rnn = self._RunRnn(np_inputs, np_slen, 'gru', var_cache, True)
self.assertAllClose(dyn_rnn['inference'], func_rnn['inference'])
self.assertAllClose(dyn_rnn['grad'], func_rnn['grad'])
def testStackedLstm(self):
"""Checks a stacked LSTM cell against the reference implementation."""
np_inputs, np_slen = self._CreateInputs()
var_cache = {}
args = [np_inputs, np_slen, 'stacked_lstm', var_cache]
_, func_rnn = self._RunRnn(*(args + [False]))
_, dyn_rnn = self._RunRnn(*(args + [True]))
self.assertAllClose(dyn_rnn['inference'], func_rnn['inference'])
self.assertAllClose(dyn_rnn['grad'], func_rnn['grad'])
def testLstmWithTimeMajorInputs(self):
"""Checks an LSTM against the reference implementation, with time_major."""
time_major = True
np_inputs, np_slen = self._CreateInputs(time_major=True)
var_cache = {}
args = [np_inputs, np_slen, 'lstm', var_cache]
_, func_rnn = self._RunRnn(*(args + [False]), time_major=time_major)
_, dyn_rnn = self._RunRnn(*(args + [True]), time_major=time_major)
self.assertAllClose(dyn_rnn['inference'], func_rnn['inference'])
self.assertAllClose(dyn_rnn['grad'], func_rnn['grad'])
def testBidirectionalLstmWithTimeMajorInputs(self):
"""Checks a bi-directional LSTM with time-major inputs."""
time_major = True
np_inputs, np_slen = self._CreateInputs(time_major)
var_cache = {}
args = [np_inputs, np_slen, 'lstm', var_cache]
_, func_rnn = self._RunRnn(
*(args + [False]), time_major=time_major, is_bidirectional=True)
_, dyn_rnn = self._RunRnn(
*(args + [True]), time_major=time_major, is_bidirectional=True)
self.assertAllClose(dyn_rnn['inference'], func_rnn['inference'])
# TODO(b/112170761): uncomment this line after the bug is fixed.
# self.assertAllClose(dyn_rnn['grad'], func_rnn['grad'])
def testBidirectionalLstm(self):
"""Checks time-major and batch-major rnn produce consistent results."""
time_major_inputs, np_slen = self._CreateInputs(True)
batch_major_inputs = np.transpose(time_major_inputs, [1, 0, 2])
var_cache = {}
args = [np_slen, 'lstm', var_cache, False]
_, time_major_rnn = self._RunRnn(
*([time_major_inputs] + args), time_major=True, is_bidirectional=True)
_, batch_major_rnn = self._RunRnn(
*([batch_major_inputs]+ args), time_major=False, is_bidirectional=True)
# Convert the batch-major outputs to time-major before the comparison.
outputs, state = batch_major_rnn['inference']
outputs = [np.transpose(x, [1, 0, 2]) for x in outputs]
batch_major_rnn['inference'] = [outputs, state]
self.assertAllClose(time_major_rnn['inference'],
batch_major_rnn['inference'])
self.assertAllClose(time_major_rnn['grad'], batch_major_rnn['grad'])
def testBidirectionalLstmWithSymmetricInputs(self):
"""Checks a bi-directional LSTM with symmetric inputs.
Time-major and batch-major RNNs produce the same result with symmetric
inputs.
"""
np_inputs, np_slen = self._CreateSymmetricInputs()
var_cache = {}
args = [np_inputs, np_slen, 'lstm', var_cache]
_, time_major_func_rnn = self._RunRnn(
*(args + [False]), time_major=True, is_bidirectional=True)
_, batch_major_func_rnn = self._RunRnn(
*(args + [False]), time_major=False, is_bidirectional=True)
_, time_major_dyn_rnn = self._RunRnn(
*(args + [True]), time_major=True, is_bidirectional=True)
_, batch_major_dyn_rnn = self._RunRnn(
*(args + [True]), time_major=False, is_bidirectional=True)
self.assertAllClose(time_major_func_rnn['inference'],
batch_major_func_rnn['inference'])
self.assertAllClose(time_major_func_rnn['grad'],
batch_major_func_rnn['grad'])
self.assertAllClose(time_major_dyn_rnn['inference'],
batch_major_dyn_rnn['inference'])
self.assertAllClose(time_major_dyn_rnn['grad'], batch_major_dyn_rnn['grad'])
self.assertAllClose(time_major_func_rnn['inference'],
batch_major_dyn_rnn['inference'])
self.assertAllClose(time_major_func_rnn['grad'],
batch_major_dyn_rnn['grad'])
if __name__ == '__main__':
test_lib.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/recurrent/python/kernel_tests/functional_rnn_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Recurrent computation.
The main interface of this module is Recurrent().
A recurrent computation describes an auto-regressive process, where outputs
of one time step are fed as inputs to the next time step.
This module uses:
theta: the "weights" each RNN uses.
state0: the initial state of each RNN.
cell_fn: A python function describing the RNN cell. It must have the following
signature:
cell_fn: (theta, state0, inputs) -> (state1, extras)
state1 is the next RNN state, extras are computed by cell_fn
and the library forwards extras to cell_fn's gradient function.
cell_grad: A python function describing the backprop gradient function
for the RNN cell. It must have the following signature:
cell_grad: (theta, state0, inputs, extras, dstate1) -> (
dtheta, dstate0, dinputs)
dstate1 is what the backprop algorithm provides representing
gradients of state1 w.r.t. the final loss.
In this module, we handle structures of tensors for theta, state0, inputs,
and extras. The structure is an arbitrarily nested python structure, such
as a dictionary of named tuples.
Because the computation is a left-to-right chain, a single in-place accumulator
can be used rather than a stack. Thus a special gradient was written to reduce
unnecessary memory usage.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import inplace_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.inplace_ops import alias_inplace_update
from tensorflow.python.util import nest
def _AssertIsCompatible(a, b):
"""Checks that `a` and `b` are nested structures of the same type."""
# TODO(drpng): implement.
del a
del b
def _Index(struct, index):
"""Returns a structure with `x[index]` for each tensor `x` in the structure.
Args:
struct: A structure of tensors.
index: A scalar integer tensor. Performance is better if `index` is
in host memory.
Returns:
A structure of tensors congruent to `struct`.
For each key in `ret`, `ret[key] = struct[key][index]`.
"""
index = ops.convert_to_tensor(index)
index.get_shape().assert_has_rank(0)
return nest.map_structure(lambda x: array_ops.gather(x, index), struct)
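# Example of the semantics above (illustrative, not part of the original
# module): if struct = {'x': x} where x has shape [seq_len, batch, dims], then
# _Index(struct, t) returns {'x': x[t]}, a congruent structure whose tensors
# have shape [batch, dims].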
def _Update(struct_acc, struct_x, t):
"""Updates t-th row in accumulators.
Args:
struct_acc: The accumulators. A structure of tensors.
struct_x: The new values. A structure of tensors congruent to `struct_acc`.
t: A scalar integer. Performance is better if `t` is in device
memory.
Returns:
A structure of tensors. Say, ret is a returned dictionary. Then, for
each key, we have:
ret[key] = struct_acc[key];
ret[key][t, :] = struct_x[key]
"""
to_skip_update = set()
acc_lst = nest.flatten(struct_acc)
x_lst = nest.flatten(struct_x)
t = math_ops.cast(
[t], dtypes.int32) # tf.compat.v1.to_int32 casts on-device tensors.
lst = []
for acc, x in zip(acc_lst, x_lst):
if acc in to_skip_update:
# Until b/62105730 is fixed, we need to avoid inplace update for tensors
# of rank 1. We could reshape to handle it, but we don't really need the
# values applied to these, so just skip their modification.
lst += [acc]
else:
lst += [alias_inplace_update(acc, t, array_ops.expand_dims(x, 0))]
return nest.pack_sequence_as(struct_acc, lst)
def _SeqLenDim(struct):
"""Returns the 0-th dim size of tensors in a structure of tensors.
This is the max sequence length according to the shape of the inputs.
Args:
struct: A structure of tensors. Every tensor's 0-th dim has the same size.
Returns:
A scalar tensor which is the size of the 0-th dim of every tensor in struct.
"""
xs = nest.flatten(struct)
assert xs
dim0 = array_ops.shape(xs[0])[0]
return dim0
def _Flatten(struct):
"""Flattens a structure."""
return nest.flatten(struct)
def _Pack(elements, struct_template):
"""Packs the list of tensors according to the structure.
In the event that `elements` should be a scalar, `struct_template` must
contain exactly one non-trivial element (for instance, `[[], {'x':elt}]`).
Args:
elements: Elements to be packed. A list of tensor, or a single tensor.
struct_template: The container structure in which to pack them.
Returns:
A python structure of the same type as `struct_template`, containing
`elements` as its contained elements.
"""
if not nest.is_sequence(elements):
return nest.pack_sequence_as(struct_template, [elements])
return nest.pack_sequence_as(struct_template, elements)
def _EmptyAcc(slen, struct_template):
"""Creates a set of accumulators for tensors in structure.
Args:
slen: The sequence length. A scalar tensor.
struct_template: A structure of tensors.
Returns:
A structure congruent to `struct_template`. Say ret is a returned
dictionary. Then, `ret.key`, a tensor, has the same dtype as
`struct_template.key`. The tensor's shape has 1 more dimension
than the tensor `struct_template.key`. The extra 0-th dimension is of size
`slen`. E.g., if `slen=10` and `struct_template.key`'s shape is `[3, 5]`,
then, `ret.key`'s shape is `[10, 3, 5]`.
"""
def _EmptyAccForTensor(tensor):
return inplace_ops.empty(
array_ops.concat([[slen], array_ops.shape(tensor)], axis=0),
tensor.dtype,
init=True)
return nest.map_structure(_EmptyAccForTensor, struct_template)
def _EmptyLike(struct):
"""Creates a set of empty initialized tensors.
Args:
struct: A structure of tensors.
Returns:
A struct of tensors. Each tensor has the same shape and dtype as
its corresponding tensor in `struct`. And each tensor is initialized.
"""
return nest.map_structure(
lambda x: inplace_ops.empty_like(x, init=True), struct)
def _Add(struct_x, struct_y):
"""Adds tensors in `struct_x` with respective tensors in `struct_y`.
Args:
struct_x: A struct of tensors.
struct_y: A struct of tensors congruent to `struct_x`.
Returns:
A struct of tensors. Each element of the returned value
equals `x + y`, with corresponding values in `struct_x` and `struct_y`.
"""
list_x = nest.flatten(struct_x)
list_y = nest.flatten(struct_y)
z = []
for x, y in zip(list_x, list_y):
z += [math_ops.add(x, y)]
return nest.pack_sequence_as(struct_x, z)
def _Dtypes(struct):
"""Returns all tensors' data types in a list."""
return [x.dtype for x in nest.flatten(struct)]
def _ConvertNoneGradientToZeros(xs, dxs):
"""Sanitize dxs so that None becomes zeros appropriately.
Args:
xs: A list of tensors.
dxs: A list of tensors. dxs[i] corresponds to xs[i]'s gradient.
Returns:
A structure same as `dxs` with `None` replaced by a zero tensor.
"""
list_xs = nest.flatten(xs)
list_dxs = nest.flatten(dxs)
# If x does not get any backprop-ed gradient, propagate zeros.
rets = []
for (x, dx) in zip(list_xs, list_dxs):
if dx is None:
rets.append(array_ops.zeros_like(x))
else:
rets.append(dx)
return nest.pack_sequence_as(dxs, rets)
# All structures are flattened for use internally. This is for simplicity
# and also to use the Defun construct.
# In the forward pass (inference), the computation is structured as follows.
# Forward: [gradient = _Recurrent.Grad]
# Flatten structures, create accumulators.
# for t = 0..max_input_length:
# Defun ForwardLoopBody:
# Defun Fwd: flatten/pack around cell_fn
# state1 = Fwd(inputs[t], state0)
# acc_state += [state1]
# Pack structures.
# During the backward pass (backpropping the gradient from the last time
# step to the first, through the structure), the computation is structured
# as follows.
# Grad:
# Flatten structures.
# Defun Backward:
# Create accumulated derivatives: d_theta, d_inputs, d_acc_state.
# Regarding the note at the top of the file, there is only one accumulator
# for d_theta accumulated over the whole sequence.
# for t = max_input_length -1..0:
# Defun BackwardLoopBody:
# Retrieve acc_state[t] computed in the forward pass.
# Defun Bak: flatten/pack around cell_grad.
# d_state1 is d_state0 from the previous iteration (i.e. the next time step).
# d_acc_state[dev_t] += d_state1
# d_theta_t, d_state0, d_inputs_t, = Bak()
# d_inputs[dev_t] += d_inputs
# d_theta += d_theta_t
# d_acc_state[t] += d_state1
# Pack structures and return.
class _Recurrent(object):
"""A helper class to construct a recurrent neural net."""
def __init__(self,
cell_fn,
cell_grad,
theta,
state0,
inputs,
max_input_length,
extras,
use_tpu,
aligned_end=False):
"""RNN helper class.
Args:
cell_fn: A python function, which computes:
state1, extras = cell_fn(theta, state0, inputs[t, :])
cell_grad: A python function which computes:
dtheta, dstate0, dinputs[t, :] = cell_grad(
theta, state0, inputs[t, :], extras, dstate1)
theta: weights. A structure of tensors.
state0: initial state. A structure of tensors.
inputs: inputs. A structure of tensors.
max_input_length: None, or the maximum effective length of the input over
all batches. A scalar tensor.
extras: A structure of tensors. The 2nd return value of every
invocation of cell_fn is a structure of tensors with matching keys
and shapes of this `extras`.
use_tpu: A boolean indicating whether the computation is meant to
run on a TPU.
aligned_end: A boolean indicating whether the sequence is aligned at
the end.
"""
self._theta = theta
self._state = state0
self._inputs = inputs
self._max_input_length = self._MaybeComputeMaxInputLength(
inputs, max_input_length)
self._cell_fn = cell_fn
self._cell_grad = cell_grad
self._extras = extras
self._aligned_end = aligned_end
# pylint: disable=unbalanced-tuple-unpacking
# NOTE: TF Function (Fwd, Bak, ForwardLoopBody, BackwardLoopBody,
# Forward and Backward defined below) simply takes a list of
# Tensors and returns a list of Tensors. When we pass in a
# structure (a list of structures of Tensors), we use _Flatten to
# convert the structure into a list of tensor. Conversely, the
# following code often uses _Pack to formulate a structure from a
# list of tensors based on a "template".
# Wraps cell_fn in a TF Function:
# state1 = cell_fn(theta, state0, inputs)
fwd_sig = [self._theta, self._state, self._inputs]
compiled = use_tpu
noinline = not compiled
dev_t_type = dtypes.int32 if use_tpu else dtypes.int64
@function.Defun(*_Dtypes(fwd_sig))
def Fwd(*args):
(theta, state0, inputs) = _Pack(args, fwd_sig)
state1, extras = self._cell_fn(theta, state0, inputs)
assert not function.get_extra_args(), (
'cell_fn is not pure with extra args: %s.' %
(function.get_extra_args()))
_AssertIsCompatible(state1, self._state)
_AssertIsCompatible(extras, self._extras)
return _Flatten([state1, extras])
# Wraps cell_fn in a TF Function as a for-loop's body.
#
# The loop state is composed of:
# t: The loop variable. Timestep id.
# dev_t: The loop variable mirrored on the device.
# theta: the recurrent net's weights.
# state0: the previous recurrent state.
# inputs: inputs to the recurrent net. inputs[t, :] are for the timestep t.
# acc_state: Each timestep's computed new state is also stashed into
# acc_state.
# acc_extras: Each timestep's computed extras is stashed into acc_extras
fwdloop_sig = [
self._theta, self._state, self._inputs, self._state, self._extras
]
@function.Defun(dtypes.int32, dev_t_type, *_Dtypes(fwdloop_sig))
def ForwardLoopBody(*args):
"""The body of forward loop."""
t, dev_t = args[0], args[1]
(theta, state0, inputs, acc_state, acc_extras) = _Pack(
args[2:], fwdloop_sig)
inputs_t = _Index(inputs, t) # external input at time step t.
fwd = Fwd(*_Flatten([theta, state0, inputs_t]))
state1, extras = _Pack(fwd, [self._state, self._extras])
# Saves state1 and extras in their accumulators.
acc_state = _Update(acc_state, state1, dev_t)
acc_extras = _Update(acc_extras, extras, dev_t)
return [math_ops.add(dev_t, 1)] + _Flatten(
[theta, state1, inputs, acc_state, acc_extras])
def Grad(op, *args):
"""The python grad function for the Forward function."""
# NOTE: tf.gradients backprops None for int32/int64 while zeros
# for float32/float64. For consistency, we always backprop
# zeros.
args = list(args)
for i, dy in enumerate(args):
if dy is None:
args[i] = array_ops.zeros_like(op.outputs[i])
# TODO(drpng): getting the extra state here?
op_inputs = [x for x in op.inputs]
op_struct = [
self._theta, self._state, self._inputs, self._max_input_length,
self._extras
]
(theta, state0, inputs, max_input_length, _) = _Pack(op_inputs, op_struct)
# acc_state and acc_extras are computed by the Forward pass and
# needed by the Backward pass.
acc_state, _, acc_extras = _Pack([x for x in op.outputs],
[self._state, self._state, self._extras])
# Forward computes acc_state, the final state and
# acc_extras. tf.gradients gives us their gradients w.r.t. the
# final loss. Because acc_extras are not exposed by Compute(),
# it has no gradients w.r.t. the final loss (i.e., by
# construction, it must be zeros).
d_acc_state, d_state1, _ = _Pack(args,
[self._state, self._state, self._extras])
return Backward(*_Flatten([
theta, state0, inputs, max_input_length, acc_state, acc_extras,
d_acc_state, d_state1
]))
# Forward calls ForwardLoopBody n times. Each time computes one
# time step of the recurrent net.
forward_sig = [
self._theta, self._state, self._inputs, self._max_input_length,
self._extras
]
@function.Defun(
*_Dtypes(forward_sig), python_grad_func=Grad, noinline=noinline)
def Forward(*args):
"""Forward pass of the recurrent net."""
theta, state0, inputs, max_input_length, extras = _Pack(args, forward_sig)
slen_dim = _SeqLenDim(inputs)
# Creates accumulators for state0 and extras.
acc_state = _EmptyAcc(slen_dim, state0)
acc_extras = _EmptyAcc(slen_dim, extras)
t = slen_dim - max_input_length if self._aligned_end else 0
dev_t = math_ops.cast(t, dtypes.int32) if use_tpu else math_ops.cast(
t, dtypes.int64)
run = functional_ops.For(
start=t,
limit=slen_dim if self._aligned_end else max_input_length,
delta=1,
inputs=[dev_t] + _Flatten(
[theta, state0, inputs, acc_state, acc_extras]),
body=ForwardLoopBody,
rewrite_with_while=compiled)
_, state1, _, acc_state, acc_extras = _Pack(
run[1:],
[self._theta, self._state, self._inputs, self._state, self._extras])
return _Flatten([acc_state, state1, acc_extras])
# The per-step backward computes:
# d_theta, d_state0, d_inputs = cell_grad(
# theta, state0, inputs, extras, d_state1)
# where d_state1 is the backprop-ed gradient for state1, and
# extras is computed by the forward step to facilitate the
# backward step.
bak_sig = [
self._theta, self._state, self._inputs, self._extras, self._state
]
@function.Defun(*_Dtypes(bak_sig))
def Bak(*args):
"""Backward step."""
(theta, state0, inputs, extras, d_state1) = _Pack(args, bak_sig)
(dtheta, dstate0, dinputs) = self._cell_grad(theta, state0, inputs,
extras, d_state1)
assert not function.get_extra_args(), (
'cell_grad is not pure with extra args: %s.' %
(function.get_extra_args()))
_AssertIsCompatible(dtheta, self._theta)
_AssertIsCompatible(dstate0, self._state)
_AssertIsCompatible(dinputs, self._inputs)
return _Flatten(
_ConvertNoneGradientToZeros([theta, state0, inputs],
[dtheta, dstate0, dinputs]))
# Define defuns used by a functional_ops.If in BackwardLoopBody.
state_if_sig = [self._state, self._state]
@function.Defun(*_Dtypes(state_if_sig))
def ReturnOrigState0(*args):
"""Returns original state0 from inputs."""
(_, orig_state0) = _Pack(args, state_if_sig)
return nest.flatten(orig_state0)
@function.Defun(*_Dtypes(state_if_sig))
def ReturnAccState(*args):
"""Returns acc_state[t-1] from inputs."""
(acc_state, _) = _Pack(args, state_if_sig)
return nest.flatten(acc_state)
# Wraps cell_grad gradient function in a TF Function as a
# for-loop's body for the Backward pass.
#
# The loop state is composed of:
# t: The loop variable. Timestep id.
# state0: the initial state for the entire backward loop.
# dev_t: The loop variable mirrored on the device.
# theta: the recurrent net's weights.
# inputs: inputs to the recurrent net. inputs[t, :] are for the timestep t.
# acc_state: Each timestep's computed new state was stashed into
# acc_state by the Forward pass.
# acc_extras: Each timestep's computed extras was stashed into
# acc_extras by the Forward pass.
# d_theta: All timestep's gradient for theta is accumulated (added) into
# d_theta.
# d_state1: The backprop-ed gradient for the new state computed by
# timestep t.
# d_inputs: d_inputs[t, :] is populated by the backward time step t.
# d_acc_state: The backprop-ed gradient for acc_state.
bakloop_sig = [
self._theta, self._state, self._inputs, self._state, self._extras,
self._theta, self._state, self._inputs, self._state
]
@function.Defun(dtypes.int32, dev_t_type, *_Dtypes(bakloop_sig))
def BackwardLoopBody(*args):
"""Backward loop body function."""
t, dev_t = args[0], args[1]
(theta, orig_state0, inputs, acc_state, acc_extras, d_theta, d_state1,
d_inputs, d_acc_state) = _Pack(args[2:], bakloop_sig)
# The input recurrent state for time step t is previous time step's
# output, or the original state0 when on time step 0.
state_from_acc = _Index(acc_state, math_ops.maximum(0, t - 1))
state0 = functional_ops.If(
math_ops.equal(t, array_ops.constant(0, dtypes.int32)),
_Flatten([state_from_acc, orig_state0]), ReturnOrigState0,
ReturnAccState)
state0 = nest.pack_sequence_as(orig_state0, state0)
# The external inputs for time step t.
inputs_t = _Index(inputs, t)
# The extras for time step t.
extras_t = _Index(acc_extras, t)
d_state1 = _Add(_Index(d_acc_state, t), d_state1)
(d_theta_t, d_state0, d_inputs_t) = _Pack(
Bak(*_Flatten([theta, state0, inputs_t, extras_t, d_state1])),
[self._theta, self._state, self._inputs])
d_theta = _Add(d_theta, d_theta_t)
d_inputs = _Update(d_inputs, d_inputs_t, dev_t)
return [math_ops.subtract(dev_t, 1)] + _Flatten([
theta, orig_state0, inputs, acc_state, acc_extras, d_theta, d_state0,
d_inputs, d_acc_state
])
# Backward calls BackwardLoopBody n times. Each time computes the backprop
# for one time step of the recurrent net.
backward_sig = [
self._theta, self._state, self._inputs, self._max_input_length,
self._state, self._extras, self._state, self._state
]
@function.Defun(*_Dtypes(backward_sig), noinline=noinline)
def Backward(*args):
"""Backward pass for the recurrent net."""
# theta, state0, inputs are Forward's inputs.
# acc_state is the accumulated 1st output of Forward.
# acc_extras is the accumulated 2nd output of Forward.
# d_acc_state is the gradient for acc_state.
# d_state1 is the gradient for the final state computed by Forward.
(theta, state0, inputs, max_input_length, acc_state, acc_extras,
d_acc_state, d_state1) = _Pack(args, backward_sig)
# Accumulators for gradients.
d_theta = _EmptyLike(theta)
d_inputs = _EmptyLike(inputs)
slen_dim = _SeqLenDim(inputs)
# Loop backwards. Note the loop's limit is open-ended, so it goes through
# t=0.
t = slen_dim - 1 if self._aligned_end else max_input_length - 1
dev_t = math_ops.cast(t, dtypes.int32) if use_tpu else math_ops.cast(
t, dtypes.int64)
limit = slen_dim - max_input_length - 1 if self._aligned_end else -1
run = functional_ops.For(
start=t,
limit=limit,
delta=-1,
inputs=[dev_t] + _Flatten([
theta, state0, inputs, acc_state, acc_extras, d_theta, d_state1,
d_inputs, d_acc_state
]),
body=BackwardLoopBody,
rewrite_with_while=compiled)
(theta, state0, inputs, acc_state, acc_extras, d_theta, d_state0,
d_inputs, d_acc_state) = _Pack(run[1:], bakloop_sig)
d_max_input_length = array_ops.constant(0, dtype=max_input_length.dtype)
return _Flatten(
[d_theta, d_state0, d_inputs, d_max_input_length, acc_extras])
self._forward = Forward
def _MaybeComputeMaxInputLength(self, inputs, max_input_length):
if max_input_length is not None:
return max_input_length
return math_ops.reduce_max(array_ops.shape(nest.flatten(inputs)[0])[0])
def Compute(self):
return _Pack(
self._forward(*_Flatten([
self._theta, self._state, self._inputs, self._max_input_length,
self._extras
])), [self._state, self._state, self._extras])[:2]
def _GetCellGrad(cell_fn, cell_grad):
"""Returns the gradient function for cell_fn.
Args:
cell_fn: The recurrent neural net's cell function.
cell_grad: If not None, cell_fn's gradient function.
Returns:
Returns cell_grad if not None. Otherwise, assume cell_fn is a python
function representing the recurrent neural net's cell function, i.e.,
cell_fn: (theta, state0, inputs) -> (state1, extra)
returns its default gradient python function, i.e.,
cell_grad: (theta, state0, inputs, extras, dstate1) -> (
dtheta, dstate0, dinputs)
"""
if cell_grad:
return cell_grad
def CellGrad(theta, state0, inputs, extras, dstate1):
"""Default gradient function for cell_fn."""
# NOTE: The default grad function recomputes the forward
# function and does not take advantage of 'extras' returned by
# the forward function.
del extras
state1, extras = cell_fn(theta, state0, inputs)
ys = _Flatten([state1])
xs = _Flatten([theta, state0, inputs])
grad_ys = _Flatten([dstate1])
grads = gradients_impl.gradients(ys=ys, xs=xs, grad_ys=grad_ys)
return _ConvertNoneGradientToZeros([theta, state0, inputs],
_Pack(grads, [theta, state0, inputs]))
return CellGrad
def _IsSingleTimeStep(inputs, max_input_length):
"""Returns True only if the time dimension of inputs is 1."""
if not isinstance(max_input_length, ops.Tensor):
return max_input_length == 1
for x in nest.flatten(inputs):
if x.shape.dims is None or x.shape[0].value != 1:
return False
return True
def Recurrent(theta,
state0,
inputs,
cell_fn,
cell_grad=None,
extras=None,
max_input_length=None,
use_tpu=False,
aligned_end=False):
"""Compute a recurrent neural net.
Roughly, Recurrent() computes the following:
state = state0
for t in inputs' sequence length:
state = cell_fn(theta, state, inputs[t, :])
accumulate_state[t, :] = state
return accumulate_state, state
theta, state, inputs are all structures of tensors.
inputs[t, :] means taking a slice out from every tensor in the inputs.
accumulate_state[t, :] = state means that we stash every tensor in
'state' into a slice of the corresponding tensor in
accumulate_state.
cell_fn is a python callable computing (building up a TensorFlow
graph) the recurrent neural network's one forward step. Two calls of
cell_fn must describe two identical computations.
By construction, Recurrent()'s backward computation does not access
any intermediate values computed by cell_fn during forward
computation. We may extend Recurrent() to support that by taking a
customized backward function of cell_fn.
Args:
theta: weights. A structure of tensors.
state0: initial state. A structure of tensors.
inputs: inputs. A structure of tensors.
cell_fn: A python function, which computes:
state1, extras = cell_fn(theta, state0, inputs[t, :])
cell_grad: A python function which computes:
dtheta, dstate0, dinputs[t, :] = cell_grad(
theta, state0, inputs[t, :], extras, dstate1)
extras: A structure of tensors. The 2nd return value of every
invocation of cell_fn is a structure of tensors with matching keys
and shapes of this `extras`.
max_input_length: maximum length of effective input. This is used to
truncate the computation if the inputs have been allocated to a
larger size. A scalar tensor.
use_tpu: whether or not we are on TPU.
aligned_end: A boolean indicating whether the sequence is aligned at
the end.
Returns:
accumulate_state and the final state.
"""
if cell_grad is None and _IsSingleTimeStep(inputs, max_input_length):
    # The sequence length is statically known to be 1. Hence, we just need to
    # call cell_fn once without putting it into a loop.
inputs = nest.map_structure(lambda x: array_ops.squeeze(x, axis=0), inputs)
state1, _ = cell_fn(theta, state0, inputs)
acc_state = nest.map_structure(lambda x: array_ops.expand_dims(x, axis=0),
state1)
return acc_state, state1
# If cell_grad is not given, derives the gradient function from
# cell_fn.
cell_grad = _GetCellGrad(cell_fn, cell_grad)
if extras is None:
# Derives 'extras' so that we can allocate extras' accumulator.
_, extras = cell_fn(theta, state0, _Index(inputs, 0))
extras = nest.map_structure(array_ops.zeros_like, extras)
else:
_, actual = cell_fn(theta, state0, _Index(inputs, 0))
_AssertIsCompatible(extras, actual)
return _Recurrent(
cell_fn=cell_fn,
cell_grad=cell_grad,
theta=theta,
state0=state0,
inputs=inputs,
max_input_length=max_input_length,
extras=extras,
use_tpu=use_tpu,
aligned_end=aligned_end).Compute()
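# Example (illustrative sketch, not part of the original library): calling
# Recurrent() with the hypothetical elementwise cell sketched next to
# _GetCellGrad above. Assumes `w` is a weight tensor broadcastable against
# `x`, `h0` is a [batch, dims] tensor, and `x` is a [time, batch, dims]
# tensor.
#
#   def _AddCell(theta, state0, inputs):
#     state1 = {'h': state0['h'] + theta['w'] * inputs['x']}
#     return state1, {}
#
#   acc_state, final_state = Recurrent(
#       theta={'w': w},
#       state0={'h': h0},
#       inputs={'x': x},
#       cell_fn=_AddCell)
#   # acc_state['h'] has shape [time, batch, dims]; final_state['h'] is the
#   # state after the last time step.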
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/recurrent/python/ops/recurrent.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A tf.compat.v1.nn.dynamic_rnn variant, built on the Recurrent class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.contrib.recurrent.python.ops import recurrent
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
def _GetDTypesFromStructure(struct):
dtypes_list = []
for x in nest.flatten(struct):
x = ops.convert_to_tensor(x)
dtypes_list.append(x.dtype)
return dtypes_list
def _SetShapeFromTemplate(struct, struct_template):
as_list = nest.flatten(struct)
template_as_list = nest.flatten(struct_template)
for element, template in zip(as_list, template_as_list):
element.set_shape(template.shape)
class _FunctionalRnnCell(object):
"""Wrapper around RNNCell which separates state from computation.
This class accomplishes the following:
* Turn the cell's `__call__` function into a pure function. The global
side effects are separated as `theta`. They are the variables created
for the weights of the computation.
* Unless the output is aliased as part of the state, extend the state to
contain the output so that we store the history in `Recurrent`.
* Set static shapes as required.
"""
def __init__(self, rnn_cell, seq_inputs, initial_state):
assert initial_state is not None
# TODO(drpng): Dtype needs to be configurable.
input_dtypes = [seq_inputs.dtype] + _GetDTypesFromStructure(initial_state)
# See _index.
like_inputs_t = nest.map_structure(
lambda x: array_ops.stop_gradient(array_ops.gather(x, 0)), seq_inputs)
input_structure = (like_inputs_t, initial_state)
@function.Defun(*input_dtypes)
def FlatCellStep(*flat_inputs):
"""The flattened version of `rnn_cell`."""
inputs_t, state0 = nest.pack_sequence_as(input_structure, flat_inputs)
_SetShapeFromTemplate(state0, initial_state)
_SetShapeFromTemplate(inputs_t, like_inputs_t)
outputs_t, state1 = rnn_cell(inputs_t, state0)
state_list = nest.flatten(state1)
self._output_shape = outputs_t.shape
if outputs_t in state_list:
output_index_in_state = state_list.index(outputs_t)
else:
output_index_in_state = None
if output_index_in_state is None:
self._prepend_output = True
self._output_state_idx = 0
return [outputs_t] + state_list
else:
self._output_state_idx = output_index_in_state
self._prepend_output = False
        # To save memory, we don't return the output separately from the
        # state list, since we know it's the same.
return state_list
def _ToPureFunction(func):
      # NOTE: This forces the creation of the function.
if func.captured_inputs:
pure_func = copy.copy(func)
# pylint: disable=protected-access
pure_func._extra_inputs = []
return pure_func
return func
pure_flat_cell_step = _ToPureFunction(FlatCellStep)
def CellStep(theta, extended_state0, inputs_t):
"""Performs one time steps on structured inputs.
The purpose of this function is to turn the parameters into flattened
versions, and to resolve the parameter order difference between
`Recurrent` and `RNNCell`.
In the event the cell returns a transformed output that is not aliased
within its state, the `extended_state0` also contains the output as its
first element.
Args:
theta: Weights required for the computation. A structure of tensors.
extended_state0: the state0, and possibly the output at the previous
time step. A structure of tensors.
inputs_t: the inputs at time t.
Returns:
A pair of the next state (inclusive of the output), and an empty list
(unused `extras`).
The next state is congruent to state0.
"""
extended_state0_flat = nest.flatten(extended_state0)
state0_flat = self.MaybeRemoveOutputFromState(extended_state0_flat)
full_inputs = [inputs_t] + state0_flat + theta
      # Note that the thetas are additional inputs appended as extra
# parameters.
cell_out = pure_flat_cell_step(*full_inputs)
return cell_out, []
self._cell_step = CellStep
self._theta = FlatCellStep.captured_inputs
self._zero_state = rnn_cell.zero_state
self._state_template = initial_state
self._output_size = rnn_cell.output_size
@property
def extended_initial_state(self):
if self._prepend_output:
return [
array_ops.zeros(
self._output_shape,
dtype=_GetDTypesFromStructure(self._state_template)[0]),
self._state_template
]
else:
# The base case, where the output is just the hidden state.
return self._state_template
@property
def cell_step(self):
return self._cell_step
@property
def theta(self):
return self._theta
@property
def state_template(self):
return self._state_template
@property
def output_shape(self):
return self._output_shape
def GetOutputFromState(self, state):
return nest.flatten(state)[self._output_state_idx]
def MaybeRemoveOutputFromState(self, flat_state):
if self._prepend_output:
return flat_state[1:]
return flat_state
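# Example (illustrative sketch, not part of the original library): how the
# wrapper above is typically consumed (mirroring functional_rnn below).
# Assumes `cell` is an RNNCell, `seq_inputs` is a time-major
# [time, batch, input_size] tensor and `initial_state` matches
# cell.zero_state.
#
#   func_cell = _FunctionalRnnCell(cell, seq_inputs, initial_state)
#   extended_acc_state, extended_final_state = recurrent.Recurrent(
#       theta=func_cell.theta,
#       state0=func_cell.extended_initial_state,
#       inputs=seq_inputs,
#       cell_fn=func_cell.cell_step)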
def _ApplyLengthsToBatch(sequence_lengths, tf_output):
# TODO(drpng): just use Update so that we don't carry over the gradients?
"""Sets the output to be zero at the end of the sequence."""
# output is batch major.
shape = array_ops.shape(tf_output)
batch_size, max_time, vector_size = shape[0], shape[1], shape[2]
output_time = array_ops.tile(math_ops.range(0, max_time), [batch_size])
output_time = array_ops.reshape(output_time, [batch_size, max_time])
lengths = array_ops.tile(
array_ops.reshape(sequence_lengths, [-1, 1]), [1, max_time])
is_less = math_ops.cast(
math_ops.less(output_time, lengths), dtype=tf_output.dtype)
keep_mask = array_ops.tile(
array_ops.expand_dims(is_less, -1), [1, 1, vector_size])
final_output = keep_mask * tf_output
return final_output
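# Worked example (illustrative): with sequence_lengths = [2] and tf_output of
# shape [batch_size=1, max_time=3, vector_size], output_time is [[0, 1, 2]]
# and lengths is [[2, 2, 2]], so keep_mask keeps time steps 0 and 1 and
# zeroes out time step 2.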
def _PickFinalStateFromHistory(acc_state, sequence_length):
"""Implements acc_state[sequence_length - 1]."""
# This will work on all platforms, unlike the regular slice.
last_value = []
for state_var in nest.flatten(acc_state):
# We compute the following with matrix operations:
# last_var = state_var[sequence_length - 1]
shape = array_ops.shape(state_var)
max_time, batch_size = shape[0], shape[1]
output_time = array_ops.tile(math_ops.range(0, max_time), [batch_size])
output_time = array_ops.reshape(output_time, [batch_size, max_time])
lengths = array_ops.tile(
array_ops.reshape(sequence_length, [-1, 1]), [1, max_time])
last_idx = math_ops.cast(
math_ops.equal(output_time, lengths - 1), dtype=state_var.dtype)
last_idx = array_ops.transpose(last_idx)
last_idx_for_bcast = array_ops.expand_dims(last_idx, -1)
sliced = math_ops.multiply(last_idx_for_bcast, state_var)
last_var = math_ops.reduce_sum(sliced, 0)
last_value += [last_var]
return nest.pack_sequence_as(acc_state, last_value)
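# Worked example (illustrative): with sequence_length = [2] and a state_var of
# shape [max_time=3, batch=1, dims], last_idx marks time step 1 (the last
# valid step), so the reduce_sum over the time axis returns state_var[1],
# i.e. acc_state[sequence_length - 1] for that batch element.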
def _PostProcessOutput(extended_acc_state, extended_final_state, func_cell,
total_time, inputs_lengths, is_reversed):
"""Post-process output of recurrent.
This function takes the accumulated extended state and extracts the requested
state and output.
  When `inputs_lengths` has been set, it extracts the output from the
  accumulated state. It also zeroes out outputs past each sequence's length.
When `is_reversed` is true, the output will be reversed in this function.
It also sets the static shape information.
Args:
extended_acc_state: A structure containing the accumulated state at each
time. It may contain the output at each time as well.
extended_final_state: A structure containing the final state. It may contain
the output at the final time.
func_cell: The functional wrapper around the cell.
total_time: A scalar integer tensor.
inputs_lengths: An integer tensor with one entry per input.
is_reversed: A boolean to indicate if the sequence is reversed.
Returns:
A tuple with the outputs at each time, and the final state.
"""
if inputs_lengths is None or is_reversed:
flat_final_state = func_cell.MaybeRemoveOutputFromState(
nest.flatten(extended_final_state))
tf_state = nest.pack_sequence_as(func_cell.state_template, flat_final_state)
else:
# The accumulated state is over the entire sequence, so we pick it
# out from the acc_state sequence.
flat_acc_state = func_cell.MaybeRemoveOutputFromState(
nest.flatten(extended_acc_state))
acc_state = nest.pack_sequence_as(func_cell.state_template, flat_acc_state)
tf_state = _PickFinalStateFromHistory(acc_state, inputs_lengths)
output_from_state = func_cell.GetOutputFromState(extended_acc_state)
if is_reversed:
output_from_state = array_ops.reverse(output_from_state, [0])
tf_output = array_ops.transpose(output_from_state, [1, 0, 2])
tf_output.set_shape(
[func_cell.output_shape[0], total_time, func_cell.output_shape[1]])
if inputs_lengths is not None:
    # Need to set the outputs to zero.
tf_output = _ApplyLengthsToBatch(inputs_lengths, tf_output)
_SetShapeFromTemplate(tf_state, func_cell.state_template)
return tf_output, tf_state
# pylint: disable=invalid-name
def functional_rnn(cell,
inputs,
sequence_length=None,
initial_state=None,
dtype=None,
time_major=False,
scope=None,
use_tpu=False,
reverse=False):
"""Same interface as `tf.compat.v1.nn.dynamic_rnn`."""
with variable_scope.variable_scope(scope or 'rnn'):
if not time_major:
inputs = nest.map_structure(lambda t: array_ops.transpose(t, [1, 0, 2]),
inputs)
inputs_flat = nest.flatten(inputs)
batch_size = array_ops.shape(inputs_flat[0])[1]
if initial_state is None:
initial_state = cell.zero_state(batch_size, dtype)
func_cell = _FunctionalRnnCell(cell, inputs, initial_state)
if sequence_length is not None:
max_length = math_ops.reduce_max(sequence_length)
else:
max_length = None
if reverse:
inputs = array_ops.reverse(inputs, [0])
extended_acc_state, extended_final_state = recurrent.Recurrent(
theta=func_cell.theta,
state0=func_cell.extended_initial_state,
inputs=inputs,
cell_fn=func_cell.cell_step,
max_input_length=max_length,
use_tpu=use_tpu,
aligned_end=reverse)
tf_output, tf_state = _PostProcessOutput(
extended_acc_state,
extended_final_state,
func_cell,
inputs_flat[0].shape[0],
sequence_length,
is_reversed=reverse)
if time_major:
tf_output = array_ops.transpose(tf_output, [1, 0, 2])
return tf_output, tf_state
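# Example (illustrative sketch, not part of the original library): drop-in use
# of functional_rnn in place of tf.compat.v1.nn.dynamic_rnn. Assumes `inputs`
# is a [batch, time, input_size] tensor, `lengths` is an int32 [batch] vector,
# and that rnn_cell (tf.compat.v1.nn.rnn_cell) and dtypes
# (tensorflow.python.framework.dtypes) are available.
#
#   cell = rnn_cell.BasicLSTMCell(32)
#   outputs, final_state = functional_rnn(
#       cell=cell,
#       inputs=inputs,
#       sequence_length=lengths,
#       dtype=dtypes.float32)
#   # outputs: [batch, time, 32]; final_state: an LSTMStateTuple.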
def bidirectional_functional_rnn(cell_fw,
cell_bw,
inputs,
initial_state_fw=None,
initial_state_bw=None,
dtype=None,
sequence_length=None,
time_major=False,
use_tpu=False,
fast_reverse=False,
scope=None):
"""Creates a bidirectional recurrent neural network.
  Performs fully dynamic unrolling of inputs in both directions. Built to be
  API compatible with `tf.compat.v1.nn.bidirectional_dynamic_rnn`, but
  implemented with functional control flow for TPU compatibility.
Args:
cell_fw: An instance of `tf.compat.v1.nn.rnn_cell.RNNCell`.
cell_bw: An instance of `tf.compat.v1.nn.rnn_cell.RNNCell`.
inputs: The RNN inputs. If time_major == False (default), this must be a
Tensor (or hierarchical structure of Tensors) of shape [batch_size,
max_time, ...]. If time_major == True, this must be a Tensor
(or hierarchical structure of Tensors) of shape: [max_time, batch_size,
...]. The first two dimensions must match across all the inputs, but
otherwise the ranks and other shape components may differ.
initial_state_fw: An optional initial state for `cell_fw`. Should match
`cell_fw.zero_state` in structure and type.
initial_state_bw: An optional initial state for `cell_bw`. Should match
`cell_bw.zero_state` in structure and type.
dtype: (optional) The data type for the initial state and expected output.
Required if initial_states are not provided or RNN state has a
heterogeneous dtype.
sequence_length: An optional int32/int64 vector sized [batch_size]. Used to
copy-through state and zero-out outputs when past a batch element's
sequence length. So it's more for correctness than performance.
time_major: Whether the `inputs` tensor is in "time major" format.
use_tpu: Whether to enable TPU-compatible operation. If True, does not truly
reverse `inputs` in the backwards RNN. Once b/69305369 is fixed, we can
remove this flag.
fast_reverse: Whether to use fast tf.reverse to replace tf.reverse_sequence.
This is only possible when either all sequence lengths are the same inside
the batch, or when the cell function does not change the state on padded
input.
scope: An optional scope name for the dynamic RNN.
Returns:
outputs: A tuple of `(output_fw, output_bw)`. The output of the forward and
backward RNN. If time_major == False (default), these will
be Tensors shaped: [batch_size, max_time, cell.output_size]. If
time_major == True, these will be Tensors shaped:
[max_time, batch_size, cell.output_size]. Note, if cell.output_size is a
(possibly nested) tuple of integers or TensorShape objects, then the
output for that direction will be a tuple having the same structure as
cell.output_size, containing Tensors having shapes corresponding to the
shape data in cell.output_size.
final_states: A tuple of `(final_state_fw, final_state_bw)`. A Tensor or
hierarchical structure of Tensors indicating the final cell state in each
direction. Must have the same structure and shape as cell.zero_state.
Raises:
ValueError: If `initial_state_fw` is None or `initial_state_bw` is None and
`dtype` is not provided.
"""
# Keep this code in sync with tf.compat.v1.nn.dynamic_rnn for compatibility.
with variable_scope.variable_scope(scope or 'bidirectional_rnn'):
# Forward direction
with variable_scope.variable_scope('fw') as fw_scope:
output_fw, output_state_fw = functional_rnn(
cell=cell_fw,
inputs=inputs,
sequence_length=sequence_length,
initial_state=initial_state_fw,
dtype=dtype,
time_major=time_major,
scope=fw_scope,
use_tpu=use_tpu)
# Backward direction
if not time_major:
time_dim = 1
batch_dim = 0
else:
time_dim = 0
batch_dim = 1
def _reverse(input_, seq_lengths, seq_dim, batch_dim):
if seq_lengths is not None:
return array_ops.reverse_sequence(
input=input_,
seq_lengths=seq_lengths,
seq_dim=seq_dim,
batch_dim=batch_dim)
else:
# See b/69305369.
assert not use_tpu, (
'Bidirectional with variable sequence lengths unsupported on TPU')
return array_ops.reverse(input_, axis=[seq_dim])
with variable_scope.variable_scope('bw') as bw_scope:
if not fast_reverse:
inputs = _reverse(
inputs,
seq_lengths=sequence_length,
seq_dim=time_dim,
batch_dim=batch_dim)
output_bw, output_state_bw = functional_rnn(
cell=cell_bw,
inputs=inputs,
sequence_length=sequence_length,
initial_state=initial_state_bw,
dtype=dtype,
time_major=time_major,
scope=bw_scope,
use_tpu=use_tpu,
reverse=fast_reverse)
if not fast_reverse:
output_bw = _reverse(
output_bw,
seq_lengths=sequence_length,
seq_dim=time_dim,
batch_dim=batch_dim)
outputs = (output_fw, output_bw)
output_states = (output_state_fw, output_state_bw)
return (outputs, output_states)
# pylint: enable=invalid-name
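# Example (illustrative sketch, not part of the original library):
# bidirectional use. Assumes `cell_fw` and `cell_bw` are RNNCell instances,
# `inputs` is a [batch, time, input_size] tensor and `lengths` is an int32
# [batch] vector.
#
#   (out_fw, out_bw), (state_fw, state_bw) = bidirectional_functional_rnn(
#       cell_fw=cell_fw,
#       cell_bw=cell_bw,
#       inputs=inputs,
#       sequence_length=lengths,
#       dtype=dtypes.float32)
#   # out_fw and out_bw are each [batch, time, cell.output_size].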
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/recurrent/python/ops/functional_rnn.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deprecated library for creating sequence-to-sequence models in TensorFlow.
@@attention_decoder
@@basic_rnn_seq2seq
@@embedding_attention_decoder
@@embedding_attention_seq2seq
@@embedding_rnn_decoder
@@embedding_rnn_seq2seq
@@embedding_tied_rnn_seq2seq
@@model_with_buckets
@@one2many_rnn_seq2seq
@@rnn_decoder
@@sequence_loss
@@sequence_loss_by_example
@@tied_rnn_seq2seq
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import attention_decoder
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import basic_rnn_seq2seq
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_attention_decoder
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_attention_seq2seq
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_rnn_decoder
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_rnn_seq2seq
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_tied_rnn_seq2seq
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import model_with_buckets
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import one2many_rnn_seq2seq
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import rnn_decoder
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import sequence_loss
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import sequence_loss_by_example
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import tied_rnn_seq2seq
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = []
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/legacy_seq2seq/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/legacy_seq2seq/python/__init__.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for functional style sequence-to-sequence models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import random
import numpy as np
from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as seq2seq_lib
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
class Seq2SeqTest(test.TestCase):
def testRNNDecoder(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
_, enc_state = rnn.static_rnn(
rnn_cell.GRUCell(2), inp, dtype=dtypes.float32)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
cell = core_rnn_cell.OutputProjectionWrapper(rnn_cell.GRUCell(2), 4)
dec, mem = seq2seq_lib.rnn_decoder(dec_inp, enc_state, cell)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testBasicRNNSeq2Seq(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
cell = core_rnn_cell.OutputProjectionWrapper(rnn_cell.GRUCell(2), 4)
dec, mem = seq2seq_lib.basic_rnn_seq2seq(inp, dec_inp, cell)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testTiedRNNSeq2Seq(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
cell = core_rnn_cell.OutputProjectionWrapper(rnn_cell.GRUCell(2), 4)
dec, mem = seq2seq_lib.tied_rnn_seq2seq(inp, dec_inp, cell)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(1, len(res))
self.assertEqual((2, 2), res[0].shape)
def testEmbeddingRNNDecoder(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
cell_fn = lambda: rnn_cell.BasicLSTMCell(2)
cell = cell_fn()
_, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
dec_inp = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.embedding_rnn_decoder(
dec_inp, enc_state, cell_fn(), num_symbols=4, embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
res = sess.run([mem])
self.assertEqual(1, len(res))
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
def testEmbeddingRNNSeq2Seq(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
enc_inp = [
constant_op.constant(
1, dtypes.int32, shape=[2]) for i in range(2)
]
dec_inp = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
cell_fn = lambda: rnn_cell.BasicLSTMCell(2)
cell = cell_fn()
dec, mem = seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test with state_is_tuple=False.
with variable_scope.variable_scope("no_tuple"):
cell_nt = rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
dec, mem = seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp,
cell_nt,
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 4), res[0].shape)
# Test externally provided output projection.
w = variable_scope.get_variable("proj_w", [2, 5])
b = variable_scope.get_variable("proj_b", [5])
with variable_scope.variable_scope("proj_seq2seq"):
dec, _ = seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp,
cell_fn(),
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2,
output_projection=(w, b))
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
# Test that previous-feeding model ignores inputs after the first.
dec_inp2 = [
constant_op.constant(
0, dtypes.int32, shape=[2]) for _ in range(3)
]
with variable_scope.variable_scope("other"):
d3, _ = seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp2,
cell_fn(),
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2,
feed_previous=constant_op.constant(True))
with variable_scope.variable_scope("other_2"):
d1, _ = seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp,
cell_fn(),
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2,
feed_previous=True)
with variable_scope.variable_scope("other_3"):
d2, _ = seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp2,
cell_fn(),
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2,
feed_previous=True)
sess.run([variables.global_variables_initializer()])
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testEmbeddingTiedRNNSeq2Seq(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
enc_inp = [
constant_op.constant(
1, dtypes.int32, shape=[2]) for i in range(2)
]
dec_inp = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
cell = functools.partial(rnn_cell.BasicLSTMCell, 2, state_is_tuple=True)
dec, mem = seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell(), num_symbols=5, embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test when num_decoder_symbols is provided, the size of decoder output
# is num_decoder_symbols.
with variable_scope.variable_scope("decoder_symbols_seq2seq"):
dec, mem = seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp,
cell(),
num_symbols=5,
num_decoder_symbols=3,
embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 3), res[0].shape)
# Test externally provided output projection.
w = variable_scope.get_variable("proj_w", [2, 5])
b = variable_scope.get_variable("proj_b", [5])
with variable_scope.variable_scope("proj_seq2seq"):
dec, _ = seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp,
cell(),
num_symbols=5,
embedding_size=2,
output_projection=(w, b))
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
# Test that previous-feeding model ignores inputs after the first.
dec_inp2 = [constant_op.constant(0, dtypes.int32, shape=[2])] * 3
with variable_scope.variable_scope("other"):
d3, _ = seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp2,
cell(),
num_symbols=5,
embedding_size=2,
feed_previous=constant_op.constant(True))
with variable_scope.variable_scope("other_2"):
d1, _ = seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp,
cell(),
num_symbols=5,
embedding_size=2,
feed_previous=True)
with variable_scope.variable_scope("other_3"):
d2, _ = seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp2,
cell(),
num_symbols=5,
embedding_size=2,
feed_previous=True)
sess.run([variables.global_variables_initializer()])
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testAttentionDecoder1(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: rnn_cell.GRUCell(2)
cell = cell_fn()
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
], 1)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Create a new cell instance for the decoder, since it uses a
# different variable scope
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(), output_size=4)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testAttentionDecoder2(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: rnn_cell.GRUCell(2)
cell = cell_fn()
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
], 1)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(),
output_size=4, num_heads=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testDynamicAttentionDecoder1(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: rnn_cell.GRUCell(2)
cell = cell_fn()
inp = constant_op.constant(0.5, shape=[2, 2, 2])
enc_outputs, enc_state = rnn.dynamic_rnn(
cell, inp, dtype=dtypes.float32)
attn_states = enc_outputs
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(), output_size=4)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testDynamicAttentionDecoder2(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: rnn_cell.GRUCell(2)
cell = cell_fn()
inp = constant_op.constant(0.5, shape=[2, 2, 2])
enc_outputs, enc_state = rnn.dynamic_rnn(
cell, inp, dtype=dtypes.float32)
attn_states = enc_outputs
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(),
output_size=4, num_heads=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testAttentionDecoderStateIsTuple(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
single_cell = lambda: rnn_cell.BasicLSTMCell( # pylint: disable=g-long-lambda
2, state_is_tuple=True)
cell_fn = lambda: rnn_cell.MultiRNNCell( # pylint: disable=g-long-lambda
cells=[single_cell() for _ in range(2)], state_is_tuple=True)
cell = cell_fn()
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
], 1)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(), output_size=4)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(2, len(res[0]))
self.assertEqual((2, 2), res[0][0].c.shape)
self.assertEqual((2, 2), res[0][0].h.shape)
self.assertEqual((2, 2), res[0][1].c.shape)
self.assertEqual((2, 2), res[0][1].h.shape)
def testDynamicAttentionDecoderStateIsTuple(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: rnn_cell.MultiRNNCell( # pylint: disable=g-long-lambda
cells=[rnn_cell.BasicLSTMCell(2) for _ in range(2)])
cell = cell_fn()
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs
], 1)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(), output_size=4)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(2, len(res[0]))
self.assertEqual((2, 2), res[0][0].c.shape)
self.assertEqual((2, 2), res[0][0].h.shape)
self.assertEqual((2, 2), res[0][1].c.shape)
self.assertEqual((2, 2), res[0][1].h.shape)
def testEmbeddingAttentionDecoder(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
cell_fn = lambda: rnn_cell.GRUCell(2)
cell = cell_fn()
enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
], 1)
dec_inp = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.embedding_attention_decoder(
dec_inp,
enc_state,
attn_states,
cell_fn(),
num_symbols=4,
embedding_size=2,
output_size=3)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 3), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testEmbeddingAttentionSeq2Seq(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
enc_inp = [
constant_op.constant(
1, dtypes.int32, shape=[2]) for i in range(2)
]
dec_inp = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
cell_fn = lambda: rnn_cell.BasicLSTMCell(2)
cell = cell_fn()
dec, mem = seq2seq_lib.embedding_attention_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test with state_is_tuple=False.
with variable_scope.variable_scope("no_tuple"):
cell_fn = functools.partial(
rnn_cell.BasicLSTMCell, 2, state_is_tuple=False)
cell_nt = cell_fn()
dec, mem = seq2seq_lib.embedding_attention_seq2seq(
enc_inp,
dec_inp,
cell_nt,
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 4), res[0].shape)
# Test externally provided output projection.
w = variable_scope.get_variable("proj_w", [2, 5])
b = variable_scope.get_variable("proj_b", [5])
with variable_scope.variable_scope("proj_seq2seq"):
dec, _ = seq2seq_lib.embedding_attention_seq2seq(
enc_inp,
dec_inp,
cell_fn(),
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2,
output_projection=(w, b))
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
# TODO(ebrevdo, lukaszkaiser): Re-enable once RNNCells allow reuse
# within a variable scope that already has a weights tensor.
#
# # Test that previous-feeding model ignores inputs after the first.
# dec_inp2 = [
# constant_op.constant(
# 0, dtypes.int32, shape=[2]) for _ in range(3)
# ]
# with variable_scope.variable_scope("other"):
# d3, _ = seq2seq_lib.embedding_attention_seq2seq(
# enc_inp,
# dec_inp2,
# cell_fn(),
# num_encoder_symbols=2,
# num_decoder_symbols=5,
# embedding_size=2,
# feed_previous=constant_op.constant(True))
# sess.run([variables.global_variables_initializer()])
# variable_scope.get_variable_scope().reuse_variables()
# cell = cell_fn()
# d1, _ = seq2seq_lib.embedding_attention_seq2seq(
# enc_inp,
# dec_inp,
# cell,
# num_encoder_symbols=2,
# num_decoder_symbols=5,
# embedding_size=2,
# feed_previous=True)
# d2, _ = seq2seq_lib.embedding_attention_seq2seq(
# enc_inp,
# dec_inp2,
# cell,
# num_encoder_symbols=2,
# num_decoder_symbols=5,
# embedding_size=2,
# feed_previous=True)
# res1 = sess.run(d1)
# res2 = sess.run(d2)
# res3 = sess.run(d3)
# self.assertAllClose(res1, res2)
# self.assertAllClose(res1, res3)
def testOne2ManyRNNSeq2Seq(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
enc_inp = [
constant_op.constant(
1, dtypes.int32, shape=[2]) for i in range(2)
]
dec_inp_dict = {}
dec_inp_dict["0"] = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
dec_inp_dict["1"] = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(4)
]
dec_symbols_dict = {"0": 5, "1": 6}
def EncCellFn():
return rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
def DecCellsFn():
return dict((k, rnn_cell.BasicLSTMCell(2, state_is_tuple=True))
for k in dec_symbols_dict)
outputs_dict, state_dict = (seq2seq_lib.one2many_rnn_seq2seq(
enc_inp, dec_inp_dict, EncCellFn(), DecCellsFn(),
2, dec_symbols_dict, embedding_size=2))
sess.run([variables.global_variables_initializer()])
res = sess.run(outputs_dict["0"])
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run(outputs_dict["1"])
self.assertEqual(4, len(res))
self.assertEqual((2, 6), res[0].shape)
res = sess.run([state_dict["0"]])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
res = sess.run([state_dict["1"]])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test that previous-feeding model ignores inputs after the first, i.e.
# dec_inp_dict2 has different inputs from dec_inp_dict after the first
# time-step.
dec_inp_dict2 = {}
dec_inp_dict2["0"] = [
constant_op.constant(
0, dtypes.int32, shape=[2]) for _ in range(3)
]
dec_inp_dict2["1"] = [
constant_op.constant(
0, dtypes.int32, shape=[2]) for _ in range(4)
]
with variable_scope.variable_scope("other"):
outputs_dict3, _ = seq2seq_lib.one2many_rnn_seq2seq(
enc_inp,
dec_inp_dict2,
EncCellFn(),
DecCellsFn(),
2,
dec_symbols_dict,
embedding_size=2,
feed_previous=constant_op.constant(True))
with variable_scope.variable_scope("other_2"):
outputs_dict1, _ = seq2seq_lib.one2many_rnn_seq2seq(
enc_inp,
dec_inp_dict,
EncCellFn(),
DecCellsFn(),
2,
dec_symbols_dict,
embedding_size=2,
feed_previous=True)
with variable_scope.variable_scope("other_3"):
outputs_dict2, _ = seq2seq_lib.one2many_rnn_seq2seq(
enc_inp,
dec_inp_dict2,
EncCellFn(),
DecCellsFn(),
2,
dec_symbols_dict,
embedding_size=2,
feed_previous=True)
sess.run([variables.global_variables_initializer()])
res1 = sess.run(outputs_dict1["0"])
res2 = sess.run(outputs_dict2["0"])
res3 = sess.run(outputs_dict3["0"])
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testSequenceLoss(self):
with self.cached_session() as sess:
logits = [constant_op.constant(i + 0.5, shape=[2, 5]) for i in range(3)]
targets = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
weights = [constant_op.constant(1.0, shape=[2]) for i in range(3)]
average_loss_per_example = seq2seq_lib.sequence_loss(
logits,
targets,
weights,
average_across_timesteps=True,
average_across_batch=True)
res = sess.run(average_loss_per_example)
self.assertAllClose(1.60944, res)
average_loss_per_sequence = seq2seq_lib.sequence_loss(
logits,
targets,
weights,
average_across_timesteps=False,
average_across_batch=True)
res = sess.run(average_loss_per_sequence)
self.assertAllClose(4.828314, res)
total_loss = seq2seq_lib.sequence_loss(
logits,
targets,
weights,
average_across_timesteps=False,
average_across_batch=False)
res = sess.run(total_loss)
self.assertAllClose(9.656628, res)
def testSequenceLossByExample(self):
with self.cached_session() as sess:
output_classes = 5
logits = [
constant_op.constant(
i + 0.5, shape=[2, output_classes]) for i in range(3)
]
targets = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
weights = [constant_op.constant(1.0, shape=[2]) for i in range(3)]
average_loss_per_example = (seq2seq_lib.sequence_loss_by_example(
logits, targets, weights, average_across_timesteps=True))
res = sess.run(average_loss_per_example)
self.assertAllClose(np.asarray([1.609438, 1.609438]), res)
loss_per_sequence = seq2seq_lib.sequence_loss_by_example(
logits, targets, weights, average_across_timesteps=False)
res = sess.run(loss_per_sequence)
self.assertAllClose(np.asarray([4.828314, 4.828314]), res)
# TODO(ebrevdo, lukaszkaiser): Re-enable once RNNCells allow reuse
# within a variable scope that already has a weights tensor.
#
# def testModelWithBucketsScopeAndLoss(self):
# """Test variable scope reuse is not reset after model_with_buckets."""
# classes = 10
# buckets = [(4, 4), (8, 8)]
# with self.cached_session():
# # Here comes a sample Seq2Seq model using GRU cells.
# def SampleGRUSeq2Seq(enc_inp, dec_inp, weights, per_example_loss):
# """Example sequence-to-sequence model that uses GRU cells."""
# def GRUSeq2Seq(enc_inp, dec_inp):
# cell = rnn_cell.MultiRNNCell(
# [rnn_cell.GRUCell(24) for _ in range(2)])
# return seq2seq_lib.embedding_attention_seq2seq(
# enc_inp,
# dec_inp,
# cell,
# num_encoder_symbols=classes,
# num_decoder_symbols=classes,
# embedding_size=24)
# targets = [dec_inp[i + 1] for i in range(len(dec_inp) - 1)] + [0]
# return seq2seq_lib.model_with_buckets(
# enc_inp,
# dec_inp,
# targets,
# weights,
# buckets,
# GRUSeq2Seq,
# per_example_loss=per_example_loss)
# # Now we construct the copy model.
# inp = [
# array_ops.placeholder(
# dtypes.int32, shape=[None]) for _ in range(8)
# ]
# out = [
# array_ops.placeholder(
# dtypes.int32, shape=[None]) for _ in range(8)
# ]
# weights = [
# array_ops.ones_like(
# inp[0], dtype=dtypes.float32) for _ in range(8)
# ]
# with variable_scope.variable_scope("root"):
# _, losses1 = SampleGRUSeq2Seq(
# inp, out, weights, per_example_loss=False)
# # Now check that we did not accidentally set reuse.
# self.assertEqual(False, variable_scope.get_variable_scope().reuse)
# with variable_scope.variable_scope("new"):
  #       _, losses2 = SampleGRUSeq2Seq(
# inp, out, weights, per_example_loss=True)
# # First loss is scalar, the second one is a 1-dimensional tensor.
# self.assertEqual([], losses1[0].get_shape().as_list())
# self.assertEqual([None], losses2[0].get_shape().as_list())
def testModelWithBuckets(self):
"""Larger tests that does full sequence-to-sequence model training."""
# We learn to copy 10 symbols in 2 buckets: length 4 and length 8.
classes = 10
buckets = [(4, 4), (8, 8)]
perplexities = [[], []] # Results for each bucket.
random_seed.set_random_seed(111)
random.seed(111)
np.random.seed(111)
with self.cached_session() as sess:
# We use sampled softmax so we keep output projection separate.
w = variable_scope.get_variable("proj_w", [24, classes])
w_t = array_ops.transpose(w)
b = variable_scope.get_variable("proj_b", [classes])
# Here comes a sample Seq2Seq model using GRU cells.
def SampleGRUSeq2Seq(enc_inp, dec_inp, weights):
"""Example sequence-to-sequence model that uses GRU cells."""
def GRUSeq2Seq(enc_inp, dec_inp):
cell = rnn_cell.MultiRNNCell(
[rnn_cell.GRUCell(24) for _ in range(2)], state_is_tuple=True)
return seq2seq_lib.embedding_attention_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols=classes,
num_decoder_symbols=classes,
embedding_size=24,
output_projection=(w, b))
targets = [dec_inp[i + 1] for i in range(len(dec_inp) - 1)] + [0]
def SampledLoss(labels, logits):
labels = array_ops.reshape(labels, [-1, 1])
return nn_impl.sampled_softmax_loss(
weights=w_t,
biases=b,
labels=labels,
inputs=logits,
num_sampled=8,
num_classes=classes)
return seq2seq_lib.model_with_buckets(
enc_inp,
dec_inp,
targets,
weights,
buckets,
GRUSeq2Seq,
softmax_loss_function=SampledLoss)
# Now we construct the copy model.
batch_size = 8
inp = [
array_ops.placeholder(
dtypes.int32, shape=[None]) for _ in range(8)
]
out = [
array_ops.placeholder(
dtypes.int32, shape=[None]) for _ in range(8)
]
weights = [
array_ops.ones_like(
inp[0], dtype=dtypes.float32) for _ in range(8)
]
with variable_scope.variable_scope("root"):
_, losses = SampleGRUSeq2Seq(inp, out, weights)
updates = []
params = variables.global_variables()
optimizer = adam.AdamOptimizer(0.03, epsilon=1e-5)
for i in range(len(buckets)):
full_grads = gradients_impl.gradients(losses[i], params)
grads, _ = clip_ops.clip_by_global_norm(full_grads, 30.0)
update = optimizer.apply_gradients(zip(grads, params))
updates.append(update)
sess.run([variables.global_variables_initializer()])
steps = 6
for _ in range(steps):
bucket = random.choice(np.arange(len(buckets)))
length = buckets[bucket][0]
i = [
np.array(
[np.random.randint(9) + 1 for _ in range(batch_size)],
dtype=np.int32) for _ in range(length)
]
# 0 is our "GO" symbol here.
o = [np.array([0] * batch_size, dtype=np.int32)] + i
feed = {}
for i1, i2, o1, o2 in zip(inp[:length], i[:length], out[:length],
o[:length]):
feed[i1.name] = i2
feed[o1.name] = o2
if length < 8: # For the 4-bucket, we need the 5th as target.
feed[out[length].name] = o[length]
res = sess.run([updates[bucket], losses[bucket]], feed)
perplexities[bucket].append(math.exp(float(res[1])))
for bucket in range(len(buckets)):
if len(perplexities[bucket]) > 1: # Assert that perplexity went down.
self.assertLess(perplexities[bucket][-1], # 20% margin of error.
1.2 * perplexities[bucket][0])
def testModelWithBooleanFeedPrevious(self):
"""Test the model behavior when feed_previous is True.
For example, the following two cases have the same effect:
- Train `embedding_rnn_seq2seq` with `feed_previous=True`, which contains
        an `embedding_rnn_decoder` with `feed_previous=True` and
`update_embedding_for_previous=True`. The decoder is fed with "<Go>"
and outputs "A, B, C".
- Train `embedding_rnn_seq2seq` with `feed_previous=False`. The decoder
is fed with "<Go>, A, B".
"""
num_encoder_symbols = 3
num_decoder_symbols = 5
batch_size = 2
num_enc_timesteps = 2
num_dec_timesteps = 3
def TestModel(seq2seq):
with self.session(graph=ops.Graph()) as sess:
random_seed.set_random_seed(111)
random.seed(111)
np.random.seed(111)
enc_inp = [
constant_op.constant(
i + 1, dtypes.int32, shape=[batch_size])
for i in range(num_enc_timesteps)
]
dec_inp_fp_true = [
constant_op.constant(
i, dtypes.int32, shape=[batch_size])
for i in range(num_dec_timesteps)
]
dec_inp_holder_fp_false = [
array_ops.placeholder(
dtypes.int32, shape=[batch_size])
for _ in range(num_dec_timesteps)
]
targets = [
constant_op.constant(
i + 1, dtypes.int32, shape=[batch_size])
for i in range(num_dec_timesteps)
]
weights = [
constant_op.constant(
1.0, shape=[batch_size]) for i in range(num_dec_timesteps)
]
def ForwardBackward(enc_inp, dec_inp, feed_previous):
scope_name = "fp_{}".format(feed_previous)
with variable_scope.variable_scope(scope_name):
dec_op, _ = seq2seq(enc_inp, dec_inp, feed_previous=feed_previous)
net_variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
scope_name)
optimizer = adam.AdamOptimizer(0.03, epsilon=1e-5)
update_op = optimizer.minimize(
seq2seq_lib.sequence_loss(dec_op, targets, weights),
var_list=net_variables)
return dec_op, update_op, net_variables
dec_op_fp_true, update_fp_true, variables_fp_true = ForwardBackward(
enc_inp, dec_inp_fp_true, feed_previous=True)
_, update_fp_false, variables_fp_false = ForwardBackward(
enc_inp, dec_inp_holder_fp_false, feed_previous=False)
sess.run(variables.global_variables_initializer())
        # We only check consistency between the variables that exist in both
        # the feed_previous=True and feed_previous=False models. Variables
        # created by the loop_function in the feed_previous=True model are
        # ignored.
v_false_name_dict = {
v.name.split("/", 1)[-1]: v
for v in variables_fp_false
}
matched_variables = [(v, v_false_name_dict[v.name.split("/", 1)[-1]])
for v in variables_fp_true]
for v_true, v_false in matched_variables:
sess.run(state_ops.assign(v_false, v_true))
# Take the symbols generated by the decoder with feed_previous=True as
# the true input symbols for the decoder with feed_previous=False.
dec_fp_true = sess.run(dec_op_fp_true)
output_symbols_fp_true = np.argmax(dec_fp_true, axis=2)
dec_inp_fp_false = np.vstack((dec_inp_fp_true[0].eval(),
output_symbols_fp_true[:-1]))
sess.run(update_fp_true)
sess.run(update_fp_false, {
holder: inp
for holder, inp in zip(dec_inp_holder_fp_false, dec_inp_fp_false)
})
for v_true, v_false in matched_variables:
self.assertAllClose(v_true.eval(), v_false.eval())
def EmbeddingRNNSeq2SeqF(enc_inp, dec_inp, feed_previous):
cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
return seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size=2,
feed_previous=feed_previous)
def EmbeddingRNNSeq2SeqNoTupleF(enc_inp, dec_inp, feed_previous):
cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
return seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size=2,
feed_previous=feed_previous)
def EmbeddingTiedRNNSeq2Seq(enc_inp, dec_inp, feed_previous):
cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
return seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp,
cell,
num_decoder_symbols,
embedding_size=2,
feed_previous=feed_previous)
def EmbeddingTiedRNNSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
return seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp,
cell,
num_decoder_symbols,
embedding_size=2,
feed_previous=feed_previous)
def EmbeddingAttentionSeq2Seq(enc_inp, dec_inp, feed_previous):
cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
return seq2seq_lib.embedding_attention_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size=2,
feed_previous=feed_previous)
def EmbeddingAttentionSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
return seq2seq_lib.embedding_attention_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size=2,
feed_previous=feed_previous)
for model in (EmbeddingRNNSeq2SeqF, EmbeddingRNNSeq2SeqNoTupleF,
EmbeddingTiedRNNSeq2Seq, EmbeddingTiedRNNSeq2SeqNoTuple,
EmbeddingAttentionSeq2Seq, EmbeddingAttentionSeq2SeqNoTuple):
TestModel(model)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/legacy_seq2seq/python/kernel_tests/seq2seq_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/legacy_seq2seq/python/kernel_tests/__init__.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for creating sequence-to-sequence models in TensorFlow.
Sequence-to-sequence recurrent neural networks can learn complex functions
that map input sequences to output sequences. These models yield very good
results on a number of tasks, such as speech recognition, parsing, machine
translation, or even constructing automated replies to emails.
Before using this module, it is recommended to read the TensorFlow tutorial
on sequence-to-sequence models. It explains the basic concepts of this module
and shows an end-to-end example of how to build a translation model.
https://www.tensorflow.org/versions/master/tutorials/seq2seq/index.html
Here is an overview of functions available in this module. They all use
a very similar interface, so after reading the above tutorial and using
one of them, others should be easy to substitute.
* Full sequence-to-sequence models.
- basic_rnn_seq2seq: The most basic RNN-RNN model.
- tied_rnn_seq2seq: The basic model with tied encoder and decoder weights.
- embedding_rnn_seq2seq: The basic model with input embedding.
- embedding_tied_rnn_seq2seq: The tied model with input embedding.
- embedding_attention_seq2seq: Advanced model with input embedding and
the neural attention mechanism; recommended for complex tasks.
* Multi-task sequence-to-sequence models.
- one2many_rnn_seq2seq: The embedding model with multiple decoders.
* Decoders (when you write your own encoder, you can use these to decode;
e.g., if you want to write a model that generates captions for images).
- rnn_decoder: The basic decoder based on a pure RNN.
- attention_decoder: A decoder that uses the attention mechanism.
* Losses.
- sequence_loss: Loss for a sequence model returning average log-perplexity.
- sequence_loss_by_example: As above, but not averaging over all examples.
* model_with_buckets: A convenience function to create models with bucketing
(see the tutorial above for an explanation of why and how to use it).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
# We disable pylint because we need python3 compatibility.
from six.moves import xrange # pylint: disable=redefined-builtin
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
# TODO(ebrevdo): Remove once _linear is fully deprecated.
Linear = core_rnn_cell._Linear # pylint: disable=protected-access,invalid-name
def _extract_argmax_and_embed(embedding,
output_projection=None,
update_embedding=True):
"""Get a loop_function that extracts the previous symbol and embeds it.
Args:
embedding: embedding tensor for symbols.
output_projection: None or a pair (W, B). If provided, each fed previous
      output will first be multiplied by W and have B added.
update_embedding: Boolean; if False, the gradients will not propagate
through the embeddings.
Returns:
A loop function.
"""
def loop_function(prev, _):
if output_projection is not None:
prev = nn_ops.xw_plus_b(prev, output_projection[0], output_projection[1])
prev_symbol = math_ops.argmax(prev, 1)
# Note that gradients will not propagate through the second parameter of
# embedding_lookup.
emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
if not update_embedding:
emb_prev = array_ops.stop_gradient(emb_prev)
return emb_prev
return loop_function
def rnn_decoder(decoder_inputs,
initial_state,
cell,
loop_function=None,
scope=None):
"""RNN decoder for the sequence-to-sequence model.
Args:
decoder_inputs: A list of 2D Tensors [batch_size x input_size].
initial_state: 2D Tensor with shape [batch_size x cell.state_size].
cell: rnn_cell.RNNCell defining the cell function and size.
loop_function: If not None, this function will be applied to the i-th output
in order to generate the i+1-st input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol). This can be used for decoding,
but also for training to emulate http://arxiv.org/abs/1506.03099.
      Signature -- loop_function(prev, i) = next
        * prev is a 2D Tensor of shape [batch_size x output_size],
        * i is an integer, the step number (when advanced control is needed),
        * next is a 2D Tensor of shape [batch_size x input_size].
scope: VariableScope for the created subgraph; defaults to "rnn_decoder".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing generated outputs.
state: The state of each cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
(Note that in some cases, like basic RNN cell or GRU cell, outputs and
states can be the same. They are different for LSTM cells though.)
"""
with variable_scope.variable_scope(scope or "rnn_decoder"):
state = initial_state
outputs = []
prev = None
for i, inp in enumerate(decoder_inputs):
if loop_function is not None and prev is not None:
with variable_scope.variable_scope("loop_function", reuse=True):
inp = loop_function(prev, i)
if i > 0:
variable_scope.get_variable_scope().reuse_variables()
output, state = cell(inp, state)
outputs.append(output)
if loop_function is not None:
prev = output
return outputs, state
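# Illustrative sketch (added for exposition, not part of the upstream API):
# a minimal way to drive rnn_decoder directly with a GRU cell. All sizes
# below are arbitrary assumptions and this helper is never called here.
def _rnn_decoder_example():
  """Hypothetical helper: decode five steps from a zero initial state."""
  batch_size, input_size, num_steps = 4, 8, 5
  cell = rnn_cell_impl.GRUCell(16)
  decoder_inputs = [
      array_ops.zeros([batch_size, input_size]) for _ in xrange(num_steps)
  ]
  initial_state = cell.zero_state(batch_size, dtypes.float32)
  # With loop_function=None the decoder consumes decoder_inputs as given.
  return rnn_decoder(decoder_inputs, initial_state, cell)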
def basic_rnn_seq2seq(encoder_inputs,
decoder_inputs,
cell,
dtype=dtypes.float32,
scope=None):
"""Basic RNN sequence-to-sequence model.
This model first runs an RNN to encode encoder_inputs into a state vector,
then runs decoder, initialized with the last encoder state, on decoder_inputs.
Encoder and decoder use the same RNN cell type, but don't share parameters.
Args:
encoder_inputs: A list of 2D Tensors [batch_size x input_size].
decoder_inputs: A list of 2D Tensors [batch_size x input_size].
cell: tf.compat.v1.nn.rnn_cell.RNNCell defining the cell function and size.
dtype: The dtype of the initial state of the RNN cell (default: tf.float32).
scope: VariableScope for the created subgraph; default: "basic_rnn_seq2seq".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing the generated outputs.
state: The state of each decoder cell in the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with variable_scope.variable_scope(scope or "basic_rnn_seq2seq"):
enc_cell = copy.deepcopy(cell)
_, enc_state = rnn.static_rnn(enc_cell, encoder_inputs, dtype=dtype)
return rnn_decoder(decoder_inputs, enc_state, cell)
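# Illustrative sketch (added for exposition, not part of the upstream API):
# basic_rnn_seq2seq on dense (already embedded) inputs with arbitrary toy
# sizes; the final encoder state initializes the decoder as described above.
def _basic_rnn_seq2seq_example():
  """Hypothetical helper: three encoder and three decoder steps on a GRU."""
  batch_size, input_size, num_steps = 2, 4, 3
  cell = rnn_cell_impl.GRUCell(8)
  encoder_inputs = [
      array_ops.zeros([batch_size, input_size]) for _ in xrange(num_steps)
  ]
  decoder_inputs = [
      array_ops.zeros([batch_size, input_size]) for _ in xrange(num_steps)
  ]
  # Returns one output per decoder input plus the final decoder state.
  return basic_rnn_seq2seq(encoder_inputs, decoder_inputs, cell)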
def tied_rnn_seq2seq(encoder_inputs,
decoder_inputs,
cell,
loop_function=None,
dtype=dtypes.float32,
scope=None):
"""RNN sequence-to-sequence model with tied encoder and decoder parameters.
This model first runs an RNN to encode encoder_inputs into a state vector, and
then runs decoder, initialized with the last encoder state, on decoder_inputs.
Encoder and decoder use the same RNN cell and share parameters.
Args:
encoder_inputs: A list of 2D Tensors [batch_size x input_size].
decoder_inputs: A list of 2D Tensors [batch_size x input_size].
cell: tf.compat.v1.nn.rnn_cell.RNNCell defining the cell function and size.
loop_function: If not None, this function will be applied to i-th output in
order to generate i+1-th input, and decoder_inputs will be ignored, except
for the first element ("GO" symbol), see rnn_decoder for details.
dtype: The dtype of the initial state of the rnn cell (default: tf.float32).
scope: VariableScope for the created subgraph; default: "tied_rnn_seq2seq".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing the generated outputs.
    state: The state of each decoder cell at the final time-step.
      It is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with variable_scope.variable_scope("combined_tied_rnn_seq2seq"):
scope = scope or "tied_rnn_seq2seq"
_, enc_state = rnn.static_rnn(
cell, encoder_inputs, dtype=dtype, scope=scope)
variable_scope.get_variable_scope().reuse_variables()
return rnn_decoder(
decoder_inputs,
enc_state,
cell,
loop_function=loop_function,
scope=scope)
def embedding_rnn_decoder(decoder_inputs,
initial_state,
cell,
num_symbols,
embedding_size,
output_projection=None,
feed_previous=False,
update_embedding_for_previous=True,
scope=None):
"""RNN decoder with embedding and a pure-decoding option.
Args:
decoder_inputs: A list of 1D batch-sized int32 Tensors (decoder inputs).
initial_state: 2D Tensor [batch_size x cell.state_size].
cell: tf.compat.v1.nn.rnn_cell.RNNCell defining the cell function.
num_symbols: Integer, how many symbols come into the embedding.
embedding_size: Integer, the length of the embedding vector for each symbol.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_symbols] and B has shape
[num_symbols]; if provided and feed_previous=True, each fed previous
      output will first be multiplied by W and have B added.
feed_previous: Boolean; if True, only the first of decoder_inputs will be
used (the "GO" symbol), and all other decoder inputs will be generated by:
      next = embedding_lookup(embedding, argmax(previous_output)). In effect,
this implements a greedy decoder. It can also be used
during training to emulate http://arxiv.org/abs/1506.03099. If False,
decoder_inputs are used as given (the standard decoder case).
update_embedding_for_previous: Boolean; if False and feed_previous=True,
only the embedding for the first symbol of decoder_inputs (the "GO"
symbol) will be updated by back propagation. Embeddings for the symbols
generated from the decoder itself remain unchanged. This parameter has no
effect if feed_previous=False.
scope: VariableScope for the created subgraph; defaults to
"embedding_rnn_decoder".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors. The
output is of shape [batch_size x cell.output_size] when
output_projection is not None (and represents the dense representation
of predicted tokens). It is of shape [batch_size x num_decoder_symbols]
when output_projection is None.
    state: The state of each decoder cell at the final time-step.
      It is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: When output_projection has the wrong shape.
"""
with variable_scope.variable_scope(scope or "embedding_rnn_decoder") as scope:
if output_projection is not None:
dtype = scope.dtype
proj_weights = ops.convert_to_tensor(output_projection[0], dtype=dtype)
proj_weights.get_shape().assert_is_compatible_with([None, num_symbols])
proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)
proj_biases.get_shape().assert_is_compatible_with([num_symbols])
embedding = variable_scope.get_variable("embedding",
[num_symbols, embedding_size])
loop_function = _extract_argmax_and_embed(
embedding, output_projection,
update_embedding_for_previous) if feed_previous else None
emb_inp = (
embedding_ops.embedding_lookup(embedding, i) for i in decoder_inputs)
return rnn_decoder(
emb_inp, initial_state, cell, loop_function=loop_function)
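# Illustrative sketch (added for exposition, not part of the upstream API):
# embedding_rnn_decoder on top of a hand-rolled encoder state, as suggested
# in the module overview for custom encoders. All sizes are arbitrary.
def _embedding_rnn_decoder_example():
  """Hypothetical helper: greedy decoding from a precomputed state."""
  batch_size, num_steps, num_symbols, embedding_size = 2, 4, 10, 6
  cell = core_rnn_cell.OutputProjectionWrapper(
      rnn_cell_impl.GRUCell(8), num_symbols)
  # int32 symbol ids; all zeros here stand in for the "GO" symbol.
  decoder_inputs = [
      array_ops.zeros([batch_size], dtype=dtypes.int32)
      for _ in xrange(num_steps)
  ]
  initial_state = cell.zero_state(batch_size, dtypes.float32)
  # feed_previous=True: after the first step, argmax outputs are fed back.
  return embedding_rnn_decoder(
      decoder_inputs,
      initial_state,
      cell,
      num_symbols,
      embedding_size,
      feed_previous=True)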
def embedding_rnn_seq2seq(encoder_inputs,
decoder_inputs,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size,
output_projection=None,
feed_previous=False,
dtype=None,
scope=None):
"""Embedding RNN sequence-to-sequence model.
This model first embeds encoder_inputs by a newly created embedding (of shape
[num_encoder_symbols x input_size]). Then it runs an RNN to encode
embedded encoder_inputs into a state vector. Next, it embeds decoder_inputs
by another newly created embedding (of shape [num_decoder_symbols x
input_size]). Then it runs RNN decoder, initialized with the last
encoder state, on embedded decoder_inputs.
Args:
encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
cell: tf.compat.v1.nn.rnn_cell.RNNCell defining the cell function and size.
num_encoder_symbols: Integer; number of symbols on the encoder side.
num_decoder_symbols: Integer; number of symbols on the decoder side.
embedding_size: Integer, the length of the embedding vector for each symbol.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_decoder_symbols] and B has shape
[num_decoder_symbols]; if provided and feed_previous=True, each fed
      previous output will first be multiplied by W and have B added.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first of
decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
    dtype: The dtype of the initial state for both the encoder and decoder
rnn cells (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_rnn_seq2seq"
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors. The
output is of shape [batch_size x cell.output_size] when
output_projection is not None (and represents the dense representation
of predicted tokens). It is of shape [batch_size x num_decoder_symbols]
when output_projection is None.
    state: The state of each decoder cell at the final time-step.
      It is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with variable_scope.variable_scope(scope or "embedding_rnn_seq2seq") as scope:
if dtype is not None:
scope.set_dtype(dtype)
else:
dtype = scope.dtype
# Encoder.
encoder_cell = copy.deepcopy(cell)
encoder_cell = core_rnn_cell.EmbeddingWrapper(
encoder_cell,
embedding_classes=num_encoder_symbols,
embedding_size=embedding_size)
_, encoder_state = rnn.static_rnn(encoder_cell, encoder_inputs, dtype=dtype)
# Decoder.
if output_projection is None:
cell = core_rnn_cell.OutputProjectionWrapper(cell, num_decoder_symbols)
if isinstance(feed_previous, bool):
return embedding_rnn_decoder(
decoder_inputs,
encoder_state,
cell,
num_decoder_symbols,
embedding_size,
output_projection=output_projection,
feed_previous=feed_previous)
# If feed_previous is a Tensor, we construct 2 graphs and use cond.
def decoder(feed_previous_bool):
reuse = None if feed_previous_bool else True
with variable_scope.variable_scope(
variable_scope.get_variable_scope(), reuse=reuse):
outputs, state = embedding_rnn_decoder(
decoder_inputs,
encoder_state,
cell,
num_decoder_symbols,
embedding_size,
output_projection=output_projection,
feed_previous=feed_previous_bool,
update_embedding_for_previous=False)
state_list = [state]
if nest.is_sequence(state):
state_list = nest.flatten(state)
return outputs + state_list
outputs_and_state = control_flow_ops.cond(
feed_previous, lambda: decoder(True), lambda: decoder(False))
outputs_len = len(decoder_inputs) # Outputs length same as decoder inputs.
state_list = outputs_and_state[outputs_len:]
state = state_list[0]
if nest.is_sequence(encoder_state):
state = nest.pack_sequence_as(
structure=encoder_state, flat_sequence=state_list)
return outputs_and_state[:outputs_len], state
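# Illustrative sketch (added for exposition, not part of the upstream API):
# embedding_rnn_seq2seq on integer token ids with greedy feed-previous
# decoding; vocabulary and embedding sizes are arbitrary assumptions.
def _embedding_rnn_seq2seq_example():
  """Hypothetical helper: token-id encoder/decoder sharing a GRU size."""
  batch_size, num_steps = 2, 3
  num_encoder_symbols, num_decoder_symbols, embedding_size = 12, 15, 6
  cell = rnn_cell_impl.GRUCell(8)
  encoder_inputs = [
      array_ops.zeros([batch_size], dtype=dtypes.int32)
      for _ in xrange(num_steps)
  ]
  decoder_inputs = [
      array_ops.zeros([batch_size], dtype=dtypes.int32)
      for _ in xrange(num_steps)
  ]
  # With output_projection=None each output has num_decoder_symbols columns.
  return embedding_rnn_seq2seq(
      encoder_inputs,
      decoder_inputs,
      cell,
      num_encoder_symbols,
      num_decoder_symbols,
      embedding_size,
      feed_previous=True)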
def embedding_tied_rnn_seq2seq(encoder_inputs,
decoder_inputs,
cell,
num_symbols,
embedding_size,
num_decoder_symbols=None,
output_projection=None,
feed_previous=False,
dtype=None,
scope=None):
"""Embedding RNN sequence-to-sequence model with tied (shared) parameters.
This model first embeds encoder_inputs by a newly created embedding (of shape
[num_symbols x input_size]). Then it runs an RNN to encode embedded
encoder_inputs into a state vector. Next, it embeds decoder_inputs using
the same embedding. Then it runs RNN decoder, initialized with the last
encoder state, on embedded decoder_inputs. The decoder output is over symbols
  from 0 to num_decoder_symbols - 1 if num_decoder_symbols is provided;
  otherwise it is over 0 to num_symbols - 1.
Args:
encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
cell: tf.compat.v1.nn.rnn_cell.RNNCell defining the cell function and size.
num_symbols: Integer; number of symbols for both encoder and decoder.
embedding_size: Integer, the length of the embedding vector for each symbol.
num_decoder_symbols: Integer; number of output symbols for decoder. If
provided, the decoder output is over symbols 0 to num_decoder_symbols - 1.
Otherwise, decoder output is over symbols 0 to num_symbols - 1. Note that
this assumes that the vocabulary is set up such that the first
num_decoder_symbols of num_symbols are part of decoding.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_symbols] and B has shape
[num_symbols]; if provided and feed_previous=True, each fed previous
      output will first be multiplied by W and have B added.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first of
decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
dtype: The dtype to use for the initial RNN states (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_tied_rnn_seq2seq".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_symbols] containing the generated
outputs where output_symbols = num_decoder_symbols if
num_decoder_symbols is not None otherwise output_symbols = num_symbols.
state: The state of each decoder cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: When output_projection has the wrong shape.
"""
with variable_scope.variable_scope(
scope or "embedding_tied_rnn_seq2seq", dtype=dtype) as scope:
dtype = scope.dtype
if output_projection is not None:
proj_weights = ops.convert_to_tensor(output_projection[0], dtype=dtype)
proj_weights.get_shape().assert_is_compatible_with([None, num_symbols])
proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)
proj_biases.get_shape().assert_is_compatible_with([num_symbols])
embedding = variable_scope.get_variable(
"embedding", [num_symbols, embedding_size], dtype=dtype)
emb_encoder_inputs = [
embedding_ops.embedding_lookup(embedding, x) for x in encoder_inputs
]
emb_decoder_inputs = [
embedding_ops.embedding_lookup(embedding, x) for x in decoder_inputs
]
output_symbols = num_symbols
if num_decoder_symbols is not None:
output_symbols = num_decoder_symbols
if output_projection is None:
cell = core_rnn_cell.OutputProjectionWrapper(cell, output_symbols)
if isinstance(feed_previous, bool):
loop_function = _extract_argmax_and_embed(embedding, output_projection,
True) if feed_previous else None
return tied_rnn_seq2seq(
emb_encoder_inputs,
emb_decoder_inputs,
cell,
loop_function=loop_function,
dtype=dtype)
# If feed_previous is a Tensor, we construct 2 graphs and use cond.
def decoder(feed_previous_bool):
loop_function = _extract_argmax_and_embed(
embedding, output_projection, False) if feed_previous_bool else None
reuse = None if feed_previous_bool else True
with variable_scope.variable_scope(
variable_scope.get_variable_scope(), reuse=reuse):
outputs, state = tied_rnn_seq2seq(
emb_encoder_inputs,
emb_decoder_inputs,
cell,
loop_function=loop_function,
dtype=dtype)
state_list = [state]
if nest.is_sequence(state):
state_list = nest.flatten(state)
return outputs + state_list
outputs_and_state = control_flow_ops.cond(
feed_previous, lambda: decoder(True), lambda: decoder(False))
outputs_len = len(decoder_inputs) # Outputs length same as decoder inputs.
state_list = outputs_and_state[outputs_len:]
state = state_list[0]
    # Calculate zero-state to know its structure.
static_batch_size = encoder_inputs[0].get_shape()[0]
for inp in encoder_inputs[1:]:
static_batch_size.merge_with(inp.get_shape()[0])
batch_size = static_batch_size.value
if batch_size is None:
batch_size = array_ops.shape(encoder_inputs[0])[0]
zero_state = cell.zero_state(batch_size, dtype)
if nest.is_sequence(zero_state):
state = nest.pack_sequence_as(
structure=zero_state, flat_sequence=state_list)
return outputs_and_state[:outputs_len], state
def attention_decoder(decoder_inputs,
initial_state,
attention_states,
cell,
output_size=None,
num_heads=1,
loop_function=None,
dtype=None,
scope=None,
initial_state_attention=False):
"""RNN decoder with attention for the sequence-to-sequence model.
In this context "attention" means that, during decoding, the RNN can look up
information in the additional tensor attention_states, and it does this by
focusing on a few entries from the tensor. This model has proven to yield
especially good results in a number of sequence-to-sequence tasks. This
implementation is based on http://arxiv.org/abs/1412.7449 (see below for
details). It is recommended for complex sequence-to-sequence tasks.
Args:
decoder_inputs: A list of 2D Tensors [batch_size x input_size].
initial_state: 2D Tensor [batch_size x cell.state_size].
attention_states: 3D Tensor [batch_size x attn_length x attn_size].
cell: tf.compat.v1.nn.rnn_cell.RNNCell defining the cell function and size.
output_size: Size of the output vectors; if None, we use cell.output_size.
num_heads: Number of attention heads that read from attention_states.
loop_function: If not None, this function will be applied to i-th output in
order to generate i+1-th input, and decoder_inputs will be ignored, except
for the first element ("GO" symbol). This can be used for decoding,
but also for training to emulate http://arxiv.org/abs/1506.03099.
      Signature -- loop_function(prev, i) = next
        * prev is a 2D Tensor of shape [batch_size x output_size],
        * i is an integer, the step number (when advanced control is needed),
        * next is a 2D Tensor of shape [batch_size x input_size].
dtype: The dtype to use for the RNN initial state (default: tf.float32).
scope: VariableScope for the created subgraph; default: "attention_decoder".
initial_state_attention: If False (default), initial attentions are zero. If
True, initialize the attentions from the initial state and attention
states -- useful when we wish to resume decoding from a previously stored
decoder state and attention states.
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors of
shape [batch_size x output_size]. These represent the generated outputs.
Output i is computed from input i (which is either the i-th element
of decoder_inputs or loop_function(output {i-1}, i)) as follows.
First, we run the cell on a combination of the input and previous
attention masks:
cell_output, new_state = cell(linear(input, prev_attn), prev_state).
Then, we calculate new attention masks:
new_attn = softmax(V^T * tanh(W * attention_states + U * new_state))
and then we calculate the output:
output = linear(cell_output, new_attn).
    state: The state of each decoder cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: when num_heads is not positive, there are no inputs, shapes
of attention_states are not set, or input size cannot be inferred
from the input.
"""
if not decoder_inputs:
raise ValueError("Must provide at least 1 input to attention decoder.")
if num_heads < 1:
raise ValueError("With less than 1 heads, use a non-attention decoder.")
if attention_states.get_shape()[2].value is None:
raise ValueError("Shape[2] of attention_states must be known: %s" %
attention_states.get_shape())
if output_size is None:
output_size = cell.output_size
with variable_scope.variable_scope(
scope or "attention_decoder", dtype=dtype) as scope:
dtype = scope.dtype
batch_size = array_ops.shape(decoder_inputs[0])[0] # Needed for reshaping.
attn_length = attention_states.get_shape()[1].value
if attn_length is None:
attn_length = array_ops.shape(attention_states)[1]
attn_size = attention_states.get_shape()[2].value
# To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.
hidden = array_ops.reshape(attention_states,
[-1, attn_length, 1, attn_size])
hidden_features = []
v = []
attention_vec_size = attn_size # Size of query vectors for attention.
for a in xrange(num_heads):
k = variable_scope.get_variable(
"AttnW_%d" % a, [1, 1, attn_size, attention_vec_size], dtype=dtype)
hidden_features.append(nn_ops.conv2d(hidden, k, [1, 1, 1, 1], "SAME"))
v.append(
variable_scope.get_variable(
"AttnV_%d" % a, [attention_vec_size], dtype=dtype))
state = initial_state
def attention(query):
"""Put attention masks on hidden using hidden_features and query."""
ds = [] # Results of attention reads will be stored here.
if nest.is_sequence(query): # If the query is a tuple, flatten it.
query_list = nest.flatten(query)
for q in query_list: # Check that ndims == 2 if specified.
ndims = q.get_shape().ndims
if ndims:
assert ndims == 2
query = array_ops.concat(query_list, 1)
for a in xrange(num_heads):
with variable_scope.variable_scope("Attention_%d" % a):
y = Linear(query, attention_vec_size, True)(query)
y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])
y = math_ops.cast(y, dtype)
# Attention mask is a softmax of v^T * tanh(...).
s = math_ops.reduce_sum(v[a] * math_ops.tanh(hidden_features[a] + y),
[2, 3])
a = nn_ops.softmax(math_ops.cast(s, dtype=dtypes.float32))
# Now calculate the attention-weighted vector d.
a = math_ops.cast(a, dtype)
d = math_ops.reduce_sum(
array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden, [1, 2])
ds.append(array_ops.reshape(d, [-1, attn_size]))
return ds
outputs = []
prev = None
batch_attn_size = array_ops.stack([batch_size, attn_size])
attns = [
array_ops.zeros(batch_attn_size, dtype=dtype) for _ in xrange(num_heads)
]
for a in attns: # Ensure the second shape of attention vectors is set.
a.set_shape([None, attn_size])
if initial_state_attention:
attns = attention(initial_state)
for i, inp in enumerate(decoder_inputs):
if i > 0:
variable_scope.get_variable_scope().reuse_variables()
# If loop_function is set, we use it instead of decoder_inputs.
if loop_function is not None and prev is not None:
with variable_scope.variable_scope("loop_function", reuse=True):
inp = loop_function(prev, i)
# Merge input and previous attentions into one vector of the right size.
input_size = inp.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from input: %s" % inp.name)
inputs = [inp] + attns
inputs = [math_ops.cast(e, dtype) for e in inputs]
x = Linear(inputs, input_size, True)(inputs)
# Run the RNN.
cell_output, state = cell(x, state)
# Run the attention mechanism.
if i == 0 and initial_state_attention:
with variable_scope.variable_scope(
variable_scope.get_variable_scope(), reuse=True):
attns = attention(state)
else:
attns = attention(state)
with variable_scope.variable_scope("AttnOutputProjection"):
cell_output = math_ops.cast(cell_output, dtype)
inputs = [cell_output] + attns
output = Linear(inputs, output_size, True)(inputs)
if loop_function is not None:
prev = output
outputs.append(output)
return outputs, state
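# Illustrative sketch (added for exposition, not part of the upstream API):
# attention_decoder over a fixed block of "encoder outputs" supplied as
# attention_states; in a real model those come from an encoder RNN.
def _attention_decoder_example():
  """Hypothetical helper: decode three steps attending to five positions."""
  batch_size, input_size, num_steps = 2, 4, 3
  attn_length, attn_size = 5, 8
  cell = rnn_cell_impl.GRUCell(8)
  decoder_inputs = [
      array_ops.zeros([batch_size, input_size]) for _ in xrange(num_steps)
  ]
  initial_state = cell.zero_state(batch_size, dtypes.float32)
  # Placeholder attention memory with a statically known last dimension.
  attention_states = array_ops.zeros([batch_size, attn_length, attn_size])
  return attention_decoder(decoder_inputs, initial_state, attention_states,
                           cell)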
def embedding_attention_decoder(decoder_inputs,
initial_state,
attention_states,
cell,
num_symbols,
embedding_size,
num_heads=1,
output_size=None,
output_projection=None,
feed_previous=False,
update_embedding_for_previous=True,
dtype=None,
scope=None,
initial_state_attention=False):
"""RNN decoder with embedding and attention and a pure-decoding option.
Args:
decoder_inputs: A list of 1D batch-sized int32 Tensors (decoder inputs).
initial_state: 2D Tensor [batch_size x cell.state_size].
attention_states: 3D Tensor [batch_size x attn_length x attn_size].
cell: tf.compat.v1.nn.rnn_cell.RNNCell defining the cell function.
num_symbols: Integer, how many symbols come into the embedding.
embedding_size: Integer, the length of the embedding vector for each symbol.
num_heads: Number of attention heads that read from attention_states.
    output_size: Size of the output vectors; if None, use cell.output_size.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_symbols] and B has shape
[num_symbols]; if provided and feed_previous=True, each fed previous
      output will first be multiplied by W and have B added.
feed_previous: Boolean; if True, only the first of decoder_inputs will be
used (the "GO" symbol), and all other decoder inputs will be generated by:
      next = embedding_lookup(embedding, argmax(previous_output)). In effect,
this implements a greedy decoder. It can also be used
during training to emulate http://arxiv.org/abs/1506.03099. If False,
decoder_inputs are used as given (the standard decoder case).
update_embedding_for_previous: Boolean; if False and feed_previous=True,
only the embedding for the first symbol of decoder_inputs (the "GO"
symbol) will be updated by back propagation. Embeddings for the symbols
generated from the decoder itself remain unchanged. This parameter has no
effect if feed_previous=False.
dtype: The dtype to use for the RNN initial states (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_attention_decoder".
initial_state_attention: If False (default), initial attentions are zero. If
True, initialize the attentions from the initial state and attention
states -- useful when we wish to resume decoding from a previously stored
decoder state and attention states.
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing the generated outputs.
state: The state of each decoder cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: When output_projection has the wrong shape.
"""
if output_size is None:
output_size = cell.output_size
if output_projection is not None:
proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)
proj_biases.get_shape().assert_is_compatible_with([num_symbols])
with variable_scope.variable_scope(
scope or "embedding_attention_decoder", dtype=dtype) as scope:
embedding = variable_scope.get_variable("embedding",
[num_symbols, embedding_size])
loop_function = _extract_argmax_and_embed(
embedding, output_projection,
update_embedding_for_previous) if feed_previous else None
emb_inp = [
embedding_ops.embedding_lookup(embedding, i) for i in decoder_inputs
]
return attention_decoder(
emb_inp,
initial_state,
attention_states,
cell,
output_size=output_size,
num_heads=num_heads,
loop_function=loop_function,
initial_state_attention=initial_state_attention)
def embedding_attention_seq2seq(encoder_inputs,
decoder_inputs,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size,
num_heads=1,
output_projection=None,
feed_previous=False,
dtype=None,
scope=None,
initial_state_attention=False):
"""Embedding sequence-to-sequence model with attention.
This model first embeds encoder_inputs by a newly created embedding (of shape
[num_encoder_symbols x input_size]). Then it runs an RNN to encode
embedded encoder_inputs into a state vector. It keeps the outputs of this
RNN at every step to use for attention later. Next, it embeds decoder_inputs
by another newly created embedding (of shape [num_decoder_symbols x
input_size]). Then it runs attention decoder, initialized with the last
encoder state, on embedded decoder_inputs and attending to encoder outputs.
Warning: when output_projection is None, the size of the attention vectors
  and variables will be made proportional to num_decoder_symbols, which can
  be large.
Args:
encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
cell: tf.compat.v1.nn.rnn_cell.RNNCell defining the cell function and size.
num_encoder_symbols: Integer; number of symbols on the encoder side.
num_decoder_symbols: Integer; number of symbols on the decoder side.
embedding_size: Integer, the length of the embedding vector for each symbol.
num_heads: Number of attention heads that read from attention_states.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_decoder_symbols] and B has shape
[num_decoder_symbols]; if provided and feed_previous=True, each fed
      previous output will first be multiplied by W and have B added.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first of
decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
dtype: The dtype of the initial RNN state (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_attention_seq2seq".
initial_state_attention: If False (default), initial attentions are zero. If
True, initialize the attentions from the initial state and attention
states.
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x num_decoder_symbols] containing the generated
outputs.
state: The state of each decoder cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with variable_scope.variable_scope(
scope or "embedding_attention_seq2seq", dtype=dtype) as scope:
dtype = scope.dtype
# Encoder.
encoder_cell = copy.deepcopy(cell)
encoder_cell = core_rnn_cell.EmbeddingWrapper(
encoder_cell,
embedding_classes=num_encoder_symbols,
embedding_size=embedding_size)
encoder_outputs, encoder_state = rnn.static_rnn(
encoder_cell, encoder_inputs, dtype=dtype)
# First calculate a concatenation of encoder outputs to put attention on.
top_states = [
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in encoder_outputs
]
attention_states = array_ops.concat(top_states, 1)
# Decoder.
output_size = None
if output_projection is None:
cell = core_rnn_cell.OutputProjectionWrapper(cell, num_decoder_symbols)
output_size = num_decoder_symbols
if isinstance(feed_previous, bool):
return embedding_attention_decoder(
decoder_inputs,
encoder_state,
attention_states,
cell,
num_decoder_symbols,
embedding_size,
num_heads=num_heads,
output_size=output_size,
output_projection=output_projection,
feed_previous=feed_previous,
initial_state_attention=initial_state_attention)
# If feed_previous is a Tensor, we construct 2 graphs and use cond.
def decoder(feed_previous_bool):
reuse = None if feed_previous_bool else True
with variable_scope.variable_scope(
variable_scope.get_variable_scope(), reuse=reuse):
outputs, state = embedding_attention_decoder(
decoder_inputs,
encoder_state,
attention_states,
cell,
num_decoder_symbols,
embedding_size,
num_heads=num_heads,
output_size=output_size,
output_projection=output_projection,
feed_previous=feed_previous_bool,
update_embedding_for_previous=False,
initial_state_attention=initial_state_attention)
state_list = [state]
if nest.is_sequence(state):
state_list = nest.flatten(state)
return outputs + state_list
outputs_and_state = control_flow_ops.cond(
feed_previous, lambda: decoder(True), lambda: decoder(False))
outputs_len = len(decoder_inputs) # Outputs length same as decoder inputs.
state_list = outputs_and_state[outputs_len:]
state = state_list[0]
if nest.is_sequence(encoder_state):
state = nest.pack_sequence_as(
structure=encoder_state, flat_sequence=state_list)
return outputs_and_state[:outputs_len], state
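# Illustrative sketch (added for exposition, not part of the upstream API):
# embedding_attention_seq2seq, the variant recommended above for complex
# tasks, called in standard teacher-forcing mode with arbitrary toy sizes.
def _embedding_attention_seq2seq_example():
  """Hypothetical helper: attention seq2seq with decoder inputs as given."""
  batch_size, num_steps = 2, 3
  num_encoder_symbols, num_decoder_symbols, embedding_size = 12, 15, 6
  cell = rnn_cell_impl.GRUCell(8)
  encoder_inputs = [
      array_ops.zeros([batch_size], dtype=dtypes.int32)
      for _ in xrange(num_steps)
  ]
  decoder_inputs = [
      array_ops.zeros([batch_size], dtype=dtypes.int32)
      for _ in xrange(num_steps)
  ]
  # feed_previous=False: decoder_inputs are used as given (training mode).
  return embedding_attention_seq2seq(
      encoder_inputs,
      decoder_inputs,
      cell,
      num_encoder_symbols,
      num_decoder_symbols,
      embedding_size,
      feed_previous=False)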
def one2many_rnn_seq2seq(encoder_inputs,
decoder_inputs_dict,
enc_cell,
dec_cells_dict,
num_encoder_symbols,
num_decoder_symbols_dict,
embedding_size,
feed_previous=False,
dtype=None,
scope=None):
"""One-to-many RNN sequence-to-sequence model (multi-task).
This is a multi-task sequence-to-sequence model with one encoder and multiple
decoders. Reference to multi-task sequence-to-sequence learning can be found
here: http://arxiv.org/abs/1511.06114
Args:
encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
decoder_inputs_dict: A dictionary mapping decoder name (string) to the
corresponding decoder_inputs; each decoder_inputs is a list of 1D Tensors
of shape [batch_size]; num_decoders is defined as
len(decoder_inputs_dict).
enc_cell: tf.compat.v1.nn.rnn_cell.RNNCell defining the encoder cell
function and size.
    dec_cells_dict: A dictionary mapping decoder name (string) to an instance of
tf.nn.rnn_cell.RNNCell.
num_encoder_symbols: Integer; number of symbols on the encoder side.
num_decoder_symbols_dict: A dictionary mapping decoder name (string) to an
integer specifying number of symbols for the corresponding decoder;
len(num_decoder_symbols_dict) must be equal to num_decoders.
embedding_size: Integer, the length of the embedding vector for each symbol.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first of
decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
    dtype: The dtype of the initial state for both the encoder and decoder
rnn cells (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"one2many_rnn_seq2seq"
Returns:
A tuple of the form (outputs_dict, state_dict), where:
outputs_dict: A mapping from decoder name (string) to a list of the same
length as decoder_inputs_dict[name]; each element in the list is a 2D
Tensors with shape [batch_size x num_decoder_symbol_list[name]]
containing the generated outputs.
state_dict: A mapping from decoder name (string) to the final state of the
corresponding decoder RNN; it is a 2D Tensor of shape
[batch_size x cell.state_size].
Raises:
TypeError: if enc_cell or any of the dec_cells are not instances of RNNCell.
    ValueError: if the keys of dec_cells_dict and decoder_inputs_dict differ.
"""
outputs_dict = {}
state_dict = {}
if not isinstance(enc_cell, rnn_cell_impl.RNNCell):
raise TypeError("enc_cell is not an RNNCell: %s" % type(enc_cell))
if set(dec_cells_dict) != set(decoder_inputs_dict):
raise ValueError("keys of dec_cells_dict != keys of decodre_inputs_dict")
for dec_cell in dec_cells_dict.values():
if not isinstance(dec_cell, rnn_cell_impl.RNNCell):
raise TypeError("dec_cell is not an RNNCell: %s" % type(dec_cell))
with variable_scope.variable_scope(
scope or "one2many_rnn_seq2seq", dtype=dtype) as scope:
dtype = scope.dtype
# Encoder.
enc_cell = core_rnn_cell.EmbeddingWrapper(
enc_cell,
embedding_classes=num_encoder_symbols,
embedding_size=embedding_size)
_, encoder_state = rnn.static_rnn(enc_cell, encoder_inputs, dtype=dtype)
# Decoder.
for name, decoder_inputs in decoder_inputs_dict.items():
num_decoder_symbols = num_decoder_symbols_dict[name]
dec_cell = dec_cells_dict[name]
with variable_scope.variable_scope("one2many_decoder_" +
str(name)) as scope:
dec_cell = core_rnn_cell.OutputProjectionWrapper(
dec_cell, num_decoder_symbols)
if isinstance(feed_previous, bool):
outputs, state = embedding_rnn_decoder(
decoder_inputs,
encoder_state,
dec_cell,
num_decoder_symbols,
embedding_size,
feed_previous=feed_previous)
else:
# If feed_previous is a Tensor, we construct 2 graphs and use cond.
def filled_embedding_rnn_decoder(feed_previous):
"""The current decoder with a fixed feed_previous parameter."""
# pylint: disable=cell-var-from-loop
reuse = None if feed_previous else True
vs = variable_scope.get_variable_scope()
with variable_scope.variable_scope(vs, reuse=reuse):
outputs, state = embedding_rnn_decoder(
decoder_inputs,
encoder_state,
dec_cell,
num_decoder_symbols,
embedding_size,
feed_previous=feed_previous)
# pylint: enable=cell-var-from-loop
state_list = [state]
if nest.is_sequence(state):
state_list = nest.flatten(state)
return outputs + state_list
outputs_and_state = control_flow_ops.cond(
feed_previous, lambda: filled_embedding_rnn_decoder(True), lambda:
filled_embedding_rnn_decoder(False))
# Outputs length is the same as for decoder inputs.
outputs_len = len(decoder_inputs)
outputs = outputs_and_state[:outputs_len]
state_list = outputs_and_state[outputs_len:]
state = state_list[0]
if nest.is_sequence(encoder_state):
state = nest.pack_sequence_as(
structure=encoder_state, flat_sequence=state_list)
outputs_dict[name] = outputs
state_dict[name] = state
return outputs_dict, state_dict
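# Illustrative sketch (added for exposition, not part of the upstream API):
# the multi-task model with one encoder feeding two decoders. The task
# names, vocabulary sizes, and cell sizes are arbitrary assumptions.
def _one2many_rnn_seq2seq_example():
  """Hypothetical helper: one encoder shared by two task-specific decoders."""
  batch_size, num_steps = 2, 3
  num_encoder_symbols, embedding_size = 12, 6
  enc_cell = rnn_cell_impl.GRUCell(8)
  dec_cells = {"task_a": rnn_cell_impl.GRUCell(8),
               "task_b": rnn_cell_impl.GRUCell(8)}
  num_decoder_symbols = {"task_a": 10, "task_b": 20}
  encoder_inputs = [
      array_ops.zeros([batch_size], dtype=dtypes.int32)
      for _ in xrange(num_steps)
  ]
  decoder_inputs = {
      name: [array_ops.zeros([batch_size], dtype=dtypes.int32)
             for _ in xrange(num_steps)] for name in dec_cells
  }
  return one2many_rnn_seq2seq(encoder_inputs, decoder_inputs, enc_cell,
                              dec_cells, num_encoder_symbols,
                              num_decoder_symbols, embedding_size)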
def sequence_loss_by_example(logits,
targets,
weights,
average_across_timesteps=True,
softmax_loss_function=None,
name=None):
"""Weighted cross-entropy loss for a sequence of logits (per example).
Args:
logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
targets: List of 1D batch-sized int32 Tensors of the same length as logits.
weights: List of 1D batch-sized float-Tensors of the same length as logits.
average_across_timesteps: If set, divide the returned cost by the total
label weight.
softmax_loss_function: Function (labels, logits) -> loss-batch to be used
instead of the standard softmax (the default if this is None). **Note that
to avoid confusion, it is required for the function to accept named
arguments.**
name: Optional name for this operation, default: "sequence_loss_by_example".
Returns:
1D batch-sized float Tensor: The log-perplexity for each sequence.
Raises:
ValueError: If len(logits) is different from len(targets) or len(weights).
"""
if len(targets) != len(logits) or len(weights) != len(logits):
raise ValueError("Lengths of logits, weights, and targets must be the same "
"%d, %d, %d." % (len(logits), len(weights), len(targets)))
with ops.name_scope(name, "sequence_loss_by_example",
logits + targets + weights):
log_perp_list = []
for logit, target, weight in zip(logits, targets, weights):
if softmax_loss_function is None:
# TODO(irving,ebrevdo): This reshape is needed because
# sequence_loss_by_example is called with scalars sometimes, which
# violates our general scalar strictness policy.
target = array_ops.reshape(target, [-1])
crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=target, logits=logit)
else:
crossent = softmax_loss_function(labels=target, logits=logit)
log_perp_list.append(crossent * weight)
log_perps = math_ops.add_n(log_perp_list)
if average_across_timesteps:
total_size = math_ops.add_n(weights)
total_size += 1e-12 # Just to avoid division by 0 for all-0 weights.
log_perps /= total_size
return log_perps
def sequence_loss(logits,
targets,
weights,
average_across_timesteps=True,
average_across_batch=True,
softmax_loss_function=None,
name=None):
"""Weighted cross-entropy loss for a sequence of logits, batch-collapsed.
Args:
logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
targets: List of 1D batch-sized int32 Tensors of the same length as logits.
weights: List of 1D batch-sized float-Tensors of the same length as logits.
average_across_timesteps: If set, divide the returned cost by the total
label weight.
average_across_batch: If set, divide the returned cost by the batch size.
softmax_loss_function: Function (labels, logits) -> loss-batch to be used
instead of the standard softmax (the default if this is None). **Note that
to avoid confusion, it is required for the function to accept named
arguments.**
name: Optional name for this operation, defaults to "sequence_loss".
Returns:
A scalar float Tensor: The average log-perplexity per symbol (weighted).
Raises:
ValueError: If len(logits) is different from len(targets) or len(weights).
"""
with ops.name_scope(name, "sequence_loss", logits + targets + weights):
cost = math_ops.reduce_sum(
sequence_loss_by_example(
logits,
targets,
weights,
average_across_timesteps=average_across_timesteps,
softmax_loss_function=softmax_loss_function))
if average_across_batch:
batch_size = array_ops.shape(targets[0])[0]
return cost / math_ops.cast(batch_size, cost.dtype)
else:
return cost
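# Illustrative sketch (added for exposition, not part of the upstream API):
# sequence_loss on toy logits, targets, and weights. Padding positions in a
# real model would receive weight 0.0 so they do not contribute to the loss.
def _sequence_loss_example():
  """Hypothetical helper: average log-perplexity over a three-step sequence."""
  batch_size, num_steps, num_symbols = 2, 3, 5
  logits = [
      array_ops.zeros([batch_size, num_symbols]) for _ in xrange(num_steps)
  ]
  targets = [
      array_ops.zeros([batch_size], dtype=dtypes.int32)
      for _ in xrange(num_steps)
  ]
  weights = [array_ops.ones([batch_size]) for _ in xrange(num_steps)]
  # Averages across timesteps and across the batch by default.
  return sequence_loss(logits, targets, weights)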
def model_with_buckets(encoder_inputs,
decoder_inputs,
targets,
weights,
buckets,
seq2seq,
softmax_loss_function=None,
per_example_loss=False,
name=None):
"""Create a sequence-to-sequence model with support for bucketing.
The seq2seq argument is a function that defines a sequence-to-sequence model,
e.g., seq2seq = lambda x, y: basic_rnn_seq2seq(
x, y, rnn_cell.GRUCell(24))
Args:
encoder_inputs: A list of Tensors to feed the encoder; first seq2seq input.
decoder_inputs: A list of Tensors to feed the decoder; second seq2seq input.
targets: A list of 1D batch-sized int32 Tensors (desired output sequence).
weights: List of 1D batch-sized float-Tensors to weight the targets.
buckets: A list of pairs of (input size, output size) for each bucket.
    seq2seq: A sequence-to-sequence model function; it takes 2 inputs that agree
with encoder_inputs and decoder_inputs, and returns a pair consisting of
outputs and states (as, e.g., basic_rnn_seq2seq).
softmax_loss_function: Function (labels, logits) -> loss-batch to be used
instead of the standard softmax (the default if this is None). **Note that
to avoid confusion, it is required for the function to accept named
arguments.**
per_example_loss: Boolean. If set, the returned loss will be a batch-sized
tensor of losses for each sequence in the batch. If unset, it will be a
scalar with the averaged loss from all examples.
name: Optional name for this operation, defaults to "model_with_buckets".
Returns:
A tuple of the form (outputs, losses), where:
outputs: The outputs for each bucket. Its j'th element consists of a list
of 2D Tensors. The shape of output tensors can be either
[batch_size x output_size] or [batch_size x num_decoder_symbols]
depending on the seq2seq model used.
losses: List of scalar Tensors, representing losses for each bucket, or,
if per_example_loss is set, a list of 1D batch-sized float Tensors.
Raises:
ValueError: If length of encoder_inputs, targets, or weights is smaller
than the largest (last) bucket.
"""
if len(encoder_inputs) < buckets[-1][0]:
raise ValueError("Length of encoder_inputs (%d) must be at least that of la"
"st bucket (%d)." % (len(encoder_inputs), buckets[-1][0]))
if len(targets) < buckets[-1][1]:
raise ValueError("Length of targets (%d) must be at least that of last "
"bucket (%d)." % (len(targets), buckets[-1][1]))
if len(weights) < buckets[-1][1]:
raise ValueError("Length of weights (%d) must be at least that of last "
"bucket (%d)." % (len(weights), buckets[-1][1]))
all_inputs = encoder_inputs + decoder_inputs + targets + weights
losses = []
outputs = []
with ops.name_scope(name, "model_with_buckets", all_inputs):
for j, bucket in enumerate(buckets):
with variable_scope.variable_scope(
variable_scope.get_variable_scope(), reuse=True if j > 0 else None):
bucket_outputs, _ = seq2seq(encoder_inputs[:bucket[0]],
decoder_inputs[:bucket[1]])
outputs.append(bucket_outputs)
if per_example_loss:
losses.append(
sequence_loss_by_example(
outputs[-1],
targets[:bucket[1]],
weights[:bucket[1]],
softmax_loss_function=softmax_loss_function))
else:
losses.append(
sequence_loss(
outputs[-1],
targets[:bucket[1]],
weights[:bucket[1]],
softmax_loss_function=softmax_loss_function))
return outputs, losses
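# Illustrative sketch (added for exposition, not part of the upstream API):
# model_with_buckets wrapping embedding_rnn_seq2seq. A fresh cell is built
# inside the seq2seq function so that every bucket constructs the same
# variable names and can reuse them; bucket and vocabulary sizes are
# arbitrary assumptions.
def _model_with_buckets_example():
  """Hypothetical helper: two buckets with a per-bucket averaged loss."""
  batch_size = 2
  num_encoder_symbols, num_decoder_symbols, embedding_size = 12, 15, 6
  buckets = [(2, 2), (4, 4)]
  max_enc, max_dec = buckets[-1]
  encoder_inputs = [
      array_ops.zeros([batch_size], dtype=dtypes.int32)
      for _ in xrange(max_enc)
  ]
  decoder_inputs = [
      array_ops.zeros([batch_size], dtype=dtypes.int32)
      for _ in xrange(max_dec)
  ]
  targets = [
      array_ops.zeros([batch_size], dtype=dtypes.int32)
      for _ in xrange(max_dec)
  ]
  weights = [array_ops.ones([batch_size]) for _ in xrange(max_dec)]

  def seq2seq_f(x, y):
    cell = rnn_cell_impl.GRUCell(8)
    return embedding_rnn_seq2seq(x, y, cell, num_encoder_symbols,
                                 num_decoder_symbols, embedding_size)

  return model_with_buckets(encoder_inputs, decoder_inputs, targets, weights,
                            buckets, seq2seq_f)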
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for image manipulation.
### API
This module provides functions for image manipulation; currently, chrominance
transforms (including changing saturation and hue) in YIQ space and
projective transforms (including rotation) are supported.
## Image Transformation `Ops`
@@angles_to_projective_transforms
@@compose_transforms
@@adjust_yiq_hsv
@@flat_transforms_to_matrices
@@matrices_to_flat_transforms
@@random_yiq_hsv
@@rotate
@@transform
@@translate
@@translations_to_projective_transforms
@@dense_image_warp
@@interpolate_spline
@@sparse_image_warp
## Image Segmentation `Ops`
@@connected_components
## Matching `Ops`
@@bipartite_match
## Random Dot Stereogram `Ops`
@@single_image_random_dot_stereograms
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.image.python.ops.dense_image_warp import dense_image_warp
from tensorflow.contrib.image.python.ops.distort_image_ops import adjust_hsv_in_yiq
from tensorflow.contrib.image.python.ops.distort_image_ops import random_hsv_in_yiq
from tensorflow.contrib.image.python.ops.image_ops import angles_to_projective_transforms
from tensorflow.contrib.image.python.ops.image_ops import bipartite_match
from tensorflow.contrib.image.python.ops.image_ops import compose_transforms
from tensorflow.contrib.image.python.ops.image_ops import connected_components
from tensorflow.contrib.image.python.ops.image_ops import flat_transforms_to_matrices
from tensorflow.contrib.image.python.ops.image_ops import matrices_to_flat_transforms
from tensorflow.contrib.image.python.ops.image_ops import rotate
from tensorflow.contrib.image.python.ops.image_ops import transform
from tensorflow.contrib.image.python.ops.image_ops import translate
from tensorflow.contrib.image.python.ops.image_ops import translations_to_projective_transforms
from tensorflow.contrib.image.python.ops.interpolate_spline import interpolate_spline
from tensorflow.contrib.image.python.ops.single_image_random_dot_stereograms import single_image_random_dot_stereograms
from tensorflow.contrib.image.python.ops.sparse_image_warp import sparse_image_warp
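# Illustrative usage sketch (added for exposition; not part of the original
# file). Assuming `images` is a float [batch, height, width, channels]
# tensor, a per-image rotation by angles given in radians might look like:
#
#   rotated = rotate(images, angles=[0.0, 1.57], interpolation="NEAREST")
#
# transform and translate follow the same batched convention.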
from tensorflow.python.util.all_util import remove_undocumented
# pylint: enable=line-too-long
remove_undocumented(__name__)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/image/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for python single_image_random_dot_stereograms_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.image.python.ops.single_image_random_dot_stereograms \
import single_image_random_dot_stereograms
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class SingleImageRandomDotStereogramsTest(test_util.TensorFlowTestCase):
def test_shape_function_default(self):
"""
NOTE: The output_image_shape is [X, Y, C]
while the output data is [Y, X, C] (or [H, W, C]).
As a result, by default the output_image_shape has the value
of [1024, 768, 1], but the output data will be [768, 1024, 1].
"""
x_np = [[1, 2, 3, 3, 2, 1],
[1, 2, 3, 4, 5, 2],
[1, 2, 3, 4, 5, 3],
[1, 2, 3, 4, 5, 4],
[6, 5, 4, 4, 5, 5]]
x_tf = constant_op.constant(x_np)
# By default [1024, 768, 1] => [768, 1024, 1].
sirds_1 = single_image_random_dot_stereograms(
x_tf,
convergence_dots_size=8,
number_colors=256,
normalize=True)
shape_1 = sirds_1.get_shape().as_list()
self.assertEqual(shape_1, [768, 1024, 1])
with self.cached_session():
r_tf_1 = sirds_1.eval()
self.assertAllEqual(shape_1, r_tf_1.shape)
# If color > 256 then [1024, 768, 3] => [768, 1024, 3].
sirds_2 = single_image_random_dot_stereograms(
x_tf,
convergence_dots_size=8,
number_colors=512,
normalize=True)
shape_2 = sirds_2.get_shape().as_list()
self.assertEqual(shape_2, [768, 1024, 3])
with self.cached_session():
r_tf_2 = sirds_2.eval()
self.assertAllEqual(shape_2, r_tf_2.shape)
# If explicitly set output_image_shape to [1200, 800, 1],
# then the output data should be [800, 1200, 1].
sirds_3 = single_image_random_dot_stereograms(
x_tf,
convergence_dots_size=8,
number_colors=256,
normalize=True,
output_image_shape=[1200, 800, 1])
shape_3 = sirds_3.get_shape().as_list()
self.assertEqual(shape_3, [800, 1200, 1])
with self.cached_session():
r_tf_3 = sirds_3.eval()
self.assertAllEqual(shape_3, r_tf_3.shape)
if __name__ == '__main__':
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/image/python/kernel_tests/single_image_random_dot_stereograms_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for interpolate_spline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import interpolate as sc_interpolate
from tensorflow.contrib.image.python.ops import interpolate_spline
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import momentum
class _InterpolationProblem(object):
"""Abstract class for interpolation problem descriptions."""
def get_problem(self, optimizable=False, extrapolate=True, dtype='float32'):
"""Make data for an interpolation problem where all x vectors are n-d.
Args:
optimizable: If True, then make train_points a tf.Variable.
extrapolate: If False, then clamp the query_points values to be within
the max and min of train_points.
dtype: The data type to use.
Returns:
query_points, query_values, train_points, train_values: training and
test tensors for interpolation problem
"""
# The values generated here depend on a seed of 0.
np.random.seed(0)
batch_size = 1
num_training_points = 10
num_query_points = 4
init_points = np.random.uniform(
size=[batch_size, num_training_points, self.DATA_DIM])
init_points = init_points.astype(dtype)
train_points = (
variables.Variable(init_points)
if optimizable else constant_op.constant(init_points))
train_values = self.tf_function(train_points)
query_points_np = np.random.uniform(
size=[batch_size, num_query_points, self.DATA_DIM])
query_points_np = query_points_np.astype(dtype)
if not extrapolate:
query_points_np = np.clip(query_points_np, np.min(init_points),
np.max(init_points))
query_points = constant_op.constant(query_points_np)
query_values = self.np_function(query_points_np)
return query_points, query_values, train_points, train_values
class _QuadraticPlusSinProblem1D(_InterpolationProblem):
"""1D interpolation problem used for regression testing."""
DATA_DIM = 1
HARDCODED_QUERY_VALUES = {
(1.0, 0.0): [6.2647187603, -7.84362604077, -5.63690142322, 1.42928896387],
(1.0,
0.01): [6.77688289946, -8.02163669853, -5.79491157027, 1.4063285693],
(2.0,
0.0): [8.67110264937, -8.41281390883, -5.80190044693, 1.50155606059],
(2.0,
0.01): [6.70797816797, -7.49709587663, -5.28965776238, 1.52284731741],
(3.0,
0.0): [9.37691802935, -8.50390141515, -5.80786417426, 1.63467762122],
(3.0,
0.01): [4.47106304758, -5.71266128361, -3.92529303296, 1.86755293857],
(4.0,
0.0): [9.58172461111, -8.51432104771, -5.80967675388, 1.63361164256],
(4.0, 0.01): [
-3.87902711352, -0.0253462273846, 1.79857618022, -0.769339675725
]
}
def np_function(self, x):
"""Takes np array, evaluates the test function, and returns np array."""
return np.sum(
np.power((x - 0.5), 3) - 0.25 * x + 10 * np.sin(x * 10),
axis=2,
keepdims=True)
def tf_function(self, x):
"""Takes tf tensor, evaluates the test function, and returns tf tensor."""
return math_ops.reduce_mean(
math_ops.pow((x - 0.5), 3) - 0.25 * x + 10 * math_ops.sin(x * 10),
2,
keepdims=True)
class _QuadraticPlusSinProblemND(_InterpolationProblem):
"""3D interpolation problem used for regression testing."""
DATA_DIM = 3
HARDCODED_QUERY_VALUES = {
(1.0, 0.0): [1.06609663962, 1.28894849357, 1.10882405595, 1.63966936885],
(1.0, 0.01): [1.03123780748, 1.2952930985, 1.10366822954, 1.65265118569],
(2.0, 0.0): [0.627787735064, 1.43802857251, 1.00194632358, 1.91667538215],
(2.0, 0.01): [0.730159985046, 1.41702471595, 1.0065827217, 1.85758519312],
(3.0, 0.0): [0.350460417862, 1.67223539464, 1.00475331246, 2.31580322491],
(3.0,
0.01): [0.624557250556, 1.63138876667, 0.976588193162, 2.12511237866],
(4.0,
0.0): [0.898129669986, 1.24434133638, -0.938056116931, 1.59910338833],
(4.0,
0.01): [0.0930360338179, -3.38791305538, -1.00969032567, 0.745535080382],
}
def np_function(self, x):
"""Takes np array, evaluates the test function, and returns np array."""
return np.sum(
np.square(x - 0.5) + 0.25 * x + 1 * np.sin(x * 15),
axis=2,
keepdims=True)
def tf_function(self, x):
"""Takes tf tensor, evaluates the test function, and returns tf tensor."""
return math_ops.reduce_sum(
math_ops.square(x - 0.5) + 0.25 * x + 1 * math_ops.sin(x * 15),
2,
keepdims=True)
class InterpolateSplineTest(test_util.TensorFlowTestCase):
def test_1d_linear_interpolation(self):
"""For 1d linear interpolation, we can compare directly to scipy."""
tp = _QuadraticPlusSinProblem1D()
(query_points, _, train_points, train_values) = tp.get_problem(
extrapolate=False, dtype='float64')
interpolation_order = 1
with ops.name_scope('interpolator'):
interpolator = interpolate_spline.interpolate_spline(
train_points, train_values, query_points, interpolation_order)
with self.cached_session() as sess:
fetches = [query_points, train_points, train_values, interpolator]
query_points_, train_points_, train_values_, interp_ = sess.run(fetches)
# Just look at the first element of the minibatch.
# Also, trim the final singleton dimension.
interp_ = interp_[0, :, 0]
query_points_ = query_points_[0, :, 0]
train_points_ = train_points_[0, :, 0]
train_values_ = train_values_[0, :, 0]
# Compute scipy interpolation.
scipy_interp_function = sc_interpolate.interp1d(
train_points_, train_values_, kind='linear')
scipy_interpolation = scipy_interp_function(query_points_)
scipy_interpolation_on_train = scipy_interp_function(train_points_)
      # Even with float64 precision, the interpolants disagree with scipy
      # slightly because we add EPSILON to prevent sqrt(0), etc.
tol = 1e-3
self.assertAllClose(
train_values_, scipy_interpolation_on_train, atol=tol, rtol=tol)
self.assertAllClose(interp_, scipy_interpolation, atol=tol, rtol=tol)
def test_1d_interpolation(self):
"""Regression test for interpolation with 1-D points."""
tp = _QuadraticPlusSinProblem1D()
(query_points, _, train_points,
train_values) = tp.get_problem(dtype='float64')
for order in (1, 2, 3):
for reg_weight in (0, 0.01):
interpolator = interpolate_spline.interpolate_spline(
train_points, train_values, query_points, order, reg_weight)
target_interpolation = tp.HARDCODED_QUERY_VALUES[(order, reg_weight)]
target_interpolation = np.array(target_interpolation)
with self.cached_session() as sess:
interp_val = sess.run(interpolator)
self.assertAllClose(interp_val[0, :, 0], target_interpolation)
def test_nd_linear_interpolation(self):
"""Regression test for interpolation with N-D points."""
tp = _QuadraticPlusSinProblemND()
(query_points, _, train_points,
train_values) = tp.get_problem(dtype='float64')
for order in (1, 2, 3):
for reg_weight in (0, 0.01):
interpolator = interpolate_spline.interpolate_spline(
train_points, train_values, query_points, order, reg_weight)
target_interpolation = tp.HARDCODED_QUERY_VALUES[(order, reg_weight)]
target_interpolation = np.array(target_interpolation)
with self.cached_session() as sess:
interp_val = sess.run(interpolator)
self.assertAllClose(interp_val[0, :, 0], target_interpolation)
def test_nd_linear_interpolation_unspecified_shape(self):
"""Ensure that interpolation supports dynamic batch_size and num_points."""
tp = _QuadraticPlusSinProblemND()
(query_points, _, train_points,
train_values) = tp.get_problem(dtype='float64')
# Construct placeholders such that the batch size, number of train points,
# and number of query points are not known at graph construction time.
feature_dim = query_points.shape[-1]
value_dim = train_values.shape[-1]
train_points_ph = array_ops.placeholder(
dtype=train_points.dtype, shape=[None, None, feature_dim])
train_values_ph = array_ops.placeholder(
dtype=train_values.dtype, shape=[None, None, value_dim])
query_points_ph = array_ops.placeholder(
dtype=query_points.dtype, shape=[None, None, feature_dim])
order = 1
reg_weight = 0.01
interpolator = interpolate_spline.interpolate_spline(
train_points_ph, train_values_ph, query_points_ph, order, reg_weight)
target_interpolation = tp.HARDCODED_QUERY_VALUES[(order, reg_weight)]
target_interpolation = np.array(target_interpolation)
with self.cached_session() as sess:
(train_points_value, train_values_value, query_points_value) = sess.run(
[train_points, train_values, query_points])
interp_val = sess.run(
interpolator,
feed_dict={
train_points_ph: train_points_value,
train_values_ph: train_values_value,
query_points_ph: query_points_value
})
self.assertAllClose(interp_val[0, :, 0], target_interpolation)
def test_fully_unspecified_shape(self):
"""Ensure that erreor is thrown when input/output dim unspecified."""
tp = _QuadraticPlusSinProblemND()
(query_points, _, train_points,
train_values) = tp.get_problem(dtype='float64')
# Construct placeholders such that the batch size, number of train points,
# and number of query points are not known at graph construction time.
feature_dim = query_points.shape[-1]
value_dim = train_values.shape[-1]
train_points_ph = array_ops.placeholder(
dtype=train_points.dtype, shape=[None, None, feature_dim])
train_points_ph_invalid = array_ops.placeholder(
dtype=train_points.dtype, shape=[None, None, None])
train_values_ph = array_ops.placeholder(
dtype=train_values.dtype, shape=[None, None, value_dim])
train_values_ph_invalid = array_ops.placeholder(
dtype=train_values.dtype, shape=[None, None, None])
query_points_ph = array_ops.placeholder(
dtype=query_points.dtype, shape=[None, None, feature_dim])
order = 1
reg_weight = 0.01
with self.assertRaises(ValueError):
_ = interpolate_spline.interpolate_spline(
train_points_ph_invalid, train_values_ph, query_points_ph, order,
reg_weight)
with self.assertRaises(ValueError):
_ = interpolate_spline.interpolate_spline(
train_points_ph, train_values_ph_invalid, query_points_ph, order,
reg_weight)
def test_interpolation_gradient(self):
"""Make sure that backprop can run. Correctness of gradients is assumed.
    Here, we create a small 'training' set and a more densely-sampled
set of query points, for which we know the true value in advance. The goal
is to choose x locations for the training data such that interpolating using
this training data yields the best reconstruction for the function
values at the query points. The training data locations are optimized
iteratively using gradient descent.
"""
tp = _QuadraticPlusSinProblemND()
(query_points, query_values, train_points,
train_values) = tp.get_problem(optimizable=True)
regularization = 0.001
for interpolation_order in (1, 2, 3, 4):
interpolator = interpolate_spline.interpolate_spline(
train_points, train_values, query_points, interpolation_order,
regularization)
loss = math_ops.reduce_mean(math_ops.square(query_values - interpolator))
optimizer = momentum.MomentumOptimizer(0.001, 0.9)
grad = gradients.gradients(loss, [train_points])
grad, _ = clip_ops.clip_by_global_norm(grad, 1.0)
opt_func = optimizer.apply_gradients(zip(grad, [train_points]))
init_op = variables.global_variables_initializer()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(100):
sess.run([loss, opt_func])
if __name__ == '__main__':
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/image/python/kernel_tests/interpolate_spline_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for connected component analysis."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
from tensorflow.contrib.image.python.ops import image_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
# Image for testing connected_components, with a single, winding component.
SNAKE = np.asarray(
[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]]) # pyformat: disable
class SegmentationTest(test_util.TensorFlowTestCase):
def testDisconnected(self):
arr = math_ops.cast(
[[1, 0, 0, 1, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 1, 0, 1, 0],
[1, 0, 1, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0]],
dtypes.bool) # pyformat: disable
expected = (
[[1, 0, 0, 2, 0, 0, 0, 0, 3],
[0, 4, 0, 0, 0, 5, 0, 6, 0],
[7, 0, 8, 0, 0, 0, 9, 0, 0],
[0, 0, 0, 0, 10, 0, 0, 0, 0],
[0, 0, 11, 0, 0, 0, 0, 0, 0]]) # pyformat: disable
with self.cached_session():
self.assertAllEqual(image_ops.connected_components(arr).eval(), expected)
def testSimple(self):
arr = [[0, 1, 0], [1, 1, 1], [0, 1, 0]]
with self.cached_session():
# Single component with id 1.
self.assertAllEqual(
image_ops.connected_components(math_ops.cast(
arr, dtypes.bool)).eval(), arr)
def testSnake(self):
with self.cached_session():
# Single component with id 1.
self.assertAllEqual(
image_ops.connected_components(math_ops.cast(
SNAKE, dtypes.bool)).eval(), SNAKE)
def testSnake_disconnected(self):
for i in range(SNAKE.shape[0]):
for j in range(SNAKE.shape[1]):
with self.cached_session():
# If we disconnect any part of the snake except for the endpoints,
# there will be 2 components.
if SNAKE[i, j] and (i, j) not in [(1, 1), (6, 3)]:
disconnected_snake = SNAKE.copy()
disconnected_snake[i, j] = 0
components = image_ops.connected_components(
math_ops.cast(disconnected_snake, dtypes.bool)).eval()
self.assertEqual(components.max(), 2, 'disconnect (%d, %d)' % (i,
j))
bins = np.bincount(components.ravel())
# Nonzero number of pixels labeled 0, 1, or 2.
self.assertGreater(bins[0], 0)
self.assertGreater(bins[1], 0)
self.assertGreater(bins[2], 0)
def testMultipleImages(self):
images = [[[1, 1, 1, 1],
[1, 0, 0, 1],
[1, 0, 0, 1],
[1, 1, 1, 1]],
[[1, 0, 0, 1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 0, 1]],
[[1, 1, 0, 1],
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 0, 1, 1]]] # pyformat: disable
expected = [[[1, 1, 1, 1],
[1, 0, 0, 1],
[1, 0, 0, 1],
[1, 1, 1, 1]],
[[2, 0, 0, 3],
[0, 0, 0, 0],
[0, 0, 0, 0],
[4, 0, 0, 5]],
[[6, 6, 0, 7],
[0, 6, 6, 0],
[8, 0, 6, 0],
[0, 0, 6, 6]]] # pyformat: disable
with self.cached_session():
self.assertAllEqual(
image_ops.connected_components(math_ops.cast(
images, dtypes.bool)).eval(), expected)
def testZeros(self):
with self.cached_session():
self.assertAllEqual(
image_ops.connected_components(
array_ops.zeros((100, 20, 50), dtypes.bool)).eval(),
np.zeros((100, 20, 50)))
def testOnes(self):
with self.cached_session():
self.assertAllEqual(
image_ops.connected_components(
array_ops.ones((100, 20, 50), dtypes.bool)).eval(),
np.tile(np.arange(100)[:, None, None] + 1, [1, 20, 50]))
def testOnes_small(self):
with self.cached_session():
self.assertAllEqual(
image_ops.connected_components(array_ops.ones((3, 5),
dtypes.bool)).eval(),
np.ones((3, 5)))
def testRandom_scipy(self):
np.random.seed(42)
images = np.random.randint(0, 2, size=(10, 100, 200)).astype(np.bool)
expected = connected_components_reference_implementation(images)
if expected is None:
return
with self.cached_session():
self.assertAllEqual(
image_ops.connected_components(images).eval(), expected)
def connected_components_reference_implementation(images):
try:
# pylint: disable=g-import-not-at-top
from scipy.ndimage import measurements
except ImportError:
logging.exception('Skipping test method because scipy could not be loaded')
return
image_or_images = np.asarray(images)
if len(image_or_images.shape) == 2:
images = image_or_images[None, :, :]
elif len(image_or_images.shape) == 3:
images = image_or_images
components = np.asarray([measurements.label(image)[0] for image in images])
# Get the count of nonzero ids for each image, and offset each image's nonzero
# ids using the cumulative sum.
num_ids_per_image = components.reshape(
[-1, components.shape[1] * components.shape[2]]).max(axis=-1)
positive_id_start_per_image = np.cumsum(num_ids_per_image)
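  # E.g. with per-image max ids [2, 1, 3] the cumulative sum is [2, 3, 6], so
  # image 1's labels are offset by 2 and image 2's by 3, keeping component ids
  # unique across the batch (matching image_ops.connected_components).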
for i in range(components.shape[0]):
new_id_start = positive_id_start_per_image[i - 1] if i > 0 else 0
components[i, components[i] > 0] += new_id_start
if len(image_or_images.shape) == 2:
return components[0, :, :]
else:
return components
if __name__ == '__main__':
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/image/python/kernel_tests/segmentation_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dense_image_warp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.contrib.image.python.ops import dense_image_warp
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import adam
class DenseImageWarpTest(test_util.TensorFlowTestCase):
def setUp(self):
np.random.seed(0)
def test_interpolate_small_grid_ij(self):
grid = constant_op.constant(
[[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]], shape=[1, 3, 3, 1])
query_points = constant_op.constant(
[[0., 0.], [1., 0.], [2., 0.5], [1.5, 1.5]], shape=[1, 4, 2])
expected_results = np.reshape(np.array([0., 3., 6.5, 6.]), [1, 4, 1])
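    # Worked example for the expected values above: the query (2, 0.5) lies on
    # grid row 2 ([6., 7., 8.]) halfway between 6. and 7., giving 6.5, and the
    # query (1.5, 1.5) averages its four neighbors 4., 5., 7., 8., giving 6.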
interp = dense_image_warp._interpolate_bilinear(grid, query_points)
with self.cached_session() as sess:
predicted = sess.run(interp)
self.assertAllClose(expected_results, predicted)
def test_interpolate_small_grid_xy(self):
grid = constant_op.constant(
[[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]], shape=[1, 3, 3, 1])
query_points = constant_op.constant(
[[0., 0.], [0., 1.], [0.5, 2.0], [1.5, 1.5]], shape=[1, 4, 2])
expected_results = np.reshape(np.array([0., 3., 6.5, 6.]), [1, 4, 1])
interp = dense_image_warp._interpolate_bilinear(
grid, query_points, indexing='xy')
with self.cached_session() as sess:
predicted = sess.run(interp)
self.assertAllClose(expected_results, predicted)
def test_interpolate_small_grid_batched(self):
grid = constant_op.constant(
[[[0., 1.], [3., 4.]], [[5., 6.], [7., 8.]]], shape=[2, 2, 2, 1])
query_points = constant_op.constant([[[0., 0.], [1., 0.], [0.5, 0.5]],
[[0.5, 0.], [1., 0.], [1., 1.]]])
expected_results = np.reshape(
np.array([[0., 3., 2.], [6., 7., 8.]]), [2, 3, 1])
interp = dense_image_warp._interpolate_bilinear(grid, query_points)
with self.cached_session() as sess:
predicted = sess.run(interp)
self.assertAllClose(expected_results, predicted)
def get_image_and_flow_placeholders(self, shape, image_type, flow_type):
batch_size, height, width, numchannels = shape
image_shape = [batch_size, height, width, numchannels]
flow_shape = [batch_size, height, width, 2]
tf_type = {
'float16': dtypes.half,
'float32': dtypes.float32,
'float64': dtypes.float64
}
image = array_ops.placeholder(dtype=tf_type[image_type], shape=image_shape)
flows = array_ops.placeholder(dtype=tf_type[flow_type], shape=flow_shape)
return image, flows
def get_random_image_and_flows(self, shape, image_type, flow_type):
batch_size, height, width, numchannels = shape
image_shape = [batch_size, height, width, numchannels]
image = np.random.normal(size=image_shape)
flow_shape = [batch_size, height, width, 2]
flows = np.random.normal(size=flow_shape) * 3
return image.astype(image_type), flows.astype(flow_type)
def assert_correct_interpolation_value(self,
image,
flows,
pred_interpolation,
batch_index,
y_index,
x_index,
low_precision=False):
"""Assert that the tf interpolation matches hand-computed value."""
height = image.shape[1]
width = image.shape[2]
displacement = flows[batch_index, y_index, x_index, :]
float_y = y_index - displacement[0]
float_x = x_index - displacement[1]
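    # dense_image_warp samples the input at (y - flow_y, x - flow_x); the
    # bilinear interpolation below reproduces that lookup by hand, clamping
    # the sample location to the image interior.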
floor_y = max(min(height - 2, math.floor(float_y)), 0)
floor_x = max(min(width - 2, math.floor(float_x)), 0)
ceil_y = floor_y + 1
ceil_x = floor_x + 1
alpha_y = min(max(0.0, float_y - floor_y), 1.0)
alpha_x = min(max(0.0, float_x - floor_x), 1.0)
floor_y = int(floor_y)
floor_x = int(floor_x)
ceil_y = int(ceil_y)
ceil_x = int(ceil_x)
top_left = image[batch_index, floor_y, floor_x, :]
top_right = image[batch_index, floor_y, ceil_x, :]
bottom_left = image[batch_index, ceil_y, floor_x, :]
bottom_right = image[batch_index, ceil_y, ceil_x, :]
interp_top = alpha_x * (top_right - top_left) + top_left
interp_bottom = alpha_x * (bottom_right - bottom_left) + bottom_left
interp = alpha_y * (interp_bottom - interp_top) + interp_top
atol = 1e-6
rtol = 1e-6
if low_precision:
atol = 1e-2
rtol = 1e-3
self.assertAllClose(
interp,
pred_interpolation[batch_index, y_index, x_index, :],
atol=atol,
rtol=rtol)
def check_zero_flow_correctness(self, shape, image_type, flow_type):
"""Assert using zero flows doesn't change the input image."""
image, flows = self.get_image_and_flow_placeholders(shape, image_type,
flow_type)
interp = dense_image_warp.dense_image_warp(image, flows)
with self.cached_session() as sess:
rand_image, rand_flows = self.get_random_image_and_flows(
shape, image_type, flow_type)
rand_flows *= 0
predicted_interpolation = sess.run(
interp, feed_dict={
image: rand_image,
flows: rand_flows
})
self.assertAllClose(rand_image, predicted_interpolation)
def test_zero_flows(self):
"""Apply check_zero_flow_correctness() for a few sizes and types."""
shapes_to_try = [[3, 4, 5, 6], [1, 2, 2, 1]]
for shape in shapes_to_try:
self.check_zero_flow_correctness(
shape, image_type='float32', flow_type='float32')
def check_interpolation_correctness(self,
shape,
image_type,
flow_type,
num_probes=5):
"""Interpolate, and then assert correctness for a few query locations."""
image, flows = self.get_image_and_flow_placeholders(shape, image_type,
flow_type)
interp = dense_image_warp.dense_image_warp(image, flows)
low_precision = image_type == 'float16' or flow_type == 'float16'
with self.cached_session() as sess:
rand_image, rand_flows = self.get_random_image_and_flows(
shape, image_type, flow_type)
pred_interpolation = sess.run(
interp, feed_dict={
image: rand_image,
flows: rand_flows
})
for _ in range(num_probes):
batch_index = np.random.randint(0, shape[0])
y_index = np.random.randint(0, shape[1])
x_index = np.random.randint(0, shape[2])
self.assert_correct_interpolation_value(
rand_image,
rand_flows,
pred_interpolation,
batch_index,
y_index,
x_index,
low_precision=low_precision)
def test_interpolation(self):
"""Apply check_interpolation_correctness() for a few sizes and types."""
shapes_to_try = [[3, 4, 5, 6], [1, 5, 5, 3], [1, 2, 2, 1]]
for im_type in ['float32', 'float64', 'float16']:
for flow_type in ['float32', 'float64', 'float16']:
for shape in shapes_to_try:
self.check_interpolation_correctness(shape, im_type, flow_type)
def test_gradients_exist(self):
"""Check that backprop can run.
The correctness of the gradients is assumed, since the forward propagation
is tested to be correct and we only use built-in tf ops.
However, we perform a simple test to make sure that backprop can actually
run. We treat the flows as a tf.Variable and optimize them to minimize
the difference between the interpolated image and the input image.
"""
batch_size, height, width, numchannels = [4, 5, 6, 7]
image_shape = [batch_size, height, width, numchannels]
image = random_ops.random_normal(image_shape)
flow_shape = [batch_size, height, width, 2]
init_flows = np.float32(np.random.normal(size=flow_shape) * 0.25)
flows = variables.Variable(init_flows)
interp = dense_image_warp.dense_image_warp(image, flows)
loss = math_ops.reduce_mean(math_ops.square(interp - image))
optimizer = adam.AdamOptimizer(1.0)
grad = gradients.gradients(loss, [flows])
opt_func = optimizer.apply_gradients(zip(grad, [flows]))
init_op = variables.global_variables_initializer()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(10):
sess.run(opt_func)
def test_size_exception(self):
"""Make sure it throws an exception for images that are too small."""
shape = [1, 2, 1, 1]
msg = 'Should have raised an exception for invalid image size'
with self.assertRaises(errors.InvalidArgumentError, msg=msg):
self.check_interpolation_correctness(shape, 'float32', 'float32')
if __name__ == '__main__':
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/image/python/kernel_tests/dense_image_warp_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for python distort_image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.image.python.ops import distort_image_ops
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
# TODO(huangyp): also measure the differences between AdjustHsvInYiq and
# AdjustHsv in core.
class AdjustHueInYiqTest(test_util.TensorFlowTestCase):
def _adjust_hue_in_yiq_np(self, x_np, delta_h):
"""Rotate hue in YIQ space.
    Mathematically, we first convert the rgb color to yiq space, rotate the
    hue by delta_h radians, and then convert back to rgb.
Args:
x_np: input x with last dimension = 3.
      delta_h: amount of hue rotation, in radians.
Returns:
Adjusted y with the same shape as x_np.
"""
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
u = np.cos(delta_h)
w = np.sin(delta_h)
# Projection matrix from RGB to YIQ. Numbers from wikipedia
# https://en.wikipedia.org/wiki/YIQ
tyiq = np.array([[0.299, 0.587, 0.114], [0.596, -0.274, -0.322],
[0.211, -0.523, 0.312]])
y_v = np.dot(x_v, tyiq.T)
# Hue rotation matrix in YIQ space.
hue_rotation = np.array([[1.0, 0.0, 0.0], [0.0, u, -w], [0.0, w, u]])
y_v = np.dot(y_v, hue_rotation.T)
# Projecting back to RGB space.
y_v = np.dot(y_v, np.linalg.inv(tyiq).T)
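    # Overall this computes y = inv(T) * R * T * x: rotate about the luma (Y)
    # axis in YIQ space by delta_h, then project back to RGB.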
return y_v.reshape(x_np.shape)
def _adjust_hue_in_yiq_tf(self, x_np, delta_h):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np)
y = distort_image_ops.adjust_hsv_in_yiq(x, delta_h, 1, 1)
y_tf = y.eval()
return y_tf
def test_adjust_random_hue_in_yiq(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
'all_random',
'rg_same',
'rb_same',
'gb_same',
'rgb_same',
]
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
delta_h = (np.random.rand() * 2.0 - 1.0) * np.pi
if test_style == 'all_random':
pass
elif test_style == 'rg_same':
x_np[..., 1] = x_np[..., 0]
elif test_style == 'rb_same':
x_np[..., 2] = x_np[..., 0]
elif test_style == 'gb_same':
x_np[..., 2] = x_np[..., 1]
elif test_style == 'rgb_same':
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError('Invalid test style: %s' % (test_style))
y_np = self._adjust_hue_in_yiq_np(x_np, delta_h)
y_tf = self._adjust_hue_in_yiq_tf(x_np, delta_h)
self.assertAllClose(y_tf, y_np, rtol=2e-4, atol=1e-4)
def test_invalid_shapes(self):
x_np = np.random.rand(2, 3) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesRegexp(ValueError, 'Shape must be at least rank 3'):
self._adjust_hue_in_yiq_tf(x_np, delta_h)
x_np = np.random.rand(4, 2, 4) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesOpError('input must have 3 channels but instead has '
'4 channels'):
self._adjust_hue_in_yiq_tf(x_np, delta_h)
class AdjustValueInYiqTest(test_util.TensorFlowTestCase):
def _adjust_value_in_yiq_np(self, x_np, scale):
return x_np * scale
def _adjust_value_in_yiq_tf(self, x_np, scale):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np)
y = distort_image_ops.adjust_hsv_in_yiq(x, 0, 1, scale)
y_tf = y.eval()
return y_tf
def test_adjust_random_value_in_yiq(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
'all_random',
'rg_same',
'rb_same',
'gb_same',
'rgb_same',
]
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
scale = np.random.rand() * 2.0 - 1.0
if test_style == 'all_random':
pass
elif test_style == 'rg_same':
x_np[..., 1] = x_np[..., 0]
elif test_style == 'rb_same':
x_np[..., 2] = x_np[..., 0]
elif test_style == 'gb_same':
x_np[..., 2] = x_np[..., 1]
elif test_style == 'rgb_same':
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError('Invalid test style: %s' % (test_style))
y_np = self._adjust_value_in_yiq_np(x_np, scale)
y_tf = self._adjust_value_in_yiq_tf(x_np, scale)
self.assertAllClose(y_tf, y_np, rtol=2e-4, atol=1e-4)
def test_invalid_shapes(self):
x_np = np.random.rand(2, 3) * 255.
scale = np.random.rand() * 2.0 - 1.0
with self.assertRaisesRegexp(ValueError, 'Shape must be at least rank 3'):
self._adjust_value_in_yiq_tf(x_np, scale)
x_np = np.random.rand(4, 2, 4) * 255.
scale = np.random.rand() * 2.0 - 1.0
with self.assertRaisesOpError('input must have 3 channels but instead has '
'4 channels'):
self._adjust_value_in_yiq_tf(x_np, scale)
class AdjustSaturationInYiqTest(test_util.TensorFlowTestCase):
def _adjust_saturation_in_yiq_tf(self, x_np, scale):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np)
y = distort_image_ops.adjust_hsv_in_yiq(x, 0, scale, 1)
y_tf = y.eval()
return y_tf
def _adjust_saturation_in_yiq_np(self, x_np, scale):
"""Adjust saturation using linear interpolation."""
rgb_weights = np.array([0.299, 0.587, 0.114])
gray = np.sum(x_np * rgb_weights, axis=-1, keepdims=True)
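    # Blend each pixel with its luma-weighted gray value: scale=1 returns the
    # input unchanged, scale=0 returns a grayscale image.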
y_v = x_np * scale + gray * (1 - scale)
return y_v
def test_adjust_random_saturation_in_yiq(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
'all_random',
'rg_same',
'rb_same',
'gb_same',
'rgb_same',
]
with self.cached_session():
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
scale = np.random.rand() * 2.0 - 1.0
if test_style == 'all_random':
pass
elif test_style == 'rg_same':
x_np[..., 1] = x_np[..., 0]
elif test_style == 'rb_same':
x_np[..., 2] = x_np[..., 0]
elif test_style == 'gb_same':
x_np[..., 2] = x_np[..., 1]
elif test_style == 'rgb_same':
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError('Invalid test style: %s' % (test_style))
y_baseline = self._adjust_saturation_in_yiq_np(x_np, scale)
y_tf = self._adjust_saturation_in_yiq_tf(x_np, scale)
self.assertAllClose(y_tf, y_baseline, rtol=2e-4, atol=1e-4)
def test_invalid_shapes(self):
x_np = np.random.rand(2, 3) * 255.
scale = np.random.rand() * 2.0 - 1.0
with self.assertRaisesRegexp(ValueError, 'Shape must be at least rank 3'):
self._adjust_saturation_in_yiq_tf(x_np, scale)
x_np = np.random.rand(4, 2, 4) * 255.
scale = np.random.rand() * 2.0 - 1.0
with self.assertRaisesOpError('input must have 3 channels but instead has '
'4 channels'):
self._adjust_saturation_in_yiq_tf(x_np, scale)
class AdjustHueInYiqBenchmark(test.Benchmark):
def _benchmark_adjust_hue_in_yiq(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
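      # Pinning inter-op parallelism to 1 and intra-op to cpu_count makes the
      # benchmark reflect a fixed number of CPU cores.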
with session.Session('', graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = distort_image_ops.adjust_hsv_in_yiq(inputs, delta, 1, 1)
run_op = control_flow_ops.group(outputs)
sess.run(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
sess.run(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + '_%s' % (cpu_count if cpu_count is not None else 'all')
print('benchmarkadjust_hue_in_yiq_299_299_3_%s step_time: %.2f us' %
(tag, step_time * 1e6))
self.report_benchmark(
name='benchmarkadjust_hue_in_yiq_299_299_3_%s' % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmark_adjust_hue_in_yiqCpu1(self):
self._benchmark_adjust_hue_in_yiq('/cpu:0', 1)
def benchmark_adjust_hue_in_yiqCpuAll(self):
self._benchmark_adjust_hue_in_yiq('/cpu:0', None)
def benchmark_adjust_hue_in_yiq_gpu_all(self):
self._benchmark_adjust_hue_in_yiq(test.gpu_device_name(), None)
class AdjustSaturationInYiqBenchmark(test.Benchmark):
def _benchmark_adjust_saturation_in_yiq(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session('', graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
scale = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = distort_image_ops.adjust_hsv_in_yiq(inputs, 0, scale, 1)
run_op = control_flow_ops.group(outputs)
sess.run(variables.global_variables_initializer())
for _ in xrange(warmup_rounds):
sess.run(run_op)
start = time.time()
for _ in xrange(benchmark_rounds):
sess.run(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = '%s' % (cpu_count) if cpu_count is not None else '_all'
print('benchmarkAdjustSaturationInYiq_299_299_3_cpu%s step_time: %.2f us' %
(tag, step_time * 1e6))
self.report_benchmark(
name='benchmarkAdjustSaturationInYiq_299_299_3_cpu%s' % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmark_adjust_saturation_in_yiq_cpu1(self):
self._benchmark_adjust_saturation_in_yiq('/cpu:0', 1)
def benchmark_adjust_saturation_in_yiq_cpu_all(self):
self._benchmark_adjust_saturation_in_yiq('/cpu:0', None)
def benchmark_adjust_saturation_in_yiq_gpu_all(self):
self._benchmark_adjust_saturation_in_yiq(test.gpu_device_name(), None)
if __name__ == '__main__':
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/image/python/kernel_tests/distort_image_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.image.ops import gen_image_ops
from tensorflow.contrib.image.python.ops import image_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import googletest
_DTYPES = set([
dtypes.uint8, dtypes.int32, dtypes.int64, dtypes.float16, dtypes.float32,
dtypes.float64
])
class ImageOpsTest(test_util.TensorFlowTestCase):
def test_zeros(self):
for dtype in _DTYPES:
with self.cached_session():
for shape in [(5, 5), (24, 24), (2, 24, 24, 3)]:
for angle in [0, 1, np.pi / 2.0]:
image = array_ops.zeros(shape, dtype)
self.assertAllEqual(
image_ops.rotate(image, angle).eval(),
np.zeros(shape, dtype.as_numpy_dtype()))
def test_rotate_even(self):
for dtype in _DTYPES:
with self.cached_session():
image = array_ops.reshape(
math_ops.cast(math_ops.range(36), dtype), (6, 6))
image_rep = array_ops.tile(image[None, :, :, None], [3, 1, 1, 1])
angles = constant_op.constant([0.0, np.pi / 2.0, np.pi * 3. / 2.],
dtypes.float32)
image_rotated = image_ops.rotate(image_rep, angles)
# pyformat: disable
self.assertAllEqual(image_rotated[:, :, :, 0].eval(),
[[[0, 1, 2, 3, 4, 5],
[6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17],
[18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29],
[30, 31, 32, 33, 34, 35]],
[[5, 11, 17, 23, 29, 35],
[4, 10, 16, 22, 28, 34],
[3, 9, 15, 21, 27, 33],
[2, 8, 14, 20, 26, 32],
[1, 7, 13, 19, 25, 31],
[0, 6, 12, 18, 24, 30]],
[[30, 24, 18, 12, 6, 0],
[31, 25, 19, 13, 7, 1],
[32, 26, 20, 14, 8, 2],
[33, 27, 21, 15, 9, 3],
[34, 28, 22, 16, 10, 4],
[35, 29, 23, 17, 11, 5]]])
def test_rotate_odd(self):
for dtype in _DTYPES:
with self.cached_session():
image = array_ops.reshape(
math_ops.cast(math_ops.range(25), dtype), (5, 5))
image_rep = array_ops.tile(image[None, :, :, None], [3, 1, 1, 1])
angles = constant_op.constant([np.pi / 4.0, 1.0, -np.pi / 2.0],
dtypes.float32)
image_rotated = image_ops.rotate(image_rep, angles)
self.assertAllEqual(image_rotated[:, :, :, 0].eval(),
[[[0, 3, 8, 9, 0], [1, 7, 8, 13, 19],
[6, 6, 12, 18, 18], [5, 11, 16, 17, 23],
[0, 15, 16, 21, 0]],
[[0, 3, 9, 14, 0], [2, 7, 8, 13, 19],
[1, 6, 12, 18, 23], [5, 11, 16, 17, 22],
[0, 10, 15, 21, 0]],
[[20, 15, 10, 5, 0], [21, 16, 11, 6, 1],
[22, 17, 12, 7, 2], [23, 18, 13, 8, 3],
[24, 19, 14, 9, 4]]])
def test_translate(self):
for dtype in _DTYPES:
with self.cached_session():
image = constant_op.constant(
[[1, 0, 1, 0],
[0, 1, 0, 1],
[1, 0, 1, 0],
[0, 1, 0, 1]], dtype=dtype)
translation = constant_op.constant([-1, -1], dtypes.float32)
image_translated = image_ops.translate(image, translation)
self.assertAllEqual(image_translated.eval(),
[[1, 0, 1, 0],
[0, 1, 0, 0],
[1, 0, 1, 0],
[0, 0, 0, 0]])
def test_compose(self):
for dtype in _DTYPES:
with self.cached_session():
image = constant_op.constant(
[[1, 1, 1, 0],
[1, 0, 0, 0],
[1, 1, 1, 0],
[0, 0, 0, 0]], dtype=dtype)
# Rotate counter-clockwise by pi / 2.
rotation = image_ops.angles_to_projective_transforms(np.pi / 2, 4, 4)
# Translate right by 1 (the transformation matrix is always inverted,
# hence the -1).
translation = constant_op.constant([1, 0, -1,
0, 1, 0,
0, 0],
dtype=dtypes.float32)
composed = image_ops.compose_transforms(rotation, translation)
image_transformed = image_ops.transform(image, composed)
self.assertAllEqual(image_transformed.eval(),
[[0, 0, 0, 0],
[0, 1, 0, 1],
[0, 1, 0, 1],
[0, 1, 1, 1]])
def test_extreme_projective_transform(self):
for dtype in _DTYPES:
with self.cached_session():
image = constant_op.constant(
[[1, 0, 1, 0],
[0, 1, 0, 1],
[1, 0, 1, 0],
[0, 1, 0, 1]], dtype=dtype)
transformation = constant_op.constant([1, 0, 0, 0, 1, 0, -1, 0],
dtypes.float32)
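        # The 8 values are the flattened projective matrix
        # [a0, a1, a2, b0, b1, b2, c0, c1] (the final 1 is implicit). With
        # c0 = -1 the projective denominator k = 1 - x is <= 0 for output
        # columns x >= 1, so only column 0 maps back inside the input; the
        # rest is filled with zeros.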
image_transformed = image_ops.transform(image, transformation)
self.assertAllEqual(image_transformed.eval(),
[[1, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0]])
def test_bilinear(self):
with self.cached_session():
image = constant_op.constant(
[[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]],
dtypes.float32)
# The following result matches:
# >>> scipy.ndimage.rotate(image, 45, order=1, reshape=False)
# which uses spline interpolation of order 1, equivalent to bilinear
# interpolation.
self.assertAllClose(
image_ops.rotate(image, np.pi / 4.0, interpolation="BILINEAR").eval(),
[[0.000, 0.000, 0.343, 0.000, 0.000],
[0.000, 0.586, 0.914, 0.586, 0.000],
[0.343, 0.914, 0.000, 0.914, 0.343],
[0.000, 0.586, 0.914, 0.586, 0.000],
[0.000, 0.000, 0.343, 0.000, 0.000]],
atol=0.001)
self.assertAllClose(
image_ops.rotate(image, np.pi / 4.0, interpolation="NEAREST").eval(),
[[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[1, 1, 0, 1, 1],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0]])
def test_bilinear_uint8(self):
with self.cached_session():
image = constant_op.constant(
np.asarray(
[[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 255, 255, 255, 0.0],
[0.0, 255, 0.0, 255, 0.0],
[0.0, 255, 255, 255, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0]],
np.uint8),
dtypes.uint8)
# == np.rint((expected image above) * 255)
self.assertAllEqual(
image_ops.rotate(image, np.pi / 4.0, interpolation="BILINEAR").eval(),
[[0.0, 0.0, 87., 0.0, 0.0],
[0.0, 149, 233, 149, 0.0],
[87., 233, 0.0, 233, 87.],
[0.0, 149, 233, 149, 0.0],
[0.0, 0.0, 87., 0.0, 0.0]])
def test_rotate_static_shape(self):
image = array_ops.diag([1., 2., 3.])
result = image_ops.rotate(
image, random_ops.random_uniform((), -1, 1), interpolation="BILINEAR")
self.assertEqual(image.get_shape(), result.get_shape())
def test_transform_static_output_shape(self):
image = constant_op.constant([[1., 2.], [3., 4.]])
result = image_ops.transform(
image, random_ops.random_uniform([8], -1, 1),
output_shape=constant_op.constant([3, 5]))
self.assertAllEqual([3, 5], result.get_shape())
def _test_grad(self, shape_to_test):
with self.cached_session():
test_image_shape = shape_to_test
test_image = np.random.randn(*test_image_shape)
test_image_tensor = constant_op.constant(
test_image, shape=test_image_shape)
test_transform = image_ops.angles_to_projective_transforms(
np.pi / 2, 4, 4)
output_shape = test_image_shape
output = image_ops.transform(test_image_tensor, test_transform)
left_err = gradient_checker.compute_gradient_error(
test_image_tensor,
test_image_shape,
output,
output_shape,
x_init_value=test_image)
self.assertLess(left_err, 1e-10)
def _test_grad_different_shape(self, input_shape, output_shape):
with self.cached_session():
test_image_shape = input_shape
test_image = np.random.randn(*test_image_shape)
test_image_tensor = constant_op.constant(
test_image, shape=test_image_shape)
test_transform = image_ops.angles_to_projective_transforms(
np.pi / 2, 4, 4)
if len(output_shape) == 2:
resize_shape = output_shape
elif len(output_shape) == 3:
resize_shape = output_shape[0:2]
elif len(output_shape) == 4:
resize_shape = output_shape[1:3]
output = image_ops.transform(
images=test_image_tensor,
transforms=test_transform,
output_shape=resize_shape)
left_err = gradient_checker.compute_gradient_error(
test_image_tensor,
test_image_shape,
output,
output_shape,
x_init_value=test_image)
self.assertLess(left_err, 1e-10)
def test_grad(self):
self._test_grad([16, 16])
self._test_grad([4, 12, 12])
self._test_grad([3, 4, 12, 12])
self._test_grad_different_shape([16, 16], [8, 8])
self._test_grad_different_shape([4, 12, 3], [8, 24, 3])
self._test_grad_different_shape([3, 4, 12, 3], [3, 8, 24, 3])
def test_projective_transform_v1(self):
"""The original ImageProjectiveTransform op should take 2 arguments."""
image = constant_op.constant([[[[1], [0]], [[0], [1]]]])
transform = constant_op.constant([[1., 0., 0., 0., 1., 0., 0., 0.]])
result = gen_image_ops.image_projective_transform(
image, transform, interpolation="NEAREST")
with self.cached_session():
self.assertAllEqual([[[[1], [0]], [[0], [1]]]], result.eval())
def test_transform_data_types(self):
for dtype in _DTYPES:
image = constant_op.constant([[1, 2], [3, 4]], dtype=dtype)
value = image_ops.transform(image, [1] * 8)
with self.test_session(use_gpu=True):
self.assertAllEqual(
value.eval(),
np.array([[4, 4], [4, 4]]).astype(dtype.as_numpy_dtype()))
@test_util.run_in_graph_and_eager_modes
def test_transform_eager(self):
image = constant_op.constant([[1., 2.], [3., 4.]])
value = image_ops.transform(image, [1] * 8)
with self.test_session(use_gpu=True):
self.assertAllEqual(self.evaluate(value), np.array([[4, 4], [4, 4]]))
class BipartiteMatchTest(test_util.TensorFlowTestCase):
def _BipartiteMatchTest(self, distance_mat, distance_mat_shape,
num_valid_rows,
expected_row_to_col_match,
expected_col_to_row_match):
distance_mat_np = np.array(distance_mat, dtype=np.float32).reshape(
distance_mat_shape)
expected_row_to_col_match_np = np.array(expected_row_to_col_match,
dtype=np.int32)
expected_col_to_row_match_np = np.array(expected_col_to_row_match,
dtype=np.int32)
with self.cached_session():
distance_mat_tf = constant_op.constant(distance_mat_np,
shape=distance_mat_shape)
location_to_prior, prior_to_location = image_ops.bipartite_match(
distance_mat_tf, num_valid_rows)
location_to_prior_np = location_to_prior.eval()
prior_to_location_np = prior_to_location.eval()
self.assertAllEqual(location_to_prior_np, expected_row_to_col_match_np)
self.assertAllEqual(prior_to_location_np, expected_col_to_row_match_np)
def testBipartiteMatch(self):
distance_mat = [0.5, 0.8, 0.1,
0.3, 0.2, 0.15]
num_valid_rows = 2
expected_row_to_col_match = [2, 1]
expected_col_to_row_match = [-1, 1, 0]
self._BipartiteMatchTest(distance_mat, [2, 3], num_valid_rows,
expected_row_to_col_match,
expected_col_to_row_match)
# The case of num_valid_rows less than num-of-rows-in-distance-mat.
num_valid_rows = 1
expected_row_to_col_match = [2, -1]
expected_col_to_row_match = [-1, -1, 0]
self._BipartiteMatchTest(distance_mat, [2, 3], num_valid_rows,
expected_row_to_col_match,
expected_col_to_row_match)
# The case of num_valid_rows being 0.
num_valid_rows = 0
expected_row_to_col_match = [-1, -1]
expected_col_to_row_match = [-1, -1, -1]
self._BipartiteMatchTest(distance_mat, [2, 3], num_valid_rows,
expected_row_to_col_match,
expected_col_to_row_match)
    # The case of num_valid_rows being -1.
num_valid_rows = -1
# The expected results are the same as num_valid_rows being 2.
expected_row_to_col_match = [2, 1]
expected_col_to_row_match = [-1, 1, 0]
self._BipartiteMatchTest(distance_mat, [2, 3], num_valid_rows,
expected_row_to_col_match,
expected_col_to_row_match)
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/image/python/kernel_tests/image_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sparse_image_warp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.image.python.ops import sparse_image_warp
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.training import momentum
class SparseImageWarpTest(test_util.TensorFlowTestCase):
def setUp(self):
np.random.seed(0)
def testGetBoundaryLocations(self):
image_height = 11
image_width = 11
num_points_per_edge = 4
locs = sparse_image_warp._get_boundary_locations(image_height, image_width,
num_points_per_edge)
num_points = locs.shape[0]
self.assertEqual(num_points, 4 + 4 * num_points_per_edge)
locs = [(locs[i, 0], locs[i, 1]) for i in range(num_points)]
for i in (0, image_height - 1):
for j in (0, image_width - 1):
self.assertIn((i, j), locs, '{},{} not in the locations'.format(i, j))
for i in (2, 4, 6, 8):
for j in (0, image_width - 1):
self.assertIn((i, j), locs, '{},{} not in the locations'.format(i, j))
for i in (0, image_height - 1):
for j in (2, 4, 6, 8):
self.assertIn((i, j), locs, '{},{} not in the locations'.format(i, j))
def testGetGridLocations(self):
image_height = 5
image_width = 3
grid = sparse_image_warp._get_grid_locations(image_height, image_width)
for i in range(image_height):
for j in range(image_width):
self.assertEqual(grid[i, j, 0], i)
self.assertEqual(grid[i, j, 1], j)
def testZeroShift(self):
"""Run assertZeroShift for various hyperparameters."""
for order in (1, 2):
for regularization in (0, 0.01):
for num_boundary_points in (0, 1):
self.assertZeroShift(order, regularization, num_boundary_points)
def assertZeroShift(self, order, regularization, num_boundary_points):
"""Check that warping with zero displacements doesn't change the image."""
batch_size = 1
image_height = 4
image_width = 4
channels = 3
image = np.random.uniform(
size=[batch_size, image_height, image_width, channels])
input_image_op = constant_op.constant(np.float32(image))
control_point_locations = [[1., 1.], [2., 2.], [2., 1.]]
control_point_locations = constant_op.constant(
np.float32(np.expand_dims(control_point_locations, 0)))
control_point_displacements = np.zeros(
control_point_locations.shape.as_list())
control_point_displacements = constant_op.constant(
np.float32(control_point_displacements))
(warped_image_op, flow_field) = sparse_image_warp.sparse_image_warp(
input_image_op,
control_point_locations,
control_point_locations + control_point_displacements,
interpolation_order=order,
regularization_weight=regularization,
num_boundary_points=num_boundary_points)
with self.cached_session() as sess:
warped_image, input_image, _ = sess.run(
[warped_image_op, input_image_op, flow_field])
self.assertAllClose(warped_image, input_image)
def testMoveSinglePixel(self):
"""Run assertMoveSinglePixel for various hyperparameters and data types."""
for order in (1, 2):
for num_boundary_points in (1, 2):
for type_to_use in (dtypes.float32, dtypes.float64):
self.assertMoveSinglePixel(order, num_boundary_points, type_to_use)
def assertMoveSinglePixel(self, order, num_boundary_points, type_to_use):
"""Move a single block in a small grid using warping."""
batch_size = 1
image_height = 7
image_width = 7
channels = 3
image = np.zeros([batch_size, image_height, image_width, channels])
image[:, 3, 3, :] = 1.0
input_image_op = constant_op.constant(image, dtype=type_to_use)
# Place a control point at the one white pixel.
control_point_locations = [[3., 3.]]
control_point_locations = constant_op.constant(
np.float32(np.expand_dims(control_point_locations, 0)),
dtype=type_to_use)
# Shift it one pixel to the right.
control_point_displacements = [[0., 1.0]]
control_point_displacements = constant_op.constant(
np.float32(np.expand_dims(control_point_displacements, 0)),
dtype=type_to_use)
(warped_image_op, flow_field) = sparse_image_warp.sparse_image_warp(
input_image_op,
control_point_locations,
control_point_locations + control_point_displacements,
interpolation_order=order,
num_boundary_points=num_boundary_points)
with self.cached_session() as sess:
warped_image, input_image, flow = sess.run(
[warped_image_op, input_image_op, flow_field])
# Check that it moved the pixel correctly.
self.assertAllClose(
warped_image[0, 4, 5, :],
input_image[0, 4, 4, :],
atol=1e-5,
rtol=1e-5)
# Test that there is no flow at the corners.
for i in (0, image_height - 1):
for j in (0, image_width - 1):
self.assertAllClose(
flow[0, i, j, :], np.zeros([2]), atol=1e-5, rtol=1e-5)
def load_image(self, image_file, sess):
image_op = image_ops.decode_png(
io_ops.read_file(image_file), dtype=dtypes.uint8, channels=4)[:, :, 0:3]
return sess.run(image_op)
def testSmileyFace(self):
"""Check warping accuracy by comparing to hardcoded warped images."""
test_data_dir = test.test_src_dir_path('contrib/image/python/'
'kernel_tests/test_data/')
input_file = test_data_dir + 'Yellow_Smiley_Face.png'
with self.cached_session() as sess:
input_image = self.load_image(input_file, sess)
control_points = np.asarray([[64, 59], [180 - 64, 59], [39, 111],
[180 - 39, 111], [90, 143], [58, 134],
[180 - 58, 134]]) # pyformat: disable
control_point_displacements = np.asarray(
[[-10.5, 10.5], [10.5, 10.5], [0, 0], [0, 0], [0, -10], [-20, 10.25],
[10, 10.75]])
control_points_op = constant_op.constant(
np.expand_dims(np.float32(control_points[:, [1, 0]]), 0))
control_point_displacements_op = constant_op.constant(
np.expand_dims(np.float32(control_point_displacements[:, [1, 0]]), 0))
float_image = np.expand_dims(np.float32(input_image) / 255, 0)
input_image_op = constant_op.constant(float_image)
for interpolation_order in (1, 2, 3):
for num_boundary_points in (0, 1, 4):
warp_op, _ = sparse_image_warp.sparse_image_warp(
input_image_op,
control_points_op,
control_points_op + control_point_displacements_op,
interpolation_order=interpolation_order,
num_boundary_points=num_boundary_points)
with self.cached_session() as sess:
warped_image = sess.run(warp_op)
out_image = np.uint8(warped_image[0, :, :, :] * 255)
target_file = (
test_data_dir +
'Yellow_Smiley_Face_Warp-interp' + '-{}-clamp-{}.png'.format(
interpolation_order, num_boundary_points))
target_image = self.load_image(target_file, sess)
# Check that the target_image and out_image difference is no
# bigger than 2 (on a scale of 0-255). Due to differences in
# floating point computation on different devices, the float
# output in warped_image may get rounded to a different int
# than that in the saved png file loaded into target_image.
self.assertAllClose(target_image, out_image, atol=2, rtol=1e-3)
def testThatBackpropRuns(self):
"""Run optimization to ensure that gradients can be computed."""
batch_size = 1
image_height = 9
image_width = 12
image = variables.Variable(
np.float32(
np.random.uniform(size=[batch_size, image_height, image_width, 3])))
control_point_locations = [[3., 3.]]
control_point_locations = constant_op.constant(
np.float32(np.expand_dims(control_point_locations, 0)))
control_point_displacements = [[0.25, -0.5]]
control_point_displacements = constant_op.constant(
np.float32(np.expand_dims(control_point_displacements, 0)))
warped_image, _ = sparse_image_warp.sparse_image_warp(
image,
control_point_locations,
control_point_locations + control_point_displacements,
num_boundary_points=3)
loss = math_ops.reduce_mean(math_ops.abs(warped_image - image))
optimizer = momentum.MomentumOptimizer(0.001, 0.9)
grad = gradients.gradients(loss, [image])
grad, _ = clip_ops.clip_by_global_norm(grad, 1.0)
opt_func = optimizer.apply_gradients(zip(grad, [image]))
init_op = variables.global_variables_initializer()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(5):
sess.run([loss, opt_func])
if __name__ == '__main__':
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/image/python/kernel_tests/sparse_image_warp_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python layer for image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.image.ops import gen_single_image_random_dot_stereograms_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
_sirds_ops = loader.load_op_library(
resource_loader.get_path_to_datafile(
"_single_image_random_dot_stereograms.so"))
def single_image_random_dot_stereograms(depth_values,
hidden_surface_removal=None,
convergence_dots_size=None,
dots_per_inch=None,
eye_separation=None,
mu=None,
normalize=None,
normalize_max=None,
normalize_min=None,
border_level=None,
number_colors=None,
output_image_shape=None,
output_data_window=None):
"""Output a RandomDotStereogram Tensor for export via encode_PNG/JPG OP.
Given the 2-D tensor 'depth_values' with encoded Z values, this operation
will encode 3-D data into a 2-D image. The output of this Op is suitable
for the encode_PNG/JPG ops. Be careful with image compression as this may
  corrupt the encoded 3-D data within the image.
Based upon [this
paper](https://www.cs.waikato.ac.nz/~ihw/papers/94-HWT-SI-IHW-SIRDS-paper.pdf).
This outputs a SIRDS image as picture_out.png:
```python
img=[[1,2,3,3,2,1],
[1,2,3,4,5,2],
[1,2,3,4,5,3],
[1,2,3,4,5,4],
[6,5,4,4,5,5]]
session = tf.compat.v1.InteractiveSession()
sirds = single_image_random_dot_stereograms(
img,
convergence_dots_size=8,
number_colors=256,normalize=True)
out = sirds.eval()
png = tf.image.encode_png(out).eval()
with open('picture_out.png', 'wb') as f:
f.write(png)
```
Args:
depth_values: A `Tensor`. Must be one of the following types:
`float64`, `float32`, `int64`, `int32`. Z values of data to encode
      into the 'output_data_window' window; lower values are further away
      {0.0 floor (far), 1.0 ceiling (near) after normalization}. Must be a
      2-D tensor.
hidden_surface_removal: An optional `bool`. Defaults to `True`.
Activate hidden surface removal
convergence_dots_size: An optional `int`. Defaults to `8`.
Black dot size in pixels to help view converge image, drawn on bottom
of the image
dots_per_inch: An optional `int`. Defaults to `72`.
Output device in dots/inch
eye_separation: An optional `float`. Defaults to `2.5`.
Separation between eyes in inches
mu: An optional `float`. Defaults to `0.3333`.
      Depth of field, fraction of viewing distance (e.g. 1/3 = 0.3333)
normalize: An optional `bool`. Defaults to `True`.
Normalize input data to [0.0, 1.0]
normalize_max: An optional `float`. Defaults to `-100`.
Fix MAX value for Normalization (0.0) - if < MIN, autoscale
normalize_min: An optional `float`. Defaults to `100`.
Fix MIN value for Normalization (0.0) - if > MAX, autoscale
border_level: An optional `float`. Defaults to `0`.
      Value of border depth, from 0.0 {far} to 1.0 {near}
number_colors: An optional `int`. Defaults to `256`. 2 (Black &
White), 256 (grayscale), and Numbers > 256 (Full Color) are
supported
output_image_shape: An optional `tf.TensorShape` or list of `ints`.
Defaults to shape `[1024, 768, 1]`. Defines output shape of returned
image in '[X, Y, Channels]' (1 for grayscale, 3 for color); channels will
be set to 3 if number_colors > 256.
output_data_window: An optional `tf.TensorShape` or list of `ints`.
Defaults to `[1022, 757]`. Size of the "DATA" window, which must be equal
to or smaller than `output_image_shape`. The window is centered and uses
`convergence_dots_size` for best fit to avoid overlap if possible.
Returns:
A `Tensor` of type `uint8` of shape 'output_image_shape' with encoded
'depth_values'
"""
result = gen_single_image_random_dot_stereograms_ops.single_image_random_dot_stereograms( # pylint: disable=line-too-long
depth_values=depth_values,
hidden_surface_removal=hidden_surface_removal,
convergence_dots_size=convergence_dots_size,
dots_per_inch=dots_per_inch,
eye_separation=eye_separation,
mu=mu,
normalize=normalize,
normalize_max=normalize_max,
normalize_min=normalize_min,
border_level=border_level,
number_colors=number_colors,
output_image_shape=output_image_shape,
output_data_window=output_data_window)
return result
ops.NotDifferentiable("SingleImageRandomDotStereograms")
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/image/python/ops/single_image_random_dot_stereograms.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image warping using sparse flow defined at control points."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.image.python.ops import dense_image_warp
from tensorflow.contrib.image.python.ops import interpolate_spline
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
def _get_grid_locations(image_height, image_width):
"""Wrapper for np.meshgrid."""
y_range = np.linspace(0, image_height - 1, image_height)
x_range = np.linspace(0, image_width - 1, image_width)
y_grid, x_grid = np.meshgrid(y_range, x_range, indexing='ij')
return np.stack((y_grid, x_grid), -1)
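# Small numeric check (added commentary, not part of the original file): the
# helper above returns an array of shape [image_height, image_width, 2] whose
# last axis holds (y, x) pixel coordinates.
#
#   locs = _get_grid_locations(2, 3)
#   assert locs.shape == (2, 3, 2)
#   assert (locs[1, 2] == [1.0, 2.0]).all()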
def _expand_to_minibatch(np_array, batch_size):
"""Tile arbitrarily-sized np_array to include new batch dimension."""
tiles = [batch_size] + [1] * np_array.ndim
return np.tile(np.expand_dims(np_array, 0), tiles)
def _get_boundary_locations(image_height, image_width, num_points_per_edge):
"""Compute evenly-spaced indices along edge of image."""
y_range = np.linspace(0, image_height - 1, num_points_per_edge + 2)
x_range = np.linspace(0, image_width - 1, num_points_per_edge + 2)
ys, xs = np.meshgrid(y_range, x_range, indexing='ij')
is_boundary = np.logical_or(
np.logical_or(xs == 0, xs == image_width - 1),
np.logical_or(ys == 0, ys == image_height - 1))
return np.stack([ys[is_boundary], xs[is_boundary]], axis=-1)
def _add_zero_flow_controls_at_boundary(control_point_locations,
control_point_flows, image_height,
image_width, boundary_points_per_edge):
"""Add control points for zero-flow boundary conditions.
Augment the set of control points with extra points on the
boundary of the image that have zero flow.
Args:
control_point_locations: input control points
control_point_flows: their flows
image_height: image height
image_width: image width
boundary_points_per_edge: number of points to add in the middle of each
edge (not including the corners).
The total number of points added is
4 + 4*(boundary_points_per_edge).
Returns:
merged_control_point_locations: augmented set of control point locations
merged_control_point_flows: augmented set of control point flows
"""
batch_size = tensor_shape.dimension_value(control_point_locations.shape[0])
boundary_point_locations = _get_boundary_locations(image_height, image_width,
boundary_points_per_edge)
boundary_point_flows = np.zeros([boundary_point_locations.shape[0], 2])
type_to_use = control_point_locations.dtype
boundary_point_locations = constant_op.constant(
_expand_to_minibatch(boundary_point_locations, batch_size),
dtype=type_to_use)
boundary_point_flows = constant_op.constant(
_expand_to_minibatch(boundary_point_flows, batch_size), dtype=type_to_use)
merged_control_point_locations = array_ops.concat(
[control_point_locations, boundary_point_locations], 1)
merged_control_point_flows = array_ops.concat(
[control_point_flows, boundary_point_flows], 1)
return merged_control_point_locations, merged_control_point_flows
def sparse_image_warp(image,
source_control_point_locations,
dest_control_point_locations,
interpolation_order=2,
regularization_weight=0.0,
num_boundary_points=0,
name='sparse_image_warp'):
"""Image warping using correspondences between sparse control points.
Apply a non-linear warp to the image, where the warp is specified by
the source and destination locations of a (potentially small) number of
control points. First, we use a polyharmonic spline
(`tf.contrib.image.interpolate_spline`) to interpolate the displacements
between the corresponding control points to a dense flow field.
Then, we warp the image using this dense flow field
(`tf.contrib.image.dense_image_warp`).
Let t index our control points. For regularization_weight=0, we have:
warped_image[b, dest_control_point_locations[b, t, 0],
dest_control_point_locations[b, t, 1], :] =
image[b, source_control_point_locations[b, t, 0],
source_control_point_locations[b, t, 1], :].
For regularization_weight > 0, this condition is met approximately, since
regularized interpolation trades off smoothness of the interpolant vs.
reconstruction of the interpolant at the control points.
See `tf.contrib.image.interpolate_spline` for further documentation of the
interpolation_order and regularization_weight arguments.
Args:
image: `[batch, height, width, channels]` float `Tensor`
source_control_point_locations: `[batch, num_control_points, 2]` float
`Tensor`
dest_control_point_locations: `[batch, num_control_points, 2]` float
`Tensor`
interpolation_order: polynomial order used by the spline interpolation
regularization_weight: weight on smoothness regularizer in interpolation
num_boundary_points: How many zero-flow boundary points to include at
each image edge. Usage:
num_boundary_points=0: don't add zero-flow points
num_boundary_points=1: 4 corners of the image
num_boundary_points=2: 4 corners and one in the middle of each edge
(8 points total)
num_boundary_points=n: 4 corners and n-1 along each edge
name: A name for the operation (optional).
Note that image and offsets can be of type tf.half, tf.float32, or
tf.float64, and do not necessarily have to be the same type.
Returns:
warped_image: `[batch, height, width, channels]` float `Tensor` with same
type as input image.
flow_field: `[batch, height, width, 2]` float `Tensor` containing the dense
flow field produced by the interpolation.
"""
image = ops.convert_to_tensor(image)
source_control_point_locations = ops.convert_to_tensor(
source_control_point_locations)
dest_control_point_locations = ops.convert_to_tensor(
dest_control_point_locations)
control_point_flows = (
dest_control_point_locations - source_control_point_locations)
clamp_boundaries = num_boundary_points > 0
boundary_points_per_edge = num_boundary_points - 1
with ops.name_scope(name):
batch_size, image_height, image_width, _ = image.get_shape().as_list()
# This generates the dense locations where the interpolant
# will be evaluated.
grid_locations = _get_grid_locations(image_height, image_width)
flattened_grid_locations = np.reshape(grid_locations,
[image_height * image_width, 2])
flattened_grid_locations = constant_op.constant(
_expand_to_minibatch(flattened_grid_locations, batch_size), image.dtype)
if clamp_boundaries:
(dest_control_point_locations,
control_point_flows) = _add_zero_flow_controls_at_boundary(
dest_control_point_locations, control_point_flows, image_height,
image_width, boundary_points_per_edge)
flattened_flows = interpolate_spline.interpolate_spline(
dest_control_point_locations, control_point_flows,
flattened_grid_locations, interpolation_order, regularization_weight)
dense_flows = array_ops.reshape(flattened_flows,
[batch_size, image_height, image_width, 2])
warped_image = dense_image_warp.dense_image_warp(image, dense_flows)
return warped_image, dense_flows
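# Illustrative usage sketch (added commentary, not part of the original file).
# Pulls a single control point 5 pixels down and to the right, with the four
# image corners pinned via num_boundary_points=1; tensor values are
# hypothetical and assume TF 1.x graph mode.
#
#   import numpy as np
#   import tensorflow as tf
#   image = tf.constant(np.random.rand(1, 64, 64, 3), dtype=tf.float32)
#   src = tf.constant([[[32.0, 32.0]]])  # [batch, num_points, (row, col)]
#   dst = tf.constant([[[37.0, 37.0]]])
#   warped, flow = sparse_image_warp(
#       image, src, dst, interpolation_order=2, num_boundary_points=1)
#   with tf.compat.v1.Session() as sess:
#     warped_np, flow_np = sess.run([warped, flow])
#   # warped_np.shape == (1, 64, 64, 3); flow_np.shape == (1, 64, 64, 2)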
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/image/python/ops/sparse_image_warp.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image warping using per-pixel flow vectors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
def _interpolate_bilinear(grid,
query_points,
name='interpolate_bilinear',
indexing='ij'):
"""Similar to Matlab's interp2 function.
Finds values for query points on a grid using bilinear interpolation.
Args:
grid: a 4-D float `Tensor` of shape `[batch, height, width, channels]`.
query_points: a 3-D float `Tensor` of N points with shape `[batch, N, 2]`.
name: a name for the operation (optional).
indexing: whether the query points are specified as row and column (ij),
or Cartesian coordinates (xy).
Returns:
values: a 3-D `Tensor` with shape `[batch, N, channels]`
Raises:
ValueError: if the indexing mode is invalid, or if the shape of the inputs
is invalid.
"""
if indexing != 'ij' and indexing != 'xy':
raise ValueError('Indexing mode must be \'ij\' or \'xy\'')
with ops.name_scope(name):
grid = ops.convert_to_tensor(grid)
query_points = ops.convert_to_tensor(query_points)
shape = grid.get_shape().as_list()
if len(shape) != 4:
msg = 'Grid must be 4 dimensional. Received size: '
raise ValueError(msg + str(grid.get_shape()))
batch_size, height, width, channels = (array_ops.shape(grid)[0],
array_ops.shape(grid)[1],
array_ops.shape(grid)[2],
array_ops.shape(grid)[3])
shape = [batch_size, height, width, channels]
query_type = query_points.dtype
grid_type = grid.dtype
with ops.control_dependencies([
check_ops.assert_equal(
len(query_points.get_shape()),
3,
message='Query points must be 3 dimensional.'),
check_ops.assert_equal(
array_ops.shape(query_points)[2],
2,
message='Query points must be size 2 in dim 2.')
]):
num_queries = array_ops.shape(query_points)[1]
with ops.control_dependencies([
check_ops.assert_greater_equal(
height, 2, message='Grid height must be at least 2.'),
check_ops.assert_greater_equal(
width, 2, message='Grid width must be at least 2.')
]):
alphas = []
floors = []
ceils = []
index_order = [0, 1] if indexing == 'ij' else [1, 0]
unstacked_query_points = array_ops.unstack(query_points, axis=2)
for dim in index_order:
with ops.name_scope('dim-' + str(dim)):
queries = unstacked_query_points[dim]
size_in_indexing_dimension = shape[dim + 1]
# max_floor is size_in_indexing_dimension - 2 so that max_floor + 1
# is still a valid index into the grid.
max_floor = math_ops.cast(size_in_indexing_dimension - 2, query_type)
min_floor = constant_op.constant(0.0, dtype=query_type)
floor = math_ops.minimum(
math_ops.maximum(min_floor, math_ops.floor(queries)), max_floor)
int_floor = math_ops.cast(floor, dtypes.int32)
floors.append(int_floor)
ceil = int_floor + 1
ceils.append(ceil)
# alpha has the same type as the grid, as we will directly use alpha
# when taking linear combinations of pixel values from the image.
alpha = math_ops.cast(queries - floor, grid_type)
min_alpha = constant_op.constant(0.0, dtype=grid_type)
max_alpha = constant_op.constant(1.0, dtype=grid_type)
alpha = math_ops.minimum(math_ops.maximum(min_alpha, alpha), max_alpha)
# Expand alpha to [b, n, 1] so we can use broadcasting
# (since the alpha values don't depend on the channel).
alpha = array_ops.expand_dims(alpha, 2)
alphas.append(alpha)
with ops.control_dependencies([
check_ops.assert_less_equal(
math_ops.cast(batch_size * height * width, dtype=dtypes.float32),
np.iinfo(np.int32).max / 8,
message="""The image size or batch size is sufficiently large
that the linearized addresses used by array_ops.gather
may exceed the int32 limit.""")
]):
flattened_grid = array_ops.reshape(
grid, [batch_size * height * width, channels])
batch_offsets = array_ops.reshape(
math_ops.range(batch_size) * height * width, [batch_size, 1])
# This wraps array_ops.gather. We reshape the image data such that the
# batch, y, and x coordinates are pulled into the first dimension.
# Then we gather. Finally, we reshape the output back. It's possible this
# code would be made simpler by using array_ops.gather_nd.
def gather(y_coords, x_coords, name):
with ops.name_scope('gather-' + name):
linear_coordinates = batch_offsets + y_coords * width + x_coords
gathered_values = array_ops.gather(flattened_grid, linear_coordinates)
return array_ops.reshape(gathered_values,
[batch_size, num_queries, channels])
# grab the pixel values in the 4 corners around each query point
top_left = gather(floors[0], floors[1], 'top_left')
top_right = gather(floors[0], ceils[1], 'top_right')
bottom_left = gather(ceils[0], floors[1], 'bottom_left')
bottom_right = gather(ceils[0], ceils[1], 'bottom_right')
# now, do the actual interpolation
with ops.name_scope('interpolate'):
interp_top = alphas[1] * (top_right - top_left) + top_left
interp_bottom = alphas[1] * (bottom_right - bottom_left) + bottom_left
interp = alphas[0] * (interp_bottom - interp_top) + interp_top
return interp
def dense_image_warp(image, flow, name='dense_image_warp'):
"""Image warping using per-pixel flow vectors.
Apply a non-linear warp to the image, where the warp is specified by a dense
flow field of offset vectors that define the correspondences of pixel values
in the output image back to locations in the source image. Specifically, the
pixel value at output[b, j, i, c] is
images[b, j - flow[b, j, i, 0], i - flow[b, j, i, 1], c].
The locations specified by this formula do not necessarily map to an int
index. Therefore, the pixel value is obtained by bilinear
interpolation of the 4 nearest pixels around
(b, j - flow[b, j, i, 0], i - flow[b, j, i, 1]). For locations outside
of the image, we use the nearest pixel values at the image boundary.
Args:
image: 4-D float `Tensor` with shape `[batch, height, width, channels]`.
flow: A 4-D float `Tensor` with shape `[batch, height, width, 2]`.
name: A name for the operation (optional).
Note that image and flow can be of type tf.half, tf.float32, or tf.float64,
and do not necessarily have to be the same type.
Returns:
A 4-D float `Tensor` with shape`[batch, height, width, channels]`
and same type as input image.
Raises:
ValueError: if height < 2 or width < 2 or the inputs have the wrong number
of dimensions.
"""
with ops.name_scope(name):
batch_size, height, width, channels = (array_ops.shape(image)[0],
array_ops.shape(image)[1],
array_ops.shape(image)[2],
array_ops.shape(image)[3])
# The flow is defined on the image grid. Turn the flow into a list of query
# points in the grid space.
grid_x, grid_y = array_ops.meshgrid(
math_ops.range(width), math_ops.range(height))
stacked_grid = math_ops.cast(
array_ops.stack([grid_y, grid_x], axis=2), flow.dtype)
batched_grid = array_ops.expand_dims(stacked_grid, axis=0)
query_points_on_grid = batched_grid - flow
query_points_flattened = array_ops.reshape(query_points_on_grid,
[batch_size, height * width, 2])
# Compute values at the query points, then reshape the result back to the
# image grid.
interpolated = _interpolate_bilinear(image, query_points_flattened)
interpolated = array_ops.reshape(interpolated,
[batch_size, height, width, channels])
return interpolated
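# Illustrative usage sketch (added commentary, not part of the original file).
# A zero flow field returns the input unchanged up to interpolation error;
# tensor values are hypothetical and assume TF 1.x graph mode.
#
#   import numpy as np
#   import tensorflow as tf
#   image = tf.constant(np.random.rand(2, 8, 8, 3), dtype=tf.float32)
#   flow = tf.zeros([2, 8, 8, 2])
#   warped = dense_image_warp(image, flow)
#   with tf.compat.v1.Session() as sess:
#     out = sess.run(warped)  # out.shape == (2, 8, 8, 3)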
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/image/python/ops/dense_image_warp.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Polyharmonic spline interpolation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
EPSILON = 0.0000000001
def _cross_squared_distance_matrix(x, y):
"""Pairwise squared distance between two (batch) matrices' rows (2nd dim).
Computes the pairwise distances between rows of x and rows of y
Args:
x: [batch_size, n, d] float `Tensor`
y: [batch_size, m, d] float `Tensor`
Returns:
squared_dists: [batch_size, n, m] float `Tensor`, where
squared_dists[b,i,j] = ||x[b,i,:] - y[b,j,:]||^2
"""
x_norm_squared = math_ops.reduce_sum(math_ops.square(x), 2)
y_norm_squared = math_ops.reduce_sum(math_ops.square(y), 2)
# Expand so that we can broadcast.
x_norm_squared_tile = array_ops.expand_dims(x_norm_squared, 2)
y_norm_squared_tile = array_ops.expand_dims(y_norm_squared, 1)
x_y_transpose = math_ops.matmul(x, y, adjoint_b=True)
# squared_dists[b,i,j] = ||x_bi - y_bj||^2 = x_bi'x_bi - 2x_bi'y_bj + y_bj'y_bj
squared_dists = x_norm_squared_tile - 2 * x_y_transpose + y_norm_squared_tile
return squared_dists
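# Small numeric check (added commentary, not part of the original file): for a
# single batch with x = [[0, 0], [1, 0]] and y = [[0, 3]], the squared
# distances are [[9], [10]].
#
#   import tensorflow as tf
#   x = tf.constant([[[0.0, 0.0], [1.0, 0.0]]])  # [1, 2, 2]
#   y = tf.constant([[[0.0, 3.0]]])              # [1, 1, 2]
#   d2 = _cross_squared_distance_matrix(x, y)    # evaluates to [[[9.], [10.]]]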
def _pairwise_squared_distance_matrix(x):
"""Pairwise squared distance among a (batch) matrix's rows (2nd dim).
This saves a bit of computation vs. using _cross_squared_distance_matrix(x,x)
Args:
x: `[batch_size, n, d]` float `Tensor`
Returns:
squared_dists: `[batch_size, n, n]` float `Tensor`, where
squared_dists[b,i,j] = ||x[b,i,:] - x[b,j,:]||^2
"""
x_x_transpose = math_ops.matmul(x, x, adjoint_b=True)
x_norm_squared = array_ops.matrix_diag_part(x_x_transpose)
x_norm_squared_tile = array_ops.expand_dims(x_norm_squared, 2)
# squared_dists[b,i,j] = ||x_bi - x_bj||^2 = x_bi'x_bi- 2x_bi'x_bj + x_bj'x_bj
squared_dists = x_norm_squared_tile - 2 * x_x_transpose + array_ops.transpose(
x_norm_squared_tile, [0, 2, 1])
return squared_dists
def _solve_interpolation(train_points, train_values, order,
regularization_weight):
"""Solve for interpolation coefficients.
Computes the coefficients of the polyharmonic interpolant for the 'training'
data defined by (train_points, train_values) using the kernel phi.
Args:
train_points: `[b, n, d]` interpolation centers
train_values: `[b, n, k]` function values
order: order of the interpolation
regularization_weight: weight to place on smoothness regularization term
Returns:
w: `[b, n, k]` weights on each interpolation center
v: `[b, d, k]` weights on each input dimension
Raises:
ValueError: if d or k is not fully specified.
"""
# These dimensions are set dynamically at runtime.
b, n, _ = array_ops.unstack(array_ops.shape(train_points), num=3)
d = train_points.shape[-1]
if tensor_shape.dimension_value(d) is None:
raise ValueError('The dimensionality of the input points (d) must be '
'statically-inferrable.')
k = train_values.shape[-1]
if tensor_shape.dimension_value(k) is None:
raise ValueError('The dimensionality of the output values (k) must be '
'statically-inferrable.')
# First, rename variables so that the notation (c, f, w, v, A, B, etc.)
# follows https://en.wikipedia.org/wiki/Polyharmonic_spline.
# To account for python style guidelines we use
# matrix_a for A and matrix_b for B.
c = train_points
f = train_values
# Next, construct the linear system.
with ops.name_scope('construct_linear_system'):
matrix_a = _phi(_pairwise_squared_distance_matrix(c), order) # [b, n, n]
if regularization_weight > 0:
batch_identity_matrix = array_ops.expand_dims(
linalg_ops.eye(n, dtype=c.dtype), 0)
matrix_a += regularization_weight * batch_identity_matrix
# Append ones to the feature values for the bias term in the linear model.
ones = array_ops.ones_like(c[..., :1], dtype=c.dtype)
matrix_b = array_ops.concat([c, ones], 2) # [b, n, d + 1]
# [b, n + d + 1, n]
left_block = array_ops.concat(
[matrix_a, array_ops.transpose(matrix_b, [0, 2, 1])], 1)
num_b_cols = matrix_b.get_shape()[2] # d + 1
lhs_zeros = array_ops.zeros([b, num_b_cols, num_b_cols], train_points.dtype)
right_block = array_ops.concat([matrix_b, lhs_zeros],
1) # [b, n + d + 1, d + 1]
lhs = array_ops.concat([left_block, right_block],
2) # [b, n + d + 1, n + d + 1]
rhs_zeros = array_ops.zeros([b, d + 1, k], train_points.dtype)
rhs = array_ops.concat([f, rhs_zeros], 1) # [b, n + d + 1, k]
# Then, solve the linear system and unpack the results.
with ops.name_scope('solve_linear_system'):
w_v = linalg_ops.matrix_solve(lhs, rhs)
w = w_v[:, :n, :]
v = w_v[:, n:, :]
return w, v
def _apply_interpolation(query_points, train_points, w, v, order):
"""Apply polyharmonic interpolation model to data.
Given coefficients w and v for the interpolation model, we evaluate
interpolated function values at query_points.
Args:
query_points: `[b, m, d]` x values to evaluate the interpolation at
train_points: `[b, n, d]` x values that act as the interpolation centers
(the c variables in the Wikipedia article)
w: `[b, n, k]` weights on each interpolation center
v: `[b, d, k]` weights on each input dimension
order: order of the interpolation
Returns:
Polyharmonic interpolation evaluated at points defined in query_points.
"""
# First, compute the contribution from the rbf term.
pairwise_dists = _cross_squared_distance_matrix(query_points, train_points)
phi_pairwise_dists = _phi(pairwise_dists, order)
rbf_term = math_ops.matmul(phi_pairwise_dists, w)
# Then, compute the contribution from the linear term.
# Pad query_points with ones, for the bias term in the linear model.
query_points_pad = array_ops.concat([
query_points,
array_ops.ones_like(query_points[..., :1], train_points.dtype)
], 2)
linear_term = math_ops.matmul(query_points_pad, v)
return rbf_term + linear_term
def _phi(r, order):
"""Coordinate-wise nonlinearity used to define the order of the interpolation.
See https://en.wikipedia.org/wiki/Polyharmonic_spline for the definition.
Args:
r: input op
order: interpolation order
Returns:
phi_k evaluated coordinate-wise on r, for k = order
"""
# using EPSILON prevents log(0), sqrt(0), etc.
# sqrt(0) is well-defined, but its gradient is not
with ops.name_scope('phi'):
if order == 1:
r = math_ops.maximum(r, EPSILON)
r = math_ops.sqrt(r)
return r
elif order == 2:
return 0.5 * r * math_ops.log(math_ops.maximum(r, EPSILON))
elif order == 4:
return 0.5 * math_ops.square(r) * math_ops.log(
math_ops.maximum(r, EPSILON))
elif order % 2 == 0:
r = math_ops.maximum(r, EPSILON)
return 0.5 * math_ops.pow(r, 0.5 * order) * math_ops.log(r)
else:
r = math_ops.maximum(r, EPSILON)
return math_ops.pow(r, 0.5 * order)
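# Small numeric check (added commentary, not part of the original file): _phi
# receives *squared* distances, so for the thin-plate case (order=2) the value
# 0.5 * r * log(r) equals d^2 * log(d) when written in terms of the distance d.
#
#   import tensorflow as tf
#   r = tf.constant([[4.0]])   # squared distance, i.e. d = 2
#   val = _phi(r, order=2)     # 0.5 * 4 * log(4) = 4 * log(2)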
def interpolate_spline(train_points,
train_values,
query_points,
order,
regularization_weight=0.0,
name='interpolate_spline'):
r"""Interpolate signal using polyharmonic interpolation.
The interpolant has the form
$$f(x) = \sum_{i = 1}^n w_i \phi(||x - c_i||) + v^T x + b.$$
This is a sum of two terms: (1) a weighted sum of radial basis function (RBF)
terms, with the centers \\(c_1, ... c_n\\), and (2) a linear term with a bias.
The \\(c_i\\) vectors are 'training' points. In the code, b is absorbed into v
by appending 1 as a final dimension to x. The coefficients w and v are
estimated such that the interpolant exactly fits the value of the function at
the \\(c_i\\) points, the vector w is orthogonal to each \\(c_i\\), and the
vector w sums to 0. With these constraints, the coefficients can be obtained
by solving a linear system.
\\(\phi\\) is an RBF, parametrized by an interpolation
order. Using order=2 produces the well-known thin-plate spline.
We also provide the option to perform regularized interpolation. Here, the
interpolant is selected to trade off between the squared loss on the training
data and a certain measure of its curvature
([details](https://en.wikipedia.org/wiki/Polyharmonic_spline)).
Using a regularization weight greater than zero has the effect that the
interpolant will no longer exactly fit the training data. However, it may be
less vulnerable to overfitting, particularly for high-order interpolation.
Note the interpolation procedure is differentiable with respect to all inputs
besides the order parameter.
We support dynamically-shaped inputs, where batch_size, n, and m are None
at graph construction time. However, d and k must be known.
Args:
train_points: `[batch_size, n, d]` float `Tensor` of n d-dimensional
locations. These do not need to be regularly-spaced.
train_values: `[batch_size, n, k]` float `Tensor` of n k-dimensional values
evaluated at train_points.
query_points: `[batch_size, m, d]` `Tensor` of m d-dimensional locations
where we will output the interpolant's values.
order: order of the interpolation. Common values are 1 for
\\(\phi(r) = r\\), 2 for \\(\phi(r) = r^2 * log(r)\\) (thin-plate spline),
or 3 for \\(\phi(r) = r^3\\).
regularization_weight: weight placed on the regularization term.
This will depend substantially on the problem, and it should always be
tuned. For many problems, it is reasonable to use no regularization.
If using a non-zero value, we recommend a small value like 0.001.
name: name prefix for ops created by this function
Returns:
`[b, m, k]` float `Tensor` of query values. We use train_points and
train_values to perform polyharmonic interpolation. The query values are
the values of the interpolant evaluated at the locations specified in
query_points.
"""
with ops.name_scope(name):
train_points = ops.convert_to_tensor(train_points)
train_values = ops.convert_to_tensor(train_values)
query_points = ops.convert_to_tensor(query_points)
# First, fit the spline to the observed data.
with ops.name_scope('solve'):
w, v = _solve_interpolation(train_points, train_values, order,
regularization_weight)
# Then, evaluate the spline at the query locations.
with ops.name_scope('predict'):
query_values = _apply_interpolation(query_points, train_points, w, v,
order)
return query_values
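# Illustrative usage sketch (added commentary, not part of the original file).
# Fits a 1-D spline through three points and queries it at a new location;
# values are hypothetical and assume TF 1.x graph mode.
#
#   import tensorflow as tf
#   train_points = tf.constant([[[0.0], [1.0], [2.0]]])  # [1, 3, 1]
#   train_values = tf.constant([[[0.0], [1.0], [4.0]]])  # [1, 3, 1]
#   query_points = tf.constant([[[1.5]]])                # [1, 1, 1]
#   query_values = interpolate_spline(
#       train_points, train_values, query_points, order=2)
#   with tf.compat.v1.Session() as sess:
#     print(sess.run(query_values))  # a [1, 1, 1] tensor of interpolated values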
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/image/python/ops/interpolate_spline.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python layer for image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.contrib.image.ops import gen_image_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import resource_loader
_image_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_image_ops.so"))
_IMAGE_DTYPES = set(
[dtypes.uint8, dtypes.int32, dtypes.int64,
dtypes.float16, dtypes.float32, dtypes.float64])
ops.RegisterShape("ImageConnectedComponents")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ImageProjectiveTransform")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ImageProjectiveTransformV2")(common_shapes.call_cpp_shape_fn)
# TODO(ringwalt): Support a "reshape" (name used by SciPy) or "expand" (name
# used by PIL, maybe more readable) mode, which determines the correct
# output_shape and translation for the transform.
def rotate(images, angles, interpolation="NEAREST", name=None):
"""Rotate image(s) counterclockwise by the passed angle(s) in radians.
Args:
images: A tensor of shape (num_images, num_rows, num_columns, num_channels)
(NHWC), (num_rows, num_columns, num_channels) (HWC), or
(num_rows, num_columns) (HW). The rank must be statically known (the
shape is not `TensorShape(None)`).
angles: A scalar angle to rotate all images by, or (if images has rank 4)
a vector of length num_images, with an angle for each image in the batch.
interpolation: Interpolation mode. Supported values: "NEAREST", "BILINEAR".
name: The name of the op.
Returns:
Image(s) with the same type and shape as `images`, rotated by the given
angle(s). Empty space due to the rotation will be filled with zeros.
Raises:
TypeError: If `image` is an invalid type.
"""
with ops.name_scope(name, "rotate"):
image_or_images = ops.convert_to_tensor(images)
if image_or_images.dtype.base_dtype not in _IMAGE_DTYPES:
raise TypeError("Invalid dtype %s." % image_or_images.dtype)
elif image_or_images.get_shape().ndims is None:
raise TypeError("image_or_images rank must be statically known")
elif len(image_or_images.get_shape()) == 2:
images = image_or_images[None, :, :, None]
elif len(image_or_images.get_shape()) == 3:
images = image_or_images[None, :, :, :]
elif len(image_or_images.get_shape()) == 4:
images = image_or_images
else:
raise TypeError("Images should have rank between 2 and 4.")
image_height = math_ops.cast(array_ops.shape(images)[1],
dtypes.float32)[None]
image_width = math_ops.cast(array_ops.shape(images)[2],
dtypes.float32)[None]
output = transform(
images,
angles_to_projective_transforms(angles, image_height, image_width),
interpolation=interpolation)
if image_or_images.get_shape().ndims is None:
raise TypeError("image_or_images rank must be statically known")
elif len(image_or_images.get_shape()) == 2:
return output[0, :, :, 0]
elif len(image_or_images.get_shape()) == 3:
return output[0, :, :, :]
else:
return output
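# Illustrative usage sketch (added commentary, not part of the original file).
# Rotates a batch of images by 90 degrees counterclockwise; tensor values are
# hypothetical and assume TF 1.x graph mode.
#
#   import numpy as np
#   import tensorflow as tf
#   images = tf.constant(np.random.rand(4, 32, 32, 3), dtype=tf.float32)
#   rotated = rotate(images, np.pi / 2, interpolation="BILINEAR")
#   with tf.compat.v1.Session() as sess:
#     out = sess.run(rotated)  # out.shape == (4, 32, 32, 3)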
def translate(images, translations, interpolation="NEAREST", name=None):
"""Translate image(s) by the passed vectors(s).
Args:
images: A tensor of shape (num_images, num_rows, num_columns, num_channels)
(NHWC), (num_rows, num_columns, num_channels) (HWC), or
(num_rows, num_columns) (HW). The rank must be statically known (the
shape is not `TensorShape(None)`).
translations: A vector representing [dx, dy] or (if images has rank 4)
a matrix with num_images rows, containing a [dx, dy] vector for each
image in the batch.
interpolation: Interpolation mode. Supported values: "NEAREST", "BILINEAR".
name: The name of the op.
Returns:
Image(s) with the same type and shape as `images`, translated by the given
vector(s). Empty space due to the translation will be filled with zeros.
Raises:
TypeError: If `image` is an invalid type.
"""
with ops.name_scope(name, "translate"):
return transform(
images,
translations_to_projective_transforms(translations),
interpolation=interpolation)
def angles_to_projective_transforms(angles,
image_height,
image_width,
name=None):
"""Returns projective transform(s) for the given angle(s).
Args:
angles: A scalar angle to rotate all images by, or (for batches of images)
a vector with an angle to rotate each image in the batch. The rank must
be statically known (the shape is not `TensorShape(None)`).
image_height: Height of the image(s) to be transformed.
image_width: Width of the image(s) to be transformed.
name: The name of the op.
Returns:
A tensor of shape (num_images, 8). Projective transforms which can be given
to `tf.contrib.image.transform`.
"""
with ops.name_scope(name, "angles_to_projective_transforms"):
angle_or_angles = ops.convert_to_tensor(
angles, name="angles", dtype=dtypes.float32)
if len(angle_or_angles.get_shape()) == 0: # pylint: disable=g-explicit-length-test
angles = angle_or_angles[None]
elif len(angle_or_angles.get_shape()) == 1:
angles = angle_or_angles
else:
raise TypeError("Angles should have rank 0 or 1.")
x_offset = ((image_width - 1) - (math_ops.cos(angles) *
(image_width - 1) - math_ops.sin(angles) *
(image_height - 1))) / 2.0
y_offset = ((image_height - 1) - (math_ops.sin(angles) *
(image_width - 1) + math_ops.cos(angles) *
(image_height - 1))) / 2.0
num_angles = array_ops.shape(angles)[0]
return array_ops.concat(
values=[
math_ops.cos(angles)[:, None],
-math_ops.sin(angles)[:, None],
x_offset[:, None],
math_ops.sin(angles)[:, None],
math_ops.cos(angles)[:, None],
y_offset[:, None],
array_ops.zeros((num_angles, 2), dtypes.float32),
],
axis=1)
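# Small numeric check (added commentary, not part of the original file): a zero
# angle produces the identity transform.
#
#   t = angles_to_projective_transforms(0.0, image_height=4.0, image_width=4.0)
#   # t has shape (1, 8) and evaluates to [[1, 0, 0, 0, 1, 0, 0, 0]]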
def translations_to_projective_transforms(translations, name=None):
"""Returns projective transform(s) for the given translation(s).
Args:
translations: A 2-element list representing [dx, dy] or a matrix of
2-element lists representing [dx, dy] to translate for each image
(for a batch of images). The rank must be statically known (the shape
is not `TensorShape(None)`).
name: The name of the op.
Returns:
A tensor of shape (num_images, 8) projective transforms which can be given
to `tf.contrib.image.transform`.
"""
with ops.name_scope(name, "translations_to_projective_transforms"):
translation_or_translations = ops.convert_to_tensor(
translations, name="translations", dtype=dtypes.float32)
if translation_or_translations.get_shape().ndims is None:
raise TypeError(
"translation_or_translations rank must be statically known")
elif len(translation_or_translations.get_shape()) == 1:
translations = translation_or_translations[None]
elif len(translation_or_translations.get_shape()) == 2:
translations = translation_or_translations
else:
raise TypeError("Translations should have rank 1 or 2.")
num_translations = array_ops.shape(translations)[0]
# The translation matrix looks like:
# [[1 0 -dx]
# [0 1 -dy]
# [0 0 1]]
# where the last entry is implicit.
# Translation matrices are always float32.
return array_ops.concat(
values=[
array_ops.ones((num_translations, 1), dtypes.float32),
array_ops.zeros((num_translations, 1), dtypes.float32),
-translations[:, 0, None],
array_ops.zeros((num_translations, 1), dtypes.float32),
array_ops.ones((num_translations, 1), dtypes.float32),
-translations[:, 1, None],
array_ops.zeros((num_translations, 2), dtypes.float32),
],
axis=1)
def transform(images,
transforms,
interpolation="NEAREST",
output_shape=None,
name=None):
"""Applies the given transform(s) to the image(s).
Args:
images: A tensor of shape (num_images, num_rows, num_columns, num_channels)
(NHWC), (num_rows, num_columns, num_channels) (HWC), or
(num_rows, num_columns) (HW). The rank must be statically known (the
shape is not `TensorShape(None)`).
transforms: Projective transform matrix/matrices. A vector of length 8 or
tensor of size N x 8. If one row of transforms is
[a0, a1, a2, b0, b1, b2, c0, c1], then it maps the *output* point
`(x, y)` to a transformed *input* point
`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
where `k = c0 x + c1 y + 1`. The transforms are *inverted* compared to
the transform mapping input points to output points. Note that gradients
are not backpropagated into transformation parameters.
interpolation: Interpolation mode. Supported values: "NEAREST", "BILINEAR".
output_shape: Output dimension after the transform, [height, width].
If None, output is the same size as input image.
name: The name of the op.
Returns:
Image(s) with the same type and shape as `images`, with the given
transform(s) applied. Transformed coordinates outside of the input image
will be filled with zeros.
Raises:
TypeError: If `image` is an invalid type.
ValueError: If output shape is not 1-D int32 Tensor.
"""
with ops.name_scope(name, "transform"):
image_or_images = ops.convert_to_tensor(images, name="images")
transform_or_transforms = ops.convert_to_tensor(
transforms, name="transforms", dtype=dtypes.float32)
if image_or_images.dtype.base_dtype not in _IMAGE_DTYPES:
raise TypeError("Invalid dtype %s." % image_or_images.dtype)
elif image_or_images.get_shape().ndims is None:
raise TypeError("image_or_images rank must be statically known")
elif len(image_or_images.get_shape()) == 2:
images = image_or_images[None, :, :, None]
elif len(image_or_images.get_shape()) == 3:
images = image_or_images[None, :, :, :]
elif len(image_or_images.get_shape()) == 4:
images = image_or_images
else:
raise TypeError("Images should have rank between 2 and 4.")
if output_shape is None:
output_shape = array_ops.shape(images)[1:3]
if not context.executing_eagerly():
output_shape_value = tensor_util.constant_value(output_shape)
if output_shape_value is not None:
output_shape = output_shape_value
output_shape = ops.convert_to_tensor(
output_shape, dtypes.int32, name="output_shape")
if not output_shape.get_shape().is_compatible_with([2]):
raise ValueError("output_shape must be a 1-D Tensor of 2 elements: "
"new_height, new_width")
if len(transform_or_transforms.get_shape()) == 1:
transforms = transform_or_transforms[None]
elif transform_or_transforms.get_shape().ndims is None:
raise TypeError(
"transform_or_transforms rank must be statically known")
elif len(transform_or_transforms.get_shape()) == 2:
transforms = transform_or_transforms
else:
raise TypeError("Transforms should have rank 1 or 2.")
output = gen_image_ops.image_projective_transform_v2(
images,
output_shape=output_shape,
transforms=transforms,
interpolation=interpolation.upper())
if len(image_or_images.get_shape()) == 2:
return output[0, :, :, 0]
elif len(image_or_images.get_shape()) == 3:
return output[0, :, :, :]
else:
return output
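# Illustrative usage sketch (added commentary, not part of the original file).
# Shifts a single HW image by (dx=2, dy=1) pixels using a flat 8-element
# transform (recall the transform maps output points to input points); values
# are hypothetical and assume TF 1.x graph mode.
#
#   import numpy as np
#   import tensorflow as tf
#   image = tf.constant(np.random.rand(16, 16), dtype=tf.float32)
#   shifted = transform(image, [1, 0, -2, 0, 1, -1, 0, 0])
#   with tf.compat.v1.Session() as sess:
#     out = sess.run(shifted)  # out.shape == (16, 16)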
def compose_transforms(*transforms):
"""Composes the transforms tensors.
Args:
*transforms: List of image projective transforms to be composed. Each
transform is length 8 (single transform) or shape (N, 8) (batched
transforms). The shapes of all inputs must be equal, and at least one
input must be given.
Returns:
A composed transform tensor. When passed to `tf.contrib.image.transform`,
equivalent to applying each of the given transforms to the image in
order.
"""
assert transforms, "transforms cannot be empty"
with ops.name_scope("compose_transforms"):
composed = flat_transforms_to_matrices(transforms[0])
for tr in transforms[1:]:
# Multiply batches of matrices.
composed = math_ops.matmul(composed, flat_transforms_to_matrices(tr))
return matrices_to_flat_transforms(composed)
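# Illustrative usage sketch (added commentary, not part of the original file).
# Composes a rotation with a translation so the image is resampled only once;
# values are hypothetical and assume TF 1.x graph mode.
#
#   import numpy as np
#   import tensorflow as tf
#   rot = angles_to_projective_transforms(np.pi / 4, 32.0, 32.0)
#   shift = translations_to_projective_transforms([2.0, 3.0])
#   combined = compose_transforms(rot, shift)
#   images = tf.constant(np.random.rand(1, 32, 32, 1), dtype=tf.float32)
#   warped = transform(images, combined)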
def flat_transforms_to_matrices(transforms):
"""Converts `tf.contrib.image` projective transforms to affine matrices.
Note that the output matrices map output coordinates to input coordinates. For
the forward transformation matrix, call `tf.linalg.inv` on the result.
Args:
transforms: Vector of length 8, or batches of transforms with shape
`(N, 8)`.
Returns:
3D tensor of matrices with shape `(N, 3, 3)`. The output matrices map the
*output coordinates* (in homogeneous coordinates) of each transform to the
corresponding *input coordinates*.
Raises:
ValueError: If `transforms` have an invalid shape.
"""
with ops.name_scope("flat_transforms_to_matrices"):
transforms = ops.convert_to_tensor(transforms, name="transforms")
if transforms.shape.ndims not in (1, 2):
raise ValueError("Transforms should be 1D or 2D, got: %s" % transforms)
# Make the transform(s) 2D in case the input is a single transform.
transforms = array_ops.reshape(transforms, constant_op.constant([-1, 8]))
num_transforms = array_ops.shape(transforms)[0]
# Add a column of ones for the implicit last entry in the matrix.
return array_ops.reshape(
array_ops.concat(
[transforms, array_ops.ones([num_transforms, 1])], axis=1),
constant_op.constant([-1, 3, 3]))
def matrices_to_flat_transforms(transform_matrices):
"""Converts affine matrices to `tf.contrib.image` projective transforms.
Note that we expect matrices that map output coordinates to input coordinates.
To convert forward transformation matrices, call `tf.linalg.inv` on the
matrices and use the result here.
Args:
transform_matrices: One or more affine transformation matrices, for the
reverse transformation in homogeneous coordinates. Shape `(3, 3)` or
`(N, 3, 3)`.
Returns:
2D tensor of flat transforms with shape `(N, 8)`, which may be passed into
`tf.contrib.image.transform`.
Raises:
ValueError: If `transform_matrices` have an invalid shape.
"""
with ops.name_scope("matrices_to_flat_transforms"):
transform_matrices = ops.convert_to_tensor(
transform_matrices, name="transform_matrices")
if transform_matrices.shape.ndims not in (2, 3):
raise ValueError(
"Matrices should be 2D or 3D, got: %s" % transform_matrices)
# Flatten each matrix.
transforms = array_ops.reshape(transform_matrices,
constant_op.constant([-1, 9]))
# Divide each matrix by the last entry (normally 1).
transforms /= transforms[:, 8:9]
return transforms[:, :8]
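# Small numeric check (added commentary, not part of the original file): an
# identity matrix round-trips through the flat representation.
#
#   import tensorflow as tf
#   eye = tf.eye(3)[None]                     # [1, 3, 3]
#   flat = matrices_to_flat_transforms(eye)   # [[1, 0, 0, 0, 1, 0, 0, 0]]
#   back = flat_transforms_to_matrices(flat)  # identity matrices, [1, 3, 3]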
@ops.RegisterGradient("ImageProjectiveTransformV2")
def _image_projective_transform_grad(op, grad):
"""Computes the gradient for ImageProjectiveTransform."""
images = op.inputs[0]
transforms = op.inputs[1]
interpolation = op.get_attr("interpolation")
image_or_images = ops.convert_to_tensor(images, name="images")
transform_or_transforms = ops.convert_to_tensor(
transforms, name="transforms", dtype=dtypes.float32)
if image_or_images.dtype.base_dtype not in _IMAGE_DTYPES:
raise TypeError("Invalid dtype %s." % image_or_images.dtype)
if len(transform_or_transforms.get_shape()) == 1:
transforms = transform_or_transforms[None]
elif len(transform_or_transforms.get_shape()) == 2:
transforms = transform_or_transforms
else:
raise TypeError("Transforms should have rank 1 or 2.")
# Invert transformations
transforms = flat_transforms_to_matrices(transforms=transforms)
inverse = linalg_ops.matrix_inverse(transforms)
transforms = matrices_to_flat_transforms(inverse)
output = gen_image_ops.image_projective_transform_v2(
images=grad,
transforms=transforms,
output_shape=array_ops.shape(image_or_images)[1:3],
interpolation=interpolation)
return [output, None, None]
def bipartite_match(distance_mat,
num_valid_rows,
top_k=-1,
name="bipartite_match"):
"""Find bipartite matching based on a given distance matrix.
A greedy bi-partite matching algorithm is used to obtain the matching with
the (greedy) minimum distance.
Args:
distance_mat: A 2-D float tensor of shape `[num_rows, num_columns]`. It is a
pair-wise distance matrix between the entities represented by each row and
each column. It is an asymmetric matrix. The smaller the distance is, the
more similar the pairs are. The bipartite matching is to minimize the
distances.
num_valid_rows: A scalar or a 1-D tensor with one element describing the
number of valid rows of distance_mat to consider for the bipartite
matching. If set to be negative, then all rows from `distance_mat` are
used.
top_k: A scalar that specifies the number of top-k matches to retrieve.
If set to be negative, it is set according to the maximum number of
matches from `distance_mat`.
name: The name of the op.
Returns:
row_to_col_match_indices: A vector of length num_rows, which is the number
of rows of the input `distance_matrix`. If `row_to_col_match_indices[i]`
is not -1, row i is matched to column `row_to_col_match_indices[i]`.
col_to_row_match_indices: A vector of length num_columns, which is the
number of columns of the input distance matrix.
If `col_to_row_match_indices[j]` is not -1, column j is matched to row
`col_to_row_match_indices[j]`.
"""
result = gen_image_ops.bipartite_match(
distance_mat, num_valid_rows, top_k, name=name)
return result
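# Illustrative usage sketch (added commentary, not part of the original file).
# Greedily matches two rows to the closest of three columns; the expected
# result is shown for this clear-cut case, and values are hypothetical.
#
#   import tensorflow as tf
#   distances = tf.constant([[0.5, 0.1, 0.9],
#                            [0.4, 0.3, 0.2]])
#   row_to_col, col_to_row = bipartite_match(distances, num_valid_rows=2)
#   # row_to_col -> [1, 2]; col_to_row -> [-1, 0, 1] (unmatched entries are -1)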
def connected_components(images):
"""Labels the connected components in a batch of images.
A component is a set of pixels in a single input image, which are all adjacent
and all have the same non-zero value. The components using a squared
connectivity of one (all True entries are joined with their neighbors above,
below, left, and right). Components across all images have consecutive ids 1
through n. Components are labeled according to the first pixel of the
component appearing in row-major order (lexicographic order by
image_index_in_batch, row, col). Zero entries all have an output id of 0.
This op is equivalent to `scipy.ndimage.measurements.label` on a 2D array
with the default structuring element (which is the connectivity used here).
Args:
images: A 2D (H, W) or 3D (N, H, W) Tensor of boolean image(s).
Returns:
Components with the same shape as `images`. False entries in `images` have
value 0, and all True entries map to a component id > 0.
Raises:
TypeError: if `images` is not 2D or 3D.
"""
with ops.name_scope("connected_components"):
image_or_images = ops.convert_to_tensor(images, name="images")
if len(image_or_images.get_shape()) == 2:
images = image_or_images[None, :, :]
elif len(image_or_images.get_shape()) == 3:
images = image_or_images
else:
raise TypeError(
"images should have rank 2 (HW) or 3 (NHW). Static shape is %s" %
image_or_images.get_shape())
components = gen_image_ops.image_connected_components(images)
# TODO(ringwalt): Component id renaming should be done in the op, to avoid
# constructing multiple additional large tensors.
components_flat = array_ops.reshape(components, [-1])
unique_ids, id_index = array_ops.unique(components_flat)
id_is_zero = array_ops.where_v2(math_ops.equal(unique_ids, 0))[:, 0]
# Map each nonzero id to consecutive values.
nonzero_consecutive_ids = math_ops.range(
array_ops.shape(unique_ids)[0] - array_ops.shape(id_is_zero)[0]) + 1
def no_zero():
# No need to insert a zero into the ids.
return nonzero_consecutive_ids
def has_zero():
# Insert a zero in the consecutive ids where zero appears in unique_ids.
# id_is_zero has length 1.
zero_id_ind = math_ops.cast(id_is_zero[0], dtypes.int32)
ids_before = nonzero_consecutive_ids[:zero_id_ind]
ids_after = nonzero_consecutive_ids[zero_id_ind:]
return array_ops.concat([ids_before, [0], ids_after], axis=0)
new_ids = control_flow_ops.cond(
math_ops.equal(array_ops.shape(id_is_zero)[0], 0), no_zero, has_zero)
components = array_ops.reshape(
array_ops.gather(new_ids, id_index), array_ops.shape(components))
if len(image_or_images.get_shape()) == 2:
return components[0, :, :]
else:
return components
ops.NotDifferentiable("BipartiteMatch")
ops.NotDifferentiable("ImageConnectedComponents")
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/image/python/ops/image_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python layer for distort_image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.image.ops import gen_distort_image_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import resource_loader
_distort_image_ops = loader.load_op_library(
resource_loader.get_path_to_datafile('_distort_image_ops.so'))
# pylint: disable=invalid-name
def random_hsv_in_yiq(image,
max_delta_hue=0,
lower_saturation=1,
upper_saturation=1,
lower_value=1,
upper_value=1,
seed=None):
"""Adjust hue, saturation, value of an RGB image randomly in YIQ color space.
Equivalent to `adjust_hsv_in_yiq()` but uses a `delta_h` randomly
picked in the interval `[-max_delta_hue, max_delta_hue]`, a `scale_saturation`
randomly picked in the interval `[lower_saturation, upper_saturation]`, and
a `scale_value` randomly picked in the interval
`[lower_value, upper_value]`.
Args:
image: RGB image or images. Size of the last dimension must be 3.
max_delta_hue: float. Maximum value for the random delta_hue. Passing 0
disables adjusting hue.
lower_saturation: float. Lower bound for the random scale_saturation.
upper_saturation: float. Upper bound for the random scale_saturation.
lower_value: float. Lower bound for the random scale_value.
upper_value: float. Upper bound for the random scale_value.
seed: An operation-specific seed. It will be used in conjunction
with the graph-level seed to determine the real seeds that will be
used in this operation. Please see the documentation of
set_random_seed for its interaction with the graph-level random seed.
Returns:
3-D float tensor of shape `[height, width, channels]`.
Raises:
ValueError: if `max_delta_hue`, `lower_saturation`, `upper_saturation`,
`lower_value`, or `upper_value` is invalid.
"""
if max_delta_hue < 0:
raise ValueError('max_delta_hue must be non-negative.')
if lower_saturation < 0:
raise ValueError('lower_saturation must be non-negative.')
if lower_value < 0:
raise ValueError('lower_value must be non-negative.')
if lower_saturation > upper_saturation:
raise ValueError('lower_saturation must be < upper_saturation.')
if lower_value > upper_value:
raise ValueError('lower_value must be < upper_value.')
if max_delta_hue == 0:
delta_hue = 0
else:
delta_hue = random_ops.random_uniform(
[], -max_delta_hue, max_delta_hue, seed=seed)
if lower_saturation == upper_saturation:
scale_saturation = lower_saturation
else:
scale_saturation = random_ops.random_uniform(
[], lower_saturation, upper_saturation, seed=seed)
if lower_value == upper_value:
scale_value = lower_value
else:
scale_value = random_ops.random_uniform(
[], lower_value, upper_value, seed=seed)
return adjust_hsv_in_yiq(image, delta_hue, scale_saturation, scale_value)
def adjust_hsv_in_yiq(image,
delta_hue=0,
scale_saturation=1,
scale_value=1,
name=None):
"""Adjust hue, saturation, value of an RGB image in YIQ color space.
This is a convenience method that converts an RGB image to float
representation, converts it to YIQ, rotates the color around the Y channel by
delta_hue in radians, scales the chrominance channels (I, Q) by
scale_saturation, scales all channels (Y, I, Q) by scale_value,
converts back to RGB, and then back to the original data type.
`image` is an RGB image. The image hue is adjusted by converting the
image to YIQ, rotating around the luminance channel (Y) by
`delta_hue` in radians, multiplying the chrominance channels (I, Q) by
`scale_saturation`, and multiplying all channels (Y, I, Q) by
`scale_value`. The image is then converted back to RGB.
Args:
image: RGB image or images. Size of the last dimension must be 3.
delta_hue: float, the hue rotation amount, in radians.
scale_saturation: float, factor to multiply the saturation by.
scale_value: float, factor to multiply the value by.
name: A name for this operation (optional).
Returns:
Adjusted image(s), same shape and DType as `image`.
"""
with ops.name_scope(name, 'adjust_hsv_in_yiq', [image]) as name:
image = ops.convert_to_tensor(image, name='image')
# Remember the original dtype so we can convert back if needed
orig_dtype = image.dtype
flt_image = image_ops.convert_image_dtype(image, dtypes.float32)
rgb_altered = gen_distort_image_ops.adjust_hsv_in_yiq(
flt_image, delta_hue, scale_saturation, scale_value)
return image_ops.convert_image_dtype(rgb_altered, orig_dtype)
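# Illustrative usage sketch (added commentary, not part of the original file).
# Rotates the hue of an RGB image by 0.2 radians in YIQ space and slightly
# desaturates it; values are hypothetical and assume TF 1.x graph mode.
#
#   import numpy as np
#   import tensorflow as tf
#   image = tf.constant(np.random.rand(64, 64, 3), dtype=tf.float32)
#   adjusted = adjust_hsv_in_yiq(image, delta_hue=0.2, scale_saturation=0.8)
#   with tf.compat.v1.Session() as sess:
#     out = sess.run(adjusted)  # same shape and dtype as the input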
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/image/python/ops/distort_image_ops.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for building quantized models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,g-bad-import-order
from tensorflow.contrib.quantization.python import array_ops as quantized_array_ops
from tensorflow.contrib.quantization.python.math_ops import *
from tensorflow.contrib.quantization.python.nn_ops import *
from tensorflow.python.ops import gen_array_ops as quantized_gen_array_ops
from tensorflow.python.ops.gen_array_ops import dequantize
from tensorflow.python.ops.gen_array_ops import quantize_v2
from tensorflow.python.ops.gen_array_ops import quantized_concat
# pylint: enable=unused-import,wildcard-import,g-bad-import-order
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/quantization/__init__.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quantized Array Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.ops import gen_array_ops as quantized_gen_array_ops
from tensorflow.python.ops.gen_array_ops import dequantize
from tensorflow.python.ops.gen_array_ops import quantize_v2
from tensorflow.python.ops.gen_array_ops import quantized_concat
# pylint: enable=unused-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/quantization/python/array_ops.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module containing TensorFlow ops whose API may change in the future."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.quantization.python.array_ops import *
from tensorflow.contrib.quantization.python.math_ops import *
from tensorflow.contrib.quantization.python.nn_ops import *
# pylint: enable=unused-import,wildcard-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/quantization/python/__init__.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for primitive Neural Net (NN) Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops.gen_nn_ops import *
# pylint: enable=unused-import,wildcard-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/quantization/python/nn_ops.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quantized Math Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=unused-import,wildcard-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/quantization/python/math_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Inter-process communication using MPI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.framework import errors
from tensorflow.python.framework import load_library
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import tf_logging as logging
def _load_library(name, op_list=None):
"""Loads a .so file containing the specified operators.
Args:
name: The name of the .so file to load.
op_list: A list of names of operators that the library should have. If None
then the .so file's contents will not be verified.
Raises:
NameError if one of the required ops is missing.
"""
try:
filename = resource_loader.get_path_to_datafile(name)
library = load_library.load_op_library(filename)
for expected_op in (op_list or []):
for lib_op in library.OP_LIST.op:
if lib_op.name == expected_op:
break
else:
raise NameError('Could not find operator %s in dynamic library %s' %
(expected_op, name))
return library
except errors.NotFoundError:
logging.warning('%s file could not be loaded.', name)
MPI_LIB = _load_library(
'mpi_collectives.so',
['MPISize', 'MPIRank', 'MPILocalRank', 'MPIAllgather', 'MPIAllreduce'])
def size(name=None):
"""An op which returns the number of MPI processes.
This is equivalent to running `MPI_Comm_size(MPI_COMM_WORLD, ...)` to get the
size of the global communicator.
Returns:
An integer scalar containing the number of MPI processes.
"""
return MPI_LIB.mpi_size(name=name)
ops.NotDifferentiable('MPISize')
def rank(name=None):
"""An op which returns the MPI rank of the calling process.
This is equivalent to running `MPI_Comm_rank(MPI_COMM_WORLD, ...)` to get the
rank of the current process in the global communicator.
Returns:
An integer scalar with the MPI rank of the calling process.
"""
return MPI_LIB.mpi_rank(name=name)
ops.NotDifferentiable('MPIRank')
def init(name=None):
"""An op which initializes MPI on the device on which it is run.
All future MPI ops must be run on the same device that the `init` op was run
on.
"""
return MPI_LIB.mpi_init(name=name)
ops.NotDifferentiable('MPIInit')
def local_rank(name=None):
"""An op which returns the local MPI rank of the calling process, within the
node that it is running on. For example, if there are seven processes running
on a node, their local ranks will be zero through six, inclusive.
This is equivalent to running `MPI_Comm_rank(...)` on a new communicator
which only includes processes on the same node.
Returns:
An integer scalar with the local MPI rank of the calling process.
"""
return MPI_LIB.mpi_local_rank(name=name)
ops.NotDifferentiable('MPILocalRank')
def _allreduce(tensor, name=None):
"""An op which sums an input tensor over all the MPI processes.
The reduction operation is keyed by the name of the op. The tensor type and
shape must be the same on all MPI processes for a given name. The reduction
will not start until all processes are ready to send and receive the tensor.
Returns:
A tensor of the same shape and type as `tensor`, summed across all
processes.
"""
return MPI_LIB.mpi_allreduce(tensor, name=name)
ops.NotDifferentiable('MPIAllreduce')
def allgather(tensor, name=None):
"""An op which concatenates the input tensor with the same input tensor on
all other MPI processes.
The concatenation is done on the first dimension, so the input tensors on the
different processes must have the same rank and shape, except for the first
dimension, which is allowed to be different.
Returns:
A tensor of the same type as `tensor`, concatenated on dimension zero
across all processes. The shape is identical to the input shape, except for
the first dimension, which may be greater and is the sum of all first
dimensions of the tensors in different MPI processes.
"""
# Specify that first allgather is to collect the tensor gather sizes,
# indicated by passing in a scalar (0-D tensor) of value 0
sizes_flag = tf.constant(0, dtype=tf.int64, name='size_flag_const')
my_size = tf.slice(
tf.shape(tensor, out_type=tf.int64), [0], [1], name='size_slice')
if name is None:
name = 'allgather'
sizing_name = '{}_sizing'.format(name)
sizes = MPI_LIB.mpi_allgather(my_size, sizes_flag, name=sizing_name)
return MPI_LIB.mpi_allgather(tensor, sizes, name=name)
ops.NotDifferentiable('MPIAllgather')
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/mpi_collectives/mpi_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
import tensorflow.contrib.mpi_collectives as mpi
from tensorflow.python.platform import test
average_allgather = False
class AllgatherTest(test.TestCase):
def checkAllgather(self, num_ranks, all_gathered, local_gathered):
# Ensure that indices match.
all_gat_ind = np.sort(all_gathered.indices)
loc_gat_ind = np.sort(local_gathered.indices)
assert(len(loc_gat_ind) == len(all_gat_ind))
for i in range(len(loc_gat_ind)):
assert(loc_gat_ind[i] == all_gat_ind[i])
# For each index, verify same values.
local_checked = []
for i in range(len(local_gathered.indices)):
local_checked.append(False)
for i in range(len(all_gathered.indices)):
all_index = all_gathered.indices[i]
# TODO(jthestness): Make this lookup quicker using sorting.
loc_index = -1
for j in range(len(local_gathered.indices)):
if local_gathered.indices[j] == all_index and not local_checked[j]:
loc_index = j
local_checked[j] = True
break
assert(loc_index >= 0)
correct_output = local_gathered.values[loc_index][0]
if average_allgather:
correct_output = correct_output / float(num_ranks)
assert(all_gathered.values[i][0] == correct_output)
def test_mpi_allgather(self):
# Get MPI rank
my_rank = int(os.environ['PMI_RANK'])
num_ranks = int(os.environ['PMI_SIZE'])
indices_per_rank = 100
tensor_width = 10
# Create IndexedSlices for each rank, some with overlapping indices.
to_gather_indices = []
to_gather_values = []
to_gather = []
for rank_id in range(num_ranks):
indices = []
values = []
my_multiple = rank_id + 1
current_index = my_multiple
for i in range(indices_per_rank):
indices.append(current_index)
ones_tensor = tf.ones([tensor_width])
values.append(tf.multiply(ones_tensor,
tf.fill(ones_tensor.get_shape(),
float(current_index))))
current_index += my_multiple
concat_ind = tf.stack(indices)
concat_vals = tf.stack(values)
to_gather_indices.append(concat_ind)
to_gather_values.append(concat_vals)
to_gather.append(tf.IndexedSlices(concat_vals, concat_ind))
# Collect the local IndexedSlices (indices and values) to create
# correct IndexedSlices output.
correct_gather_indices = tf.concat(to_gather_indices, 0)
correct_gather_values = tf.concat(to_gather_values, 0)
correct_gather = tf.IndexedSlices(correct_gather_values,
correct_gather_indices)
all_gather = mpi.allreduce(to_gather[my_rank], average_allgather)
# NOTE: This assumes that device IDs are numbered the same as ranks.
gpu_options = tf.GPUOptions(visible_device_list=str(my_rank))
config = tf.ConfigProto(gpu_options=gpu_options)
# MPI Session to test allgather.
with mpi.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
all_gathered, local_gathered = sess.run([all_gather, correct_gather])
# Compare all_gathered with local_gathered.
self.checkAllgather(num_ranks, all_gathered, local_gathered)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/mpi_collectives/mpi_allgather_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""## Communicating Between Processes with MPI
TensorFlow natively provides inter-device communication through send and
receive ops and inter-node communication through Distributed TensorFlow, based
on the same send and receive abstractions. On HPC clusters where Infiniband or
other high-speed node interconnects are available, these can end up being
insufficient for synchronous data-parallel training (without asynchronous
gradient descent). This module implements a variety of MPI ops which can take
advantage of hardware-specific MPI libraries for efficient communication.
In order to use this module, TensorFlow must be built with an MPI library,
which can be provided to the `./configure` script at build time. As a user of
TensorFlow, you will need to build TensorFlow yourself to select the MPI
library to use; to do so, follow the [instructions for building TensorFlow from
source](https://www.tensorflow.org/get_started/os_setup#installing_from_sources).
### Utility Ops
In addition to reductions and gathers, this module provides utility operations
for detecting the running MPI configuration.
Example:
```python
import tensorflow.contrib.mpi_collectives as mpi
# Use `mpi.Session` instead of `tf.Session`
with mpi.Session() as session:
rank = session.run(mpi.rank())
print("My MPI Rank:", rank)
if rank == 0:
print("MPI Size:", session.run(mpi.size()))
```
@@init
@@size
@@rank
@@local_rank
### Ring Allreduce and Allgather
When summing or averaging tensors across many processes, communication can
easily become a bottleneck. A naive implementation will send all the tensor
values to the same process, perform the reduction, and then broadcast the
values back to all other processes, effectively creating a synchronous
parameter server in one process. However, the process responsible for
performing the reduction will have to receive and send a massive amount of data
which scales with the number of processes *and* the number of parameters in the
model.
Instead of centralizing the reduction and having one primary reducer, we can
implement a distributed allreduce or allgather. A bandwidth-optimal allreduce
will end up sending 2(N - 1) values for every value in the input tensor,
and can be implemented with a ring allreduce [1]. (Intuitively, a linear reduce
requires at least (N - 1) sends between the different nodes, and a broadcast of
the result also requires (N - 1) sends, for a total of 2 (N - 1); these two
steps cannot be combined in a clever way to reduce the number of required
sends.) This module implements bandwidth-optimal ring allreduce and ring
allgather operations using MPI; by choosing a hardware-appropriate MPI
implementation (such as OpenMPI with CUDA-IPC support), you can train large
models with synchronous gradient descent with minimal communication overhead.
In addition to the `allreduce` and `allgather` functions, a convenience
`DistributedOptimizer` wrapper is provided to simplify using these functions
for reducing model gradients.
Example:
```python
import tensorflow as tf
from tensorflow.contrib import mpi_collectives as mpi
# Construct a simple linear regression model to optimize
W = tf.get_variable("W", shape=[20, 1], dtype=tf.float32)
B = tf.get_variable("B", shape=[1, 1], dtype=tf.float32)
inputs = tf.placeholder(tf.float32, shape=[None, 20], name="Inputs")
outputs = tf.placeholder(tf.float32, shape=[None, 1], name="Outputs")
loss = tf.nn.l2_loss(tf.matmul(inputs, W) + B - outputs)
# Training using MPI allreduce with DistributedOptimizer
optimizer = mpi.DistributedOptimizer(tf.train.AdamOptimizer())
train = optimizer.minimize(loss)
# Average loss over all ranks, for printing.
# Do not pass this to an optimizer!
avg_loss = mpi.allreduce(loss)
# On different ranks, feed different input data.
with mpi.Session() as session:
rank = session.run(mpi.rank())
batch_inputs, batch_outputs = construct_batch_for_rank(rank)
feed_dict = {inputs: batch_inputs, outputs: batch_outputs}
_, l = session.run([train, avg_loss], feed_dict=feed_dict)
print("Average Loss:", l)
```
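The `allgather` function can also be used directly when each process produces
a different number of rows; below is a minimal sketch (the shapes and fill
values are illustrative assumptions, not requirements):
```python
import tensorflow as tf
from tensorflow.contrib import mpi_collectives as mpi

with mpi.Session() as session:
  rank = session.run(mpi.rank())
  # Each rank contributes (rank + 1) rows; allgather concatenates the rows
  # from all ranks along dimension zero.
  local = tf.fill([rank + 1, 4], float(rank))
  gathered = session.run(mpi.allgather(local))
  print("Rank", rank, "gathered shape:", gathered.shape)
```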
[1] Patarasuk, Pitch and Yuan, Xin. "Bandwidth Optimal All-reduce Algorithms
for Clusters of Workstations".
@@Session
@@DistributedOptimizer
@@allreduce
@@allgather
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.mpi_collectives.python.ops.mpi_ops import init
from tensorflow.contrib.mpi_collectives.python.ops.mpi_ops import size
from tensorflow.contrib.mpi_collectives.python.ops.mpi_ops import rank
from tensorflow.contrib.mpi_collectives.python.ops.mpi_ops import local_rank
from tensorflow.contrib.mpi_collectives.python.ops.mpi_ops import allgather
from tensorflow.contrib.mpi_collectives.python.ops.mpi_ops import _allreduce
def allreduce(tensor, average=True):
"""Perform an MPI allreduce on a tf.Tensor or tf.IndexedSlices.
Arguments:
tensor: tf.Tensor, tf.Variable, or tf.IndexedSlices to reduce.
The shape of the input must be identical across all ranks.
average: If True, computes the average over all ranks.
Otherwise, computes the sum over all ranks.
This function performs a bandwidth-optimal ring allreduce on the input
tensor. If the input is an tf.IndexedSlices, the function instead does an
allgather on the values and the indices, effectively doing an allreduce on
the represented tensor.
"""
if isinstance(tensor, tf.IndexedSlices):
    # For IndexedSlices, do two allgathers instead of an allreduce.
mpi_size = tf.cast(size(), tensor.values.dtype)
values = allgather(tensor.values)
indices = allgather(tensor.indices)
# To make this operation into an average, divide all gathered values by
# the MPI size.
new_values = tf.div(values, mpi_size) if average else values
return tf.IndexedSlices(new_values, indices,
dense_shape=tensor.dense_shape)
else:
mpi_size = tf.cast(size(), tensor.dtype)
summed_tensor = _allreduce(tensor)
new_tensor = (tf.div(summed_tensor, mpi_size)
if average else summed_tensor)
return new_tensor
class DistributedOptimizer(tf.train.Optimizer):
"""An optimizer that wraps another tf.Optimizer, using an MPI allreduce to
average gradient values before applying gradients to model weights."""
def __init__(self, optimizer, name=None, use_locking=False):
"""Construct a new DistributedOptimizer, which uses another optimizer
under the hood for computing single-process gradient values and
applying gradient updates after the gradient values have been averaged
across all the MPI ranks.
Args:
optimizer: Optimizer to use for computing gradients and applying updates.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Distributed" followed by the provided
optimizer type.
use_locking: Whether to use locking when updating variables. See
Optimizer.__init__ for more info.
"""
if name is None:
name = "Distributed{}".format(type(optimizer).__name__)
self._optimizer = optimizer
super(DistributedOptimizer, self).__init__(
name=name, use_locking=use_locking)
def compute_gradients(self, *args, **kwargs):
"""Compute gradients of all trainable variables.
See Optimizer.compute_gradients() for more info.
In DistributedOptimizer, compute_gradients() is overridden to also
allreduce the gradients before returning them.
"""
gradients = (super(DistributedOptimizer, self)
.compute_gradients(*args, **kwargs))
return [(allreduce(gradient), var) for (gradient, var) in gradients]
def _apply_dense(self, *args, **kwargs):
"""Calls this same method on the underlying optimizer."""
return self._optimizer._apply_dense(*args, **kwargs)
def _apply_sparse(self, *args, **kwargs):
"""Calls this same method on the underlying optimizer."""
return self._optimizer._apply_sparse(*args, **kwargs)
def _apply_sparse_duplicate_indices(self, *args, **kwargs):
"""Calls this same method on the underlying optimizer."""
return self._optimizer._apply_sparse_duplicate_indices(*args,
**kwargs)
def _prepare(self, *args, **kwargs):
"""Calls this same method on the underlying optimizer."""
return self._optimizer._prepare(*args, **kwargs)
def _create_slots(self, *args, **kwargs):
"""Calls this same method on the underlying optimizer."""
return self._optimizer._create_slots(*args, **kwargs)
def _valid_dtypes(self, *args, **kwargs):
"""Calls this same method on the underlying optimizer."""
return self._optimizer._valid_dtypes(*args, **kwargs)
def _finish(self, *args, **kwargs):
"""Calls this same method on the underlying optimizer."""
return self._optimizer._finish(*args, **kwargs)
class Session(tf.Session):
"""A class for running TensorFlow operations, with copies of the same graph
running distributed across different MPI nodes.
The primary difference between `tf.Session` and
`tf.contrib.mpi_collectives.Session` is that the MPI `Session` ensures that
the `Session` options are correct for use with `tf.contrib.mpi`, and
initializes MPI immediately upon the start of the session.
"""
def __init__(self, target='', graph=None, config=None):
"""Creates a new TensorFlow MPI session.
Unlike a normal `tf.Session`, an MPI Session may only use a single GPU,
which must be specified in advance before the session is initialized.
In addition, it only uses a single graph evaluation thread, and
initializes MPI immediately upon starting.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
    using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional.) A `ConfigProto` protocol buffer with configuration
options for the session.
"""
super(Session, self).__init__(target, graph, config=config)
# Initialize MPI on the relevant device.
# TODO: Move this to library load and eliminate mpi.Session()
if graph is None:
graph = tf.get_default_graph()
with graph.as_default():
self.run(init())
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/mpi_collectives/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
import tensorflow.contrib.mpi_collectives as mpi
from tensorflow.python.platform import test
average_allreduce = False
max_wrong_count = -1
class AllreduceTest(test.TestCase):
def dumpFailure(self, my_rank, out_loc_red, my_correct, out_all_red,
our_correct):
# Find reduced/allreduced indices that are wrong and print all the
# values from output, slices, reduced, allreduced, so we can debug
# which is incorrect:
wrong_count = 0
red_dims = out_loc_red.shape
assert(len(red_dims) == 2)
for i in range(red_dims[0]):
for j in range(red_dims[1]):
suffix = ""
if out_loc_red[i][j] != my_correct[i][j] or \
out_all_red[i][j] != our_correct[i][j]:
suffix = "WRONG"
wrong_count += 1
print("{}\t{}\t{}\t{}\t{}\t{}"
.format(my_rank, i, j, out_loc_red[i][j],
out_all_red[i][j], suffix), flush=True)
if max_wrong_count > 0 and wrong_count >= max_wrong_count:
return
def test_mpi_allreduce(self):
# Get MPI rank
my_rank = int(os.environ['PMI_RANK'])
num_ranks = int(os.environ['PMI_SIZE'])
stages = 13
batch_size = 1331
hidden_size = batch_size
out_size = batch_size
# Input placeholder (batch_size x hidden) - init to 1s
inputs = tf.placeholder(tf.float32, shape=(batch_size, hidden_size),
name="Input")
# Large matrices (hidden x out_dim) - init random
weights = []
for i in range(stages):
initer = tf.constant_initializer(pow(2.0, i + 1.0))
weights.append(tf.get_variable("weights_{}".format(i),
shape=(hidden_size, out_size),
dtype=tf.float32,
initializer=initer))
# Calculate output through dependent allreduces
stage_input = inputs
for i in range(stages):
inter_output = tf.add(stage_input, weights[i],
name="add_red_{}".format(i))
stage_input = mpi.allreduce(inter_output,
average=average_allreduce)
all_reduced = stage_input
# Local reduced output for verification
local_input = inputs
for i in range(stages):
inter_output = tf.add(local_input, weights[i],
name="addin_loc_{}".format(i))
my_reducer = tf.Variable(initial_value=np.ones((hidden_size, out_size)),
dtype=tf.float32, name="loc_redr_{}".format(i))
for r in range(num_ranks):
my_reducer = tf.add(my_reducer, inter_output,
name="add_loc_{}_{}".format(i, r))
if average_allreduce:
local_input = tf.div(my_reducer, num_ranks,
name="div_loc_{}".format(i))
else:
local_input = my_reducer
local_reduced = local_input
# NOTE: This assumes that device IDs are numbered the same as ranks
gpu_options = tf.GPUOptions(visible_device_list=str(my_rank))
config = tf.ConfigProto(gpu_options=gpu_options)
# MPI Session to test allreduce
with mpi.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
input_feed = np.ones((batch_size, hidden_size), dtype=np.float32)
our_output = input_feed[0][0]
spread_var = 100
input_feed = input_feed + my_rank * spread_var
my_output = input_feed[0][0]
for i in range(stages):
curr_feed = my_output + pow(2.0, i + 1.0)
my_output = curr_feed * num_ranks + 1
curr_our_feed = our_output + pow(2.0, i + 1.0)
if i == 0:
sum_ranks = num_ranks * (num_ranks - 1) / 2
our_output = curr_our_feed * num_ranks + \
spread_var * sum_ranks
else:
our_output = curr_our_feed * num_ranks
print("rank {}: My output is {}".format(my_rank, my_output))
my_correct = np.zeros((batch_size, hidden_size), dtype=np.float32)
my_correct = my_correct + my_output
print("rank {}: Our output is {}".format(my_rank, our_output))
our_correct = np.zeros((batch_size, hidden_size), dtype=np.float32)
our_correct = our_correct + our_output
for i in range(1000):
if i % 100 == 0:
print("{}: iter {}".format(my_rank, i), flush=True)
feed_dict = {inputs: input_feed}
out_all_red, out_loc_red \
= sess.run([all_reduced, local_reduced],
feed_dict=feed_dict)
if not np.allclose(out_loc_red, my_correct) or \
not np.allclose(out_all_red, our_correct):
print("Test incorrect on iter {}".format(i), flush=True)
self.dumpFailure(my_rank, out_loc_red, my_correct, out_all_red,
our_correct)
assert(np.allclose(out_loc_red, my_correct) and
np.allclose(out_all_red, our_correct))
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/mpi_collectives/mpi_allreduce_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tensorflow.contrib.mpi_collectives.mpi_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import itertools
import tensorflow as tf
import tensorflow.contrib.mpi_collectives as mpi
def mpi_env_rank_and_size():
"""Get MPI rank and size from environment variables and return them as a
tuple of integers.
Most MPI implementations have an `mpirun` or `mpiexec` command that will
run an MPI executable and set up all communication necessary between the
different processors. As part of that set up, they will set environment
variables that contain the rank and size of the MPI_COMM_WORLD
communicator. We can read those environment variables from Python in order
to ensure that `mpi.rank()` and `mpi.size()` return the expected values.
Since MPI is just a standard, not an implementation, implementations
typically choose their own environment variable names. This function tries
  to support several different implementations, but really it only needs to
support whatever implementation we want to use for the TensorFlow test
suite.
If this is not running under MPI, then defaults of rank zero and size one
are returned. (This is appropriate because when you call MPI_Init in an
application not started with mpirun, it will create a new independent
communicator with only one process in it.)
"""
rank_env = "PMI_RANK OMPI_COMM_WORLD_RANK".split()
size_env = "PMI_SIZE OMPI_COMM_WORLD_SIZE".split()
for rank_var, size_var in zip(rank_env, size_env):
rank = os.environ.get(rank_var)
size = os.environ.get(size_var)
if rank is not None and size is not None:
return int(rank), int(size)
# Default to rank zero and size one if there are no environment variables
return 0, 1
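# Illustrative only (launcher behavior is an assumption, not a guarantee):
# under `mpiexec -n 4 python mpi_ops_test.py` with Open MPI, each process sees
# OMPI_COMM_WORLD_RANK in 0..3 and OMPI_COMM_WORLD_SIZE == 4, so
# mpi_env_rank_and_size() returns values such as (2, 4).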
class MPITests(tf.test.TestCase):
"""
Tests for MPI ops in tensorflow.contrib.mpi_collectives.
"""
def test_mpi_rank(self):
"""Test that the rank returned by mpi.rank() is correct."""
true_rank, _ = mpi_env_rank_and_size()
with self.test_session() as session:
rank = session.run(mpi.rank())
self.assertEqual(true_rank, rank)
def test_mpi_size(self):
"""Test that the size returned by mpi.size() is correct."""
_, true_size = mpi_env_rank_and_size()
with self.test_session() as session:
size = session.run(mpi.size())
self.assertEqual(true_size, size)
def test_mpi_allreduce_cpu(self):
"""Test on CPU that the allreduce correctly sums 1D, 2D, 3D tensors."""
with self.test_session() as session:
size = session.run(mpi.size())
dtypes = [tf.int32, tf.float32]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
tf.set_random_seed(1234)
tensor = tf.random_uniform([17] * dim, -100, 100, dtype=dtype)
summed = mpi.allreduce(tensor, average=False)
multiplied = tensor * size
max_difference = tf.reduce_max(tf.abs(summed - multiplied))
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
diff = session.run(max_difference)
self.assertTrue(diff <= threshold,
"mpi.allreduce produces incorrect results")
def test_mpi_allreduce_gpu(self):
"""Test that the allreduce works on GPUs.
This test will crash badly if used with an MPI implementation that does
not support GPU memory transfers directly, as it will call MPI_Send on
a GPU data pointer."""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
return
no_gpus = tf.GPUOptions(visible_device_list="")
cpu_config = tf.ConfigProto(gpu_options=no_gpus)
with self.test_session(config=cpu_config) as session:
local_rank = session.run(mpi.local_rank())
one_gpu = tf.GPUOptions(visible_device_list=str(local_rank))
gpu_config = tf.ConfigProto(gpu_options=one_gpu)
with self.test_session(config=gpu_config) as session:
size = session.run(mpi.size())
dtype = tf.float32
dim = 3
with tf.device("/gpu:0"):
tf.set_random_seed(1234)
tensor = tf.random_uniform([17] * dim, -100, 100, dtype=dtype)
summed = mpi.allreduce(tensor, average=False)
multiplied = tensor * size
max_difference = tf.reduce_max(tf.abs(summed - multiplied))
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
return
diff = session.run(max_difference)
self.assertTrue(diff <= threshold,
"mpi.allreduce on GPU produces incorrect results")
def test_mpi_allreduce_error(self):
"""Test that the allreduce raises an error if different ranks try to
send tensors of different rank or dimension."""
with self.test_session() as session:
rank = session.run(mpi.rank())
size = session.run(mpi.size())
# This test does not apply if there is only one worker.
if size == 1:
return
# Same rank, different dimension
tf.set_random_seed(1234)
dims = [17 + rank] * 3
tensor = tf.random_uniform(dims, -1.0, 1.0)
with self.assertRaises(tf.errors.FailedPreconditionError):
session.run(mpi.allreduce(tensor))
# Same number of elements, different rank
tf.set_random_seed(1234)
if rank == 0:
dims = [17, 23 * 57]
else:
dims = [17, 23, 57]
tensor = tf.random_uniform(dims, -1.0, 1.0)
with self.assertRaises(tf.errors.FailedPreconditionError):
session.run(mpi.allreduce(tensor))
def test_mpi_allreduce_type_error(self):
"""Test that the allreduce raises an error if different ranks try to
send tensors of different type."""
with self.test_session() as session:
rank = session.run(mpi.rank())
size = session.run(mpi.size())
# This test does not apply if there is only one worker.
if size == 1:
return
# Same rank, different dimension
dims = [17] * 3
tensor = tf.ones(dims, dtype=tf.int32 if rank % 2 == 0 else tf.float32)
with self.assertRaises(tf.errors.FailedPreconditionError):
session.run(mpi.allreduce(tensor))
def test_mpi_allgather(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors."""
with self.test_session() as session:
size = session.run(mpi.size())
rank = session.run(mpi.rank())
dtypes = tf.int32, tf.float32
dims = 1, 2, 3
for dtype, dim in itertools.product(dtypes, dims):
tensor = tf.ones([17] * dim, dtype=dtype) * rank
gathered = mpi.allgather(tensor)
gathered_tensor = session.run(gathered)
self.assertEqual(list(gathered_tensor.shape),
[17 * size] + [17] * (dim - 1))
for i in range(size):
rank_tensor = tf.slice(gathered_tensor, [i * 17] + [0] * (dim - 1),
[17] + [-1] * (dim - 1))
self.assertEqual(list(rank_tensor.shape), [17] * dim)
self.assertTrue(session.run(tf.reduce_all(tf.equal(rank_tensor, i))),
"mpi.allgather produces incorrect gathered tensor")
def test_mpi_allgather_variable_size(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors,
even if those tensors have different sizes along the first dim."""
with self.test_session() as session:
size = session.run(mpi.size())
rank = session.run(mpi.rank())
dtypes = tf.int32, tf.float32
dims = 1, 2, 3
for dtype, dim in itertools.product(dtypes, dims):
# Support tests up to MPI Size of 35
if size > 35:
break
tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5
tensor_sizes = tensor_sizes[:size]
tensor = tf.ones([tensor_sizes[rank]] + [17] * (dim - 1),
dtype=dtype) * rank
gathered = mpi.allgather(tensor)
gathered_tensor = session.run(gathered)
expected_size = sum(tensor_sizes)
self.assertEqual(list(gathered_tensor.shape),
[expected_size] + [17] * (dim - 1))
for i in range(size):
rank_size = [tensor_sizes[i]] + [17] * (dim - 1)
rank_tensor = tf.slice(gathered,
[sum(tensor_sizes[:i])] + [0] * (dim - 1),
rank_size)
self.assertEqual(list(rank_tensor.shape), rank_size)
self.assertTrue(session.run(tf.reduce_all(tf.equal(rank_tensor, i))),
"mpi.allgather produces incorrect gathered tensor")
def test_mpi_allgather_error(self):
"""Test that the allgather returns an error if any dimension besides
the first is different among the tensors being gathered."""
with self.test_session() as session:
rank = session.run(mpi.rank())
size = session.run(mpi.size())
# This test does not apply if there is only one worker.
if size == 1:
return
tensor_size = [17] * 3
tensor_size[1] = 10 * (rank + 1)
tensor = tf.ones(tensor_size, dtype=tf.float32) * rank
with self.assertRaises(tf.errors.FailedPreconditionError):
session.run(mpi.allgather(tensor))
def test_mpi_allgather_type_error(self):
"""Test that the allgather returns an error if the types being gathered
    differ among the processes."""
with self.test_session() as session:
rank = session.run(mpi.rank())
size = session.run(mpi.size())
# This test does not apply if there is only one worker.
if size == 1:
return
tensor_size = [17] * 3
dtype = tf.int32 if rank % 2 == 0 else tf.float32
tensor = tf.ones(tensor_size, dtype=dtype) * rank
with self.assertRaises(tf.errors.FailedPreconditionError):
session.run(mpi.allgather(tensor))
if __name__ == '__main__':
tf.test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/mpi_collectives/mpi_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Inter-process communication using MPI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.mpi_collectives.ops import gen_mpi_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
_mpi_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile('_mpi_ops.so'))
def size(name=None):
"""An op which returns the number of MPI processes.
This is equivalent to running `MPI_Comm_size(MPI_COMM_WORLD, ...)` to get the
size of the global communicator.
Returns:
An integer scalar containing the number of MPI processes.
"""
return gen_mpi_ops.mpi_size(name=name)
ops.NotDifferentiable('MPISize')
def rank(name=None):
"""An op which returns the MPI rank of the calling process.
This is equivalent to running `MPI_Comm_rank(MPI_COMM_WORLD, ...)` to get the
rank of the current process in the global communicator.
Returns:
An integer scalar with the MPI rank of the calling process.
"""
return gen_mpi_ops.mpi_rank(name=name)
ops.NotDifferentiable('MPIRank')
def init(name=None):
"""An op which initializes MPI on the device on which it is run.
All future MPI ops must be run on the same device that the `init` op was run
on.
"""
return gen_mpi_ops.mpi_init(name=name)
ops.NotDifferentiable('MPIInit')
def local_rank(name=None):
"""An op which returns the local MPI rank of the calling process, within the
node that it is running on. For example, if there are seven processes running
on a node, their local ranks will be zero through six, inclusive.
This is equivalent to running `MPI_Comm_rank(...)` on a new communicator
which only includes processes on the same node.
Returns:
An integer scalar with the local MPI rank of the calling process.
"""
return gen_mpi_ops.mpi_local_rank(name=name)
ops.NotDifferentiable('MPILocalRank')
def _allreduce(tensor, name=None):
"""An op which sums an input tensor over all the MPI processes.
The reduction operation is keyed by the name of the op. The tensor type and
shape must be the same on all MPI processes for a given name. The reduction
will not start until all processes are ready to send and receive the tensor.
Returns:
A tensor of the same shape and type as `tensor`, summed across all
processes.
"""
return gen_mpi_ops.mpi_allreduce(tensor, name=name)
ops.NotDifferentiable('MPIAllreduce')
def allgather(tensor, name=None):
"""An op which concatenates the input tensor with the same input tensor on
all other MPI processes.
The concatenation is done on the first dimension, so the input tensors on the
different processes must have the same rank and shape, except for the first
dimension, which is allowed to be different.
Returns:
A tensor of the same type as `tensor`, concatenated on dimension zero
across all processes. The shape is identical to the input shape, except for
the first dimension, which may be greater and is the sum of all first
dimensions of the tensors in different MPI processes.
"""
# Specify that first allgather is to collect the tensor gather sizes,
# indicated by passing in a scalar (0-D tensor) of value 0
sizes_flag = tf.constant(0, dtype=tf.int64, name='size_flag_const')
my_size = tf.slice(
tf.shape(tensor, out_type=tf.int64), [0], [1], name='size_slice')
if name is None:
name = 'allgather'
sizing_name = '{}_sizing'.format(name)
sizes = gen_mpi_ops.mpi_allgather(my_size, sizes_flag, name=sizing_name)
return gen_mpi_ops.mpi_allgather(tensor, sizes, name=name)
ops.NotDifferentiable('MPIAllgather')
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/mpi_collectives/python/ops/mpi_ops.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.testing.python.framework.fake_summary_writer import *
from tensorflow.contrib.testing.python.framework.util_test import *
# pylint: enable=unused-import,wildcard-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/testing/__init__.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fake summary writer for unit tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import summary_pb2
from tensorflow.python.framework import test_util
from tensorflow.python.summary.writer import writer
from tensorflow.python.summary.writer import writer_cache
# TODO(ptucker): Replace with mock framework.
class FakeSummaryWriter(object):
"""Fake summary writer."""
_replaced_summary_writer = None
@classmethod
def install(cls):
if cls._replaced_summary_writer:
raise ValueError('FakeSummaryWriter already installed.')
cls._replaced_summary_writer = writer.FileWriter
writer.FileWriter = FakeSummaryWriter
writer_cache.FileWriter = FakeSummaryWriter
@classmethod
def uninstall(cls):
if not cls._replaced_summary_writer:
raise ValueError('FakeSummaryWriter not installed.')
writer.FileWriter = cls._replaced_summary_writer
writer_cache.FileWriter = cls._replaced_summary_writer
cls._replaced_summary_writer = None
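  # A typical test-time usage sketch (the helper call below is hypothetical):
  #
  #   FakeSummaryWriter.install()
  #   try:
  #     run_code_under_test_that_writes_summaries()
  #   finally:
  #     FakeSummaryWriter.uninstall()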
def __init__(self, logdir, graph=None):
self._logdir = logdir
self._graph = graph
self._summaries = {}
self._added_graphs = []
self._added_meta_graphs = []
self._added_session_logs = []
self._added_run_metadata = {}
@property
def summaries(self):
return self._summaries
def assert_summaries(self,
test_case,
expected_logdir=None,
expected_graph=None,
expected_summaries=None,
expected_added_graphs=None,
expected_added_meta_graphs=None,
expected_session_logs=None):
"""Assert expected items have been added to summary writer."""
if expected_logdir is not None:
test_case.assertEqual(expected_logdir, self._logdir)
if expected_graph is not None:
test_case.assertTrue(expected_graph is self._graph)
expected_summaries = expected_summaries or {}
for step in expected_summaries:
test_case.assertTrue(
step in self._summaries,
msg='Missing step %s from %s.' % (step, self._summaries.keys()))
actual_simple_values = {}
for step_summary in self._summaries[step]:
for v in step_summary.value:
# Ignore global_step/sec since it's written by Supervisor in a
# separate thread, so it's non-deterministic how many get written.
if 'global_step/sec' != v.tag:
actual_simple_values[v.tag] = v.simple_value
test_case.assertEqual(expected_summaries[step], actual_simple_values)
if expected_added_graphs is not None:
test_case.assertEqual(expected_added_graphs, self._added_graphs)
if expected_added_meta_graphs is not None:
test_case.assertEqual(len(expected_added_meta_graphs),
len(self._added_meta_graphs))
for expected, actual in zip(expected_added_meta_graphs,
self._added_meta_graphs):
test_util.assert_meta_graph_protos_equal(test_case, expected, actual)
if expected_session_logs is not None:
test_case.assertEqual(expected_session_logs, self._added_session_logs)
def add_summary(self, summ, current_global_step):
"""Add summary."""
if isinstance(summ, bytes):
summary_proto = summary_pb2.Summary()
summary_proto.ParseFromString(summ)
summ = summary_proto
if current_global_step in self._summaries:
step_summaries = self._summaries[current_global_step]
else:
step_summaries = []
self._summaries[current_global_step] = step_summaries
step_summaries.append(summ)
# NOTE: Ignore global_step since its value is non-deterministic.
def add_graph(self, graph, global_step=None, graph_def=None):
"""Add graph."""
if (global_step is not None) and (global_step < 0):
raise ValueError('Invalid global_step %s.' % global_step)
if graph_def is not None:
raise ValueError('Unexpected graph_def %s.' % graph_def)
self._added_graphs.append(graph)
def add_meta_graph(self, meta_graph_def, global_step=None):
"""Add metagraph."""
if (global_step is not None) and (global_step < 0):
raise ValueError('Invalid global_step %s.' % global_step)
self._added_meta_graphs.append(meta_graph_def)
# NOTE: Ignore global_step since its value is non-deterministic.
def add_session_log(self, session_log, global_step=None):
# pylint: disable=unused-argument
self._added_session_logs.append(session_log)
def add_run_metadata(self, run_metadata, tag, global_step=None):
if (global_step is not None) and (global_step < 0):
raise ValueError('Invalid global_step %s.' % global_step)
self._added_run_metadata[tag] = run_metadata
def flush(self):
pass
def reopen(self):
pass
def close(self):
pass
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/testing/python/framework/fake_summary_writer.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import numpy as np
from tensorflow.core.framework import summary_pb2
from tensorflow.python.training import summary_io
def assert_summary(expected_tags, expected_simple_values, summary_proto):
"""Asserts summary contains the specified tags and values.
Args:
expected_tags: All tags in summary.
expected_simple_values: Simply values for some tags.
summary_proto: Summary to validate.
Raises:
ValueError: if expectations are not met.
"""
actual_tags = set()
for value in summary_proto.value:
actual_tags.add(value.tag)
if value.tag in expected_simple_values:
expected = expected_simple_values[value.tag]
actual = value.simple_value
np.testing.assert_almost_equal(
actual, expected, decimal=2, err_msg=value.tag)
expected_tags = set(expected_tags)
if expected_tags != actual_tags:
raise ValueError('Expected tags %s, got %s.' % (expected_tags, actual_tags))
def to_summary_proto(summary_str):
"""Create summary based on latest stats.
Args:
summary_str: Serialized summary.
Returns:
summary_pb2.Summary.
Raises:
ValueError: if tensor is not a valid summary tensor.
"""
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
return summary
# TODO(ptucker): Move to a non-test package?
def latest_event_file(base_dir):
"""Find latest event file in `base_dir`.
Args:
    base_dir: Base directory in which TF event files are stored.
Returns:
File path, or `None` if none exists.
"""
file_paths = glob.glob(os.path.join(base_dir, 'events.*'))
return sorted(file_paths)[-1] if file_paths else None
def latest_events(base_dir):
"""Parse events from latest event file in base_dir.
Args:
    base_dir: Base directory in which TF event files are stored.
Returns:
Iterable of event protos.
Raises:
ValueError: if no event files exist under base_dir.
"""
file_path = latest_event_file(base_dir)
return summary_io.summary_iterator(file_path) if file_path else []
def latest_summaries(base_dir):
"""Parse summary events from latest event file in base_dir.
Args:
    base_dir: Base directory in which TF event files are stored.
Returns:
List of event protos.
Raises:
ValueError: if no event files exist under base_dir.
"""
return [e for e in latest_events(base_dir) if e.HasField('summary')]
def simple_values_from_events(events, tags):
"""Parse summaries from events with simple_value.
Args:
events: List of tensorflow.Event protos.
tags: List of string event tags corresponding to simple_value summaries.
Returns:
dict of tag:value.
Raises:
ValueError: if a summary with a specified tag does not contain simple_value.
"""
step_by_tag = {}
value_by_tag = {}
for e in events:
if e.HasField('summary'):
for v in e.summary.value:
tag = v.tag
if tag in tags:
if not v.HasField('simple_value'):
raise ValueError('Summary for %s is not a simple_value.' % tag)
# The events are mostly sorted in step order, but we explicitly check
# just in case.
if tag not in step_by_tag or e.step > step_by_tag[tag]:
step_by_tag[tag] = e.step
value_by_tag[tag] = v.simple_value
return value_by_tag
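# A minimal usage sketch (assumes `logdir` already contains event files and
# that a 'loss' simple_value summary was written):
#
#   events = latest_summaries(logdir)
#   loss_by_tag = simple_values_from_events(events, ['loss'])
#   print(loss_by_tag['loss'])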
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/testing/python/framework/util_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A distributed computation library for TF.
See [tensorflow/contrib/distribute/README.md](
https://www.tensorflow.org/code/tensorflow/contrib/distribute/README.md)
for overview and examples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.distribute.python.collective_all_reduce_strategy import CollectiveAllReduceStrategy
from tensorflow.contrib.distribute.python.mirrored_strategy import MirroredStrategy
from tensorflow.contrib.distribute.python.monitor import Monitor
from tensorflow.contrib.distribute.python.one_device_strategy import OneDeviceStrategy
from tensorflow.contrib.distribute.python.parameter_server_strategy import ParameterServerStrategy
from tensorflow.contrib.distribute.python.tpu_strategy import initialize_tpu_system
from tensorflow.contrib.distribute.python.tpu_strategy import TPUStrategy
from tensorflow.python.distribute.cross_device_ops import *
from tensorflow.python.distribute.distribute_config import DistributeConfig
from tensorflow.python.distribute.distribute_coordinator import run_standard_tensorflow_server
from tensorflow.python.distribute.distribute_lib import *
from tensorflow.python.distribute.distribution_strategy_context import *
from tensorflow.python.distribute.step_fn import *
from tensorflow.python.util.all_util import remove_undocumented
DistributionStrategy = StrategyV1
_allowed_symbols = [
'AllReduceCrossDeviceOps',
'CollectiveAllReduceStrategy',
'CrossDeviceOps',
'DistributeConfig',
'DistributionStrategy',
'DistributionStrategyExtended',
'MirroredStrategy',
'Monitor',
'MultiWorkerAllReduce',
'OneDeviceStrategy',
'ParameterServerStrategy',
'ReductionToOneDeviceCrossDeviceOps',
'Step',
'StandardInputStep',
'StandardSingleLossStep',
'ReplicaContext',
'TPUStrategy',
'initialize_tpu_system',
'get_cross_replica_context',
'get_distribution_strategy',
'get_loss_reduction',
'get_replica_context',
'get_strategy',
'has_distribution_strategy',
'has_strategy',
'in_cross_replica_context',
'require_replica_context',
'run_standard_tensorflow_server',
'UpdateContext',
]
remove_undocumented(__name__, _allowed_symbols)
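# A minimal usage sketch (assumes a TF 1.x Estimator setup; `model_fn` and
# `input_fn` stand in for user-provided functions):
#
#   strategy = MirroredStrategy()
#   config = tf.estimator.RunConfig(train_distribute=strategy)
#   estimator = tf.estimator.Estimator(model_fn=model_fn, config=config)
#   estimator.train(input_fn=input_fn)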
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distribute/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Monitor is responsible for training, checkpointing and recovery."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.ops import variables
class Monitor(object):
"""Executes training steps, recovers and checkpoints.
Note that this class is particularly preliminary, experimental, and
expected to change.
"""
# TODO(isaprykin): Support step functions that need multiple session calls.
# TODO(isaprykin): Support extra arguments to the step function.
# TODO(isaprykin): Support recovery, checkpointing and summaries.
def __init__(self, step_callable, session=None):
"""Initialize the Monitor with components for executing training steps.
Args:
step_callable: a training `Step` that's capable of signaling when done.
session: a `Session` instance that's needed for graph mode.
Raises:
ValueError: if `session` was provided for eager mode or not provided for
graph mode.
"""
if context.executing_eagerly():
if session is not None:
raise ValueError("Should not provide a `session` in Eager mode.")
self._run_step = step_callable
else:
if session is None:
raise ValueError("Should provide a `session` in Graph mode.")
session.run(step_callable.initialize())
self._run_step = session.make_callable(step_callable())
session.run(variables.global_variables_initializer())
def run_steps(self, num_steps=None):
step = 0
while num_steps is None or step < num_steps:
try:
self._run_step()
step += 1
except errors.OutOfRangeError:
break
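# A minimal graph-mode usage sketch (assumes `step` implements the `Step`
# interface mentioned in __init__, i.e. it is callable and exposes
# initialize(); `tf` is not imported by this module):
#
#   with tf.Session() as session:
#     monitor = Monitor(step, session=session)
#     monitor.run_steps(num_steps=100)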
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distribute/python/monitor.py
|