python_code | repo_name | file_path
---|---|---
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.training.python.training import sampling_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
class StratifiedSampleTest(test.TestCase):
def testGraphBuildAssertionFailures(self):
val = [array_ops.zeros([1, 3]), array_ops.ones([1, 5])]
label = constant_op.constant([1], shape=[1]) # must have batch dimension
probs = [.2] * 5
init_probs = [.1, .3, .1, .3, .2]
batch_size = 16
# Label must have only batch dimension if enqueue_many is True.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
val,
array_ops.zeros([]),
probs,
batch_size,
init_probs,
enqueue_many=True)
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
val,
array_ops.zeros([1, 1]),
probs,
batch_size,
init_probs,
enqueue_many=True)
# Label must not be one-hot.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(val,
constant_op.constant([0, 1, 0, 0, 0]),
probs, batch_size, init_probs)
# Data must be list, not singleton tensor.
with self.assertRaises(TypeError):
sampling_ops.stratified_sample(
array_ops.zeros([1, 3]), label, probs, batch_size, init_probs)
# Data must have batch dimension if enqueue_many is True.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
val,
constant_op.constant(1),
probs,
batch_size,
init_probs,
enqueue_many=True)
# Batch dimensions on data and labels should be equal.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
[array_ops.zeros([2, 1])],
label,
probs,
batch_size,
init_probs,
enqueue_many=True)
# Probabilities must be numpy array, python list, or tensor.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(val, label, 1, batch_size, init_probs)
# Probabilities shape must be fully defined.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
val,
label,
array_ops.placeholder(
dtypes.float32, shape=[None]),
batch_size,
init_probs)
# In the rejection sampling case, make sure that probability lengths are
# the same.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
val, label, [.1] * 10, batch_size, init_probs=[.2] * 5)
# In the rejection sampling case, make sure that zero initial probability
# classes also have zero target probability.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
val, label, [.2, .4, .4], batch_size, init_probs=[0, .5, .5])
def testRuntimeAssertionFailures(self):
valid_probs = [.2] * 5
valid_labels = [1, 2, 3]
vals = [array_ops.zeros([3, 1])]
illegal_labels = [
[0, -1, 1], # classes must be nonnegative
[5, 1, 1], # classes must be less than number of classes
[2, 3], # data and label batch size must be the same
]
illegal_probs = [
[.1] * 5, # probabilities must sum to one
[-.5, .5, .5, .4, .1], # probabilities must be non-negative
]
# Set up graph with illegal label vector.
label_ph = array_ops.placeholder(dtypes.int32, shape=[None])
probs_ph = array_ops.placeholder(
dtypes.float32, shape=[5]) # shape must be defined
val_tf, lbl_tf, prob_tf = sampling_ops._verify_input( # pylint: disable=protected-access
vals, label_ph, [probs_ph])
for illegal_label in illegal_labels:
# Run session that should fail.
with self.cached_session() as sess:
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run([val_tf, lbl_tf],
feed_dict={label_ph: illegal_label,
probs_ph: valid_probs})
for illegal_prob in illegal_probs:
# Run session that should fail.
with self.cached_session() as sess:
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run([prob_tf],
feed_dict={label_ph: valid_labels,
probs_ph: illegal_prob})
def testCanBeCalledMultipleTimes(self):
batch_size = 20
val_input_batch = [array_ops.zeros([2, 3, 4])]
lbl_input_batch = array_ops.ones([], dtype=dtypes.int32)
probs = np.array([0, 1, 0, 0, 0])
batches = sampling_ops.stratified_sample(
val_input_batch, lbl_input_batch, probs, batch_size, init_probs=probs)
batches += sampling_ops.stratified_sample(
val_input_batch, lbl_input_batch, probs, batch_size, init_probs=probs)
summary_op = logging_ops.merge_summary(
ops.get_collection(ops.GraphKeys.SUMMARIES))
with self.cached_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
sess.run(batches + (summary_op,))
coord.request_stop()
coord.join(threads)
def testRejectionBatchingBehavior(self):
batch_size = 20
input_batch_size = 11
val_input_batch = [array_ops.zeros([input_batch_size, 2, 3, 4])]
lbl_input_batch = control_flow_ops.cond(
math_ops.greater(.5, random_ops.random_uniform([])),
lambda: array_ops.ones([input_batch_size], dtype=dtypes.int32) * 1,
lambda: array_ops.ones([input_batch_size], dtype=dtypes.int32) * 3)
probs = np.array([0, .2, 0, .8, 0])
data_batch, labels = sampling_ops.stratified_sample(
val_input_batch,
lbl_input_batch,
probs,
batch_size,
init_probs=[0, .3, 0, .7, 0],
enqueue_many=True)
with self.cached_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
sess.run([data_batch, labels])
coord.request_stop()
coord.join(threads)
def testBatchDimensionNotRequired(self):
classes = 5
# Probs must be a tensor, since we pass it directly to _verify_input.
probs = constant_op.constant([1.0 / classes] * classes)
# Make sure that these vals/labels pairs don't throw any runtime exceptions.
legal_input_pairs = [
(np.zeros([2, 3]), [x % classes for x in range(2)]), # batch dim 2
(np.zeros([4, 15]), [x % classes for x in range(4)]), # batch dim 4
(np.zeros([10, 1]), [x % classes for x in range(10)]), # batch dim 10
]
# Set up graph with placeholders.
vals_ph = array_ops.placeholder(
dtypes.float32) # completely undefined shape
labels_ph = array_ops.placeholder(
dtypes.int32) # completely undefined shape
val_tf, labels_tf, _ = sampling_ops._verify_input( # pylint: disable=protected-access
[vals_ph], labels_ph, [probs])
# Run graph to make sure there are no shape-related runtime errors.
for vals, labels in legal_input_pairs:
with self.cached_session() as sess:
sess.run([val_tf, labels_tf],
feed_dict={vals_ph: vals,
labels_ph: labels})
def testRejectionDataListInput(self):
batch_size = 20
val_input_batch = [
array_ops.zeros([2, 3, 4]), array_ops.ones([2, 4]), array_ops.ones(2) *
3
]
lbl_input_batch = array_ops.ones([], dtype=dtypes.int32)
probs = np.array([0, 1, 0, 0, 0])
val_list, lbls = sampling_ops.stratified_sample(
val_input_batch,
lbl_input_batch,
probs,
batch_size,
init_probs=[0, 1, 0, 0, 0])
# Check output shapes.
self.assertTrue(isinstance(val_list, list))
self.assertEqual(len(val_list), len(val_input_batch))
self.assertTrue(isinstance(lbls, ops.Tensor))
with self.cached_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
out = sess.run(val_list + [lbls])
coord.request_stop()
coord.join(threads)
# Check output shapes.
self.assertEqual(len(out), len(val_input_batch) + 1)
def normalBehaviorHelper(self, sampler):
# Set up graph.
random_seed.set_random_seed(1234)
lbl1 = 0
lbl2 = 3
# This cond allows the necessary class queues to be populated.
label = control_flow_ops.cond(
math_ops.greater(.5, random_ops.random_uniform([])),
lambda: constant_op.constant(lbl1), lambda: constant_op.constant(lbl2))
val = [np.array([1, 4]) * label]
probs = np.array([.8, 0, 0, .2, 0])
batch_size = 16
data_batch, labels = sampler(val, label, probs, batch_size)
# Run session and keep track of how frequently the labels and values appear.
data_l = []
label_l = []
with self.cached_session() as sess:
# Need to initialize variables that keep running total of classes seen.
variables.global_variables_initializer().run()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
for _ in range(20):
[data], lbls = sess.run([data_batch, labels])
data_l.append(data)
label_l.append(lbls)
coord.request_stop()
coord.join(threads)
# First check that the data matches the labels.
for lbl, data in zip(label_l, data_l):
for i in range(batch_size):
self.assertListEqual(list(np.array([1, 4]) * lbl[i]), list(data[i, :]))
# Check that the labels are approximately correct.
expected_label = probs[0] * lbl1 + probs[3] * lbl2
lbl_list = range(len(probs))
lbl_std_dev = np.sqrt(np.sum((np.square(lbl_list - expected_label))))
lbl_std_dev_of_mean = lbl_std_dev / np.sqrt(len(label_l)) # CLT
actual_lbl = np.mean(label_l)
# Tolerance is 3 standard deviations of the mean. According to the central
# limit theorem, this should cover 99.7% of cases. Note that since the seed
# is fixed, for a given implementation, this test will pass or fail 100% of
# the time. This use of assertNear is to cover cases where someone changes
# an implementation detail, which would cause the random behavior to differ.
self.assertNear(actual_lbl, expected_label, 3 * lbl_std_dev_of_mean)
def testRejectionNormalBehavior(self):
initial_p = [.7, 0, 0, .3, 0]
def curried_sampler(val, lbls, probs, batch, enqueue_many=False):
return sampling_ops.stratified_sample(
val,
lbls,
probs,
batch,
init_probs=initial_p,
enqueue_many=enqueue_many)
self.normalBehaviorHelper(curried_sampler)
def testRejectionNormalBehaviorWithOnlineInitPEstimate(self):
def curried_sampler(val, lbls, probs, batch, enqueue_many=False):
return sampling_ops.stratified_sample(
val, lbls, probs, batch, init_probs=None, enqueue_many=enqueue_many)
self.normalBehaviorHelper(curried_sampler)
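# Illustrative sketch (not part of the original test file) of the API the
# helpers above exercise: stratified_sample rejection-samples enqueued
# (tensors, label) examples so the emitted batch follows the target class
# probabilities. A positional call mirroring the tests looks like:
#   data_batch, labels = sampling_ops.stratified_sample(
#       [data_tensor], label_tensor, [.8, 0, 0, .2, 0], batch_size,
#       init_probs=None)  # init_probs=None -> class frequencies estimated online
# As in normalBehaviorHelper, queue runners must be started before the returned
# batch tensors can be evaluated.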
class RejectionSampleTest(test.TestCase):
def testGraphConstructionFailures(self):
accept_prob_fn = lambda _: constant_op.constant(1.0)
batch_size = 32
# Data must have batch dimension if `enqueue_many` is `True`.
with self.assertRaises(ValueError):
sampling_ops.rejection_sample(
[array_ops.zeros([])], accept_prob_fn, batch_size, enqueue_many=True)
# Batch dimensions should be equal if `enqueue_many` is `True`.
with self.assertRaises(ValueError):
sampling_ops.rejection_sample(
[array_ops.zeros([5, 1]), array_ops.zeros([4, 1])],
accept_prob_fn,
batch_size,
enqueue_many=True)
def testRuntimeFailures(self):
prob_ph = array_ops.placeholder(dtypes.float32, [])
accept_prob_fn = lambda _: prob_ph
batch_size = 32
# Set up graph.
random_seed.set_random_seed(1234)
sampling_ops.rejection_sample(
[array_ops.zeros([])],
accept_prob_fn,
batch_size,
runtime_checks=True,
name='rejection_sample')
prob_tensor = ops.get_default_graph().get_tensor_by_name(
'rejection_sample/prob_with_checks:0')
# Run session that should fail.
with self.cached_session() as sess:
for illegal_prob in [-0.1, 1.1]:
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(prob_tensor, feed_dict={prob_ph: illegal_prob})
def testNormalBehavior(self):
tensor_list = [
control_flow_ops.cond(
math_ops.greater(.5, random_ops.random_uniform([])),
lambda: constant_op.constant(1.0),
lambda: constant_op.constant(2.0))
]
accept_prob_fn = lambda x: x[0] - 1.0
batch_size = 10
# Set up graph.
sample = sampling_ops.rejection_sample(tensor_list, accept_prob_fn,
batch_size)
with self.cached_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
for _ in range(5):
sample_np = sess.run(sample)[0]
self.assertListEqual([2.0] * batch_size, list(sample_np))
coord.request_stop()
coord.join(threads)
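# Illustrative note (not part of the original test file): rejection_sample keeps
# each candidate with probability accept_prob_fn(tensor_list). In
# testNormalBehavior above the acceptance probability is x[0] - 1.0, i.e. 0.0
# for the 1.0 branch and 1.0 for the 2.0 branch, which is why every returned
# batch consists solely of 2.0 values.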
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/contrib/training/python/training/sampling_ops_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resampling methods for batches of tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import moving_averages
def _repeat_range(counts, name=None):
"""Repeat integers given by range(len(counts)) each the given number of times.
Example behavior:
[0, 1, 2, 3] -> [1, 2, 2, 3, 3, 3]
Args:
counts: 1D tensor with dtype=int32.
name: optional name for operation.
Returns:
1D tensor with dtype=int32 and dynamic length giving the repeated integers.
"""
with ops.name_scope(name, 'repeat_range', [counts]) as scope:
counts = ops.convert_to_tensor(counts, name='counts')
def cond(unused_output, i):
return i < size
def body(output, i):
value = array_ops.fill(counts[i:i+1], i)
return (output.write(i, value), i + 1)
size = array_ops.shape(counts)[0]
init_output_array = tensor_array_ops.TensorArray(
dtype=dtypes.int32, size=size, infer_shape=False)
output_array, num_writes = control_flow_ops.while_loop(
cond, body, loop_vars=[init_output_array, 0])
return control_flow_ops.cond(
num_writes > 0,
output_array.concat,
lambda: array_ops.zeros(shape=[0], dtype=dtypes.int32),
name=scope)
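# Illustrative note (not part of the original module): in eager/NumPy terms,
# _repeat_range(counts) is equivalent to
#   np.repeat(np.arange(len(counts)), counts)
# e.g. counts = [0, 1, 2, 3] gives [1, 2, 2, 3, 3, 3], matching the docstring
# example. The while_loop/TensorArray form above builds the same result inside
# the graph, where the output length is only known at runtime.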
def resample_at_rate(inputs, rates, scope=None, seed=None, back_prop=False):
"""Given `inputs` tensors, stochastically resamples each at a given rate.
For example, if the inputs are `[[a1, a2], [b1, b2]]` and the rates
tensor contains `[3, 1]`, then the return value may look like `[[a1,
a2, a1, a1], [b1, b2, b1, b1]]`. However, many other outputs are
possible, since this is stochastic -- averaged over many repeated
calls, each set of inputs should appear in the output `rate` times
the number of invocations.
Args:
inputs: A list of tensors, each of which has a shape of `[batch_size, ...]`
rates: A tensor of shape `[batch_size]` containing the resampling rates
for each input.
scope: Scope for the op.
seed: Random seed to use.
back_prop: Whether to allow back-propagation through this op.
Returns:
Selections from the input tensors.
"""
with ops.name_scope(scope, default_name='resample_at_rate',
values=list(inputs) + [rates]):
rates = ops.convert_to_tensor(rates, name='rates')
sample_counts = math_ops.cast(
random_ops.random_poisson(rates, (), rates.dtype, seed=seed),
dtypes.int32)
sample_indices = _repeat_range(sample_counts)
if not back_prop:
sample_indices = array_ops.stop_gradient(sample_indices)
return [array_ops.gather(x, sample_indices) for x in inputs]
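# Example usage (an illustrative sketch assuming the public tf.* aliases of the
# ops imported above; not exercised by this module):
#   inputs = [tf.constant([[1.0, 2.0], [3.0, 4.0]])]  # batch_size == 2
#   rates = tf.constant([3.0, 1.0])
#   outputs = resample_at_rate(inputs, rates)
# Each input row is emitted Poisson(rate) times, so averaged over many calls the
# first row appears roughly three times as often as the second, as described in
# the docstring.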
def weighted_resample(inputs, weights, overall_rate, scope=None,
mean_decay=0.999, seed=None):
"""Performs an approximate weighted resampling of `inputs`.
This method chooses elements from `inputs` where each item's rate of
selection is proportional to its value in `weights`, and the average
rate of selection across all inputs (and many invocations!) is
`overall_rate`.
Args:
inputs: A list of tensors whose first dimension is `batch_size`.
weights: A `[batch_size]`-shaped tensor with each batch member's weight.
overall_rate: Desired overall rate of resampling.
scope: Scope to use for the op.
mean_decay: How quickly to decay the running estimate of the mean weight.
seed: Random seed.
Returns:
A list of tensors exactly like `inputs`, but with an unknown (and
possibly zero) first dimension.
A tensor containing the effective resampling rate used for each output.
"""
# Algorithm: Just compute rates as weights/mean_weight *
# overall_rate. This way the average weight corresponds to the
# overall rate, and a weight twice the average has twice the rate,
# etc.
with ops.name_scope(scope, 'weighted_resample', inputs) as opscope:
# First: Maintain a running estimated mean weight, with zero debiasing
# enabled (by default) to avoid throwing the average off.
with variable_scope.variable_scope(scope, 'estimate_mean', inputs):
estimated_mean = variable_scope.get_local_variable(
'estimated_mean',
initializer=math_ops.cast(0, weights.dtype),
dtype=weights.dtype)
batch_mean = math_ops.reduce_mean(weights)
mean = moving_averages.assign_moving_average(
estimated_mean, batch_mean, mean_decay)
# Then, normalize the weights into rates using the mean weight and
# overall target rate:
rates = weights * overall_rate / mean
results = resample_at_rate([rates] + inputs, rates,
scope=opscope, seed=seed, back_prop=False)
return (results[1:], results[0])
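# Example usage (an illustrative sketch with hypothetical tensors, not part of
# the original module):
#   batch = [features, labels]                 # each shaped [batch_size, ...]
#   weights = per_example_weight               # shaped [batch_size]
#   resampled, rates = weighted_resample(batch, weights, overall_rate=1.0)
# With overall_rate=1.0 the resampled batch has roughly the original batch size
# on average, but rows with above-average weight are duplicated more often and
# rows with below-average weight are frequently dropped.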
| tensorflow-master | tensorflow/contrib/training/python/training/resample.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for sgdr learning rate decay."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from sgdr_learning_rate_decay import sgdr_decay
from tensorflow.python.platform import googletest
from tensorflow.python.framework import test_util
from tensorflow.python.framework import dtypes
from tensorflow import placeholder
class SGDRDecayTest(test_util.TensorFlowTestCase):
"""Unit tests for SGDR learning rate decay."""
def get_original_values(self, lr, t_e, mult_factor, iter_per_epoch, epochs):
"""Get an array with learning rate values from the consecutive steps using
the original implementation
(https://github.com/loshchil/SGDR/blob/master/SGDR_WRNs.py)."""
t0 = math.pi / 2.0
tt = 0
te_next = t_e
lr_values = []
sh_lr = lr
for epoch in range(epochs):
for _ in range(iter_per_epoch):
# In the original approach, the training function is executed here
lr_values.append(sh_lr)
dt = 2.0 * math.pi / float(2.0 * t_e)
tt = tt + float(dt) / iter_per_epoch
if tt >= math.pi:
tt = tt - math.pi
cur_t = t0 + tt
new_lr = lr * (1.0 + math.sin(cur_t)) / 2.0 # lr_min = 0, lr_max = lr
sh_lr = new_lr
if (epoch + 1) == te_next: # time to restart
sh_lr = lr
tt = 0 # by setting to 0 we set lr to lr_max, see above
t_e = t_e * mult_factor # change the period of restarts
te_next = te_next + t_e # note the next restart's epoch
return lr_values
def get_sgdr_values(self, lr, initial_period_steps, t_mul, iters):
"""Get an array with learning rate values from the consecutive steps
using current tensorflow implementation."""
with self.cached_session():
step = placeholder(dtypes.int32)
decay = sgdr_decay(lr, step, initial_period_steps, t_mul)
lr_values = []
for i in range(iters):
lr_values.append(decay.eval(feed_dict={step: i}))
return lr_values
def testCompareToOriginal(self):
"""Compare values generated by tensorflow implementation to the values
generated by the original implementation
(https://github.com/loshchil/SGDR/blob/master/SGDR_WRNs.py)."""
with self.cached_session():
lr = 10.0
init_steps = 2
t_mul = 3
iters = 10
epochs = 50
org_lr = self.get_original_values(lr, init_steps, t_mul, iters, epochs)
sgdr_lr = self.get_sgdr_values(lr, init_steps*iters, t_mul, iters*epochs)
for org, sgdr in zip(org_lr, sgdr_lr):
self.assertAllClose(org, sgdr)
def testMDecay(self):
"""Test m_mul argument. Check values for learning rate at the beginning
of the first, second, third and fourth period. """
with self.cached_session():
step = placeholder(dtypes.int32)
lr = 0.1
t_e = 10
t_mul = 3
m_mul = 0.9
decay = sgdr_decay(lr, step, t_e, t_mul, m_mul)
test_step = 0
self.assertAllClose(decay.eval(feed_dict={step: test_step}),
lr)
test_step = t_e
self.assertAllClose(decay.eval(feed_dict={step: test_step}),
lr * m_mul)
test_step = t_e + t_e*t_mul
self.assertAllClose(decay.eval(feed_dict={step: test_step}),
lr * m_mul**2)
test_step = t_e + t_e*t_mul + t_e * (t_mul**2)
self.assertAllClose(decay.eval(feed_dict={step: test_step}),
lr * (m_mul**3))
def testCos(self):
"""Check learning rate values at the beginning, in the middle
and at the end of the period."""
with self.cached_session():
step = placeholder(dtypes.int32)
lr = 0.2
t_e = 1000
t_mul = 1
decay = sgdr_decay(lr, step, t_e, t_mul)
test_step = 0
self.assertAllClose(decay.eval(feed_dict={step: test_step}), lr)
test_step = t_e//2
self.assertAllClose(decay.eval(feed_dict={step: test_step}), lr/2)
test_step = t_e
self.assertAllClose(decay.eval(feed_dict={step: test_step}), lr)
test_step = t_e*3//2
self.assertAllClose(decay.eval(feed_dict={step: test_step}), lr/2)
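# Illustrative note (not part of the original test file): within one period of
# length t_e steps, sgdr_decay follows the SGDR cosine schedule with lr_min=0,
#   decayed_lr(t) = lr * 0.5 * (1 + cos(pi * t / t_e)),
# which is why the assertions above expect lr at t=0, lr/2 at t=t_e//2, and a
# warm restart back to lr at t=t_e. Each restart multiplies the period length by
# t_mul and (per testMDecay) the restart learning rate by m_mul.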
if __name__ == "__main__":
googletest.main()
| tensorflow-master | tensorflow/contrib/training/python/training/sgdr_learning_rate_decay_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for hparam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.training.python.training import hparam
from tensorflow.python.platform import test
class HParamsTest(test.TestCase):
def testEmpty(self):
hparams = hparam.HParams()
self.assertDictEqual({}, hparams.values())
hparams.parse('')
self.assertDictEqual({}, hparams.values())
with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'):
hparams.parse('xyz=123')
def testContains(self):
hparams = hparam.HParams(foo=1)
self.assertTrue('foo' in hparams)
self.assertFalse('bar' in hparams)
def testSomeValues(self):
hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6', d='/a/b=c/d')
self.assertDictEqual({
'aaa': 1,
'b': 2.0,
'c_c': 'relu6',
'd': '/a/b=c/d'
}, hparams.values())
expected_str = ('HParams([(\'aaa\', 1), (\'b\', 2.0), (\'c_c\', \'relu6\'),'
' (\'d\', \'/a/b=c/d\')])')
self.assertEqual(expected_str, repr(hparams))
self.assertEqual(expected_str, repr(hparams))
self.assertEqual(1, hparams.aaa)
self.assertEqual(2.0, hparams.b)
self.assertEqual('relu6', hparams.c_c)
self.assertEqual('/a/b=c/d', hparams.d)
hparams.parse('aaa=12')
self.assertDictEqual({
'aaa': 12,
'b': 2.0,
'c_c': 'relu6',
'd': '/a/b=c/d'
}, hparams.values())
self.assertEqual(12, hparams.aaa)
self.assertEqual(2.0, hparams.b)
self.assertEqual('relu6', hparams.c_c)
self.assertEqual('/a/b=c/d', hparams.d)
hparams.parse('c_c=relu4, b=-2.0e10')
self.assertDictEqual({
'aaa': 12,
'b': -2.0e10,
'c_c': 'relu4',
'd': '/a/b=c/d'
}, hparams.values())
self.assertEqual(12, hparams.aaa)
self.assertEqual(-2.0e10, hparams.b)
self.assertEqual('relu4', hparams.c_c)
self.assertEqual('/a/b=c/d', hparams.d)
hparams.parse('c_c=,b=0,')
self.assertDictEqual({
'aaa': 12,
'b': 0,
'c_c': '',
'd': '/a/b=c/d'
}, hparams.values())
self.assertEqual(12, hparams.aaa)
self.assertEqual(0.0, hparams.b)
self.assertEqual('', hparams.c_c)
self.assertEqual('/a/b=c/d', hparams.d)
hparams.parse('c_c=2.3",b=+2,')
self.assertEqual(2.0, hparams.b)
self.assertEqual('2.3"', hparams.c_c)
hparams.parse('d=/a/b/c/d,aaa=11,')
self.assertEqual(11, hparams.aaa)
self.assertEqual(2.0, hparams.b)
self.assertEqual('2.3"', hparams.c_c)
self.assertEqual('/a/b/c/d', hparams.d)
hparams.parse('b=1.5,d=/a=b/c/d,aaa=10,')
self.assertEqual(10, hparams.aaa)
self.assertEqual(1.5, hparams.b)
self.assertEqual('2.3"', hparams.c_c)
self.assertEqual('/a=b/c/d', hparams.d)
with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'):
hparams.parse('x=123')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=poipoi')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=1.0')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=12x')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=relu')
with self.assertRaisesRegexp(ValueError, 'Must not pass a list'):
hparams.parse('aaa=[123]')
self.assertEqual(10, hparams.aaa)
self.assertEqual(1.5, hparams.b)
self.assertEqual('2.3"', hparams.c_c)
self.assertEqual('/a=b/c/d', hparams.d)
# Exports to proto.
hparam_def = hparams.to_proto()
# Imports from proto.
hparams2 = hparam.HParams(hparam_def=hparam_def)
# Verifies that all hparams are restored.
self.assertEqual(10, hparams2.aaa)
self.assertEqual(1.5, hparams2.b)
self.assertEqual('2.3"', hparams2.c_c)
self.assertEqual('/a=b/c/d', hparams2.d)
def testWithPeriodInVariableName(self):
hparams = hparam.HParams()
hparams.add_hparam(name='a.b', value=0.0)
hparams.parse('a.b=1.0')
self.assertEqual(1.0, getattr(hparams, 'a.b'))
hparams.add_hparam(name='c.d', value=0.0)
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('c.d=abc')
hparams.add_hparam(name='e.f', value='')
hparams.parse('e.f=abc')
self.assertEqual('abc', getattr(hparams, 'e.f'))
hparams.add_hparam(name='d..', value=0.0)
hparams.parse('d..=10.0')
self.assertEqual(10.0, getattr(hparams, 'd..'))
def testSetFromMap(self):
hparams = hparam.HParams(a=1, b=2.0, c='tanh')
hparams.override_from_dict({'a': -2, 'c': 'identity'})
self.assertDictEqual({'a': -2, 'c': 'identity', 'b': 2.0}, hparams.values())
hparams = hparam.HParams(x=1, b=2.0, d=[0.5])
hparams.override_from_dict({'d': [0.1, 0.2, 0.3]})
self.assertDictEqual({
'd': [0.1, 0.2, 0.3],
'x': 1,
'b': 2.0
}, hparams.values())
def testBoolParsing(self):
for value in 'true', 'false', 'True', 'False', '1', '0':
for initial in False, True:
hparams = hparam.HParams(use_gpu=initial)
hparams.parse('use_gpu=' + value)
self.assertEqual(hparams.use_gpu, value in ['True', 'true', '1'])
# Exports to proto.
hparam_def = hparams.to_proto()
# Imports from proto.
hparams2 = hparam.HParams(hparam_def=hparam_def)
self.assertEqual(hparams.use_gpu, hparams2.use_gpu)
# Check that hparams2.use_gpu is a bool rather than an int.
# The assertEqual() call above won't catch this, since
# (0 == False) and (1 == True) in Python.
self.assertEqual(bool, type(hparams2.use_gpu))
def testBoolParsingFail(self):
hparams = hparam.HParams(use_gpu=True)
with self.assertRaisesRegexp(ValueError, r'Could not parse.*use_gpu'):
hparams.parse('use_gpu=yep')
def testLists(self):
hparams = hparam.HParams(aaa=[1], b=[2.0, 3.0], c_c=['relu6'])
self.assertDictEqual({
'aaa': [1],
'b': [2.0, 3.0],
'c_c': ['relu6']
}, hparams.values())
self.assertEqual([1], hparams.aaa)
self.assertEqual([2.0, 3.0], hparams.b)
self.assertEqual(['relu6'], hparams.c_c)
hparams.parse('aaa=[12]')
self.assertEqual([12], hparams.aaa)
hparams.parse('aaa=[12,34,56]')
self.assertEqual([12, 34, 56], hparams.aaa)
hparams.parse('c_c=[relu4,relu12],b=[1.0]')
self.assertEqual(['relu4', 'relu12'], hparams.c_c)
self.assertEqual([1.0], hparams.b)
hparams.parse('c_c=[],aaa=[-34]')
self.assertEqual([-34], hparams.aaa)
self.assertEqual([], hparams.c_c)
hparams.parse('c_c=[_12,3\'4"],aaa=[+3]')
self.assertEqual([3], hparams.aaa)
self.assertEqual(['_12', '3\'4"'], hparams.c_c)
with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'):
hparams.parse('x=[123]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=[poipoi]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=[1.0]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=[12x]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=[relu]')
with self.assertRaisesRegexp(ValueError, 'Must pass a list'):
hparams.parse('aaa=123')
# Exports to proto.
hparam_def = hparams.to_proto()
# Imports from proto.
hparams2 = hparam.HParams(hparam_def=hparam_def)
# Verifies that all hparams are restored.
self.assertEqual([3], hparams2.aaa)
self.assertEqual([1.0], hparams2.b)
self.assertEqual(['_12', '3\'4"'], hparams2.c_c)
def testStr(self):
hparam1 = hparam.HParams(a=1, b=[2.0, 3.0], c='relu6')
hparam1_str = str(hparam1)
# Create the signature
hparam2 = hparam.HParams()
hparam2.add_hparam('a', 4)
hparam2.add_hparam('b', [5.0, 6.0])
hparam2.add_hparam('c', 'relu10')
# Load from string
hparam2.parse(hparam1_str)
# Verifies all hparams are restored
self.assertEqual(hparam2.a, hparam1.a)
self.assertEqual(hparam2.b, hparam1.b)
self.assertEqual(hparam2.c, hparam1.c)
def testParseValuesWithIndexAssigment1(self):
"""Assignment to an index position."""
parse_dict = hparam.parse_values('arr[1]=10', {'arr': int})
self.assertEqual(len(parse_dict), 1)
self.assertTrue(isinstance(parse_dict['arr'], dict))
self.assertDictEqual(parse_dict['arr'], {1: 10})
def testParseValuesWithIndexAssigment1_IgnoreUnknown(self):
"""Assignment to an index position."""
parse_dict = hparam.parse_values(
'arr[1]=10,b=5', {'arr': int}, ignore_unknown=True)
self.assertEqual(len(parse_dict), 1)
self.assertTrue(isinstance(parse_dict['arr'], dict))
self.assertDictEqual(parse_dict['arr'], {1: 10})
def testParseValuesWithIndexAssigment2(self):
"""Assignment to multiple index positions."""
parse_dict = hparam.parse_values('arr[0]=10,arr[5]=20', {'arr': int})
self.assertEqual(len(parse_dict), 1)
self.assertTrue(isinstance(parse_dict['arr'], dict))
self.assertDictEqual(parse_dict['arr'], {0: 10, 5: 20})
def testParseValuesWithIndexAssigment2_IgnoreUnknown(self):
"""Assignment to multiple index positions."""
parse_dict = hparam.parse_values(
'arr[0]=10,arr[5]=20,foo=bar', {'arr': int}, ignore_unknown=True)
self.assertEqual(len(parse_dict), 1)
self.assertTrue(isinstance(parse_dict['arr'], dict))
self.assertDictEqual(parse_dict['arr'], {0: 10, 5: 20})
def testParseValuesWithIndexAssigment3(self):
"""Assignment to index positions in multiple names."""
parse_dict = hparam.parse_values('arr[0]=10,arr[1]=20,L[5]=100,L[10]=200', {
'arr': int,
'L': int
})
self.assertEqual(len(parse_dict), 2)
self.assertTrue(isinstance(parse_dict['arr'], dict))
self.assertDictEqual(parse_dict['arr'], {0: 10, 1: 20})
self.assertTrue(isinstance(parse_dict['L'], dict))
self.assertDictEqual(parse_dict['L'], {5: 100, 10: 200})
def testParseValuesWithIndexAssigment3_IgnoreUnknown(self):
"""Assignment to index positions in multiple names."""
parse_dict = hparam.parse_values(
'arr[0]=10,C=5,arr[1]=20,B[0]=kkk,L[5]=100,L[10]=200', {
'arr': int,
'L': int
},
ignore_unknown=True)
self.assertEqual(len(parse_dict), 2)
self.assertTrue(isinstance(parse_dict['arr'], dict))
self.assertDictEqual(parse_dict['arr'], {0: 10, 1: 20})
self.assertTrue(isinstance(parse_dict['L'], dict))
self.assertDictEqual(parse_dict['L'], {5: 100, 10: 200})
def testParseValuesWithIndexAssigment4(self):
"""Assignment of index positions and scalars."""
parse_dict = hparam.parse_values('x=10,arr[1]=20,y=30', {
'x': int,
'y': int,
'arr': int
})
self.assertEqual(len(parse_dict), 3)
self.assertTrue(isinstance(parse_dict['arr'], dict))
self.assertDictEqual(parse_dict['arr'], {1: 20})
self.assertEqual(parse_dict['x'], 10)
self.assertEqual(parse_dict['y'], 30)
def testParseValuesWithIndexAssigment4_IgnoreUnknown(self):
"""Assignment of index positions and scalars."""
parse_dict = hparam.parse_values(
'x=10,foo[0]=bar,arr[1]=20,zzz=78,y=30', {
'x': int,
'y': int,
'arr': int
},
ignore_unknown=True)
self.assertEqual(len(parse_dict), 3)
self.assertTrue(isinstance(parse_dict['arr'], dict))
self.assertDictEqual(parse_dict['arr'], {1: 20})
self.assertEqual(parse_dict['x'], 10)
self.assertEqual(parse_dict['y'], 30)
def testParseValuesWithIndexAssigment5(self):
"""Different variable types."""
parse_dict = hparam.parse_values('a[0]=5,b[1]=true,c[2]=abc,d[3]=3.14', {
'a': int,
'b': bool,
'c': str,
'd': float
})
self.assertEqual(set(parse_dict.keys()), {'a', 'b', 'c', 'd'})
self.assertTrue(isinstance(parse_dict['a'], dict))
self.assertDictEqual(parse_dict['a'], {0: 5})
self.assertTrue(isinstance(parse_dict['b'], dict))
self.assertDictEqual(parse_dict['b'], {1: True})
self.assertTrue(isinstance(parse_dict['c'], dict))
self.assertDictEqual(parse_dict['c'], {2: 'abc'})
self.assertTrue(isinstance(parse_dict['d'], dict))
self.assertDictEqual(parse_dict['d'], {3: 3.14})
def testParseValuesWithIndexAssigment5_IgnoreUnknown(self):
"""Different variable types."""
parse_dict = hparam.parse_values(
'a[0]=5,cc=4,b[1]=true,c[2]=abc,mm=2,d[3]=3.14', {
'a': int,
'b': bool,
'c': str,
'd': float
},
ignore_unknown=True)
self.assertEqual(set(parse_dict.keys()), {'a', 'b', 'c', 'd'})
self.assertTrue(isinstance(parse_dict['a'], dict))
self.assertDictEqual(parse_dict['a'], {0: 5})
self.assertTrue(isinstance(parse_dict['b'], dict))
self.assertDictEqual(parse_dict['b'], {1: True})
self.assertTrue(isinstance(parse_dict['c'], dict))
self.assertDictEqual(parse_dict['c'], {2: 'abc'})
self.assertTrue(isinstance(parse_dict['d'], dict))
self.assertDictEqual(parse_dict['d'], {3: 3.14})
def testParseValuesWithBadIndexAssigment1(self):
"""Reject assignment of list to variable type."""
with self.assertRaisesRegexp(ValueError,
r'Assignment of a list to a list index.'):
hparam.parse_values('arr[1]=[1,2,3]', {'arr': int})
def testParseValuesWithBadIndexAssigment1_IgnoreUnknown(self):
"""Reject assignment of list to variable type."""
with self.assertRaisesRegexp(ValueError,
r'Assignment of a list to a list index.'):
hparam.parse_values(
'arr[1]=[1,2,3],c=8', {'arr': int}, ignore_unknown=True)
def testParseValuesWithBadIndexAssigment2(self):
"""Reject if type missing."""
with self.assertRaisesRegexp(ValueError,
r'Unknown hyperparameter type for arr'):
hparam.parse_values('arr[1]=5', {})
def testParseValuesWithBadIndexAssigment2_IgnoreUnknown(self):
"""Ignore missing type."""
hparam.parse_values('arr[1]=5', {}, ignore_unknown=True)
def testParseValuesWithBadIndexAssigment3(self):
"""Reject type of the form name[index]."""
with self.assertRaisesRegexp(ValueError,
'Unknown hyperparameter type for arr'):
hparam.parse_values('arr[1]=1', {'arr[1]': int})
def testParseValuesWithBadIndexAssigment3_IgnoreUnknown(self):
"""Ignore type of the form name[index]."""
hparam.parse_values('arr[1]=1', {'arr[1]': int}, ignore_unknown=True)
def testWithReusedVariables(self):
with self.assertRaisesRegexp(ValueError,
'Multiple assignments to variable \'x\''):
hparam.parse_values('x=1,x=1', {'x': int})
with self.assertRaisesRegexp(ValueError,
'Multiple assignments to variable \'arr\''):
hparam.parse_values('arr=[100,200],arr[0]=10', {'arr': int})
with self.assertRaisesRegexp(
ValueError, r'Multiple assignments to variable \'arr\[0\]\''):
hparam.parse_values('arr[0]=10,arr[0]=20', {'arr': int})
with self.assertRaisesRegexp(ValueError,
'Multiple assignments to variable \'arr\''):
hparam.parse_values('arr[0]=10,arr=[100]', {'arr': int})
def testJson(self):
hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6', d=True)
self.assertDictEqual({
'aaa': 1,
'b': 2.0,
'c_c': 'relu6',
'd': True
}, hparams.values())
self.assertEqual(1, hparams.aaa)
self.assertEqual(2.0, hparams.b)
self.assertEqual('relu6', hparams.c_c)
hparams.parse_json('{"aaa": 12, "b": 3.0, "c_c": "relu4", "d": false}')
self.assertDictEqual({
'aaa': 12,
'b': 3.0,
'c_c': 'relu4',
'd': False
}, hparams.values())
self.assertEqual(12, hparams.aaa)
self.assertEqual(3.0, hparams.b)
self.assertEqual('relu4', hparams.c_c)
json_str = hparams.to_json()
hparams2 = hparam.HParams(aaa=10, b=20.0, c_c='hello', d=False)
hparams2.parse_json(json_str)
self.assertEqual(12, hparams2.aaa)
self.assertEqual(3.0, hparams2.b)
self.assertEqual('relu4', hparams2.c_c)
self.assertEqual(False, hparams2.d)
hparams3 = hparam.HParams(aaa=123)
self.assertEqual('{"aaa": 123}', hparams3.to_json())
self.assertEqual('{\n "aaa": 123\n}', hparams3.to_json(indent=2))
self.assertEqual('{"aaa"=123}', hparams3.to_json(separators=(';', '=')))
hparams4 = hparam.HParams(aaa=123, b='hello', c_c=False)
self.assertEqual('{"aaa": 123, "b": "hello", "c_c": false}',
hparams4.to_json(sort_keys=True))
def testSetHParam(self):
hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6', d=True)
self.assertDictEqual({
'aaa': 1,
'b': 2.0,
'c_c': 'relu6',
'd': True
}, hparams.values())
self.assertEqual(1, hparams.aaa)
self.assertEqual(2.0, hparams.b)
self.assertEqual('relu6', hparams.c_c)
hparams.set_hparam('aaa', 12)
hparams.set_hparam('b', 3.0)
hparams.set_hparam('c_c', 'relu4')
hparams.set_hparam('d', False)
self.assertDictEqual({
'aaa': 12,
'b': 3.0,
'c_c': 'relu4',
'd': False
}, hparams.values())
self.assertEqual(12, hparams.aaa)
self.assertEqual(3.0, hparams.b)
self.assertEqual('relu4', hparams.c_c)
def testSetHParamListNonListMismatch(self):
hparams = hparam.HParams(a=1, b=[2.0, 3.0])
with self.assertRaisesRegexp(ValueError, r'Must not pass a list'):
hparams.set_hparam('a', [1.0])
with self.assertRaisesRegexp(ValueError, r'Must pass a list'):
hparams.set_hparam('b', 1.0)
def testSetHParamTypeMismatch(self):
hparams = hparam.HParams(
int_=1, str_='str', bool_=True, float_=1.1, list_int=[1, 2], none=None)
with self.assertRaises(ValueError):
hparams.set_hparam('str_', 2.2)
with self.assertRaises(ValueError):
hparams.set_hparam('int_', False)
with self.assertRaises(ValueError):
hparams.set_hparam('bool_', 1)
# Unfortunately there is no automagic conversion of bool-like strings to
# bool.
with self.assertRaises(ValueError):
hparams.set_hparam('bool_', 'true')
with self.assertRaises(ValueError):
hparams.set_hparam('bool_', 'True')
with self.assertRaises(ValueError):
hparams.set_hparam('bool_', 'false')
with self.assertRaises(ValueError):
hparams.set_hparam('bool_', 'False')
with self.assertRaises(ValueError):
hparams.set_hparam('bool_', '0')
with self.assertRaises(ValueError):
hparams.set_hparam('bool_', '1')
with self.assertRaises(ValueError):
hparams.set_hparam('int_', 2.2)
with self.assertRaises(ValueError):
hparams.set_hparam('list_int', [2, 3.3])
with self.assertRaises(ValueError):
hparams.set_hparam('int_', '2')
# Casting int to float is OK
hparams.set_hparam('float_', 1)
# Getting stuck with NoneType :(
hparams.set_hparam('none', '1')
self.assertEqual('1', hparams.none)
def testSetHParamExactTypeMatch(self):
class DummyContext(object):
def __init__(self, a, b=0):
self.a = a
self.b = b
hparams = hparam.HParams(x=DummyContext(a=100, b=100))
# Verify x is assigned directly, without casting.
hparams.set_hparam('x', DummyContext(a=100, b=100))
self.assertEqual(hparams.x.a, 100)
self.assertEqual(hparams.x.b, 100)
def testNonProtoFails(self):
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def=1)
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def=1.0)
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def='hello')
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def=[1, 2, 3])
def testGet(self):
hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6', d=True, e=[5.0, 6.0])
# Existing parameters with default=None.
self.assertEqual(1, hparams.get('aaa'))
self.assertEqual(2.0, hparams.get('b'))
self.assertEqual('relu6', hparams.get('c_c'))
self.assertEqual(True, hparams.get('d'))
self.assertEqual([5.0, 6.0], hparams.get('e', None))
# Existing parameters with compatible defaults.
self.assertEqual(1, hparams.get('aaa', 2))
self.assertEqual(2.0, hparams.get('b', 3.0))
self.assertEqual(2.0, hparams.get('b', 3))
self.assertEqual('relu6', hparams.get('c_c', 'default'))
self.assertEqual(True, hparams.get('d', True))
self.assertEqual([5.0, 6.0], hparams.get('e', [1.0, 2.0, 3.0]))
self.assertEqual([5.0, 6.0], hparams.get('e', [1, 2, 3]))
# Existing parameters with incompatible defaults.
with self.assertRaises(ValueError):
hparams.get('aaa', 2.0)
with self.assertRaises(ValueError):
hparams.get('b', False)
with self.assertRaises(ValueError):
hparams.get('c_c', [1, 2, 3])
with self.assertRaises(ValueError):
hparams.get('d', 'relu')
with self.assertRaises(ValueError):
hparams.get('e', 123.0)
with self.assertRaises(ValueError):
hparams.get('e', ['a', 'b', 'c'])
# Nonexistent parameters.
self.assertEqual(None, hparams.get('unknown'))
self.assertEqual(123, hparams.get('unknown', 123))
self.assertEqual([1, 2, 3], hparams.get('unknown', [1, 2, 3]))
def testDel(self):
hparams = hparam.HParams(aaa=1, b=2.0)
with self.assertRaises(ValueError):
hparams.set_hparam('aaa', 'will fail')
with self.assertRaises(ValueError):
hparams.add_hparam('aaa', 'will fail')
hparams.del_hparam('aaa')
hparams.add_hparam('aaa', 'will work')
self.assertEqual('will work', hparams.get('aaa'))
hparams.set_hparam('aaa', 'still works')
self.assertEqual('still works', hparams.get('aaa'))
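# Illustrative summary (not part of the original test file): the parse() grammar
# exercised above is a comma-separated list of assignments, e.g.
#   hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6')
#   hparams.parse('aaa=12,b=-2.0e10,c_c=relu4')
# Values are cast to the type of the existing hparam, lists use brackets
# ('b=[1.0,2.0]'), and indexed assignments such as 'arr[1]=10' are handled by
# hparam.parse_values(), which returns them as an {index: value} dict.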
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/contrib/training/python/training/hparam_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.batch_sequences_with_states."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.contrib.training.python.training import sequence_queueing_state_saver as sqss
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import saver
class BatchSequencesWithStatesTest(test.TestCase):
def setUp(self):
super(BatchSequencesWithStatesTest, self).setUp()
self.value_length = 4
ind1 = np.array([
[0, 0],
[1, 0], [1, 3], [1, 4],
[3, 2], [3, 3]])
val1 = np.array([0, 10, 13, 14, 32, 33])
shape1 = np.array([self.value_length, 6])
sp_tensor1 = sparse_tensor.SparseTensor(
array_ops.constant(ind1, dtypes.int64),
array_ops.constant(val1, dtypes.int64),
array_ops.placeholder_with_default(shape1, shape=[2]))
ind2 = np.array([
[0, 0, 1],
[0, 1, 0],
[0, 1, 2],
[1, 0, 3],
[1, 1, 0],
[1, 1, 1],
[1, 1, 2],
[1, 2, 2]])
val2 = np.array([1, 10, 12, 103, 150, 149, 150, 122])
shape2 = np.array([self.value_length, 3, 4])
sp_tensor2 = sparse_tensor.SparseTensor(
array_ops.constant(ind2, dtypes.int64),
array_ops.constant(val2, dtypes.int64),
array_ops.placeholder_with_default(shape2, shape=[3]))
sp_tensor3 = sparse_tensor.SparseTensor(
array_ops.constant([[1, 9], [2, 2], [2, 10]], dtypes.int64),
array_ops.constant([7, 15, 2], dtypes.int64),
array_ops.constant([5, 12], dtypes.int64)
)
self.sp_tensor3_expected = sparse_tensor.SparseTensorValue(
[[0, 1, 9], [0, 2, 2], [0, 2, 10], [1, 1, 9], [1, 2, 2], [1, 2, 10]],
[7, 15, 2, 7, 15, 2],
[2, 5, 12]
)
self.batch_size = 2
self.key = string_ops.string_join([
"key_", string_ops.as_string(
math_ops.cast(10000 * random_ops.random_uniform(()), dtypes.int32))
])
self.sequences = {
"seq1": np.random.rand(self.value_length, 5),
"seq2": np.random.rand(self.value_length, 4, 2),
"seq3": sp_tensor1,
"seq4": sp_tensor2}
self.context = {
"context1": [3, 4],
"sp_context": sp_tensor3}
self.initial_states = {
"state1": np.random.rand(6, 7),
"state2": np.random.rand(8)
}
def _prefix(self, key_value):
return set(
[s.decode("ascii").split(":")[0].encode("ascii") for s in key_value])
def _testBasics(self, num_unroll, length, pad,
expected_seq1_batch1, expected_seq2_batch1,
expected_seq1_batch2, expected_seq2_batch2,
expected_seq3_batch1, expected_seq3_batch2,
expected_seq4_batch1, expected_seq4_batch2,
key=None, make_keys_unique=False):
with self.cached_session() as sess:
next_batch = sqss.batch_sequences_with_states(
input_key=key if key is not None else self.key,
input_sequences=self.sequences,
input_context=self.context,
input_length=length,
initial_states=self.initial_states,
num_unroll=num_unroll,
batch_size=self.batch_size,
num_threads=3,
# to enforce that we only move on to the next examples after finishing
# all segments of the first ones.
capacity=2,
pad=pad,
make_keys_unique=make_keys_unique,
make_keys_unique_seed=9)
state1 = next_batch.state("state1")
state2 = next_batch.state("state2")
state1_update = next_batch.save_state("state1", state1 + 1)
state2_update = next_batch.save_state("state2", state2 - 1)
# Make sure queue runner with SQSS is added properly to meta graph def.
# Saver requires at least one variable.
v0 = variables.Variable(10.0, name="v0")
ops.add_to_collection("variable_collection", v0)
variables.global_variables_initializer()
save = saver.Saver([v0])
test_dir = os.path.join(test.get_temp_dir(), "sqss_test")
filename = os.path.join(test_dir, "metafile")
meta_graph_def = save.export_meta_graph(filename)
qr_saved = meta_graph_def.collection_def[ops.GraphKeys.QUEUE_RUNNERS]
self.assertTrue(qr_saved.bytes_list.value is not None)
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
# Step 1
(key_value, next_key_value, seq1_value, seq2_value, seq3_value,
seq4_value, context1_value, context2_value, state1_value, state2_value,
length_value, _, _) = sess.run(
(next_batch.key, next_batch.next_key, next_batch.sequences["seq1"],
next_batch.sequences["seq2"], next_batch.sequences["seq3"],
next_batch.sequences["seq4"], next_batch.context["context1"],
next_batch.context["sp_context"], state1, state2, next_batch.length,
state1_update, state2_update))
expected_first_keys = set([b"00000_of_00002"])
expected_second_keys = set([b"00001_of_00002"])
expected_final_keys = set([b"STOP"])
self.assertEqual(expected_first_keys, self._prefix(key_value))
self.assertEqual(expected_second_keys, self._prefix(next_key_value))
self.assertAllEqual(
np.tile(self.context["context1"], (self.batch_size, 1)),
context1_value)
self.assertAllEqual(self.sp_tensor3_expected.indices,
context2_value.indices)
self.assertAllEqual(self.sp_tensor3_expected.values,
context2_value.values)
self.assertAllEqual(self.sp_tensor3_expected.dense_shape,
context2_value.dense_shape)
self.assertAllEqual(expected_seq1_batch1, seq1_value)
self.assertAllEqual(expected_seq2_batch1, seq2_value)
self.assertAllEqual(expected_seq3_batch1.indices, seq3_value.indices)
self.assertAllEqual(expected_seq3_batch1.values, seq3_value.values)
self.assertAllEqual(expected_seq3_batch1.dense_shape,
seq3_value.dense_shape)
self.assertAllEqual(expected_seq4_batch1.indices, seq4_value.indices)
self.assertAllEqual(expected_seq4_batch1.values, seq4_value.values)
self.assertAllEqual(expected_seq4_batch1.dense_shape,
seq4_value.dense_shape)
self.assertAllEqual(
np.tile(self.initial_states["state1"], (self.batch_size, 1, 1)),
state1_value)
self.assertAllEqual(
np.tile(self.initial_states["state2"], (self.batch_size, 1)),
state2_value)
self.assertAllEqual(length_value, [num_unroll, num_unroll])
# Step 2
(key_value, next_key_value, seq1_value, seq2_value, seq3_value,
seq4_value, context1_value, context2_value, state1_value, state2_value,
length_value, _, _) = sess.run(
(next_batch.key, next_batch.next_key, next_batch.sequences["seq1"],
next_batch.sequences["seq2"], next_batch.sequences["seq3"],
next_batch.sequences["seq4"], next_batch.context["context1"],
next_batch.context["sp_context"], state1, state2, next_batch.length,
state1_update, state2_update))
self.assertEqual(expected_second_keys, self._prefix(key_value))
self.assertEqual(expected_final_keys, self._prefix(next_key_value))
self.assertAllEqual(
np.tile(self.context["context1"], (self.batch_size, 1)),
context1_value)
self.assertAllEqual(self.sp_tensor3_expected.indices,
context2_value.indices)
self.assertAllEqual(self.sp_tensor3_expected.values,
context2_value.values)
self.assertAllEqual(self.sp_tensor3_expected.dense_shape,
context2_value.dense_shape)
self.assertAllEqual(expected_seq1_batch2, seq1_value)
self.assertAllEqual(expected_seq2_batch2, seq2_value)
self.assertAllEqual(expected_seq3_batch2.indices, seq3_value.indices)
self.assertAllEqual(expected_seq3_batch2.values, seq3_value.values)
self.assertAllEqual(expected_seq3_batch2.dense_shape,
seq3_value.dense_shape)
self.assertAllEqual(expected_seq4_batch2.indices, seq4_value.indices)
self.assertAllEqual(expected_seq4_batch2.values, seq4_value.values)
self.assertAllEqual(expected_seq4_batch2.dense_shape,
seq4_value.dense_shape)
self.assertAllEqual(1 + np.tile(self.initial_states["state1"],
(self.batch_size, 1, 1)), state1_value)
self.assertAllEqual(-1 + np.tile(self.initial_states["state2"],
(self.batch_size, 1)), state2_value)
self.assertAllEqual([1, 1], length_value)
coord.request_stop()
coord.join(threads, stop_grace_period_secs=2)
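# Illustrative note (not part of the original test file): _testBasics above
# exercises the core contract of sqss.batch_sequences_with_states -- each input
# sequence is split into segments of num_unroll steps (padded to a multiple of
# num_unroll when pad=True), segments from batch_size examples are batched
# together, and next_batch.state("state1") returns whatever the previous segment
# stored via next_batch.save_state("state1", ...), which is why step 2 checks
# the +1 / -1 offsets on the initial states.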
def _testBasicPadding(self, pad, key=None, make_keys_unique=False):
num_unroll = 2 # Divisor of value_length - so no padding necessary.
expected_seq1_batch1 = np.tile(
self.sequences["seq1"][np.newaxis, 0:num_unroll, :],
(self.batch_size, 1, 1))
expected_seq2_batch1 = np.tile(
self.sequences["seq2"][np.newaxis, 0:num_unroll, :, :],
(self.batch_size, 1, 1, 1))
expected_seq1_batch2 = np.tile(
self.sequences["seq1"][np.newaxis, num_unroll:self.value_length, :],
(self.batch_size, 1, 1))
expected_seq2_batch2 = np.tile(
self.sequences["seq2"][np.newaxis, num_unroll:self.value_length, :, :],
(self.batch_size, 1, 1, 1))
ind1_1 = np.array([
# batch entry 1
[0, 0, 0],
[0, 1, 0], [0, 1, 3], [0, 1, 4],
# batch entry 2
[1, 0, 0],
[1, 1, 0], [1, 1, 3], [1, 1, 4]])
ind1_2 = np.array([
# batch entry 1
[0, 1, 2], [0, 1, 3],
# batch entry 2
[1, 1, 2], [1, 1, 3]])
val1_1 = np.array([0, 10, 13, 14,
0, 10, 13, 14])
val1_2 = np.array([32, 33,
32, 33])
shape1 = np.array([self.batch_size, num_unroll, 6])
# For sp_tensor2 all values fall into the first segment.
ind2_1 = np.array([
# batch entry 1
[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 0, 1, 2],
[0, 1, 0, 3],
[0, 1, 1, 0],
[0, 1, 1, 1],
[0, 1, 1, 2],
[0, 1, 2, 2],
# batch entry 2
[1, 0, 0, 1],
[1, 0, 1, 0],
[1, 0, 1, 2],
[1, 1, 0, 3],
[1, 1, 1, 0],
[1, 1, 1, 1],
[1, 1, 1, 2],
[1, 1, 2, 2],
])
val2_1 = np.array([1, 10, 12, 103, 150, 149, 150, 122,
1, 10, 12, 103, 150, 149, 150, 122])
shape2 = np.array([self.batch_size, num_unroll, 3, 4])
expected_seq3_batch1 = sparse_tensor.SparseTensorValue(
ind1_1, val1_1, shape1)
expected_seq3_batch2 = sparse_tensor.SparseTensorValue(
ind1_2, val1_2, shape1)
expected_seq4_batch1 = sparse_tensor.SparseTensorValue(
ind2_1, val2_1, shape2)
expected_seq4_batch2 = sparse_tensor.SparseTensorValue(
np.empty(shape=[0, 4], dtype=np.int64), np.array([]), shape2)
self._testBasics(
num_unroll=num_unroll,
length=3,
pad=pad,
expected_seq1_batch1=expected_seq1_batch1,
expected_seq1_batch2=expected_seq1_batch2,
expected_seq2_batch1=expected_seq2_batch1,
expected_seq2_batch2=expected_seq2_batch2,
expected_seq3_batch1=expected_seq3_batch1,
expected_seq3_batch2=expected_seq3_batch2,
expected_seq4_batch1=expected_seq4_batch1,
expected_seq4_batch2=expected_seq4_batch2,
key=key,
make_keys_unique=make_keys_unique)
def testBasicPadding(self):
self._testBasicPadding(pad=True)
def testBasicNoPadding(self):
self._testBasicPadding(pad=False)
def testRandomKeyGen(self):
self._testBasicPadding(pad=False,
key=constant_op.constant("fixed_key"),
make_keys_unique=True)
def testNotAMultiple(self):
num_unroll = 3 # Not a divisor of value_length -
# so padding would have been necessary.
# Use placeholder_with_default in sequences to make sure we get runtime
# error instead of shape inference error
sequences = {
"seq1": array_ops.placeholder_with_default(self.sequences["seq1"],
shape=(None, 5)),
"seq2": array_ops.placeholder_with_default(self.sequences["seq2"],
shape=(None, 4, 2)),
"seq3": self.sequences["seq3"],
"seq4": self.sequences["seq4"],
}
with self.cached_session() as sess:
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
".*should be a multiple of: 3, but saw "
"value: 4. Consider setting pad=True."):
coord = coordinator.Coordinator()
threads = None
try:
with coord.stop_on_exception():
next_batch = sqss.batch_sequences_with_states(
input_key=self.key,
input_sequences=sequences,
input_context=self.context,
input_length=3,
initial_states=self.initial_states,
num_unroll=num_unroll,
batch_size=self.batch_size,
num_threads=3,
# to enforce that we only move on to the next examples after
# finishing all segments of the first ones.
capacity=2,
pad=False)
threads = queue_runner_impl.start_queue_runners(coord=coord)
sess.run([next_batch.key])
except errors_impl.OutOfRangeError:
pass
finally:
coord.request_stop()
if threads is not None:
coord.join(threads, stop_grace_period_secs=2)
def testAdvancedPadding(self):
num_unroll = 3 # Not a divisor of value_length - so padding to 6 necessary.
expected_seq1_batch1 = np.tile(
self.sequences["seq1"][np.newaxis, 0:num_unroll, :],
(self.batch_size, 1, 1))
expected_seq2_batch1 = np.tile(
self.sequences["seq2"][np.newaxis, 0:num_unroll, :, :],
(self.batch_size, 1, 1, 1))
padded_seq1 = np.concatenate(
[
self.sequences["seq1"][np.newaxis, num_unroll:self.value_length, :],
np.zeros((1, 1, 5)), np.zeros((1, 1, 5))
],
axis=1)
expected_seq1_batch2 = np.concatenate(
[padded_seq1] * self.batch_size, axis=0)
padded_seq2 = np.concatenate(
[
self.sequences["seq2"][np.newaxis, num_unroll:self.value_length, :],
np.zeros((1, 1, 4, 2)), np.zeros((1, 1, 4, 2))
],
axis=1)
expected_seq2_batch2 = np.concatenate(
[padded_seq2] * self.batch_size, axis=0)
ind1_1 = np.array([
# batch entry 1
[0, 0, 0],
[0, 1, 0], [0, 1, 3], [0, 1, 4],
# batch entry 2
[1, 0, 0],
[1, 1, 0], [1, 1, 3], [1, 1, 4]])
ind1_2 = np.array([
# batch entry 1
[0, 0, 2], [0, 0, 3],
# batch entry 2
[1, 0, 2], [1, 0, 3]])
val1_1 = np.array([0, 10, 13, 14,
0, 10, 13, 14])
val1_2 = np.array([32, 33,
32, 33])
shape1 = np.array([self.batch_size, num_unroll, 6])
# For sp_tensor2 all values fall into the first segment.
ind2_1 = np.array([
# batch entry 1
[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 0, 1, 2],
[0, 1, 0, 3],
[0, 1, 1, 0],
[0, 1, 1, 1],
[0, 1, 1, 2],
[0, 1, 2, 2],
# batch entry 2
[1, 0, 0, 1],
[1, 0, 1, 0],
[1, 0, 1, 2],
[1, 1, 0, 3],
[1, 1, 1, 0],
[1, 1, 1, 1],
[1, 1, 1, 2],
[1, 1, 2, 2],
])
val2_1 = np.array([1, 10, 12, 103, 150, 149, 150, 122,
1, 10, 12, 103, 150, 149, 150, 122])
shape2 = np.array([self.batch_size, num_unroll, 3, 4])
expected_seq3_batch1 = sparse_tensor.SparseTensorValue(
ind1_1, val1_1, shape1)
expected_seq3_batch2 = sparse_tensor.SparseTensorValue(
ind1_2, val1_2, shape1)
expected_seq4_batch1 = sparse_tensor.SparseTensorValue(
ind2_1, val2_1, shape2)
expected_seq4_batch2 = sparse_tensor.SparseTensorValue(
np.empty(shape=[0, 4], dtype=np.int64), np.array([]), shape2)
self._testBasics(
num_unroll=num_unroll,
length=None,
pad=True,
expected_seq1_batch1=expected_seq1_batch1,
expected_seq1_batch2=expected_seq1_batch2,
expected_seq2_batch1=expected_seq2_batch1,
expected_seq2_batch2=expected_seq2_batch2,
expected_seq3_batch1=expected_seq3_batch1,
expected_seq3_batch2=expected_seq3_batch2,
expected_seq4_batch1=expected_seq4_batch1,
expected_seq4_batch2=expected_seq4_batch2)
class PaddingTest(test.TestCase):
def testPaddingInvalidLengths(self):
with ops.Graph().as_default() as g, self.session(graph=g):
sequences = {
"key_1": constant_op.constant([1, 2, 3]), # length 3
"key_2": constant_op.constant([1.5, 2.5]) # length 2
}
_, padded_seq = sqss._padding(sequences, 2)
with self.assertRaisesOpError(
".*All sequence lengths must match, but received lengths.*"):
padded_seq["key_1"].eval()
def testPadding(self):
with ops.Graph().as_default() as g, self.session(graph=g):
sequences = {
"key_1": constant_op.constant([1, 2]),
"key_2": constant_op.constant([0.5, -1.0]),
"key_3": constant_op.constant(["a", "b"]), # padding strings
"key_4": constant_op.constant([[1, 2, 3], [4, 5, 6]])
}
_, padded_seq = sqss._padding(sequences, 5)
expected_padded_seq = {
"key_1": [1, 2, 0, 0, 0],
"key_2": [0.5, -1.0, 0.0, 0.0, 0.0],
"key_3": ["a", "b", "", "", ""],
"key_4": [[1, 2, 3], [4, 5, 6], [0, 0, 0], [0, 0, 0], [0, 0, 0]]
}
for key, val in expected_padded_seq.items():
self.assertTrue(
math_ops.reduce_all(math_ops.equal(val, padded_seq[key])).eval())
def testPaddingOnlySparse(self):
ind1 = np.array([[0], [2]])
val1 = np.array([3, 4])
shape1 = np.array([4])
ind2 = np.array([[1], [2]])
val2 = np.array([9, 12])
shape2 = np.array([5])
with ops.Graph().as_default() as g, self.session(graph=g):
sp_tensor1 = sparse_tensor.SparseTensor(
indices=array_ops.constant(ind1, dtypes.int64),
values=array_ops.constant(val1, dtypes.int64),
dense_shape=array_ops.constant(shape1, dtypes.int64))
sp_tensor2 = sparse_tensor.SparseTensor(
indices=array_ops.constant(ind2, dtypes.int64),
values=array_ops.constant(val2, dtypes.int64),
dense_shape=array_ops.constant(shape2, dtypes.int64))
sp_tensor1_expected = sparse_tensor.SparseTensor(
indices=sp_tensor1.indices,
values=sp_tensor1.values,
dense_shape=[8])
sp_tensor2_expected = sparse_tensor.SparseTensor(
indices=sp_tensor2.indices,
values=sp_tensor2.values,
dense_shape=[8])
sequences = {
"key_1": sp_tensor1,
"key_2": sp_tensor2,
}
_, padded_seq = sqss._padding(sequences, 4)
expected_padded_seq = {
"key_1": sp_tensor1_expected,
"key_2": sp_tensor2_expected,
}
for key, val in expected_padded_seq.items():
self.assertAllEqual(
sparse_ops.sparse_tensor_to_dense(val).eval(),
sparse_ops.sparse_tensor_to_dense(padded_seq[key]).eval())
class SparseTensorReConstructionTest(test.TestCase):
def testAddManyTakeManyRoundTripBatched(self):
with self.test_session(use_gpu=False) as sess:
# N == 4 because shape_value == [4, 5]
indices_value_1 = np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64)
values_value_1 = np.array([b"a", b"b", b"c"])
shape_value_1 = np.array([4, 5], dtype=np.int64)
sparse_tensor_1 = sparse_tensor.SparseTensor(
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtypes.string),
array_ops.placeholder(dtypes.int64))
dict1 = {"key": sparse_tensor_1}
indices_value_2 = np.array([[1, 4], [2, 3]], dtype=np.int64)
values_value_2 = np.array([b"d", b"e"])
shape_value_2 = np.array([4, 5], dtype=np.int64)
sparse_tensor_2 = sparse_tensor.SparseTensor(
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtypes.string),
array_ops.placeholder(dtypes.int64))
dict2 = {"key": sparse_tensor_2}
input_seq1, keys1, tensor_list1 = sqss._deconstruct_sparse_tensor_seq(
dict1, shared_name="a")
handles_1 = input_seq1["key"]
input_seq2, _, _ = sqss._deconstruct_sparse_tensor_seq(
dict2, shared_name="a")
handles_2 = input_seq2["key"]
combined_handles = array_ops.stack(
[handles_1[1], handles_1[2], handles_1[3],
handles_2[1], handles_2[2], handles_2[3]])
batched_dict = {"key": combined_handles}
sqss._reconstruct_sparse_tensor_seq(
batched_dict,
keys1,
tensor_list1,
batch_size=2,
num_unroll=3)
roundtrip_value, = sess.run(
[batched_dict["key"]],
feed_dict={sparse_tensor_1.indices: indices_value_1,
sparse_tensor_1.values: values_value_1,
sparse_tensor_1.dense_shape: shape_value_1,
sparse_tensor_2.indices: indices_value_2,
sparse_tensor_2.values: values_value_2,
sparse_tensor_2.dense_shape: shape_value_2})
self.assertAllEqual(roundtrip_value.indices,
np.array([[0, 1, 0], [1, 0, 4], [1, 1, 3]],
dtype=np.int64))
self.assertAllEqual(roundtrip_value.values,
np.array([b"c", b"d", b"e"]))
self.assertAllEqual(roundtrip_value.dense_shape,
np.array([2, 3, 5], dtype=np.int64))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/training/python/training/batch_sequences_with_states_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains functions for evaluation and summarization of metrics.
The evaluation.py module contains helper functions for evaluating TensorFlow
models using a variety of metrics and summarizing the results.
****************************************
* Evaluating a Checkpointed Model Once *
****************************************
Once we've trained a model, we'll want to evaluate it. The simplest way to do
this is to evaluate the performance of a saved model a single time. In order
to do this, we can specify a number of metrics we'll want to evaluate as well
as specify the summaries we want to save to disk. Furthermore, we can print
out the metrics values to stdout:
# Specify where the checkpoint is stored:
checkpoint_path = ...
# Create model and obtain the predictions:
images, labels = LoadData(...)
predictions = MyModel(images)
# Choose the metrics to compute:
names_to_values, names_to_updates = tf.contrib.metrics.aggregate_metric_map({
"accuracy": tf.compat.v1.metrics.accuracy(labels, predictions),
"mse": tf.compat.v1.metrics.mean_squared_error(labels, predictions),
})
# Define the summaries to write:
  for metric_name, metric_value in names_to_values.items():
tf.compat.v1.summary.scalar(metric_name, metric_value)
checkpoint_dir = '/tmp/my_model_dir/'
log_dir = '/tmp/my_model_eval/'
# We'll evaluate 1000 batches:
num_evals = 1000
names_to_values = evaluate_once(
checkpoint_path=checkpoint_path,
eval_ops=names_to_updates.values(),
final_ops=names_to_values,
hooks=[
tf.contrib.training.StopAfterNEvalsHook(num_evals),
          tf.contrib.training.SummaryAtEndHook(log_dir),
],
config=None)
for name in names_to_values:
print('Metric %s has value %f.' % (name, names_to_values[name]))
************************************************
* Evaluating a Checkpointed Model with Metrics *
************************************************
Often, one wants to evaluate a model checkpoint saved on disk. This can be
performed once or repeatedly on a set schedule.
To evaluate a particular model, users define zero or more metrics and zero or
more summaries and call the evaluate_repeatedly method:
# Create model and obtain the predictions:
images, labels = LoadData(...)
predictions = MyModel(images)
# Choose the metrics to compute:
names_to_values, names_to_updates = tf.contrib.metrics.aggregate_metric_map({
"accuracy": tf.compat.v1.metrics.accuracy(labels, predictions),
"mse": tf.compat.v1.metrics.mean_squared_error(labels, predictions),
})
# Define the summaries to write:
  for metric_name, metric_value in names_to_values.items():
tf.compat.v1.summary.scalar(metric_name, metric_value)
checkpoint_dir = '/tmp/my_model_dir/'
log_dir = '/tmp/my_model_eval/'
# We'll evaluate 1000 batches:
num_evals = 1000
# Evaluate every 10 minutes:
tf.contrib.training.evaluate_repeatedly(
checkpoint_dir,
eval_ops=names_to_updates.values(),
hooks=[
tf.contrib.training.StopAfterNEvalsHook(num_evals),
          tf.contrib.training.SummaryAtEndHook(log_dir),
],
eval_interval_secs=600)
*******************************************************
* Evaluating a Checkpointed Model with Summaries Only *
*******************************************************
At times, an evaluation can be performed without metrics at all but rather
with only summaries. The user need only leave out the 'eval_ops' argument:
# Create model and obtain the predictions:
images, labels = LoadData(...)
predictions = MyModel(images)
# Define the summaries to write:
tf.compat.v1.summary.scalar(...)
tf.compat.v1.summary.histogram(...)
checkpoint_dir = '/tmp/my_model_dir/'
log_dir = '/tmp/my_model_eval/'
# Evaluate once every 10 minutes.
tf.contrib.training.evaluate_repeatedly(
checkpoint_dir,
hooks=[
          tf.contrib.training.SummaryAtEndHook(log_dir),
],
eval_interval_secs=600)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import evaluation
from tensorflow.python.training import monitored_session
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
__all__ = [
'StopAfterNEvalsHook',
'SummaryAtEndHook',
'checkpoints_iterator',
'evaluate_once',
'evaluate_repeatedly',
'get_or_create_eval_step',
'wait_for_new_checkpoint',
]
# pylint: disable=protected-access
# pylint: disable=invalid-name
StopAfterNEvalsHook = evaluation._StopAfterNEvalsHook
evaluate_once = evaluation._evaluate_once
get_or_create_eval_step = evaluation._get_or_create_eval_step
# pylint: enable=invalid-name
# pylint: enable=protected-access
def wait_for_new_checkpoint(checkpoint_dir,
last_checkpoint=None,
seconds_to_sleep=1,
timeout=None):
"""Waits until a new checkpoint file is found.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
last_checkpoint: The last checkpoint path used or `None` if we're expecting
a checkpoint for the first time.
seconds_to_sleep: The number of seconds to sleep for before looking for a
new checkpoint.
timeout: The maximum number of seconds to wait. If left as `None`, then the
process will wait indefinitely.
Returns:
a new checkpoint path, or None if the timeout was reached.
"""
logging.info('Waiting for new checkpoint at %s', checkpoint_dir)
stop_time = time.time() + timeout if timeout is not None else None
while True:
checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None or checkpoint_path == last_checkpoint:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
time.sleep(seconds_to_sleep)
else:
logging.info('Found new checkpoint at %s', checkpoint_path)
return checkpoint_path
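# A minimal usage sketch (not part of the module API; the directory path and
# helper name are illustrative assumptions): poll for a checkpoint newer than
# `last_seen`, giving up after five minutes as described in the docstring.
def _example_wait_for_new_checkpoint(last_seen=None):
  new_path = wait_for_new_checkpoint(
      '/tmp/my_model_dir', last_checkpoint=last_seen, timeout=300)
  if new_path is None:
    logging.info('Timed out: no new checkpoint appeared.')
  return new_path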
def checkpoints_iterator(checkpoint_dir,
min_interval_secs=0,
timeout=None,
timeout_fn=None):
"""Continuously yield new checkpoint files as they appear.
The iterator only checks for new checkpoints when control flow has been
reverted to it. This means it can miss checkpoints if your code takes longer
to run between iterations than `min_interval_secs` or the interval at which
new checkpoints are written.
The `timeout` argument is the maximum number of seconds to block waiting for
a new checkpoint. It is used in combination with the `timeout_fn` as
follows:
* If the timeout expires and no `timeout_fn` was specified, the iterator
stops yielding.
* If a `timeout_fn` was specified, that function is called and if it returns
a true boolean value the iterator stops yielding.
* If the function returns a false boolean value then the iterator resumes the
wait for new checkpoints. At this point the timeout logic applies again.
This behavior gives control to callers on what to do if checkpoints do not
come fast enough or stop being generated. For example, if callers have a way
to detect that the training has stopped and know that no new checkpoints
will be generated, they can provide a `timeout_fn` that returns `True` when
the training has stopped. If they know that the training is still going on
they return `False` instead.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
min_interval_secs: The minimum number of seconds between yielding
checkpoints.
timeout: The maximum number of seconds to wait between checkpoints. If left
as `None`, then the process will wait indefinitely.
timeout_fn: Optional function to call after a timeout. If the function
returns True, then it means that no new checkpoints will be generated and
the iterator will exit. The function is called with no arguments.
Yields:
String paths to latest checkpoint files as they arrive.
"""
checkpoint_path = None
while True:
new_checkpoint_path = wait_for_new_checkpoint(
checkpoint_dir, checkpoint_path, timeout=timeout)
if new_checkpoint_path is None:
if not timeout_fn:
# timed out
logging.info('Timed-out waiting for a checkpoint.')
return
if timeout_fn():
# The timeout_fn indicated that we are truly done.
return
else:
# The timeout_fn indicated that more checkpoints may come.
continue
start = time.time()
checkpoint_path = new_checkpoint_path
yield checkpoint_path
time_to_next_eval = start + min_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
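# Illustration of the `timeout`/`timeout_fn` contract described above (a
# sketch; `training_finished` is a hypothetical zero-argument callable supplied
# by the caller): the loop keeps waiting through timeouts until the callable
# reports that training has stopped, at which point the iterator exits.
def _example_checkpoints_iterator(checkpoint_dir, training_finished):
  for checkpoint_path in checkpoints_iterator(
      checkpoint_dir,
      min_interval_secs=60,
      timeout=600,
      timeout_fn=training_finished):
    logging.info('New checkpoint to evaluate: %s', checkpoint_path)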
class SummaryAtEndHook(session_run_hook.SessionRunHook):
"""A run hook that saves a summary with the results of evaluation."""
def __init__(self,
log_dir=None,
summary_writer=None,
summary_op=None,
feed_dict=None):
"""Constructs the Summary Hook.
Args:
log_dir: The directory where the summary events are saved to. Used only
when `summary_writer` is not specified.
summary_writer: A `tf.compat.v1.summary.FileWriter` to write summary
events with.
summary_op: The summary op to run. If left as `None`, then all summaries
in the tf.GraphKeys.SUMMARIES collection are used.
feed_dict: An optional feed dictionary to use when evaluating the
summaries.
Raises:
ValueError: If both `log_dir` and `summary_writer` are `None`.
"""
self._summary_op = summary_op
self._replace_summary_op = summary_op is None
self._feed_dict = feed_dict
self._summary_writer = summary_writer
self._log_dir = log_dir
if self._log_dir is None and self._summary_writer is None:
raise ValueError('One of log_dir or summary_writer should be used.')
def begin(self):
if self._replace_summary_op:
# This can still remain None if there are no summaries.
self._summary_op = summary.merge_all()
self._global_step = training_util.get_or_create_global_step()
def after_create_session(self, session, coord):
if self._summary_writer is None and self._log_dir:
self._summary_writer = summary.FileWriterCache.get(self._log_dir)
def end(self, session):
if self._summary_op is not None:
global_step = training_util.global_step(session, self._global_step)
summary_str = session.run(self._summary_op, self._feed_dict)
if self._summary_writer:
self._summary_writer.add_summary(summary_str, global_step)
if self._summary_writer:
self._summary_writer.flush()
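# Hedged construction sketch for the hook above (the directory is a
# placeholder): with `summary_op` left as None, begin() falls back to
# summary.merge_all() and end() writes the merged summary once, tagged with the
# current global step. Passing neither log_dir nor summary_writer raises
# ValueError.
def _example_summary_at_end_hook():
  return SummaryAtEndHook(log_dir='/tmp/my_model_eval')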
def _scaffold_with_init(scaffold, saver, checkpoint_path):
"""Creates a scaffold that loads the given checkpoint using an init_fn.
Args:
scaffold: The scaffold to copy.
saver: The saver to use when restoring the checkpoint.
checkpoint_path: An absolute path to a checkpoint.
Returns:
A scaffold with an init_fn that loads the given checkpoint. If the scaffold
provided already has an init_fn, the scaffold is returned unchanged.
"""
def restore_checkpoint(_, session):
saver.restore(session, checkpoint_path)
if not scaffold.init_fn:
scaffold = monitored_session.Scaffold(
init_op=scaffold.init_op,
init_feed_dict=scaffold.init_feed_dict,
init_fn=restore_checkpoint,
ready_op=scaffold.ready_op,
local_init_op=scaffold.local_init_op,
summary_op=scaffold.summary_op,
saver=scaffold.saver)
return scaffold
def evaluate_repeatedly(checkpoint_dir,
master='',
scaffold=None,
eval_ops=None,
feed_dict=None,
final_ops=None,
final_ops_feed_dict=None,
eval_interval_secs=60,
hooks=None,
config=None,
max_number_of_evaluations=None,
timeout=None,
timeout_fn=None):
"""Repeatedly searches for a checkpoint in `checkpoint_dir` and evaluates it.
During a single evaluation, the `eval_ops` is run until the session is
interrupted or requested to finish. This is typically requested via a
`tf.contrib.training.StopAfterNEvalsHook` which results in `eval_ops` running
the requested number of times.
Optionally, a user can pass in `final_ops`, a single `Tensor`, a list of
`Tensors` or a dictionary from names to `Tensors`. The `final_ops` is
evaluated a single time after `eval_ops` has finished running and the fetched
values of `final_ops` are returned. If `final_ops` is left as `None`, then
`None` is returned.
One may also consider using a `tf.contrib.training.SummaryAtEndHook` to record
summaries after the `eval_ops` have run. If `eval_ops` is `None`, the
summaries run immediately after the model checkpoint has been restored.
Note that `evaluate_once` creates a local variable used to track the number of
evaluations run via `tf.contrib.training.get_or_create_eval_step`.
Consequently, if a custom local init op is provided via a `scaffold`, the
caller should ensure that the local init op also initializes the eval step.
Args:
checkpoint_dir: The directory where checkpoints are stored.
master: The address of the TensorFlow master.
scaffold: An tf.compat.v1.train.Scaffold instance for initializing variables
and restoring variables. Note that `scaffold.init_fn` is used by the
function to restore the checkpoint. If you supply a custom init_fn, then
it must also take care of restoring the model from its checkpoint.
eval_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names to
`Tensors`, which is run until the session is requested to stop, commonly
done by a `tf.contrib.training.StopAfterNEvalsHook`.
feed_dict: The feed dictionary to use when executing the `eval_ops`.
final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names
to `Tensors`.
final_ops_feed_dict: A feed dictionary to use when evaluating `final_ops`.
eval_interval_secs: The minimum number of seconds between evaluations.
hooks: List of `tf.estimator.SessionRunHook` callbacks which are run inside
the evaluation loop.
config: An instance of `tf.compat.v1.ConfigProto` that will be used to
configure the `Session`. If left as `None`, the default will be used.
max_number_of_evaluations: The maximum times to run the evaluation. If left
as `None`, then evaluation runs indefinitely.
timeout: The maximum number of seconds to wait between checkpoints. If left
as `None`, then the process will wait indefinitely.
timeout_fn: Optional function to call after a timeout. If the function
returns True, then it means that no new checkpoints will be generated and
the iterator will exit. The function is called with no arguments.
Returns:
The fetched values of `final_ops` or `None` if `final_ops` is `None`.
"""
eval_step = get_or_create_eval_step()
# Prepare the run hooks.
hooks = hooks or []
if eval_ops is not None:
update_eval_step = state_ops.assign_add(eval_step, 1)
for h in hooks:
if isinstance(h, StopAfterNEvalsHook):
h._set_evals_completed_tensor(update_eval_step) # pylint: disable=protected-access
if isinstance(eval_ops, dict):
eval_ops['update_eval_step'] = update_eval_step
elif isinstance(eval_ops, (tuple, list)):
eval_ops = list(eval_ops) + [update_eval_step]
else:
eval_ops = [eval_ops, update_eval_step]
final_ops_hook = basic_session_run_hooks.FinalOpsHook(final_ops,
final_ops_feed_dict)
hooks.append(final_ops_hook)
num_evaluations = 0
for checkpoint_path in checkpoints_iterator(
checkpoint_dir,
min_interval_secs=eval_interval_secs,
timeout=timeout,
timeout_fn=timeout_fn):
session_creator = monitored_session.ChiefSessionCreator(
scaffold=scaffold,
checkpoint_filename_with_path=checkpoint_path,
master=master,
config=config)
with monitored_session.MonitoredSession(
session_creator=session_creator, hooks=hooks) as session:
logging.info('Starting evaluation at ' +
time.strftime('%Y-%m-%d-%H:%M:%S', time.gmtime()))
if eval_ops is not None:
while not session.should_stop():
session.run(eval_ops, feed_dict)
logging.info('Finished evaluation at ' +
time.strftime('%Y-%m-%d-%H:%M:%S', time.gmtime()))
num_evaluations += 1
if (max_number_of_evaluations is not None and
num_evaluations >= max_number_of_evaluations):
return final_ops_hook.final_ops_values
return final_ops_hook.final_ops_values
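# A hedged sketch of the `final_ops` behaviour documented above (the metric
# tensors and `num_evals` are hypothetical caller-supplied values): run the
# update op for a bounded number of batches, evaluate the latest checkpoint a
# single time, and return the fetched final metric values.
def _example_evaluate_repeatedly(checkpoint_dir, accuracy_value,
                                 accuracy_update, num_evals):
  return evaluate_repeatedly(
      checkpoint_dir,
      eval_ops=accuracy_update,
      final_ops={'accuracy': accuracy_value},
      hooks=[StopAfterNEvalsHook(num_evals)],
      max_number_of_evaluations=1)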
|
tensorflow-master
|
tensorflow/contrib/training/python/training/evaluation.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SequenceQueueingStateSaver and wrappers.
Please see the reading data how-to for context.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numbers
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
# pylint: disable=protected-access
_restore_sparse = sparse_ops._take_many_sparse_from_tensors_map
_store_sparse = sparse_ops._add_many_sparse_to_tensors_map
# pylint: enable=protected-access
class _SequenceInputWrapper(object):
"""A wrapper object for storing sequence-related input.
  The SequenceInputWrapper accepts four objects:
length: A scalar int containing the length of the input sequence.
key: A scalar string containing the unique key of the input sequence.
sequences: A dict mapping labels, like `input`, to tensors
whose initial index dimension is at least size `length`.
context: A dict mapping labels, like `global_target`, to tensors
that represent data across the entire example.
"""
def __init__(self, length, key, sequences, context):
length = ops.convert_to_tensor(length, name="length")
key = ops.convert_to_tensor(key, name="key")
if not isinstance(sequences, dict):
raise TypeError("sequences must be a dict")
if not isinstance(context, dict):
raise TypeError("context must be a dict")
if not sequences:
raise ValueError("must have at least one sequence tensor")
for k in sequences.keys():
if not isinstance(k, six.string_types):
raise TypeError("sequence key must be string: %s" % k)
if ":" in k:
raise ValueError("sequence key may not have a colon: '%s'" % k)
for k in context.keys():
if not isinstance(k, six.string_types):
raise TypeError("context key must be string: %s" % k)
if ":" in k:
raise ValueError("context key may not have a colon: '%s'" % k)
sequences = dict((k, ops.convert_to_tensor(
v, name="sequence_%s" % k)) for k, v in sequences.items())
context = dict((k, ops.convert_to_tensor(
v, name="context_%s" % k)) for k, v in context.items())
self._length = length
self._key = key
self._sequences = sequences
self._context = context
@property
def length(self):
return self._length
@property
def key(self):
return self._key
@property
def sequences(self):
return self._sequences
@property
def context(self):
return self._context
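# A minimal construction sketch for the wrapper above. The shapes and the
# label names ("input", "global_target") are illustrative assumptions echoing
# the class docstring; they are not required by the implementation.
def _example_sequence_input_wrapper():
  return _SequenceInputWrapper(
      length=4,
      key="example_key",
      sequences={"input": array_ops.zeros([4, 8])},
      context={"global_target": array_ops.zeros([2])})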
def _check_multiple_of(value, multiple_of):
"""Checks that value `value` is a non-zero multiple of `multiple_of`.
Args:
value: an int32 scalar Tensor.
multiple_of: an int or int32 scalar Tensor.
Returns:
new_value: an int32 scalar Tensor matching `value`, but which includes an
assertion that `value` is a multiple of `multiple_of`.
"""
assert isinstance(value, ops.Tensor)
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.logical_and(
math_ops.equal(math_ops.mod(value, multiple_of), 0),
math_ops.not_equal(value, 0)), [
string_ops.string_join([
"Tensor %s should be a multiple of: " % value.name,
string_ops.as_string(multiple_of), ", but saw value: ",
string_ops.as_string(value),
". Consider setting pad=True."
])
])
]):
new_value = array_ops.identity(value, name="multiple_of_checked")
return new_value
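# A sketch of how the helper above behaves (the constants are illustrative):
# the returned tensor equals its input, but fetching anything that depends on
# it also evaluates the multiple-of assertion, whose message suggests
# "Consider setting pad=True."
def _example_check_multiple_of(num_unroll=3):
  padded_length = ops.convert_to_tensor(6, dtype=dtypes.int32)
  return _check_multiple_of(padded_length, num_unroll)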
def _check_rank(value, expected_rank):
"""Check the rank of Tensor `value`, via shape inference and assertions.
Args:
    value: A Tensor, possibly with associated shape information.
expected_rank: int32 scalar (optionally a `Tensor`).
Returns:
new_value: A Tensor matching `value`. Accessing this tensor tests
assertions on its rank. If expected_rank is not a `Tensor`, then
new_value's shape's rank has been set.
Raises:
ValueError: if `expected_rank` is not a `Tensor` and the rank of `value`
is known and is not equal to `expected_rank`.
"""
assert isinstance(value, ops.Tensor)
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.equal(expected_rank, array_ops.rank(value)), [
string_ops.string_join([
"Rank of tensor %s should be: " % value.name,
string_ops.as_string(expected_rank), ", shape received:"
]), array_ops.shape(value)
])
]):
new_value = array_ops.identity(value, name="rank_checked")
if isinstance(expected_rank, ops.Tensor):
expected_rank_value = tensor_util.constant_value(expected_rank)
if expected_rank_value is not None:
expected_rank = int(expected_rank_value)
if not isinstance(expected_rank, ops.Tensor):
try:
new_value.set_shape(new_value.get_shape().with_rank(expected_rank))
except ValueError as e:
raise ValueError("Rank check failed for %s: %s" % (value.name, str(e)))
return new_value
def _check_shape(value, expected_shape):
"""Check the shape of Tensor `value`, via shape inference and assertions.
Args:
    value: A Tensor, possibly with associated shape information.
expected_shape: a `TensorShape`, list of `int32`, or a vector `Tensor`.
Returns:
new_value: A Tensor matching `value`. Accessing this tensor tests
assertions on its shape. If expected_shape is not a `Tensor`, then
new_value's shape has been set.
Raises:
ValueError: if `expected_shape` is not a `Tensor` and the shape of `value`
is known and is not equal to `expected_shape`.
"""
assert isinstance(value, ops.Tensor)
if isinstance(expected_shape, tensor_shape.TensorShape):
expected_shape = expected_shape.as_list()
if isinstance(expected_shape, ops.Tensor):
expected_shape_value = tensor_util.constant_value(expected_shape)
if expected_shape_value is not None:
expected_shape = [int(d) for d in expected_shape_value]
if isinstance(expected_shape, ops.Tensor):
value = _check_rank(value, array_ops.size(expected_shape))
else:
value = _check_rank(value, len(expected_shape))
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.reduce_all(
math_ops.equal(expected_shape, array_ops.shape(value))), [
string_ops.string_join([
"Shape of tensor %s should be: " % value.name,
string_ops.as_string(expected_shape),
", shape received: ",
string_ops.as_string(array_ops.shape(value))
])
])
]):
new_value = array_ops.identity(value, name="shape_checked")
if not isinstance(expected_shape, ops.Tensor):
try:
new_value.set_shape(new_value.get_shape().merge_with(expected_shape))
except ValueError as e:
raise ValueError("Shape check failed for %s: %s" % (value.name, str(e)))
return new_value
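# Hedged sketch of _check_shape above with a statically known expected shape
# (`value` is a hypothetical tensor supplied by the caller): the returned
# tensor carries the merged static shape, and evaluating it also runs the
# runtime shape assertion. A non-constant Tensor expected_shape would instead
# skip set_shape and rely on the runtime assertions alone.
def _example_check_shape(value):
  return _check_shape(value, [2, 3])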
def _check_dimensions(value, dimensions, expected_sizes, debug_prefix):
"""Check the dimensions of Tensor `value`, via shape inference and assertions.
Args:
    value: A Tensor, with optional / partially defined shape information.
dimensions: An int list, the dimensions to check.
expected_sizes: list of mixed ints and int32 scalar tensors.
Optionally also a vector `Tensor`.
debug_prefix: A string, used for naming ops and printing debugging messages.
Returns:
new_value: A Tensor matching `value`. Accessing this tensor tests
assertions on its shape. If expected_sizes is not a `Tensor`, then
new_value's shape has been set for all `dimensions[i]` where
`expected_sizes[i]` is not a `Tensor`.
Raises:
TypeError: if any of the input contains invalid types:
if `value` is not a `Tensor`.
if `dimensions` is not a `list` or `tuple`.
ValueError: if input has incorrect sizes or inferred shapes do not match:
if `dimensions` contains repeated dimensions.
    if `expected_sizes` is not a `Tensor` and its length does not match the
      length of `dimensions`.
if `value`'s shape has a well-defined rank, and one of the values in
`dimensions` is equal to or above this rank.
if `value`'s shape is well defined for some `dimensions[i]`, and
`expected_sizes[i]` is not a `Tensor`, and these two values do
not match.
"""
if not isinstance(dimensions, (list, tuple)):
raise TypeError("dimensions must be a list or tuple")
if len(set(dimensions)) != len(dimensions):
raise ValueError("dimensions are not unique: %s" % dimensions)
if not isinstance(value, ops.Tensor):
raise TypeError("value is not a Tensor: %s" % value)
value_shape = value.get_shape()
if not isinstance(expected_sizes, ops.Tensor):
if len(dimensions) != len(expected_sizes):
raise ValueError("len(dimensions) != len(expected_sizes): %d vs. %d" %
(len(dimensions), len(expected_sizes)))
if value_shape.ndims is not None:
if value_shape.ndims <= max(dimensions):
raise ValueError(
"%s: rank of input is not greater than max(dimensions): "
"%d vs. %d" % (debug_prefix, value.get_shape().ndims,
max(dimensions)))
value_dims = value_shape.as_list()
for d, s in zip(dimensions, expected_sizes):
if not isinstance(s, ops.Tensor):
value_dims[d] = s
try:
value.set_shape(value.get_shape().merge_with(value_dims))
except ValueError as e:
raise ValueError("Dimensions check failed for %s: %s" %
(debug_prefix, str(e)))
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.equal(expected_size, array_ops.shape(value)[dimension]), [
string_ops.string_join([
"Dimension %d of tensor labeled %s should be: " %
(dimension, debug_prefix),
string_ops.as_string(expected_size), ", shape received: ",
string_ops.as_string(array_ops.shape(value))
])
]) for (dimension, expected_size) in zip(dimensions, expected_sizes)
]):
new_value = array_ops.identity(value, name="dims_checked_%s" % debug_prefix)
return new_value
def _prepare_sequence_inputs(inputs, states):
"""Convert input to tensors and validate shape information.
Args:
inputs: A `_SequenceInputWrapper` instance.
states: A dictionary mapping state names to input constants or tensors.
Returns:
The tuple (length, key, sorted_states, sorted_sequences, sorted_context),
where each value has been checked for valid shape, and the sorted_* dicts
are instances of OrderedDict; with key-value pairs sorted by key.
Raises:
ValueError: if the shapes of inputs.context.values(), states.values(),
or inputs.sequences.values() are not fully defined (with the exception
of the dimension of any `Tensor` in inputs.sequences.values()).
TypeError: if the dtype of length is not int32.
"""
# Convert state initial values to tensors
states = dict((k, ops.convert_to_tensor(
v, name="state_%s" % k)) for k, v in states.items())
def _assert_fully_defined(label, dict_, ignore_first_dimension=False):
start_dimension = 1 if ignore_first_dimension else 0
for k, v in dict_.items():
if not v.get_shape()[start_dimension:].is_fully_defined():
raise ValueError("Shape for %s %s is not fully defined %s: %s" %
(label, k, "(ignoring first dimension)" if
ignore_first_dimension else "", v.get_shape()))
_assert_fully_defined("state", states)
_assert_fully_defined("context", inputs.context)
# Sequences' first dimension (time) may be variable
_assert_fully_defined(
"sequence", inputs.sequences, ignore_first_dimension=True)
# Get dictionaries' dtypes ordered by name - ordering is important
# when switching between dicts and tuples for passing to Barrier.
def _sort_by_name(d):
return collections.OrderedDict(sorted(d.items(), key=lambda k_v: k_v[0]))
sorted_sequences = _sort_by_name(inputs.sequences)
sorted_context = _sort_by_name(inputs.context)
sorted_states = _sort_by_name(states)
length = _check_rank(inputs.length, 0)
key = _check_rank(inputs.key, 0)
if length.dtype != dtypes.int32:
raise TypeError("length dtype must be int32, but received: %s" %
length.dtype)
if key.dtype != dtypes.string:
raise TypeError("key dtype must be string, but received: %s" % key.dtype)
return (length, key, sorted_states, sorted_sequences, sorted_context)
# NextQueuedSequenceBatch works closely with
# SequenceQueueingStateSaver and requires access to its private properties
# pylint: disable=protected-access
class NextQueuedSequenceBatch(object):
"""NextQueuedSequenceBatch stores deferred SequenceQueueingStateSaver data.
This class is instantiated by `SequenceQueueingStateSaver` and is accessible
via its `next_batch` property.
"""
def __init__(self, state_saver):
self._state_saver = state_saver
@property
def total_length(self):
"""The lengths of the original (non-truncated) unrolled examples.
Returns:
An integer vector of length `batch_size`, the total lengths.
"""
return self._state_saver._received_total_length
@property
def length(self):
"""The lengths of the given truncated unrolled examples.
For initial iterations, for which `sequence * num_unroll < length`,
this number is `num_unroll`. For the remainder,
this number is between `0` and `num_unroll`.
Returns:
An integer vector of length `batch_size`, the lengths.
"""
return self._state_saver._received_length
@property
def batch_size(self):
"""The batch_size of the given batch.
Usually, this is the batch_size requested when initializing the SQSS, but
if allow_small_batch=True this will become smaller when inputs are
exhausted.
Returns:
A scalar integer tensor, the batch_size
"""
return self._state_saver._received_batch_size
@property
def insertion_index(self):
"""The insertion indices of the examples (when they were first added).
These indices start with the value -2**63 and increase with every
call to the prefetch op. Each whole example gets its own insertion
index, and this is used to prioritize the example so that its truncated
segments appear in adjacent iterations, even if new examples are inserted
by the prefetch op between iterations.
Returns:
An int64 vector of length `batch_size`, the insertion indices.
"""
return self._state_saver._received_indices
@property
def key(self):
"""The key names of the given truncated unrolled examples.
The format of the key is:
```python
"%05d_of_%05d:%s" % (sequence, sequence_count, original_key)
```
where `original_key` is the unique key read in by the prefetcher.
Returns:
A string vector of length `batch_size`, the keys.
"""
return self._state_saver._received_keys
@property
def next_key(self):
"""The key names of the next (in iteration) truncated unrolled examples.
The format of the key is:
```python
"%05d_of_%05d:%s" % (sequence + 1, sequence_count, original_key)
```
if `sequence + 1 < sequence_count`, otherwise:
```python
"STOP:%s" % original_key
```
where `original_key` is the unique key read in by the prefetcher.
Returns:
A string vector of length `batch_size`, the keys.
"""
return self._state_saver._received_next_key
@property
def sequence(self):
"""An int32 vector, length `batch_size`: the sequence index of each entry.
When an input is split up, the sequence values
```
0, 1, ..., sequence_count - 1
```
are assigned to each split.
Returns:
An int32 vector `Tensor`.
"""
return self._state_saver._received_sequence
@property
def sequence_count(self):
"""An int32 vector, length `batch_size`: the sequence count of each entry.
When an input is split up, the number of splits is equal to:
`padded_length / num_unroll`. This is the sequence_count.
Returns:
An int32 vector `Tensor`.
"""
return self._state_saver._received_sequence_count
@property
def context(self):
"""A dict mapping keys of `input_context` to batched context.
Returns:
A dict mapping keys of `input_context` to tensors.
If we had at input:
```python
context["name"].get_shape() == [d1, d2, ...]
```
then for this property:
```python
context["name"].get_shape() == [batch_size, d1, d2, ...]
```
"""
return self._state_saver._received_context
@property
def sequences(self):
"""A dict mapping keys of `input_sequences` to split and rebatched data.
Returns:
A dict mapping keys of `input_sequences` to tensors.
If we had at input:
```python
sequences["name"].get_shape() == [None, d1, d2, ...]
```
where `None` meant the sequence time was dynamic, then for this property:
```python
sequences["name"].get_shape() == [batch_size, num_unroll, d1, d2, ...].
```
"""
return self._state_saver._received_sequences
def state(self, state_name):
"""Returns batched state tensors.
Args:
state_name: string, matches a key provided in `initial_states`.
Returns:
A `Tensor`: a batched set of states, either initial states (if this is
the first run of the given example), or a value as stored during
a previous iteration via `save_state` control flow.
Its type is the same as `initial_states["state_name"].dtype`.
If we had at input:
```python
initial_states[state_name].get_shape() == [d1, d2, ...],
```
then
```python
state(state_name).get_shape() == [batch_size, d1, d2, ...]
```
Raises:
KeyError: if `state_name` does not match any of the initial states
declared in `initial_states`.
"""
return self._state_saver._received_states[state_name]
def save_state(self, state_name, value, name=None):
"""Returns an op to save the current batch of state `state_name`.
Args:
state_name: string, matches a key provided in `initial_states`.
value: A `Tensor`.
Its type must match that of `initial_states[state_name].dtype`.
If we had at input:
```python
initial_states[state_name].get_shape() == [d1, d2, ...]
```
then the shape of `value` must match:
```python
tf.shape(value) == [batch_size, d1, d2, ...]
```
name: string (optional). The name scope for newly created ops.
Returns:
A control flow op that stores the new state of each entry into
the state saver. This op must be run for every iteration that
accesses data from the state saver (otherwise the state saver
will never progress through its states and run out of capacity).
Raises:
KeyError: if `state_name` does not match any of the initial states
declared in `initial_states`.
"""
if state_name not in self._state_saver._received_states.keys():
raise KeyError("state was not declared: %s" % state_name)
default_name = "InputQueueingStateSaver_SaveState"
with ops.name_scope(name, default_name, values=[value]):
# Place all operations on the CPU. Barriers and queues are only
# implemented for CPU, but all the other book-keeping operations
# (reshape, shape, range, ...) would be placed on GPUs if available,
# unless we explicitly tie them to CPU.
with ops.colocate_with(self._state_saver._capacity_queue.queue_ref):
indices_where_not_done = array_ops.reshape(
array_ops.where(
math_ops.logical_not(self._state_saver._sequence_is_done)),
[-1])
keeping_next_key = array_ops.gather(
self._state_saver._received_next_key, indices_where_not_done)
value = _check_shape(
array_ops.identity(
value, name="convert_%s" % state_name),
array_ops.shape(self._state_saver._received_states[state_name]))
keeping_state = array_ops.gather(value, indices_where_not_done)
return self._state_saver._barrier.insert_many(
self._state_saver._get_barrier_index("state", state_name),
keeping_next_key,
keeping_state,
name="BarrierInsertState_%s" % state_name)
# pylint: enable=protected-access
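# Illustration of the segment key format documented in
# NextQueuedSequenceBatch.key and next_key ("example_a" is a made-up original
# key): the second of three segments and the key of its successor; the final
# segment's next_key would instead be "STOP:example_a".
_EXAMPLE_SEGMENT_KEY = "%05d_of_%05d:%s" % (1, 3, "example_a")       # 00001_of_00003:example_a
_EXAMPLE_NEXT_SEGMENT_KEY = "%05d_of_%05d:%s" % (2, 3, "example_a")  # 00002_of_00003:example_a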
class SequenceQueueingStateSaver(object):
"""SequenceQueueingStateSaver provides access to stateful values from input.
This class is meant to be used instead of, e.g., a `Queue`, for splitting
variable-length sequence inputs into segments of sequences with fixed length
and batching them into mini-batches. It maintains contexts and state for a
sequence across the segments. It can be used in conjunction with a
`QueueRunner` (see the example below).
The `SequenceQueueingStateSaver` (SQSS) accepts one example at a time via the
inputs `input_length`, `input_key`, `input_sequences` (a dict),
`input_context` (a dict), and `initial_states` (a dict).
The sequences, values in `input_sequences`, may have variable first dimension
(the `padded_length`), though this dimension must always be a multiple of
`num_unroll`. All other dimensions must be fixed and accessible via
`get_shape` calls. The length prior to padding can be recorded in
`input_length`. The context values in `input_context` must all have fixed and
well defined dimensions. The initial state values must all have fixed and
well defined dimensions.
The SQSS splits the sequences of an input example into segments of length
`num_unroll`. Across examples minibatches of size `batch_size` are formed.
These minibatches contain a segment of the sequences, copy the context values,
and maintain state, length, and key information of the original input
examples. In the first segment of an example the state is still the initial
state. It can then be updated; and updated state values are accessible in
subsequent segments of the same example. After each segment
`batch.save_state()` must be called which is done by the state_saving_rnn.
Without this call, the dequeue op associated with the SQSS will not run.
Internally, SQSS has a queue for the input examples. Its `capacity` is
configurable. If set smaller than `batch_size` then the dequeue op will block
indefinitely. A small multiple of `batch_size` is a good rule of thumb to
prevent that queue from becoming a bottleneck and slowing down training.
If set too large (and note that it defaults to unbounded) memory consumption
goes up. Moreover, when iterating over the same input examples multiple times
reusing the same `key` the `capacity` must be smaller than the number of
examples.
The prefetcher, which reads one unrolled, variable-length input sequence at
a time, is accessible via `prefetch_op`. The underlying `Barrier` object
is accessible via `barrier`. Processed minibatches, as well as
state read and write capabilities are accessible via `next_batch`.
Specifically, `next_batch` provides access to all of the minibatched
data, including the following, see `NextQueuedSequenceBatch` for details:
* `total_length`, `length`, `insertion_index`, `key`, `next_key`,
  * `sequence` (each minibatch entry's time segment index),
* `sequence_count` (the total time segment count for each minibatch entry),
* `context` (a dict of the copied minibatched context values),
* `sequences` (a dict of the split minibatched variable-length sequences),
* `state` (to access the states of the current segments of these entries)
* `save_state` (to save the states for the next segments of these entries)
Example usage:
```python
batch_size = 32
num_unroll = 20
lstm_size = 8
cell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell(num_units=lstm_size)
initial_state_values = tf.zeros(cell.state_size, dtype=tf.float32)
raw_data = get_single_input_from_input_reader()
length, key, sequences, context = my_parser(raw_data)
assert "input" in sequences.keys()
assert "label" in context.keys()
  initial_states = {"lstm_state": initial_state_values}
stateful_reader = tf.SequenceQueueingStateSaver(
batch_size, num_unroll,
      input_length=length, input_key=key, input_sequences=sequences,
input_context=context, initial_states=initial_states,
capacity=batch_size*100)
batch = stateful_reader.next_batch
inputs = batch.sequences["input"]
context_label = batch.context["label"]
inputs_by_time = tf.split(value=inputs, num_or_size_splits=num_unroll, axis=1)
assert len(inputs_by_time) == num_unroll
lstm_output, _ = tf.contrib.rnn.static_state_saving_rnn(
cell,
inputs_by_time,
state_saver=batch,
state_name="lstm_state")
# Start a prefetcher in the background
  session = tf.compat.v1.Session()
num_threads = 3
queue_runner = tf.compat.v1.train.QueueRunner(
stateful_reader, [stateful_reader.prefetch_op] * num_threads)
tf.compat.v1.train.add_queue_runner(queue_runner)
tf.compat.v1.train.start_queue_runners(sess=session)
while True:
# Step through batches, perform training or inference...
session.run([lstm_output])
```
**Note**: Usually the barrier is given to a QueueRunner as in the
examples above. The QueueRunner will close the barrier if the prefetch_op
receives an OutOfRange Error from upstream input queues (i.e., reaches
the end of the input). If the barrier is closed no further new examples
are added to the SQSS. The underlying barrier might, however, still
contain further unroll-steps of examples that have not undergone all
iterations. To gracefully finish all examples, the flag
`allow_small_batch` must be set to true, which causes the SQSS to issue
progressively smaller mini-batches with the remaining examples.
"""
def __init__(self,
batch_size,
num_unroll,
input_length,
input_key,
input_sequences,
input_context,
initial_states,
capacity=None,
allow_small_batch=False,
name=None):
"""Creates the SequenceQueueingStateSaver.
Args:
batch_size: int or int32 scalar `Tensor`, how large minibatches should
be when accessing the `state()` method and `context`, `sequences`, etc,
properties.
num_unroll: Python integer, how many time steps to unroll at a time.
The input sequences of length `k` are then split into `k / num_unroll`
many segments.
input_length: An int32 scalar `Tensor`, the length of the sequence prior
to padding. This value may be at most `padded_length` for any given
input (see below for the definition of `padded_length`).
Batched and total lengths of the current iteration are made accessible
via the `length` and `total_length` properties. The shape of
input_length (scalar) must be fully specified.
input_key: A string scalar `Tensor`, the **unique** key for the given
input. This is used to keep track of the split minibatch elements
of this input. Batched keys of the current iteration are made
accessible via the `key` property. The shape of `input_key` (scalar)
must be fully specified.
input_sequences: A dict mapping string names to `Tensor` values. The
values must all have matching first dimension, called `padded_length`.
The `SequenceQueueingStateSaver` will split these tensors along
this first dimension into minibatch elements of dimension
`num_unroll`. Batched and segmented sequences of the current iteration
are made accessible via the `sequences` property.
**Note**: `padded_length` may be dynamic, and may vary from input
to input, but must always be a multiple of `num_unroll`. The remainder
of the shape (other than the first dimension) must be fully specified.
input_context: A dict mapping string names to `Tensor` values. The values
are treated as "global" across all time splits of the given input,
and will be copied across for all minibatch elements accordingly.
Batched and copied context of the current iteration are made
accessible via the `context` property.
**Note**: All input_context values must have fully defined shapes.
initial_states: A dict mapping string state names to multi-dimensional
values (e.g. constants or tensors). This input defines the set of
states that will be kept track of during computing iterations, and
which can be accessed via the `state` and `save_state` methods.
**Note**: All initial_state values must have fully defined shapes.
capacity: The max capacity of the SQSS in number of examples. Needs to be
at least `batch_size`. Defaults to unbounded.
allow_small_batch: If true, the SQSS will return smaller batches when
there aren't enough input examples to fill a whole batch and the end of
the input has been reached (i.e., the underlying barrier has been
closed).
name: An op name string (optional).
Raises:
TypeError: if any of the inputs is not an expected type.
ValueError: if any of the input values is inconsistent, e.g. if
not enough shape information is available from inputs to build
the state saver.
"""
if capacity is not None and isinstance(batch_size, ops.Tensor):
with ops.control_dependencies([check_ops.assert_greater_equal(
math_ops.cast(capacity, dtype=dtypes.int64),
math_ops.cast(batch_size, dtype=dtypes.int64),
message="capacity needs to be >= batch_size.")]):
input_key = array_ops.identity(input_key)
elif capacity is not None and capacity < batch_size:
raise ValueError("capacity %d needs to be >= batch_size %d" % (
capacity, batch_size))
# The barrier is ignorant of the number of actual examples, since a long
# example that requires many iterations produces more elements in the
# barrier than a short example. Furthermore, we don't have an upper bound
# on the length of examples, and hence have to keep the capacity of the
# barrier at infinite to avoid dead-lock. Instead we have to keep track of
# the number of active examples in this class, and block the prefetch_op
# when capacity is reached. To this end, we employ a FIFOQueue in which we
# store one token (its value doesn't matter) for each input example, and
# dequeue a token for each completed example. Since the capacity of this
# queue is limited the enqueue operation will block if capacity is reached.
self._capacity_queue = data_flow_ops.FIFOQueue(
capacity=capacity, dtypes=[dtypes.int32], shapes=[[]])
# Place all operations on the CPU. Barriers and queues are only implemented
# for CPU, but all the other book-keeping operations
# (reshape, shape, range, ...) would be placed on GPUs if available,
# unless we explicitly tie them to CPU.
with ops.colocate_with(self._capacity_queue.queue_ref):
if not isinstance(initial_states, dict):
raise TypeError("initial_states must be a dictionary")
if not initial_states:
raise ValueError(
"initial_states may not be empty: at least one state variable is "
"required to properly enqueue split sequences to run in separate "
"iterations")
for k in initial_states:
if not isinstance(k, six.string_types):
raise TypeError("state name must be a string: %s" % k)
if ":" in k:
raise ValueError("state name may not have a colon: '%s'" % k)
op_vars = ([input_length, input_key] + list(input_sequences.values()) +
list(input_context.values()))
with ops.name_scope(name, "InputQueueingStateSaver", op_vars) as scope:
inputs = _SequenceInputWrapper(input_length, input_key, input_sequences,
input_context)
self._batch_size = batch_size
self._num_unroll = num_unroll
self._name = scope
# This step makes sure all shapes are well defined. We can now
# use get_shape() on any tensor in the output of this function
# and get a fully-defined shape.
(self._length, self._key, self._sorted_states, self._sorted_sequences,
self._sorted_context) = _prepare_sequence_inputs(inputs,
initial_states)
self._padded_length = array_ops.identity(
array_ops.shape(six.next(six.itervalues(self._sorted_sequences)))[
0],
name="padded_length") # The name is useful for debugging
self._padded_length = _check_multiple_of(self._padded_length,
self._num_unroll)
# sequences should have length == all matching
self._sorted_sequences = collections.OrderedDict(
(k, _check_dimensions(
v, [0], [self._padded_length],
debug_prefix="sorted_sequences_%s" % k))
for k, v in self._sorted_sequences.items())
self._uninitialized_states = self._sorted_states
# Once this is set, self._get_barrier_*_index are available for use.
self._store_index_maps(self._sorted_sequences, self._sorted_context,
self._sorted_states)
# Make sure that the length is <= the padded_length
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.less_equal(self._length, self._padded_length), [
"Input length should be <= than length from sequences:",
self._length, " vs. ", self._padded_length
])
]):
self._length = array_ops.identity(self._length)
# Only create barrier; enqueue and dequeue operations happen when you
# access prefetch_op and next_batch.
self._create_barrier()
self._scope = scope
self._allow_small_batch = allow_small_batch
self._prefetch_op = None
self._next_batch = None
@property
def name(self):
return self._name
@property
def barrier(self):
return self._barrier
@property
def batch_size(self):
return self._batch_size
@property
def num_unroll(self):
return self._num_unroll
@property
def prefetch_op(self):
"""The op used to prefetch new data into the state saver.
Running it once enqueues one new input example into the state saver.
The first time this gets called, it additionally creates the prefetch_op.
Subsequent calls simply return the previously created `prefetch_op`.
It should be run in a separate thread via e.g. a `QueueRunner`.
Returns:
An `Operation` that performs prefetching.
"""
if not self._prefetch_op:
with ops.name_scope(None), ops.name_scope(
self._scope, values=[self._barrier.barrier_ref]):
self._create_prefetch_op()
return self._prefetch_op
@property
def next_batch(self):
"""The `NextQueuedSequenceBatch` providing access to batched output data.
Also provides access to the `state` and `save_state` methods.
The first time this gets called, it additionally prepares barrier reads
and creates `NextQueuedSequenceBatch` / next_batch objects. Subsequent
calls simply return the previously created `next_batch`.
In order to access data in `next_batch` without blocking, the `prefetch_op`
must have been run at least `batch_size` times (ideally in a separate
thread, or launched via a `QueueRunner`). After processing a segment in
`next_batch()`, `batch.save_state()` must be called which is done by the
state_saving_rnn. Without this call, the dequeue op associated with the SQSS
will not run.
Returns:
A cached `NextQueuedSequenceBatch` instance.
"""
# This is needed to prevent errors if next_batch is called before
# prefetch_op is created.
if not self._prefetch_op:
with ops.name_scope(None), ops.name_scope(
self._scope, values=[self._barrier.barrier_ref]):
self._create_prefetch_op()
if not self._next_batch:
with ops.name_scope(None), ops.name_scope(
self._scope, values=[self._barrier.barrier_ref]):
self._prepare_barrier_reads()
return self._next_batch
def close(self, cancel_pending_enqueues=False, name=None):
"""Closes the barrier and the FIFOQueue.
This operation signals that no more segments of new sequences will be
enqueued. New segments of already inserted sequences may still be enqueued
and dequeued if there is a sufficient number filling a batch or
allow_small_batch is true. Otherwise dequeue operations will fail
immediately.
Args:
cancel_pending_enqueues: (Optional.) A boolean, defaulting to
`False`. If `True`, all pending enqueues to the underlying queues will
be cancelled, and completing already started sequences is not possible.
name: Optional name for the op.
Returns:
The operation that closes the barrier and the FIFOQueue.
"""
with ops.name_scope(name, "SQSSClose", [self._prefetch_op]) as name:
barrier_close = self.barrier.close(cancel_pending_enqueues,
"BarrierClose")
fifo_queue_close = self._capacity_queue.close(cancel_pending_enqueues,
"FIFOClose")
return control_flow_ops.group(barrier_close, fifo_queue_close, name=name)
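  # Illustrative shutdown sketch (names assumed, mirroring the tests below):
  # build the close op once and run it when the input stream is exhausted, e.g.
  #   cancel_op = state_saver.close(cancel_pending_enqueues=True)
  #   ...
  #   sess.run(cancel_op)
  # after which pending take_many/dequeue calls raise OutOfRangeError.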
def _store_index_maps(self, sequences, context, states):
"""Prepares the internal dictionaries _name_to_index and _index_to_name.
These dictionaries are used to keep track of indices into the barrier.
Args:
sequences: `OrderedDict` of string, `Tensor` pairs.
context: `OrderedDict` of string, `Tensor` pairs.
states: `OrderedDict` of string, `Tensor` pairs.
"""
assert isinstance(sequences, dict)
assert isinstance(context, dict)
assert isinstance(states, dict)
self._name_to_index = {
name: ix
for (ix, name) in enumerate([
"__length", "__total_length", "__next_key", "__sequence",
"__sequence_count"
] + ["__sequence__%s" % k for k in sequences.keys()] + [
"__context__%s" % k for k in context.keys()
] + ["__state__%s" % k for k in states.keys()])}
self._index_to_name = [
name
for (name, _) in sorted(
self._name_to_index.items(), key=lambda n_ix: n_ix[1])
]
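    # For example, with sequences {"seq1"}, context {"ctx1"} and states
    # {"state1"}, the resulting mapping is:
    #   __length -> 0, __total_length -> 1, __next_key -> 2, __sequence -> 3,
    #   __sequence_count -> 4, __sequence__seq1 -> 5, __context__ctx1 -> 6,
    #   __state__state1 -> 7
    # and _index_to_name is the same list of names ordered by index.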
def _get_barrier_length_index(self):
return self._name_to_index["__length"]
def _get_barrier_total_length_index(self):
return self._name_to_index["__total_length"]
def _get_barrier_next_key_index(self):
return self._name_to_index["__next_key"]
def _get_barrier_sequence_index(self):
return self._name_to_index["__sequence"]
def _get_barrier_sequence_count_index(self):
return self._name_to_index["__sequence_count"]
def _get_barrier_index(self, index_type, name):
assert index_type in ("sequence", "context", "state")
key = "__%s__%s" % (index_type, name)
assert key in self._name_to_index, (
"Requested a name not in the value type %s: %s" % (index_type, name))
return self._name_to_index[key]
def _create_barrier(self):
"""Create the barrier.
This method initializes the Barrier object with the right types and shapes.
"""
# Create the barrier
sequence_dtypes = [v.dtype for k, v in self._sorted_sequences.items()]
context_dtypes = [v.dtype for k, v in self._sorted_context.items()]
state_dtypes = [v.dtype for k, v in self._sorted_states.items()]
    types = ([
        dtypes.int32,  # length
        dtypes.int32,  # total_length
        dtypes.string,  # next_keys
        dtypes.int32,  # sequence
        dtypes.int32  # expanded_sequence_count
    ] + sequence_dtypes + context_dtypes + state_dtypes)
sequence_shapes = [
[self._num_unroll] + self._sorted_sequences[k].get_shape().as_list()[1:]
for k in self._sorted_sequences.keys()
]
context_shapes = [
self._sorted_context[k].get_shape().as_list()
for k in self._sorted_context.keys()
]
state_shapes = [
self._sorted_states[k].get_shape().as_list()
for k in self._sorted_states.keys()
]
    shapes = ([
        (),  # length
        (),  # total_length
        (),  # next_keys
        (),  # sequence
        ()  # expanded_sequence_count
    ] + sequence_shapes + context_shapes + state_shapes)
self._barrier = data_flow_ops.Barrier(types=types, shapes=shapes)
def _create_prefetch_op(self):
"""Group insert_many ops and create prefetch_op.
This method implements the "meat" of the logic underlying the
`SequenceQueueingStateSaver`. It performs dynamic reshaping of
sequences, copying of context, and initial insertion of these values,
as well as the key, next_key, sequence, sequence_count, and initial
states into the barrier.
"""
# Step 1: identify how many barrier entries to split this input
# into, store the result as a scalar
sequence_count = math_ops.div(self._padded_length, self._num_unroll)
sequence_count_vec = array_ops.expand_dims(sequence_count, 0)
# The final unrolled sequence's length is num_unroll only in
# the case that num_unroll divides it evenly.
ones = array_ops.ones(sequence_count_vec, dtype=dtypes.int32)
sequence = math_ops.range(sequence_count)
expanded_length = math_ops.maximum(
0, self._length - self._num_unroll * sequence)
expanded_length = math_ops.minimum(self._num_unroll, expanded_length)
expanded_total_length = self._length * ones
expanded_sequence_count = sequence_count * ones
current_keys = string_ops.string_join(
[
string_ops.as_string(
sequence, width=5, fill="0"), "_of_", string_ops.as_string(
sequence_count, width=5, fill="0"), ":", self._key
],
name="StringJoinCurrentKeys")
next_keys = array_ops.concat(
[
array_ops.slice(current_keys, [1], [-1]), array_ops.expand_dims(
string_ops.string_join(
["STOP:", self._key], name="StringJoinStop"),
0)
],
0,
name="concat_next_keys")
reshaped_sequences = collections.OrderedDict((
k,
_check_dimensions(
# Reshape sequences to sequence_count rows
array_ops.reshape(
v,
array_ops.concat(
[
array_ops.expand_dims(sequence_count, 0),
array_ops.expand_dims(self._num_unroll, 0),
v.get_shape().as_list()[1:]
],
0,
name="concat_sequences_%s" % k),
name="reshape_sequences_%s" % k),
[0, 1] + list(range(2, v.get_shape().ndims + 1)),
[sequence_count, self._num_unroll] + v.get_shape().as_list()[1:],
debug_prefix="reshaped_sequences_%s" %
k)) for k, v in self._sorted_sequences.items())
expanded_context = collections.OrderedDict(
(
k,
_check_dimensions(
# Copy context to be sequence_count rows
array_ops.tile(
array_ops.expand_dims(v, 0),
array_ops.concat(
[
array_ops.expand_dims(sequence_count, 0),
[1] * v.get_shape().ndims
],
0,
name="concat_context_%s" % k),
name="tile_context_%s" % k),
[0] + list(range(1, v.get_shape().ndims + 1)),
[sequence_count] + v.get_shape().as_list(),
debug_prefix="expanded_context_%s" % k))
for k, v in self._sorted_context.items())
# Storing into the barrier, for each current_key:
# sequence_ix, sequence_count, next_key, length,
# context... (copied), sequences... (truncated)
# Also storing into the barrier for the first key
# states (using initial_states).
insert_sequence_op = self._barrier.insert_many(
self._get_barrier_sequence_index(),
current_keys,
sequence,
name="BarrierInsertSequence")
insert_sequence_count_op = self._barrier.insert_many(
self._get_barrier_sequence_count_index(),
current_keys,
expanded_sequence_count,
name="BarrierInsertSequenceCount")
insert_next_key_op = self._barrier.insert_many(
self._get_barrier_next_key_index(),
current_keys,
next_keys,
name="BarrierInsertNextKey")
insert_length_op = self._barrier.insert_many(
self._get_barrier_length_index(),
current_keys,
expanded_length,
name="BarrierInsertLength")
insert_total_length_op = self._barrier.insert_many(
self._get_barrier_total_length_index(),
current_keys,
expanded_total_length,
name="BarrierInsertTotalLength")
insert_context_ops = dict((name, self._barrier.insert_many(
self._get_barrier_index("context", name),
current_keys,
value,
name="BarrierInsertContext_%s" % name))
for (name, value) in expanded_context.items())
insert_sequences_ops = dict((name, self._barrier.insert_many(
self._get_barrier_index("sequence", name),
current_keys,
value,
name="BarrierInsertSequences_%s" % name))
for (name, value) in reshaped_sequences.items())
# An op that blocks if we reached capacity in number of active examples.
TOKEN_WITH_IGNORED_VALUE = 21051976 # pylint: disable=invalid-name
insert_capacity_token_op = self._capacity_queue.enqueue(
(TOKEN_WITH_IGNORED_VALUE,))
# Insert just the initial state. Specifically force this to run
# the insert sequence op *first* so that the Barrier receives
# an insert with *all* the segments and the segments all get the same index.
with ops.control_dependencies(
[insert_sequence_op, insert_capacity_token_op]):
insert_initial_state_ops = dict(
(name, self._barrier.insert_many(
self._get_barrier_index("state", name),
array_ops.stack([current_keys[0]]),
array_ops.stack([value]),
name="BarrierInitialInsertState_%s" % name))
for (name, value) in self._uninitialized_states.items())
all_inserts = ([
insert_capacity_token_op, insert_sequence_op, insert_sequence_count_op,
insert_next_key_op, insert_length_op, insert_total_length_op
] + list(insert_initial_state_ops.values()) +
list(insert_context_ops.values()) +
list(insert_sequences_ops.values()))
self._prefetch_op = control_flow_ops.group(
*all_inserts, name="StateSaverPrefetchGroup")
def _prepare_barrier_reads(self):
"""Creates ops for reading the barrier, as used by properties like `length`.
"""
# Ops for reading from the barrier. These ops must be run in a
# different thread than the prefetcher op to avoid blocking.
received = self._barrier.take_many(
self._batch_size, self._allow_small_batch, name="BarrierTakeMany")
self._received_indices = received[0]
self._received_keys = received[1]
received_values = received[2]
self._received_sequence = received_values[self._get_barrier_sequence_index(
)]
self._received_sequence_count = received_values[
self._get_barrier_sequence_count_index()]
self._received_next_key = received_values[self._get_barrier_next_key_index(
)]
self._received_length = received_values[self._get_barrier_length_index()]
self._received_total_length = received_values[
self._get_barrier_total_length_index()]
self._received_context = collections.OrderedDict(
(name, received_values[self._get_barrier_index("context", name)])
for name in self._sorted_context.keys())
self._received_sequences = collections.OrderedDict(
(name, received_values[self._get_barrier_index("sequence", name)])
for name in self._sorted_sequences.keys())
self._received_batch_size = array_ops.squeeze(
array_ops.shape(self._received_length))
# Which examples are we done with?
self._sequence_is_done = (
self._received_sequence + 1 >= self._received_sequence_count)
# Compute the number of finished sequences and dequeue as many tokens from
# the capacity queue.
finished_sequences = (math_ops.reduce_sum(
math_ops.cast(self._sequence_is_done, dtypes.int32)))
# TODO(ebrevdo): convert to dequeue_up_to when FIFOQueue supports it.
dequeue_op = self._capacity_queue.dequeue_many(finished_sequences)
# Tie the dequeue_op to the received_state, such that it is definitely
# carried out.
with ops.control_dependencies([dequeue_op]):
self._received_states = collections.OrderedDict(
(name, array_ops.identity(received_values[self._get_barrier_index(
"state", name)])) for name in self._sorted_states.keys())
self._next_batch = NextQueuedSequenceBatch(self)
def batch_sequences_with_states(input_key,
input_sequences,
input_context,
input_length,
initial_states,
num_unroll,
batch_size,
num_threads=3,
capacity=1000,
allow_small_batch=True,
pad=True,
make_keys_unique=False,
make_keys_unique_seed=None,
name=None):
"""Creates batches of segments of sequential input.
This method creates a `SequenceQueueingStateSaver` (SQSS) and adds it to
the queuerunners. It returns a `NextQueuedSequenceBatch`.
It accepts one example at a time identified by a unique `input_key`.
  `input_sequences` is a dict with values that are tensors with time as first
  dimension. This time dimension must be the same across those tensors of an
  example. It can vary across examples, but it must always be a multiple of
  `num_unroll`; hence, padding may be necessary and is turned on by default
  (`pad=True`).
`input_length` is a Tensor scalar or an int recording the time dimension prior
to padding. It should be between 0 and the time dimension. One reason we want
to keep track of it is so that we can take it into consideration when
computing the loss. If `pad=True` then `input_length` can be `None` and will
be inferred.
  This method segments `input_sequences` into segments of length `num_unroll`.
It batches input sequences from `batch_size` many examples. These mini-batches
are available through the `sequence` property of the output. Moreover, for
each entry in the batch we can access its original `input_key` in `key` and
its input length in `total_length`. `length` records within this segment how
many non-padded time steps there are.
Static features of an example that do not vary across time can be part of the
`input_context`, a dict with Tensor values. This method copies the context for
each segment and makes it available in the `context` of the output.
  This method can maintain and update a state for each example. It accepts some
  initial_states as a dict with Tensor values. The first mini-batch in which an
  example appears uses its initial_states as the entry of `state`. If
  save_state is called, then the next segment will see the updated entry of
  `state`.
See `NextQueuedSequenceBatch` for a complete list of properties and methods.
Example usage:
```python
batch_size = 32
num_unroll = 20
num_enqueue_threads = 3
lstm_size = 8
cell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell(num_units=lstm_size)
key, sequences, context = my_parser(raw_data)
initial_state_values = tf.zeros((state_size,), dtype=tf.float32)
initial_states = {"lstm_state": initial_state_values}
  batch = tf.contrib.training.batch_sequences_with_states(
input_key=key,
input_sequences=sequences,
input_context=context,
input_length=tf.shape(sequences["input"])[0],
initial_states=initial_states,
num_unroll=num_unroll,
batch_size=batch_size,
num_threads=num_enqueue_threads,
capacity=batch_size * num_enqueue_threads * 2)
inputs = batch.sequences["input"]
context_label = batch.context["label"]
inputs_by_time = tf.split(value=inputs, num_or_size_splits=num_unroll, axis=1)
assert len(inputs_by_time) == num_unroll
lstm_output, _ = tf.contrib.rnn.static_state_saving_rnn(
cell,
inputs_by_time,
state_saver=batch,
state_name="lstm_state")
# Start a prefetcher in the background
  session = tf.compat.v1.Session()
tf.compat.v1.train.start_queue_runners(sess=session)
while True:
# Step through batches, perform training or inference...
session.run([lstm_output])
```
Args:
input_key: A string scalar `Tensor`, the **unique** key for the given
input example. This is used to keep track of the split minibatch elements
of this input. Batched keys of the current iteration are made
accessible via the `key` property. The shape of `input_key` (scalar) must
be fully specified. Consider setting `make_keys_unique` to True when
iterating over the same input multiple times.
**Note**: if `make_keys_unique=False` then `input_key`s must be unique.
input_sequences: A dict mapping string names to `Tensor` values. The values
must all have matching first dimension, called `value_length`. They may
vary from input to input. The remainder of the shape (other than the first
dimension) must be fully specified.
The `SequenceQueueingStateSaver` will split these tensors along
      this first dimension into minibatch elements of dimension `num_unroll`.
Batched and segmented sequences of the current iteration are made
accessible via the `sequences` property.
**Note**: if `pad=False`, then `value_length` must always be a multiple
of `num_unroll`.
input_context: A dict mapping string names to `Tensor` values. The values
are treated as "global" across all time splits of the given input example,
and will be copied across for all minibatch elements accordingly.
Batched and copied context of the current iteration are made
accessible via the `context` property.
**Note**: All input_context values must have fully defined shapes.
input_length: None or an int32 scalar `Tensor`, the length of the sequence
prior to padding. If `input_length=None` and `pad=True` then the length
will be inferred and will be equal to `value_length`. If `pad=False` then
      `input_length` cannot be `None` and must be specified. The shape of
      `input_length` (scalar) must be fully specified. Its value may be
at most `value_length` for any given input (see above for the definition
of `value_length`). Batched and total lengths of the current iteration are
made accessible via the `length` and `total_length` properties.
initial_states: A dict mapping string state names to multi-dimensional
values (e.g. constants or tensors). This input defines the set of
states that will be kept track of during computing iterations, and
which can be accessed via the `state` and `save_state` methods.
**Note**: All initial_state values must have fully defined shapes.
num_unroll: Python integer, how many time steps to unroll at a time.
The input sequences of length k are then split into k / num_unroll many
segments.
batch_size: int or int32 scalar `Tensor`, how large minibatches should
      be when accessing the `state()` method and the `context`, `sequences`,
      etc. properties.
num_threads: The int number of threads enqueuing input examples into a
queue.
capacity: The max capacity of the queue in number of examples. Needs to be
      at least `batch_size`. Defaults to 1000. When iterating over the same
      input example multiple times reusing its key, the `capacity` must be
      smaller than the number of examples.
allow_small_batch: If true, the queue will return smaller batches when
there aren't enough input examples to fill a whole batch and the end of
the input has been reached.
    pad: If `True`, `input_sequences` will be padded to a multiple of
`num_unroll`. In that case `input_length` may be `None` and is assumed to
be the length of first dimension of values in `input_sequences`
(i.e. `value_length`).
make_keys_unique: Whether to append a random integer to the `input_key` in
an effort to make it unique. The seed can be set via
`make_keys_unique_seed`.
make_keys_unique_seed: If `make_keys_unique=True` this fixes the seed with
which a random postfix is generated.
name: An op name string (optional).
Returns:
A NextQueuedSequenceBatch with segmented and batched inputs and their
states.
Raises:
TypeError: if any of the inputs is not an expected type.
ValueError: if any of the input values is inconsistent, e.g. if
not enough shape information is available from inputs to build
the state saver.
"""
tensor_list = (list(input_sequences.values()) + list(input_context.values()) +
list(initial_states.values()))
with ops.name_scope(name, "batch_sequences_with_states", tensor_list) as name:
if pad:
length, input_sequences = _padding(input_sequences, num_unroll)
input_length = input_length if input_length is not None else length
elif input_sequences:
# Assert that value_length is a multiple of num_unroll.
checked_input_sequences = {}
for key, value in input_sequences.items():
if (isinstance(value, sparse_tensor.SparseTensor) or
isinstance(value, sparse_tensor.SparseTensorValue)):
value_length = value.dense_shape[0]
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.logical_and(
math_ops.equal(value_length % num_unroll, 0),
math_ops.not_equal(value_length, 0)),
[
string_ops.string_join([
"SparseTensor %s first dimension should be a "
"multiple of: " % key,
string_ops.as_string(num_unroll),
", but saw value: ",
string_ops.as_string(value_length),
". Consider setting pad=True."])])]):
checked_input_sequences[key] = sparse_tensor.SparseTensor(
indices=array_ops.identity(
value.indices, name="multiple_of_checked"),
values=array_ops.identity(
value.values, name="multiple_of_checked"),
dense_shape=array_ops.identity(
value.dense_shape, name="multiple_of_checked"))
else:
if not isinstance(value, ops.Tensor):
try:
value = ops.convert_to_tensor(value)
except TypeError:
raise TypeError(
"Unsupported input_sequences expected Tensor or SparseTensor "
"values, got: %s for key %s" % (str(type(value)), key))
value_length = array_ops.shape(value)[0]
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.logical_and(
math_ops.equal(value_length % num_unroll, 0),
math_ops.not_equal(value_length, 0)),
[
string_ops.string_join([
"Tensor %s first dimension should be a multiple "
"of: " % key,
string_ops.as_string(num_unroll),
", but saw value: ",
string_ops.as_string(value_length),
". Consider setting pad=True."
])
])
]):
checked_input_sequences[key] = array_ops.identity(
value, name="multiple_of_checked")
input_sequences = checked_input_sequences
# Move SparseTensors in context into input_sequences.
_move_sparse_tensor_out_context(input_context, input_sequences, num_unroll)
    # Deconstruct SparseTensors in input_sequences into Tensors of IDs before
    # feeding them to the SQSS.
(transformed_input_seq,
sparse_tensor_keys,
tensor_list) = _deconstruct_sparse_tensor_seq(input_sequences)
if make_keys_unique:
input_key = string_ops.string_join([
input_key,
string_ops.as_string(
random_ops.random_uniform(
(), minval=0, maxval=100000000, dtype=dtypes.int32,
seed=make_keys_unique_seed))])
# setup stateful queue reader
stateful_reader = SequenceQueueingStateSaver(
batch_size,
num_unroll,
input_length=input_length,
input_key=input_key,
input_sequences=transformed_input_seq,
input_context=input_context,
initial_states=initial_states,
capacity=capacity,
allow_small_batch=allow_small_batch)
barrier = stateful_reader.barrier
summary.scalar("queue/%s/ready_segment_batches_" % barrier.name,
math_ops.cast(barrier.ready_size(), dtypes.float32))
q_runner = queue_runner.QueueRunner(
stateful_reader, [stateful_reader.prefetch_op] * num_threads,
queue_closed_exception_types=(errors.OutOfRangeError,
errors.CancelledError))
queue_runner.add_queue_runner(q_runner)
batch = stateful_reader.next_batch
# Reconstruct SparseTensors in sequence.
_reconstruct_sparse_tensor_seq(
batch.sequences,
sparse_tensor_keys,
tensor_list,
batch_size,
num_unroll)
# Move select SparseTensors back to context.
_move_sparse_tensor_in_context(batch.context, batch.sequences)
return batch
def _padding(sequences, num_unroll):
"""For a dictionary of sequences, pads tensors to a multiple of `num_unroll`.
Args:
sequences: dictionary with `Tensor` values.
num_unroll: int specifying to what multiple to pad sequences to.
Returns:
    length: Scalar `Tensor`, the size of dimension 0 of all the values in
      `sequences`.
padded_sequence: Dictionary of sequences that are padded to a multiple of
`num_unroll`.
Raises:
    ValueError: If `num_unroll` is not an int.
    TypeError: If `sequences` is not a dictionary from string to `Tensor` or
      `SparseTensor`.
"""
if not isinstance(num_unroll, numbers.Integral):
raise ValueError("Unsupported num_unroll expected int, got: %s" %
str(num_unroll))
if not isinstance(sequences, dict):
raise TypeError("Unsupported sequences expected dict, got: %s" %
str(sequences))
for key, value in sequences.items():
if not isinstance(key, six.string_types):
raise TypeError("Unsupported sequences key expected string, got: %s" %
str(key))
if not sequences:
return 0, {}
# Sort 'sequences_dict' so 'length' will have a predictable value below.
sequences_dict = collections.OrderedDict()
for key, value in sorted(sequences.items()):
if not (isinstance(value, sparse_tensor.SparseTensor) or
isinstance(value, sparse_tensor.SparseTensorValue)):
sequences_dict[key] = ops.convert_to_tensor(value)
else:
sequences_dict[key] = value
lengths = [array_ops.shape(value)[0] for value in sequences_dict.values()
if isinstance(value, ops.Tensor)]
if lengths:
length = lengths[0]
all_lengths_equal = [
control_flow_ops.Assert(
math_ops.equal(l, length), [string_ops.string_join(
["All sequence lengths must match, but received lengths: ",
string_ops.as_string(lengths)])])
for l in lengths]
length = control_flow_ops.with_dependencies(all_lengths_equal, length)
else: # Only have SparseTensors
sparse_lengths = [value.dense_shape[0] for value in sequences_dict.values()
if isinstance(value, sparse_tensor.SparseTensor)]
length = math_ops.reduce_max(math_ops.cast(sparse_lengths, dtypes.int32))
unroll = array_ops.constant(num_unroll)
padded_length = length + ((unroll - (length % unroll)) % unroll)
padded_sequences = {}
for key, value in sequences_dict.items():
if isinstance(value, ops.Tensor):
# 1. create shape of paddings
# first dimension of value will be increased by num_paddings to
# padded_length
num_paddings = [padded_length - array_ops.shape(value)[0]]
      # The shape of the paddings that we concat with the original value will
      # be [num_paddings, tf.shape(value)[1], tf.shape(value)[2], ...,
      #     tf.shape(value)[tf.rank(value) - 1]].
padding_shape = array_ops.concat(
(num_paddings, array_ops.shape(value)[1:]), 0)
# 2. fill padding shape with dummies
dummy = array_ops.constant(
"" if value.dtype == dtypes.string else 0, dtype=value.dtype)
paddings = array_ops.fill(dims=padding_shape, value=dummy)
# 3. concat values with paddings
padded_sequences[key] = array_ops.concat([value, paddings], 0)
else:
padded_shape = array_ops.concat(
[[math_ops.cast(padded_length, dtypes.int64)], value.dense_shape[1:]],
0)
padded_sequences[key] = sparse_tensor.SparseTensor(
indices=value.indices,
values=value.values,
dense_shape=padded_shape)
return length, padded_sequences
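# Worked example for the padding arithmetic in _padding above: with
# num_unroll = 3 and a sequence of length 4,
#   padded_length = 4 + ((3 - (4 % 3)) % 3) = 4 + 2 = 6,
# so two dummy rows (zeros, or "" for string tensors) are appended and the
# padded sequence splits evenly into 6 / 3 = 2 segments.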
_SPARSE_CONTEXT_PREFIX_KEY = "_context_in_seq_"
def _move_sparse_tensor_out_context(input_context, input_sequences, num_unroll):
"""Moves `SparseTensor`s from `input_context` into `input_sequences` as seq.
  For `key, value` pairs in `input_context` with a `SparseTensor` `value`, this
  removes them from `input_context`, transforms the `value` into a sequence,
  and then adds the `key` and the transformed `value` to `input_sequences`.
  The transformation is done by adding a new first dimension of `value_length`
  equal to that of the other values in `input_sequences` and tiling the `value`
  every `num_unroll` steps.
Args:
input_context: dictionary with `Tensor` or `SparseTensor` values. To be
modified to take out `SparseTensor` values.
input_sequences: dictionary with `Tensor` or `SparseTensor` values. To be
modified to add transformed `SparseTensor` values from `input_context`.
    num_unroll: int, the number of time steps per segment; the context `value`
      is tiled once every `num_unroll` steps.
"""
value_length = array_ops.constant(1)
if input_sequences:
seq = list(input_sequences.values())[0]
if isinstance(seq, ops.Tensor):
with ops.control_dependencies([seq]):
value_length = array_ops.shape(seq)[0]
else:
value_length = seq.dense_shape[0]
value_length = math_ops.cast(value_length, dtype=dtypes.int64)
def _copy_sparse_tensor(sp_tensor):
"""Operation to tile a sparse tensor along a newly added 0 dimension.
Adding a new first dimension of `value_length` and tiling the `sp_tensor`
every `num_unroll` steps.
Args:
sp_tensor: `SparseTensor`.
Returns:
`SparseTensor` sequence with `sp_tensor` tiled.
"""
n = value_length // num_unroll
n = math_ops.cast(n, dtype=dtypes.int32)
values = array_ops.tile(sp_tensor.values, array_ops.expand_dims(n, 0))
shape = array_ops.concat(
[array_ops.expand_dims(value_length, 0), sp_tensor.dense_shape], 0)
# Construct new indices by multiplying old ones and prepending [0, n).
# First multiply indices n times along a newly created 0-dimension.
multiplied_indices = array_ops.tile(
array_ops.expand_dims(sp_tensor.indices, 0),
array_ops.stack([n, 1, 1]))
# Construct indicator for [0, n).
# [ [ [0] [0] ... [0] ]
# [ [num_unroll] [num_unroll] ... [num_unroll] ]
# ...
# [ [num_unroll*(n-1)] [num_unroll*(n-1)] ... [num_unroll*(n-1)] ] ]
# of shape [n, shape(sp_tensor.indices)[0], 1]
# Get current dimensions of indices.
dim0 = array_ops.shape(sp_tensor.indices)[0]
dim1 = array_ops.shape(sp_tensor.indices)[1]
ind = math_ops.range(start=0, limit=value_length, delta=num_unroll)
# ind.set_shape([n])
ind = array_ops.expand_dims(ind, 1)
ind = array_ops.expand_dims(ind, 2)
ind = array_ops.tile(ind, [1, dim0, 1])
# Concatenate both and reshape.
indices = array_ops.concat([ind, multiplied_indices], 2)
indices = array_ops.reshape(indices, [dim0 * n, dim1 + 1])
return sparse_tensor.SparseTensor(indices=indices,
values=values,
dense_shape=shape)
sparse_tensor_keys = [
k for k in sorted(input_context.keys())
if (isinstance(input_context[k], sparse_tensor.SparseTensor) or
isinstance(input_context[k], sparse_tensor.SparseTensorValue))]
for key in sparse_tensor_keys:
input_sequences[_SPARSE_CONTEXT_PREFIX_KEY + key] = _copy_sparse_tensor(
input_context[key])
del input_context[key]
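# Illustrative example for the transformation above: a context SparseTensor
# with dense_shape [d] and value_length == 2 * num_unroll becomes a sequence
# SparseTensor with dense_shape [value_length, d] whose entries appear at time
# steps 0 and num_unroll, i.e. once at the start of each unrolled segment.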
def _move_sparse_tensor_in_context(context, sequences):
sparse_tensor_keys = [
k for k in sorted(sequences) if k.startswith(_SPARSE_CONTEXT_PREFIX_KEY)]
for key in sparse_tensor_keys:
new_key = key[len(_SPARSE_CONTEXT_PREFIX_KEY):]
sp_tensor = sequences[key]
# Take out time dimension.
sp_tensor = sparse_tensor.SparseTensor(
sp_tensor.indices, # with only 0s at column 1 representing time.
sp_tensor.values,
array_ops.concat(
[[sp_tensor.dense_shape[0]], # batch
[1], # time
sp_tensor.dense_shape[2:]], # SparseTensor shape prior to batching
0))
new_shape = array_ops.concat(
[[sp_tensor.dense_shape[0]], sp_tensor.dense_shape[2:]], 0)
context[new_key] = sparse_ops.sparse_reshape(sp_tensor, new_shape)
del sequences[key]
def _deconstruct_sparse_tensor_seq(input_sequence, shared_name=None):
"""Converts `SparseTensor` values into `Tensors` of IDs and meta data.
Given a dict of keys -> `Tensor` or `SparseTensor` transforms the
`SparseTensor` values into `Tensor` values of IDs by calling `_store_sparse`.
  The IDs are pointers into an underlying `SparseTensorsMap` that is being
constructed. Additional meta data is returned in order to be able to
reconstruct `SparseTensor` values after batching and segmenting the IDs
`Tensor`.
Args:
input_sequence: dictionary with `Tensor` or `SparseTensor` values.
shared_name: The shared name for the underlying `SparseTensorsMap`
(optional, defaults to the name of the newly created op).
Returns:
A tuple `(sequence, sparse_tensor_keys, tensor_list)` where `sequence` is
dictionary with the same keys as `input_sequence` but only `Tensor` values,
`sparse_tensor_keys` is a list of the keys of the `SparseTensor` values that
  were converted, and `tensor_list` is a list of the same length containing the
  `Operation`s that produced the stored handle `Tensor`s.
"""
sparse_tensor_keys = [
k for k in sorted(input_sequence.keys())
if (isinstance(input_sequence[k], sparse_tensor.SparseTensor) or
isinstance(input_sequence[k], sparse_tensor.SparseTensorValue))]
if not sparse_tensor_keys:
return input_sequence, None, sparse_tensor_keys
sparse_tensor_list = [input_sequence[k] for k in sparse_tensor_keys]
tensor_list = [_store_sparse(sp_tensor, shared_name=shared_name)
for sp_tensor in sparse_tensor_list]
transformed_input_seq = dict(input_sequence)
tensor_op_list = []
for i, k in enumerate(sparse_tensor_keys):
transformed_input_seq[k] = tensor_list[i]
tensor_op_list += [tensor_list[i].op]
return transformed_input_seq, sparse_tensor_keys, tensor_op_list
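# Note: _deconstruct_sparse_tensor_seq and _reconstruct_sparse_tensor_seq below
# form a round trip: SparseTensor values are swapped for handle Tensors (IDs
# into a SparseTensorsMap) before batching/segmenting in the SQSS, and the
# handles are resolved back into SparseTensors, with the extra
# [batch_size, num_unroll] leading dimensions, once a batch has been dequeued.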
def _reconstruct_sparse_tensor_seq(sequence,
sparse_tensor_keys,
tensor_op_list,
batch_size,
num_unroll):
"""Inverse of _deconstruct_sparse_tensor_seq.
Given a dict of keys -> `Tensor` reconstructs `SparseTensor` values for keys
in `sparse_tensor_keys`. Their `Tensor` values are assumed to be IDs into the
underlying `SparseTensorsMap`. The `dense_shape` of the `SparseTensor`s is
`[batch_size, num_unroll, d_0, d_1, ..., d_n]` when the original
`SparseTensor` that got deconstructed with `_deconstruct_sparse_tensor_seq`
has a `dense_shape` of `[None, d_0, d_1, ..., d_n]`.
Args:
sequence: dictionary with only `Tensor` values that is being updated.
sparse_tensor_keys: list of the keys present in `sequence` identifying
`SparseTensor` values that should be reconstructed.
tensor_op_list: list of the same length as `sparse_tensor_keys` with
`Tensor` objects.
batch_size: int or int32 scalar `Tensor`, how large minibatches should
be.
num_unroll: Python integer, how many time steps were unrolled at a time.
"""
def _flatten_tensor(tensor):
"""Flattens `Tensor` of `shape [batch_size, num_unroll]` into 1D `Tensor`.
The main use of this function is to work around the limitation of
`_restore_sparse` to only accept 1D handles.
Args:
tensor: 2D `Tensor` of `shape [batch_size, num_unroll]`
Returns:
1D `Tensor`.
"""
return array_ops.reshape(tensor, [-1])
def _unflatten_sparse_tensor(sp_tensor):
"""Recreates `[batch_size, num_unroll]` dimensions in the `SparseTensor`.
Counter-part of `_flatten_tensor` which is called on the input of
`_restore_sparse` while this method is called on the output of it.
Together they work around the limitation of `_restore_sparse` to only
accept 1D handles.
The `indices` in `sp_tensor` is a 2D `Tensor` of `shape [N, ndims]`, where
`N` is the number of `values` and `ndims` is the number of dimension in its
dense counterpart. Among `ndims` the first entry corresponds to the batch
dimension `[0, num_unroll * batch_size)` from which we need to recreate the
2 dimensions `batch_size` and `num_unroll`.
The reason this reconstruction works is because the output of
`_restore_sparse` despite being a `SparseTensor` is actually dense w.r.t.
that first entry.
Args:
sp_tensor: A SparseTensor.
Returns:
A SparseTensor with a +1 higher rank than the input.
"""
idx_batch = math_ops.cast(
math_ops.floor(sp_tensor.indices[:, 0] / num_unroll), dtypes.int64)
idx_time = math_ops.mod(sp_tensor.indices[:, 0], num_unroll)
indices = array_ops.concat(
[
array_ops.expand_dims(idx_batch, 1),
array_ops.expand_dims(idx_time, 1), sp_tensor.indices[:, 1:]
],
axis=1)
dense_shape = array_ops.concat(
[[math_ops.cast(batch_size, dtype=dtypes.int64)],
[math_ops.cast(num_unroll, dtype=dtypes.int64)],
sp_tensor.dense_shape[1:]], axis=0)
return sparse_tensor.SparseTensor(
indices=indices,
values=sp_tensor.values,
dense_shape=dense_shape)
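  # Worked example for the index reconstruction above: with num_unroll = 3 and
  # batch_size = 2, a flattened first index of 4 maps to
  #   idx_batch = floor(4 / 3) = 1 and idx_time = 4 mod 3 = 1,
  # i.e. batch element 1, time step 1.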
if not sparse_tensor_keys:
return
tensor_list = [sequence[k] for k in sparse_tensor_keys]
sp_tensors = [
_restore_sparse(sparse_map_op=i,
# Flatten the 2D Tensor [batch_size, num_unroll] of
# handles to a 1D Tensor.
# Reconstruct the dimensions later.
# TODO(b/34247140): Remove this workaround.
sparse_handles=_flatten_tensor(s), rank=None)
for i, s in zip(tensor_op_list, tensor_list)]
num_unroll = ops.convert_to_tensor(num_unroll, dtype=dtypes.int64,
name="num_unroll_int64")
# Recreate the [batch_size, num_unroll] dimensions in the SparseTensors.
# The dense_shape will have a +1 higher rank.
# TODO(b/34247140): Remove this workaround.
sp_tensors_higher_dim = [_unflatten_sparse_tensor(s) for s in sp_tensors]
# Set values to SparseTensors for sparse_tensor_keys.
for i, key in enumerate(sparse_tensor_keys):
sequence[key] = sp_tensors_higher_dim[i]
return
|
tensorflow-master
|
tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/training/python/training/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.SequenceQueueingStateSaver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib.training.python.training import sequence_queueing_state_saver as sqss
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class SequenceQueueingStateSaverTest(test.TestCase):
def testSequenceInputWrapper(self):
with self.cached_session():
length = 3
key = "key"
padded_length = 4
sequences = {
"seq1": np.random.rand(padded_length, 5),
"seq2": np.random.rand(padded_length, 4, 2)
}
context = {"context1": [3, 4]}
input_wrapper = sqss._SequenceInputWrapper(length, key, sequences,
context)
self.assertTrue(isinstance(input_wrapper.length, ops.Tensor))
self.assertTrue(isinstance(input_wrapper.key, ops.Tensor))
self.assertTrue(isinstance(input_wrapper.sequences["seq1"], ops.Tensor))
self.assertTrue(isinstance(input_wrapper.sequences["seq2"], ops.Tensor))
self.assertTrue(isinstance(input_wrapper.context["context1"], ops.Tensor))
def testStateSaverWithTwoSimpleSteps(self):
with self.cached_session() as sess:
batch_size_value = 2
batch_size = constant_op.constant(batch_size_value)
num_unroll = 2
length = 3
key = string_ops.string_join([
"key_", string_ops.as_string(
math_ops.cast(10000 * random_ops.random_uniform(()),
dtypes.int32))
])
padded_length = 4
sequences = {
"seq1": np.random.rand(padded_length, 5),
"seq2": np.random.rand(padded_length, 4, 2)
}
context = {"context1": [3, 4]}
initial_states = {
"state1": np.random.rand(6, 7),
"state2": np.random.rand(8)
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states,
capacity=100)
initial_key_value_0, _ = sess.run((key, state_saver.prefetch_op))
initial_key_value_1, _ = sess.run((key, state_saver.prefetch_op))
initial_key_value_0 = initial_key_value_0.decode("ascii")
initial_key_value_1 = initial_key_value_1.decode("ascii")
# Step 1
next_batch = state_saver.next_batch
(key_value, next_key_value, seq1_value, seq2_value, context1_value,
state1_value, state2_value, length_value, _, _) = sess.run(
(next_batch.key, next_batch.next_key, next_batch.sequences["seq1"],
next_batch.sequences["seq2"], next_batch.context["context1"],
next_batch.state("state1"), next_batch.state("state2"),
next_batch.length,
next_batch.save_state("state1", next_batch.state("state1") + 1),
next_batch.save_state("state2", next_batch.state("state2") - 1)))
expected_first_keys = set(
("00000_of_00002:%s" % x).encode("ascii")
for x in (initial_key_value_0, initial_key_value_1))
expected_second_keys = set(
("00001_of_00002:%s" % x).encode("ascii")
for x in (initial_key_value_0, initial_key_value_1))
expected_final_keys = set(
("STOP:%s" % x).encode("ascii")
for x in (initial_key_value_0, initial_key_value_1))
self.assertEqual(set(key_value), expected_first_keys)
self.assertEqual(set(next_key_value), expected_second_keys)
self.assertAllEqual(context1_value,
np.tile(context["context1"], (batch_size_value, 1)))
self.assertAllEqual(seq1_value,
np.tile(sequences["seq1"][np.newaxis, 0:2, :],
(batch_size_value, 1, 1)))
self.assertAllEqual(seq2_value,
np.tile(sequences["seq2"][np.newaxis, 0:2, :, :],
(batch_size_value, 1, 1, 1)))
self.assertAllEqual(state1_value,
np.tile(initial_states["state1"],
(batch_size_value, 1, 1)))
self.assertAllEqual(state2_value,
np.tile(initial_states["state2"],
(batch_size_value, 1)))
self.assertAllEqual(length_value, [2, 2])
# Step 2
(key_value, next_key_value, seq1_value, seq2_value, context1_value,
state1_value, state2_value, length_value, _, _) = sess.run(
(next_batch.key, next_batch.next_key, next_batch.sequences["seq1"],
next_batch.sequences["seq2"], next_batch.context["context1"],
next_batch.state("state1"), next_batch.state("state2"),
next_batch.length,
next_batch.save_state("state1", next_batch.state("state1") + 1),
next_batch.save_state("state2", next_batch.state("state2") - 1)))
self.assertEqual(set(key_value), expected_second_keys)
self.assertEqual(set(next_key_value), expected_final_keys)
self.assertAllEqual(context1_value,
np.tile(context["context1"], (batch_size_value, 1)))
self.assertAllEqual(seq1_value,
np.tile(sequences["seq1"][np.newaxis, 2:4, :],
(batch_size_value, 1, 1)))
self.assertAllEqual(seq2_value,
np.tile(sequences["seq2"][np.newaxis, 2:4, :, :],
(batch_size_value, 1, 1, 1)))
self.assertAllEqual(state1_value, 1 + np.tile(initial_states["state1"],
(batch_size_value, 1, 1)))
self.assertAllEqual(state2_value, -1 + np.tile(initial_states["state2"],
(batch_size_value, 1)))
self.assertAllEqual(length_value, [1, 1])
# Finished. Let's make sure there's nothing left in the barrier.
self.assertEqual(0, state_saver.barrier.ready_size().eval())
def testStateSaverFailsIfPaddedLengthIsNotMultipleOfNumUnroll(self):
with self.cached_session() as sess:
batch_size = constant_op.constant(32)
num_unroll = 17
bad_padded_length = 3
length = array_ops.placeholder(dtypes.int32)
key = array_ops.placeholder(dtypes.string)
sequences = {
"seq1": array_ops.placeholder(
dtypes.float32, shape=(None, 5))
}
context = {}
initial_states = {
"state1": array_ops.placeholder(
dtypes.float32, shape=())
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states)
with self.assertRaisesOpError(
"should be a multiple of: 17, but saw value: %d" % bad_padded_length):
sess.run([state_saver.prefetch_op],
feed_dict={
length: 1,
key: "key",
sequences["seq1"]: np.random.rand(bad_padded_length, 5),
initial_states["state1"]: 1.0
})
def _testStateSaverFailsIfCapacityTooSmall(self, batch_size):
with self.cached_session() as sess:
num_unroll = 2
length = array_ops.placeholder(dtypes.int32)
key = array_ops.placeholder(dtypes.string)
sequences = {
"seq1": array_ops.placeholder(
dtypes.float32, shape=(None, 5)),
"seq2": array_ops.placeholder(
dtypes.float32, shape=(None,))
}
context = {}
initial_states = {
"state1": array_ops.placeholder(
dtypes.float32, shape=())
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states,
capacity=10)
sess.run([state_saver.prefetch_op],
feed_dict={
length: 1,
key: "key",
sequences["seq1"]: np.random.rand(num_unroll, 5),
sequences["seq2"]: np.random.rand(num_unroll),
initial_states["state1"]: 1.0
})
def testStateSaverFailsIfCapacityTooSmallTensor(self):
batch_size_value = 32
batch_size = constant_op.constant(batch_size_value)
with self.assertRaisesOpError(
".*capacity needs to be >= batch_size.*"):
self._testStateSaverFailsIfCapacityTooSmall(batch_size)
def testStateSaverFailsIfCapacityTooSmallInt(self):
batch_size = 32
with self.assertRaisesRegexp(
ValueError,
"capacity %d needs to be >= batch_size %d" % (10, batch_size)):
self._testStateSaverFailsIfCapacityTooSmall(batch_size)
def testStateSaverFailsIfInconsistentPaddedLength(self):
with self.cached_session() as sess:
batch_size = constant_op.constant(32)
num_unroll = 17
length = array_ops.placeholder(dtypes.int32)
key = array_ops.placeholder(dtypes.string)
sequences = {
"seq1": array_ops.placeholder(
dtypes.float32, shape=(None, 5)),
"seq2": array_ops.placeholder(
dtypes.float32, shape=(None,))
}
context = {}
initial_states = {
"state1": array_ops.placeholder(
dtypes.float32, shape=())
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states)
with self.assertRaisesOpError(
"Dimension 0 of tensor labeled sorted_sequences_seq2 "
"should be: %d, shape received: %d" % (num_unroll, 2 * num_unroll)):
sess.run([state_saver.prefetch_op],
feed_dict={
length: 1,
key: "key",
sequences["seq1"]: np.random.rand(num_unroll, 5),
sequences["seq2"]: np.random.rand(2 * num_unroll),
initial_states["state1"]: 1.0
})
def testStateSaverFailsIfInconsistentWriteState(self):
# TODO(b/26910386): Identify why this infrequently causes timeouts.
with self.cached_session() as sess:
batch_size = constant_op.constant(1)
num_unroll = 17
length = array_ops.placeholder(dtypes.int32)
key = array_ops.placeholder(dtypes.string)
sequences = {
"seq1": array_ops.placeholder(
dtypes.float32, shape=(None, 5))
}
context = {}
initial_states = {
"state1": array_ops.placeholder(
dtypes.float32, shape=())
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states)
next_batch = state_saver.next_batch
with self.assertRaisesRegexp(KeyError, "state was not declared: state2"):
save_op = next_batch.save_state("state2", None)
with self.assertRaisesRegexp(ValueError, "Rank check failed for.*state1"):
save_op = next_batch.save_state("state1", np.random.rand(1, 1))
with self.assertRaisesOpError(
r"convert_state1:0 should be: 1, shape received:\] \[1 1\]"):
state_input = array_ops.placeholder(dtypes.float32)
with ops.control_dependencies([state_saver.prefetch_op]):
save_op = next_batch.save_state("state1", state_input)
sess.run([save_op],
feed_dict={
length: 1,
key: "key",
sequences["seq1"]: np.random.rand(num_unroll, 5),
initial_states["state1"]: 1.0,
state_input: np.random.rand(1, 1)
})
def testStateSaverWithManyInputsReadWriteThread(self):
batch_size_value = 32
num_proc_threads = 100
with self.cached_session() as sess:
batch_size = constant_op.constant(batch_size_value)
num_unroll = 17
length = array_ops.placeholder(dtypes.int32)
key = array_ops.placeholder(dtypes.string)
sequences = {
"seq1": array_ops.placeholder(
dtypes.float32, shape=(None, 5)),
"seq2": array_ops.placeholder(
dtypes.float32, shape=(None, 4, 2)),
"seq3": array_ops.placeholder(
dtypes.float64, shape=(None,))
}
context = {
"context1": array_ops.placeholder(
dtypes.string, shape=(3, 4)),
"context2": array_ops.placeholder(
dtypes.int64, shape=())
}
initial_states = {
"state1": array_ops.placeholder(
dtypes.float32, shape=(6, 7)),
"state2": array_ops.placeholder(
dtypes.int32, shape=())
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states)
next_batch = state_saver.next_batch
cancel_op = state_saver.close(cancel_pending_enqueues=True)
update_1 = next_batch.save_state("state1", 1 + next_batch.state("state1"))
update_2 = next_batch.save_state("state2",
-1 + next_batch.state("state2"))
original_values = {}
def insert(which):
for i in range(20):
# Insert varying length inputs
pad_i = num_unroll * (1 + (i % 10))
length_i = int(np.random.rand() * pad_i)
key_value = "key_%02d_%04d" % (which, i)
stored_state = {
"length": length_i,
"seq1": np.random.rand(pad_i, 5),
"seq2": np.random.rand(pad_i, 4, 2),
"seq3": np.random.rand(pad_i),
"context1": np.random.rand(3, 4).astype(np.str),
"context2": np.asarray(
100 * np.random.rand(), dtype=np.int32),
"state1": np.random.rand(6, 7),
"state2": np.asarray(
100 * np.random.rand(), dtype=np.int32)
}
original_values[key_value] = stored_state
sess.run([state_saver.prefetch_op],
feed_dict={
length: stored_state["length"],
key: key_value,
sequences["seq1"]: stored_state["seq1"],
sequences["seq2"]: stored_state["seq2"],
sequences["seq3"]: stored_state["seq3"],
context["context1"]: stored_state["context1"],
context["context2"]: stored_state["context2"],
initial_states["state1"]: stored_state["state1"],
initial_states["state2"]: stored_state["state2"]
})
processed_count = [0]
def process_and_check_state():
next_batch = state_saver.next_batch
while True:
try:
(got_key, next_key, length, total_length, sequence, sequence_count,
context1, context2, seq1, seq2, seq3, state1, state2, _,
_) = (sess.run([
next_batch.key, next_batch.next_key, next_batch.length,
next_batch.total_length, next_batch.sequence,
next_batch.sequence_count, next_batch.context["context1"],
next_batch.context["context2"], next_batch.sequences["seq1"],
next_batch.sequences["seq2"], next_batch.sequences["seq3"],
next_batch.state("state1"), next_batch.state("state2"),
update_1, update_2
]))
except errors_impl.OutOfRangeError:
# SQSS has been closed
break
self.assertEqual(len(got_key), batch_size_value)
processed_count[0] += len(got_key)
for i in range(batch_size_value):
key_name = got_key[i].decode("ascii").split(":")[1]
# We really saved this unique key
self.assertTrue(key_name in original_values)
# The unique key matches next_key
self.assertEqual(key_name,
next_key[i].decode("ascii").split(":")[1])
# Pull out the random values we used to create this example
stored_state = original_values[key_name]
self.assertEqual(total_length[i], stored_state["length"])
self.assertEqual("%05d_of_%05d:%s" %
(sequence[i], sequence_count[i], key_name),
got_key[i].decode("ascii"))
expected_length = max(
0,
min(num_unroll,
stored_state["length"] - sequence[i] * num_unroll))
self.assertEqual(length[i], expected_length)
expected_state1 = stored_state["state1"] + sequence[i]
expected_state2 = stored_state["state2"] - sequence[i]
expected_sequence1 = stored_state["seq1"][sequence[i] * num_unroll:(
sequence[i] + 1) * num_unroll]
expected_sequence2 = stored_state["seq2"][sequence[i] * num_unroll:(
sequence[i] + 1) * num_unroll]
expected_sequence3 = stored_state["seq3"][sequence[i] * num_unroll:(
sequence[i] + 1) * num_unroll]
self.assertAllClose(state1[i], expected_state1)
self.assertAllEqual(state2[i], expected_state2)
# context1 is strings, which come back as bytes
self.assertAllEqual(context1[i].astype(np.str),
stored_state["context1"])
self.assertAllEqual(context2[i], stored_state["context2"])
self.assertAllClose(seq1[i], expected_sequence1)
self.assertAllClose(seq2[i], expected_sequence2)
self.assertAllClose(seq3[i], expected_sequence3)
# Total number of inserts will be a multiple of batch_size
insert_threads = [
self.checkedThread(
insert, args=(which,)) for which in range(batch_size_value)
]
process_threads = [
self.checkedThread(process_and_check_state)
for _ in range(num_proc_threads)
]
for t in insert_threads:
t.start()
for t in process_threads:
t.start()
for t in insert_threads:
t.join()
time.sleep(3) # Allow the threads to run and process for a while
cancel_op.run()
for t in process_threads:
t.join()
# Each thread processed at least 2 sequence segments
self.assertGreater(processed_count[0], 2 * 20 * batch_size_value)
def testStateSaverProcessesExamplesInOrder(self):
with self.cached_session() as sess:
batch_size_value = 32
batch_size = constant_op.constant(batch_size_value)
num_unroll = 17
length = array_ops.placeholder(dtypes.int32)
key = array_ops.placeholder(dtypes.string)
sequences = {
"seq1": array_ops.placeholder(
dtypes.float32, shape=(None, 5))
}
context = {"context1": array_ops.placeholder(dtypes.string, shape=(3, 4))}
initial_states = {
"state1": array_ops.placeholder(
dtypes.float32, shape=())
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states)
next_batch = state_saver.next_batch
update = next_batch.save_state("state1", 1 + next_batch.state("state1"))
get_ready_size = state_saver.barrier.ready_size()
get_incomplete_size = state_saver.barrier.incomplete_size()
global_insert_key = [0]
def insert(insert_key):
# Insert varying length inputs
sess.run([state_saver.prefetch_op],
feed_dict={
length: np.random.randint(2 * num_unroll),
key: "%05d" % insert_key[0],
sequences["seq1"]: np.random.rand(2 * num_unroll, 5),
context["context1"]: np.random.rand(3, 4).astype(np.str),
initial_states["state1"]: 0.0
})
insert_key[0] += 1
for _ in range(batch_size_value * 100):
insert(global_insert_key)
def process_and_validate(check_key):
true_step = int(check_key[0] / 2) # Each entry has two slices
check_key[0] += 1
got_keys, input_index, _ = sess.run(
[next_batch.key, next_batch.insertion_index, update])
decoded_keys = [int(x.decode("ascii").split(":")[-1]) for x in got_keys]
min_key = min(decoded_keys)
min_index = int(min(input_index)) # numpy scalar
max_key = max(decoded_keys)
max_index = int(max(input_index)) # numpy scalar
# The current min key should be above the previous min
self.assertEqual(min_key, true_step * batch_size_value)
self.assertEqual(max_key, (true_step + 1) * batch_size_value - 1)
self.assertEqual(2**63 + min_index, true_step * batch_size_value)
self.assertEqual(2**63 + max_index,
(true_step + 1) * batch_size_value - 1)
# There are now (batch_size * 100 * 2) / batch_size = 200 full steps
global_step_key = [0]
for _ in range(200):
process_and_validate(global_step_key)
# Processed everything in the queue
self.assertEqual(get_incomplete_size.eval(), 0)
self.assertEqual(get_ready_size.eval(), 0)
def testStateSaverCanHandleVariableBatchsize(self):
with self.cached_session() as sess:
batch_size = array_ops.placeholder(dtypes.int32)
num_unroll = 17
length = array_ops.placeholder(dtypes.int32)
key = array_ops.placeholder(dtypes.string)
sequences = {
"seq1": array_ops.placeholder(
dtypes.float32, shape=(None, 5))
}
context = {"context1": array_ops.placeholder(dtypes.string, shape=(3, 4))}
initial_states = {
"state1": array_ops.placeholder(
dtypes.float32, shape=())
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states)
next_batch = state_saver.next_batch
update = next_batch.save_state("state1", 1 + next_batch.state("state1"))
for insert_key in range(128):
# Insert varying length inputs
sess.run([state_saver.prefetch_op],
feed_dict={
length: np.random.randint(2 * num_unroll),
key: "%05d" % insert_key,
sequences["seq1"]: np.random.rand(2 * num_unroll, 5),
context["context1"]: np.random.rand(3, 4).astype(np.str),
initial_states["state1"]: 0.0
})
all_received_indices = []
# Pull out and validate batch sizes 0, 1, ..., 7
for batch_size_value in range(8):
got_keys, input_index, context1, seq1, state1, _ = sess.run(
[
next_batch.key, next_batch.insertion_index,
next_batch.context["context1"], next_batch.sequences["seq1"],
next_batch.state("state1"), update
],
feed_dict={batch_size: batch_size_value})
# Indices may have come in out of order within the batch
all_received_indices.append(input_index.tolist())
self.assertEqual(got_keys.size, batch_size_value)
self.assertEqual(input_index.size, batch_size_value)
self.assertEqual(context1.shape, (batch_size_value, 3, 4))
self.assertEqual(seq1.shape, (batch_size_value, num_unroll, 5))
self.assertEqual(state1.shape, (batch_size_value,))
# Each input was split into 2 iterations (sequences size == 2*num_unroll)
expected_indices = [[], [0], [0, 1], [1, 2, 3], [2, 3, 4, 5],
[4, 5, 6, 7, 8], [6, 7, 8, 9, 10, 11],
[9, 10, 11, 12, 13, 14, 15]]
self.assertEqual(len(all_received_indices), len(expected_indices))
for received, expected in zip(all_received_indices, expected_indices):
self.assertAllEqual([x + 2**63 for x in received], expected)
def testStateSaverScopeNames(self):
batch_size = constant_op.constant(2)
sqss_scope_name = "unique_scope_name_for_sqss"
num_unroll = 2
length = 3
key = string_ops.string_join([
"key_", string_ops.as_string(
math_ops.cast(10000 * random_ops.random_uniform(()), dtypes.int32))
])
padded_length = 4
sequences = {
"seq1": np.random.rand(padded_length, 5),
"seq2": np.random.rand(padded_length, 4, 2)
}
context = {"context1": [3, 4]}
initial_states = {
"state1": np.random.rand(6, 7),
"state2": np.random.rand(8)
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states,
name=sqss_scope_name)
prefetch_op = state_saver.prefetch_op
next_batch = state_saver.next_batch
self.assertTrue(
state_saver.barrier.barrier_ref.name.startswith("%s/" %
sqss_scope_name))
self.assertTrue(prefetch_op.name.startswith("%s/" % sqss_scope_name))
self.assertTrue(next_batch.key.name.startswith("%s/" % sqss_scope_name))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/training/python/training/sequence_queueing_state_saver_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.training.bucket."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.training.python.training import bucket_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
def _which_bucket(bucket_edges, v):
"""Identify which bucket v falls into.
Args:
bucket_edges: int array, bucket edges
v: int scalar, index
Returns:
int scalar, the bucket.
If v < bucket_edges[0], return 0.
If bucket_edges[0] <= v < bucket_edges[1], return 1.
...
If bucket_edges[-2] <= v < bucket_edges[-1], return len(bucket_edges) - 1.
If v >= bucket_edges[-1], return len(bucket_edges) + 1.
"""
v = np.asarray(v)
full = [0] + bucket_edges
found = np.where(np.logical_and(v >= full[:-1], v < full[1:]))[0]
if not found.size:
return len(full)
return found[0]
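# Illustrative mapping for the helper above, a sketch using the boundaries that
# appear later in these tests (bucket_edges = [3, 4, 5, 10]):
#   _which_bucket([3, 4, 5, 10], 2)   -> 0  (v < 3)
#   _which_bucket([3, 4, 5, 10], 3)   -> 1  (3 <= v < 4)
#   _which_bucket([3, 4, 5, 10], 7)   -> 3  (5 <= v < 10)
#   _which_bucket([3, 4, 5, 10], 12)  -> 5  (v >= 10, i.e. len(bucket_edges) + 1)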
class BucketTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
self.scalar_int_feed = array_ops.placeholder(dtypes_lib.int32, ())
self.unk_int64_feed = array_ops.placeholder(dtypes_lib.int64, (None,))
self.vec3_str_feed = array_ops.placeholder(dtypes_lib.string, (3,))
self.sparse_c = sparse_tensor.SparseTensor(
indices=[[0]],
values=[1.0],
dense_shape=[1])
self._coord = coordinator.Coordinator()
# Make capacity very large so we can feed all the inputs in the
# main thread without blocking
input_queue = data_flow_ops.PaddingFIFOQueue(
5000,
dtypes=[dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.string],
shapes=[(), (None,), (3,)])
self._input_enqueue_op = input_queue.enqueue(
(self.scalar_int_feed, self.unk_int64_feed, self.vec3_str_feed))
self.scalar_int, self.unk_int64, self.vec3_str = input_queue.dequeue()
self._threads = None
self._close_op = input_queue.close()
self._sess = None
def enqueue_inputs(self, sess, feed_dict):
sess.run(self._input_enqueue_op, feed_dict=feed_dict)
def start_queue_runners(self, sess):
# Store session to be able to close inputs later
if self._sess is None:
self._sess = sess
self._threads = queue_runner_impl.start_queue_runners(coord=self._coord)
def tearDown(self):
if self._sess is not None:
self._sess.run(self._close_op)
self._coord.request_stop()
self._coord.join(self._threads)
def testSingleBucket(self):
bucketed_dynamic = bucket_ops.bucket(
tensors=[self.scalar_int, self.unk_int64, self.vec3_str, self.sparse_c],
which_bucket=constant_op.constant(0),
num_buckets=2,
batch_size=32,
num_threads=10,
dynamic_pad=True)
# Check shape inference on bucketing outputs
self.assertAllEqual(
[[32], [32, None], [32, 3], [None, None]],
[out.get_shape().as_list() for out in bucketed_dynamic[1]])
with self.cached_session() as sess:
for v in range(32):
self.enqueue_inputs(sess, {
self.scalar_int_feed: v,
self.unk_int64_feed: v * [v],
self.vec3_str_feed: 3 * [str(v)]
})
self.start_queue_runners(sess)
# Get a single minibatch
bucketed_values = sess.run(bucketed_dynamic)
# (which_bucket, bucket_tensors).
self.assertEqual(2, len(bucketed_values))
# Count number of bucket_tensors.
self.assertEqual(4, len(bucketed_values[1]))
# Ensure bucket 0 was used for all minibatch entries.
self.assertAllEqual(0, bucketed_values[0])
expected_scalar_int = np.arange(32)
expected_unk_int64 = np.zeros((32, 31)).astype(np.int64)
for i in range(32):
expected_unk_int64[i, :i] = i
expected_vec3_str = np.vstack(3 * [np.arange(32).astype(bytes)]).T
# Must resort the output because num_threads > 1 leads to
# sometimes-inconsistent insertion order.
resort = np.argsort(bucketed_values[1][0])
self.assertAllEqual(expected_scalar_int, bucketed_values[1][0][resort])
self.assertAllEqual(expected_unk_int64, bucketed_values[1][1][resort])
self.assertAllEqual(expected_vec3_str, bucketed_values[1][2][resort])
def testBatchSizePerBucket(self):
which_bucket = control_flow_ops.cond(self.scalar_int < 5,
lambda: constant_op.constant(0),
lambda: constant_op.constant(1))
batch_sizes = [5, 10]
bucketed_dynamic = bucket_ops.bucket(
tensors=[self.scalar_int, self.unk_int64, self.vec3_str, self.sparse_c],
which_bucket=which_bucket,
num_buckets=2,
batch_size=batch_sizes,
num_threads=1,
dynamic_pad=True)
# Check shape inference on bucketing outputs
self.assertAllEqual(
[[None], [None, None], [None, 3], [None, None]],
[out.get_shape().as_list() for out in bucketed_dynamic[1]])
with self.cached_session() as sess:
for v in range(15):
self.enqueue_inputs(sess, {
self.scalar_int_feed: v,
self.unk_int64_feed: v * [v],
self.vec3_str_feed: 3 * [str(v)]
})
self.start_queue_runners(sess)
# Get two minibatches (one with small values, one with large).
bucketed_values_0 = sess.run(bucketed_dynamic)
bucketed_values_1 = sess.run(bucketed_dynamic)
# Figure out which output has the small values
if bucketed_values_0[0] < 5:
bucketed_values_large, bucketed_values_small = (bucketed_values_1,
bucketed_values_0)
else:
bucketed_values_small, bucketed_values_large = (bucketed_values_0,
bucketed_values_1)
# Ensure bucket 0 was used for the small values and bucket 1 for the large.
self.assertAllEqual(0, bucketed_values_small[0])
self.assertAllEqual(1, bucketed_values_large[0])
# Check that the batch sizes differ per bucket
self.assertEqual(5, len(bucketed_values_small[1][0]))
self.assertEqual(10, len(bucketed_values_large[1][0]))
def testEvenOddBuckets(self):
which_bucket = (self.scalar_int % 2)
bucketed_dynamic = bucket_ops.bucket(
tensors=[self.scalar_int, self.unk_int64, self.vec3_str, self.sparse_c],
which_bucket=which_bucket,
num_buckets=2,
batch_size=32,
num_threads=10,
dynamic_pad=True)
# Check shape inference on bucketing outputs
self.assertAllEqual(
[[32], [32, None], [32, 3], [None, None]],
[out.get_shape().as_list() for out in bucketed_dynamic[1]])
with self.cached_session() as sess:
for v in range(64):
self.enqueue_inputs(sess, {
self.scalar_int_feed: v,
self.unk_int64_feed: v * [v],
self.vec3_str_feed: 3 * [str(v)]
})
self.start_queue_runners(sess)
# Get two minibatches (one containing even values, one containing odds)
bucketed_values_0 = sess.run(bucketed_dynamic)
bucketed_values_1 = sess.run(bucketed_dynamic)
# (which_bucket, bucket_tensors).
self.assertEqual(2, len(bucketed_values_0))
self.assertEqual(2, len(bucketed_values_1))
# Count number of bucket_tensors.
self.assertEqual(4, len(bucketed_values_0[1]))
self.assertEqual(4, len(bucketed_values_1[1]))
# Figure out which output has the even values (there's
# randomness due to the multithreaded nature of bucketing)
if bucketed_values_0[0] % 2 == 1:
bucketed_values_even, bucketed_values_odd = (bucketed_values_1,
bucketed_values_0)
else:
bucketed_values_even, bucketed_values_odd = (bucketed_values_0,
bucketed_values_1)
# Ensure bucket 0 was used for the even values and bucket 1 for the odd.
self.assertAllEqual(0, bucketed_values_even[0])
self.assertAllEqual(1, bucketed_values_odd[0])
# Test the first bucket outputted, the evens starting at 0
expected_scalar_int = np.arange(0, 32 * 2, 2)
expected_unk_int64 = np.zeros((32, 31 * 2)).astype(np.int64)
for i in range(0, 32):
expected_unk_int64[i, :2 * i] = 2 * i
expected_vec3_str = np.vstack(3 *
[np.arange(0, 32 * 2, 2).astype(bytes)]).T
# Must resort the output because num_threads > 1 leads to
# sometimes-inconsistent insertion order.
resort = np.argsort(bucketed_values_even[1][0])
self.assertAllEqual(expected_scalar_int,
bucketed_values_even[1][0][resort])
self.assertAllEqual(expected_unk_int64,
bucketed_values_even[1][1][resort])
self.assertAllEqual(expected_vec3_str, bucketed_values_even[1][2][resort])
# Test the second bucket outputted, the odds starting at 1
expected_scalar_int = np.arange(1, 32 * 2 + 1, 2)
expected_unk_int64 = np.zeros((32, 31 * 2 + 1)).astype(np.int64)
for i in range(0, 32):
expected_unk_int64[i, :2 * i + 1] = 2 * i + 1
expected_vec3_str = np.vstack(
3 * [np.arange(1, 32 * 2 + 1, 2).astype(bytes)]).T
# Must resort the output because num_threads > 1 leads to
# sometimes-inconsistent insertion order.
resort = np.argsort(bucketed_values_odd[1][0])
self.assertAllEqual(expected_scalar_int,
bucketed_values_odd[1][0][resort])
self.assertAllEqual(expected_unk_int64, bucketed_values_odd[1][1][resort])
self.assertAllEqual(expected_vec3_str, bucketed_values_odd[1][2][resort])
def testEvenOddBucketsFilterOutAllOdd(self):
which_bucket = (self.scalar_int % 2)
keep_input = math_ops.equal(which_bucket, 0)
bucketed_dynamic = bucket_ops.bucket(
tensors=[self.scalar_int, self.unk_int64, self.vec3_str],
which_bucket=which_bucket,
num_buckets=2,
batch_size=32,
num_threads=10,
keep_input=keep_input,
dynamic_pad=True)
# Check shape inference on bucketing outputs
self.assertAllEqual(
[[32], [32, None], [32, 3]],
[out.get_shape().as_list() for out in bucketed_dynamic[1]])
with self.cached_session() as sess:
for v in range(128):
self.enqueue_inputs(sess, {
self.scalar_int_feed: v,
self.unk_int64_feed: v * [v],
self.vec3_str_feed: 3 * [str(v)]
})
self.start_queue_runners(sess)
# Get two minibatches ([0, 2, ...] and [64, 66, ...])
bucketed_values_even0 = sess.run(bucketed_dynamic)
bucketed_values_even1 = sess.run(bucketed_dynamic)
# Ensure that bucket 1 was completely filtered out
self.assertAllEqual(0, bucketed_values_even0[0])
self.assertAllEqual(0, bucketed_values_even1[0])
# Merge their output for sorting and comparison
bucketed_values_all_elem0 = np.concatenate((bucketed_values_even0[1][0],
bucketed_values_even1[1][0]))
self.assertAllEqual(
np.arange(0, 128, 2), sorted(bucketed_values_all_elem0))
def testFailOnWrongBucketCapacities(self):
with self.assertRaisesRegexp(ValueError, r"must have exactly num_buckets"):
bucket_ops.bucket( # 2 buckets and 3 capacities raises ValueError.
tensors=[self.scalar_int, self.unk_int64, self.vec3_str],
which_bucket=constant_op.constant(0), num_buckets=2,
batch_size=32, bucket_capacities=[3, 4, 5])
class BucketBySequenceLengthTest(test.TestCase):
def _testBucketBySequenceLength(self,
allow_small_batch,
bucket_capacities=None,
drain_entire_queue=True):
ops.reset_default_graph()
# All inputs must be identical lengths across tuple index.
# The input reader will get input_length from the first tuple
# entry.
data_len = 4
labels_len = 3
input_pairs = [(length, ([np.int64(length)] * data_len,
[str(length).encode("ascii")] * labels_len))
for length in (1, 3, 4, 5, 6, 10)]
lengths = array_ops.placeholder(dtypes_lib.int32, ())
data = array_ops.placeholder(dtypes_lib.int64, (data_len,))
labels = array_ops.placeholder(dtypes_lib.string, (labels_len,))
batch_size = 8
bucket_boundaries = [3, 4, 5, 10]
num_pairs_to_enqueue = 50 * batch_size + 100
# Make capacity very large so we can feed all the inputs in the
# main thread without blocking
input_queue = data_flow_ops.FIFOQueue(
5000, (dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.string), (
(), (data_len,), (labels_len,)))
input_enqueue_op = input_queue.enqueue((lengths, data, labels))
lengths_t, data_t, labels_t = input_queue.dequeue()
close_input_op = input_queue.close()
(out_lengths_t, data_and_labels_t) = (bucket_ops.bucket_by_sequence_length(
input_length=lengths_t,
tensors=[data_t, labels_t],
batch_size=batch_size,
bucket_boundaries=bucket_boundaries,
bucket_capacities=bucket_capacities,
allow_smaller_final_batch=allow_small_batch,
num_threads=10))
expected_batch_size = None if allow_small_batch else batch_size
self.assertEqual(out_lengths_t.get_shape().as_list(), [expected_batch_size])
self.assertEqual(data_and_labels_t[0].get_shape().as_list(),
[expected_batch_size, data_len])
self.assertEqual(data_and_labels_t[1].get_shape().as_list(),
[expected_batch_size, labels_len])
def _read_test(sess):
num_pairs_dequeued = 0
try:
while drain_entire_queue or num_pairs_dequeued < 40 * batch_size:
(out_lengths, (data, labels)) = sess.run(
(out_lengths_t, data_and_labels_t))
num_pairs_dequeued += out_lengths.shape[0]
if allow_small_batch:
self.assertEqual(data_len, data.shape[1])
self.assertEqual(labels_len, labels.shape[1])
self.assertGreaterEqual(batch_size, out_lengths.shape[0])
self.assertGreaterEqual(batch_size, data.shape[0])
self.assertGreaterEqual(batch_size, labels.shape[0])
else:
self.assertEqual((batch_size, data_len), data.shape)
self.assertEqual((batch_size, labels_len), labels.shape)
self.assertEqual((batch_size,), out_lengths.shape)
for (lr, dr, tr) in zip(out_lengths, data, labels):
# Make sure length matches data (here it's the same value).
self.assertEqual(dr[0], lr)
# Make sure data & labels match.
self.assertEqual(dr[0], int(tr[0].decode("ascii")))
# Make sure for each row, data came from the same bucket.
self.assertEqual(
_which_bucket(bucket_boundaries, dr[0]),
_which_bucket(bucket_boundaries, dr[1]))
except errors.OutOfRangeError:
if allow_small_batch:
self.assertEqual(num_pairs_to_enqueue, num_pairs_dequeued)
else:
# Maximum left over in the queues should be at most one less than the
# batch_size, for every bucket.
num_buckets = len(bucket_boundaries) + 2
self.assertLessEqual(
num_pairs_to_enqueue - (batch_size - 1) * num_buckets,
num_pairs_dequeued)
with self.cached_session() as sess:
coord = coordinator.Coordinator()
# Feed the inputs, then close the input thread.
for _ in range(num_pairs_to_enqueue):
which = random.randint(0, len(input_pairs) - 1)
length, pair = input_pairs[which]
sess.run(input_enqueue_op,
feed_dict={lengths: length,
data: pair[0],
labels: pair[1]})
sess.run(close_input_op)
# Start the queue runners
threads = queue_runner_impl.start_queue_runners(coord=coord)
# Read off the top of the bucket and ensure correctness of output
_read_test(sess)
coord.request_stop()
coord.join(threads)
def testBucketBySequenceLength(self):
self._testBucketBySequenceLength(allow_small_batch=False)
def testBucketBySequenceLengthAllow(self):
self._testBucketBySequenceLength(allow_small_batch=True)
def testBucketBySequenceLengthBucketCapacities(self):
# Above bucket_boundaries = [3, 4, 5, 10] so we need 5 capacities.
with self.assertRaisesRegexp(ValueError, r"must have exactly num_buckets"):
self._testBucketBySequenceLength(allow_small_batch=False,
bucket_capacities=[32, 32, 32, 32])
# Test with different capacities.
capacities = [48, 40, 32, 24, 16]
self._testBucketBySequenceLength(allow_small_batch=True,
bucket_capacities=capacities)
def testBucketBySequenceLengthShutdown(self):
self._testBucketBySequenceLength(allow_small_batch=True,
drain_entire_queue=False)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/training/python/training/bucket_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Strategies for placing variables on parameter servers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import numpy as np
from tensorflow.python.framework import tensor_shape
class RandomStrategy(object):
"""Returns a random PS task for op placement.
This may perform better than the default round-robin placement if you
have a large number of variables. Depending on your architecture and
number of parameter servers, round-robin can lead to situations where
all of one type of variable is placed on a single PS task, which may
lead to contention issues.
This strategy uses a hash function on the name of each op for deterministic
placement.
"""
def __init__(self, num_ps_tasks, seed=0):
"""Creates a new `RandomStrategy`."""
self._num_tasks = num_ps_tasks
self._seed = seed
def __call__(self, op):
"""Chooses a ps task index for the given `Operation`."""
key = "%s_%d" % (op.name, self._seed)
key = key.encode("utf-8")
# Use MD5 instead of Python's built-in hash() to get consistent outputs
# between runs.
n = int(hashlib.md5(key).hexdigest(), 16)
return int(n % self._num_tasks)
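# A minimal usage sketch, assuming `import tensorflow as tf`, a TF 1.x-style
# graph, and a cluster with 3 ps tasks:
#
#   strategy = RandomStrategy(num_ps_tasks=3, seed=7)
#   with tf.device(tf.compat.v1.train.replica_device_setter(
#       ps_tasks=3, ps_strategy=strategy)):
#     embedding = tf.compat.v1.get_variable("embedding", [10000, 128])
#
# Because placement is keyed on a hash of each op name, the same variable lands
# on the same ps task across runs.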
class GreedyLoadBalancingStrategy(object):
"""Returns the least-loaded ps task for op placement.
The load is calculated by a user-specified load function passed in at
construction. There are no units for load, and the load function is
responsible for providing an internally consistent measure.
Note that this strategy is very sensitive to the exact order in which
ps ops (typically variables) are created, as it greedily places ops
on the least-loaded ps at the point each op is processed.
One reasonable heuristic is the `byte_size_load_fn`, which
estimates load as the number of bytes that would be used to store and
transmit the entire variable. More advanced load functions
could consider the difference in access patterns across ops, or trade
off CPU-intensive ops with RAM-intensive ops with network bandwidth.
This class is intended to be used as a `ps_strategy` in
`tf.compat.v1.train.replica_device_setter`.
"""
def __init__(self, num_tasks, load_fn):
"""Create a new `LoadBalancingStrategy`.
Args:
num_tasks: Number of ps tasks to cycle among.
load_fn: A callable that takes an `Operation` and returns a
numeric load value for that op.
"""
self._num_tasks = num_tasks
self._load_fn = load_fn
self._ps_loads = np.zeros(num_tasks)
def __call__(self, op):
"""Choose a ps task index for the given `Operation`.
Args:
op: An `Operation` to be placed on ps.
Returns:
The next ps task index to use for the `Operation`. Greedily
places the op on the least-loaded ps task so far, as determined
by the load function.
"""
task = np.argmin(self._ps_loads)
self._ps_loads[task] += self._load_fn(op)
return task
def byte_size_load_fn(op):
"""Load function that computes the byte size of a single-output `Operation`.
This is intended to be used with `"Variable"` ops, which have a single
`Tensor` output with the contents of the variable. However, it can also be
used for calculating the size of any op that has a single output.
Intended to be used with `GreedyLoadBalancingStrategy`.
Args:
op: An `Operation` with a single output, typically a "Variable" op.
Returns:
The number of bytes in the output `Tensor`.
Raises:
ValueError: if `op` does not have a single output, or if the shape of the
single output is not fully-defined.
"""
if len(op.outputs) != 1:
raise ValueError("Op %s must have a single output" % op)
output = op.outputs[0]
elem_size = output.dtype.size
shape = output.get_shape()
if not shape.is_fully_defined():
# Due to legacy behavior, scalar "Variable" ops have output Tensors that
# have unknown shape when the op is created (and hence passed to this
# load function for placement), even though the scalar shape is set
# explicitly immediately afterward.
shape = tensor_shape.TensorShape(op.get_attr("shape"))
shape.assert_is_fully_defined()
return shape.num_elements() * elem_size
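# A minimal sketch combining the strategy and load function above, assuming
# `import tensorflow as tf`, a TF 1.x-style graph, and 4 ps tasks:
#
#   greedy = GreedyLoadBalancingStrategy(num_tasks=4, load_fn=byte_size_load_fn)
#   with tf.device(tf.compat.v1.train.replica_device_setter(
#       ps_tasks=4, ps_strategy=greedy)):
#     w1 = tf.compat.v1.get_variable("w1", [1024, 1024])  # ~4 MiB of float32
#     w2 = tf.compat.v1.get_variable("w2", [1024])        # ~4 KiB of float32
#
# Each variable is assigned to whichever ps task currently holds the fewest
# bytes, as estimated by byte_size_load_fn.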
|
tensorflow-master
|
tensorflow/contrib/training/python/training/device_setter.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hyperparameter values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import numbers
import re
import six
from tensorflow.contrib.training.python.training import hparam_pb2
from tensorflow.python.framework import ops
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
# Define the regular expression for parsing a single clause of the input
# (delimited by commas). A legal clause looks like:
# <variable name>[<index>]? = <rhs>
# where <rhs> is either a single token or [] enclosed list of tokens.
# For example: "var[1] = a" or "x = [1,2,3]"
PARAM_RE = re.compile(r"""
(?P<name>[a-zA-Z][\w\.]*) # variable name: "var" or "x"
(\[\s*(?P<index>\d+)\s*\])? # (optional) index: "1" or None
\s*=\s*
((?P<val>[^,\[]*) # single value: "a" or None
|
\[(?P<vals>[^\]]*)\]) # list of values: None or "1,2,3"
($|,\s*)""", re.VERBOSE)
def _parse_fail(name, var_type, value, values):
"""Helper function for raising a value error for bad assignment."""
raise ValueError(
'Could not parse hparam \'%s\' of type \'%s\' with value \'%s\' in %s' %
(name, var_type.__name__, value, values))
def _reuse_fail(name, values):
"""Helper function for raising a value error for reuse of name."""
raise ValueError('Multiple assignments to variable \'%s\' in %s' % (name,
values))
def _process_scalar_value(name, parse_fn, var_type, m_dict, values,
results_dictionary):
"""Update results_dictionary with a scalar value.
Used to update the results_dictionary to be returned by parse_values when
encountering a clause with a scalar RHS (e.g. "s=5" or "arr[0]=5".)
Mutates results_dictionary.
Args:
name: Name of variable in assignment ("s" or "arr").
parse_fn: Function for parsing the actual value.
var_type: Type of named variable.
m_dict: Dictionary constructed from regex parsing.
m_dict['val']: RHS value (scalar)
m_dict['index']: List index value (or None)
values: Full expression being parsed
results_dictionary: The dictionary being updated for return by the parsing
function.
Raises:
ValueError: If the name has already been used.
"""
try:
parsed_value = parse_fn(m_dict['val'])
except ValueError:
_parse_fail(name, var_type, m_dict['val'], values)
# If no index is provided
if not m_dict['index']:
if name in results_dictionary:
_reuse_fail(name, values)
results_dictionary[name] = parsed_value
else:
if name in results_dictionary:
# If the name has already been used as a scalar, it will be in this
# dictionary and map to a non-dictionary value.
if not isinstance(results_dictionary.get(name), dict):
_reuse_fail(name, values)
else:
results_dictionary[name] = {}
index = int(m_dict['index'])
# Make sure the index position hasn't already been assigned a value.
if index in results_dictionary[name]:
_reuse_fail('{}[{}]'.format(name, index), values)
results_dictionary[name][index] = parsed_value
def _process_list_value(name, parse_fn, var_type, m_dict, values,
results_dictionary):
"""Update results_dictionary from a list of values.
Used to update results_dictionary to be returned by parse_values when
encountering a clause with a list RHS (e.g. "arr=[1,2,3]".)
Mutates results_dictionary.
Args:
name: Name of variable in assignment ("arr").
parse_fn: Function for parsing individual values.
var_type: Type of named variable.
m_dict: Dictionary constructed from regex parsing.
m_dict['val']: RHS value (scalar)
values: Full expression being parsed
results_dictionary: The dictionary being updated for return by the parsing
function.
Raises:
ValueError: If the name has an index or the values cannot be parsed.
"""
if m_dict['index'] is not None:
raise ValueError('Assignment of a list to a list index.')
elements = filter(None, re.split('[ ,]', m_dict['vals']))
# Make sure the name hasn't already been assigned a value
if name in results_dictionary:
_reuse_fail(name, values)
try:
results_dictionary[name] = [parse_fn(e) for e in elements]
except ValueError:
_parse_fail(name, var_type, m_dict['vals'], values)
def _cast_to_type_if_compatible(name, param_type, value):
"""Cast hparam to the provided type, if compatible.
Args:
name: Name of the hparam to be cast.
param_type: The type of the hparam.
value: The value to be cast, if compatible.
Returns:
The result of casting `value` to `param_type`.
Raises:
ValueError: If the type of `value` is not compatible with param_type.
* If `param_type` is a string type, but `value` is not.
* If `param_type` is a boolean, but `value` is not, or vice versa.
* If `param_type` is an integer type, but `value` is not.
* If `param_type` is a float type, but `value` is not a numeric type.
"""
fail_msg = (
"Could not cast hparam '%s' of type '%s' from value %r" %
(name, param_type, value))
# If `value` is already of type `param_type`, return it directly.
# `isinstance` is too weak (e.g. isinstance(True, int) == True).
if type(value) == param_type: # pylint: disable=unidiomatic-typecheck
return value
# Some callers use None, for which we can't do any casting/checking. :(
if issubclass(param_type, type(None)):
return value
# Avoid converting a non-string type to a string.
if (issubclass(param_type, (six.string_types, six.binary_type)) and
not isinstance(value, (six.string_types, six.binary_type))):
raise ValueError(fail_msg)
# Avoid converting a number or string type to a boolean or vice versa.
if issubclass(param_type, bool) != isinstance(value, bool):
raise ValueError(fail_msg)
# Avoid converting float to an integer (the reverse is fine).
if (issubclass(param_type, numbers.Integral) and
not isinstance(value, numbers.Integral)):
raise ValueError(fail_msg)
# Avoid converting a non-numeric type to a numeric type.
if (issubclass(param_type, numbers.Number) and
not isinstance(value, numbers.Number)):
raise ValueError(fail_msg)
return param_type(value)
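# Behaviour sketch for the compatibility checks above:
#   _cast_to_type_if_compatible('lr', float, 3)     -> 3.0 (int widens to float)
#   _cast_to_type_if_compatible('steps', int, 3.5)  raises ValueError
#   _cast_to_type_if_compatible('use_bn', bool, 1)  raises ValueError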
def parse_values(values, type_map, ignore_unknown=False):
"""Parses hyperparameter values from a string into a python map.
`values` is a string containing comma-separated `name=value` pairs.
For each pair, the value of the hyperparameter named `name` is set to
`value`.
If a hyperparameter name appears multiple times in `values`, a ValueError
is raised (e.g. 'a=1,a=2', 'a[1]=1,a[1]=2').
If a hyperparameter name appears in both an index assignment and a scalar
assignment, a ValueError is raised (e.g. 'a=[1,2,3],a[0] = 1').
The hyperparameter name may contain '.' symbols, which will result in an
attribute name that is only accessible through the getattr and setattr
functions. (And it must first be explicitly added through add_hparam.)
WARNING: Use of '.' in your variable names is allowed, but is not well
supported and not recommended.
The `value` in `name=value` must follow the syntax according to the
type of the parameter:
* Scalar integer: A Python-parsable integer value. E.g.: 1,
100, -12.
* Scalar float: A Python-parsable floating point value. E.g.: 1.0,
-.54e89.
* Boolean: Either true or false.
* Scalar string: A non-empty sequence of characters, excluding comma,
spaces, and square brackets. E.g.: foo, bar_1.
* List: A comma separated list of scalar values of the parameter type
enclosed in square brackets. E.g.: [1,2,3], [1.0,1e-12], [high,low].
When index assignment is used, the corresponding type_map key should be the
list name. E.g. for "arr[1]=0" the type_map must have the key "arr" (not
"arr[1]").
Args:
values: String. Comma separated list of `name=value` pairs where
'value' must follow the syntax described above.
type_map: A dictionary mapping hyperparameter names to types. Note every
parameter name in values must be a key in type_map. The values must
conform to the types indicated, where a value V is said to conform to a
type T if either V has type T, or V is a list of elements of type T.
Hence, for a multidimensional parameter 'x' taking float values,
'x=[0.1,0.2]' will parse successfully if type_map['x'] = float.
ignore_unknown: Bool. Whether values that are missing a type in type_map
should be ignored. If set to True, a ValueError will not be raised for
unknown hyperparameter type.
Returns:
A python map mapping each name to either:
* A scalar value.
* A list of scalar values.
* A dictionary mapping index numbers to scalar values.
(e.g. "x=5,L=[1,2],arr[1]=3" results in {'x':5,'L':[1,2],'arr':{1:3}}")
Raises:
ValueError: If there is a problem with input.
* If `values` cannot be parsed.
* If a list is assigned to a list index (e.g. 'a[1] = [1,2,3]').
* If the same name is assigned two different values (e.g. 'a=1,a=2',
'a[1]=1,a[1]=2', or 'a=1,a=[1]')
"""
results_dictionary = {}
pos = 0
while pos < len(values):
m = PARAM_RE.match(values, pos)
if not m:
raise ValueError('Malformed hyperparameter value: %s' % values[pos:])
# Check that there is a comma between parameters and move past it.
pos = m.end()
# Parse the values.
m_dict = m.groupdict()
name = m_dict['name']
if name not in type_map:
if ignore_unknown:
continue
raise ValueError('Unknown hyperparameter type for %s' % name)
type_ = type_map[name]
# Set up correct parsing function (depending on whether type_ is a bool)
if type_ == bool:
def parse_bool(value):
if value in ['true', 'True']:
return True
elif value in ['false', 'False']:
return False
else:
try:
return bool(int(value))
except ValueError:
_parse_fail(name, type_, value, values)
parse = parse_bool
else:
parse = type_
# If a single value is provided
if m_dict['val'] is not None:
_process_scalar_value(name, parse, type_, m_dict, values,
results_dictionary)
# If the assigned value is a list:
elif m_dict['vals'] is not None:
_process_list_value(name, parse, type_, m_dict, values,
results_dictionary)
else: # Not assigned a list or value
_parse_fail(name, type_, '', values)
return results_dictionary
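# A minimal sketch of the behaviour documented above:
#   parse_values('lr=0.3,layers=[10,20],use_bn=true',
#                {'lr': float, 'layers': int, 'use_bn': bool})
#   -> {'lr': 0.3, 'layers': [10, 20], 'use_bn': True}
#   parse_values('arr[0]=5,arr[2]=7', {'arr': int})
#   -> {'arr': {0: 5, 2: 7}}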
class HParams(object):
"""Class to hold a set of hyperparameters as name-value pairs.
A `HParams` object holds hyperparameters used to build and train a model,
such as the number of hidden units in a neural net layer or the learning rate
to use when training.
You first create a `HParams` object by specifying the names and values of the
hyperparameters.
To make them easily accessible the parameter names are added as direct
attributes of the class. A typical usage is as follows:
```python
# Create a HParams object specifying names and values of the model
# hyperparameters:
hparams = HParams(learning_rate=0.1, num_hidden_units=100)
# The hyperparameters are available as attributes of the HParams object:
hparams.learning_rate ==> 0.1
hparams.num_hidden_units ==> 100
```
Hyperparameters have a type, which is inferred from the type of the value
passed at construction time. The currently supported types are: integer,
float, boolean, string, and list of integer, float, boolean, or string.
You can override hyperparameter values by calling the
[`parse()`](#HParams.parse) method, passing a string of comma separated
`name=value` pairs. This is intended to make it possible to override
any hyperparameter values from a single command-line flag to which
the user passes 'hyper-param=value' pairs. It avoids having to define
one flag for each hyperparameter.
The syntax expected for each value depends on the type of the parameter.
See `parse()` for a description of the syntax.
Example:
```python
# Define a command line flag to pass name=value pairs.
# For example using argparse:
import argparse
parser = argparse.ArgumentParser(description='Train my model.')
parser.add_argument('--hparams', type=str,
help='Comma separated list of "name=value" pairs.')
args = parser.parse_args()
...
def my_program():
# Create a HParams object specifying the names and values of the
# model hyperparameters:
hparams = tf.contrib.training.HParams(
learning_rate=0.1,
num_hidden_units=100,
activations=['relu', 'tanh'])
# Override hyperparameter values by parsing the command line
hparams.parse(args.hparams)
# If the user passed `--hparams=learning_rate=0.3` on the command line
# then 'hparams' has the following attributes:
hparams.learning_rate ==> 0.3
hparams.num_hidden_units ==> 100
hparams.activations ==> ['relu', 'tanh']
# If the hyperparameters are in json format use parse_json:
hparams.parse_json('{"learning_rate": 0.3, "activations": "relu"}')
```
"""
_HAS_DYNAMIC_ATTRIBUTES = True # Required for pytype checks.
def __init__(self, hparam_def=None, model_structure=None, **kwargs):
"""Create an instance of `HParams` from keyword arguments.
The keyword arguments specify name-values pairs for the hyperparameters.
The parameter types are inferred from the type of the values passed.
The parameter names are added as attributes of `HParams` object, so they
can be accessed directly with the dot notation `hparams._name_`.
Example:
```python
# Define 3 hyperparameters: 'learning_rate' is a float parameter,
# 'num_hidden_units' an integer parameter, and 'activation' a string
# parameter.
hparams = tf.contrib.training.HParams(
learning_rate=0.1, num_hidden_units=100, activation='relu')
hparams.activation ==> 'relu'
```
Note that a few names are reserved and cannot be used as hyperparameter
names. If you use one of the reserved names, the constructor raises a
`ValueError`.
Args:
hparam_def: Serialized hyperparameters, encoded as a hparam_pb2.HParamDef
protocol buffer. If provided, this object is initialized by
deserializing hparam_def. Otherwise **kwargs is used.
model_structure: An instance of ModelStructure, defining the feature
crosses to be used in the Trial.
**kwargs: Key-value pairs where the key is the hyperparameter name and
the value is the value for the parameter.
Raises:
ValueError: If both `hparam_def` and initialization values are provided,
or if one of the arguments is invalid.
"""
# Register the hyperparameters and their type in _hparam_types.
# This simplifies the implementation of parse().
# _hparam_types maps the parameter name to a tuple (type, bool).
# The type value is the type of the parameter for scalar hyperparameters,
# or the type of the list elements for multidimensional hyperparameters.
# The bool value is True if the value is a list, False otherwise.
self._hparam_types = {}
self._model_structure = model_structure
if hparam_def:
self._init_from_proto(hparam_def)
if kwargs:
raise ValueError('hparam_def and initialization values are '
'mutually exclusive')
else:
for name, value in six.iteritems(kwargs):
self.add_hparam(name, value)
def _init_from_proto(self, hparam_def):
"""Creates a new HParams from `HParamDef` protocol buffer.
Args:
hparam_def: `HParamDef` protocol buffer.
"""
assert isinstance(hparam_def, hparam_pb2.HParamDef)
for name, value in hparam_def.hparam.items():
kind = value.WhichOneof('kind')
if kind.endswith('_value'):
# Single value.
if kind.startswith('int64'):
# Setting attribute value to be 'int' to ensure the type is compatible
# with both Python2 and Python3.
self.add_hparam(name, int(getattr(value, kind)))
elif kind.startswith('bytes'):
# Setting attribute value to be 'str' to ensure the type is compatible
# with both Python2 and Python3. UTF-8 encoding is assumed.
self.add_hparam(name, compat.as_str(getattr(value, kind)))
else:
self.add_hparam(name, getattr(value, kind))
else:
# List of values.
if kind.startswith('int64'):
# Setting attribute value to be 'int' to ensure the type is compatible
# with both Python2 and Python3.
self.add_hparam(name, [int(v) for v in getattr(value, kind).value])
elif kind.startswith('bytes'):
# Setting attribute value to be 'str' to ensure the type is compatible
# with both Python2 and Python3. UTF-8 encoding is assumed.
self.add_hparam(
name, [compat.as_str(v) for v in getattr(value, kind).value])
else:
self.add_hparam(name, [v for v in getattr(value, kind).value])
def add_hparam(self, name, value):
"""Adds {name, value} pair to hyperparameters.
Args:
name: Name of the hyperparameter.
value: Value of the hyperparameter. Can be one of the following types:
int, float, string, int list, float list, or string list.
Raises:
ValueError: if one of the arguments is invalid.
"""
# Keys in kwargs are unique, but 'name' could be the name of a pre-existing
# attribute of this object. In that case we refuse to use it as a
# hyperparameter name.
if getattr(self, name, None) is not None:
raise ValueError('Hyperparameter name is reserved: %s' % name)
if isinstance(value, (list, tuple)):
if not value:
raise ValueError(
'Multi-valued hyperparameters cannot be empty: %s' % name)
self._hparam_types[name] = (type(value[0]), True)
else:
self._hparam_types[name] = (type(value), False)
setattr(self, name, value)
def set_hparam(self, name, value):
"""Set the value of an existing hyperparameter.
This function verifies that the type of the value matches the type of the
existing hyperparameter.
Args:
name: Name of the hyperparameter.
value: New value of the hyperparameter.
Raises:
KeyError: If the hyperparameter doesn't exist.
ValueError: If there is a type mismatch.
"""
param_type, is_list = self._hparam_types[name]
if isinstance(value, list):
if not is_list:
raise ValueError(
'Must not pass a list for single-valued parameter: %s' % name)
setattr(self, name, [
_cast_to_type_if_compatible(name, param_type, v) for v in value])
else:
if is_list:
raise ValueError(
'Must pass a list for multi-valued parameter: %s.' % name)
setattr(self, name, _cast_to_type_if_compatible(name, param_type, value))
def del_hparam(self, name):
"""Removes the hyperparameter with key 'name'.
Does nothing if it isn't present.
Args:
name: Name of the hyperparameter.
"""
if hasattr(self, name):
delattr(self, name)
del self._hparam_types[name]
def parse(self, values):
"""Override existing hyperparameter values, parsing new values from a string.
See parse_values for more detail on the allowed format for values.
Args:
values: String. Comma separated list of `name=value` pairs where 'value'
must follow the syntax described above.
Returns:
The `HParams` instance.
Raises:
ValueError: If `values` cannot be parsed or a hyperparameter in `values`
doesn't exist.
"""
type_map = {}
for name, t in self._hparam_types.items():
param_type, _ = t
type_map[name] = param_type
values_map = parse_values(values, type_map)
return self.override_from_dict(values_map)
def override_from_dict(self, values_dict):
"""Override existing hyperparameter values, parsing new values from a dictionary.
Args:
values_dict: Dictionary of name:value pairs.
Returns:
The `HParams` instance.
Raises:
KeyError: If a hyperparameter in `values_dict` doesn't exist.
ValueError: If `values_dict` cannot be parsed.
"""
for name, value in values_dict.items():
self.set_hparam(name, value)
return self
@deprecation.deprecated(None, 'Use `override_from_dict`.')
def set_from_map(self, values_map):
"""DEPRECATED. Use override_from_dict."""
return self.override_from_dict(values_dict=values_map)
def set_model_structure(self, model_structure):
self._model_structure = model_structure
def get_model_structure(self):
return self._model_structure
def to_json(self, indent=None, separators=None, sort_keys=False):
"""Serializes the hyperparameters into JSON.
Args:
indent: If a non-negative integer, JSON array elements and object members
will be pretty-printed with that indent level. An indent level of 0, or
negative, will only insert newlines. `None` (the default) selects the
most compact representation.
separators: Optional `(item_separator, key_separator)` tuple. Default is
`(', ', ': ')`.
sort_keys: If `True`, the output dictionaries will be sorted by key.
Returns:
A JSON string.
"""
return json.dumps(
self.values(),
indent=indent,
separators=separators,
sort_keys=sort_keys)
def parse_json(self, values_json):
"""Override existing hyperparameter values, parsing new values from a json object.
Args:
values_json: String containing a json object of name:value pairs.
Returns:
The `HParams` instance.
Raises:
KeyError: If a hyperparameter in `values_json` doesn't exist.
ValueError: If `values_json` cannot be parsed.
"""
values_map = json.loads(values_json)
return self.override_from_dict(values_map)
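# A small round-trip sketch for the two JSON helpers above, assuming an
# HParams object built as in the class docstring:
#
#   hp = HParams(learning_rate=0.1, activations=['relu', 'tanh'])
#   json_str = hp.to_json()  # e.g. '{"activations": ["relu", "tanh"], ...}'
#   hp.parse_json('{"learning_rate": 0.3}')
#   hp.learning_rate  # -> 0.3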
def values(self):
"""Return the hyperparameter values as a Python dictionary.
Returns:
A dictionary with hyperparameter names as keys. The values are the
hyperparameter values.
"""
return {n: getattr(self, n) for n in self._hparam_types.keys()}
def get(self, key, default=None):
"""Returns the value of `key` if it exists, else `default`."""
if key in self._hparam_types:
# Ensure that default is compatible with the parameter type.
if default is not None:
param_type, is_param_list = self._hparam_types[key]
type_str = 'list<%s>' % param_type if is_param_list else str(param_type)
fail_msg = ("Hparam '%s' of type '%s' is incompatible with "
'default=%s' % (key, type_str, default))
is_default_list = isinstance(default, list)
if is_param_list != is_default_list:
raise ValueError(fail_msg)
try:
if is_default_list:
for value in default:
_cast_to_type_if_compatible(key, param_type, value)
else:
_cast_to_type_if_compatible(key, param_type, default)
except ValueError as e:
raise ValueError('%s. %s' % (fail_msg, e))
return getattr(self, key)
return default
def __contains__(self, key):
return key in self._hparam_types
def __str__(self):
hpdict = self.values()
output_list = ['{}={}'.format(key, hpdict[key]) for key in hpdict]
return ','.join(output_list)
def __repr__(self):
strval = str(sorted(self.values().items()))
return '%s(%s)' % (type(self).__name__, strval)
@staticmethod
def _get_kind_name(param_type, is_list):
"""Returns the field name given parameter type and is_list.
Args:
param_type: Data type of the hparam.
is_list: Whether this is a list.
Returns:
A string representation of the field name.
Raises:
ValueError: If parameter type is not recognized.
"""
if issubclass(param_type, bool):
# This check must happen before issubclass(param_type, six.integer_types),
# since Python considers bool to be a subclass of int.
typename = 'bool'
elif issubclass(param_type, six.integer_types):
# Setting 'int' and 'long' types to be 'int64' to ensure the type is
# compatible with both Python2 and Python3.
typename = 'int64'
elif issubclass(param_type, (six.string_types, six.binary_type)):
# Setting 'string' and 'bytes' types to be 'bytes' to ensure the type is
# compatible with both Python2 and Python3.
typename = 'bytes'
elif issubclass(param_type, float):
typename = 'float'
else:
raise ValueError('Unsupported parameter type: %s' % str(param_type))
suffix = 'list' if is_list else 'value'
return '_'.join([typename, suffix])
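# Mapping sketch for _get_kind_name:
#   _get_kind_name(int, is_list=False)    -> 'int64_value'
#   _get_kind_name(str, is_list=True)     -> 'bytes_list'
#   _get_kind_name(float, is_list=False)  -> 'float_value'
#   _get_kind_name(bool, is_list=True)    -> 'bool_list'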
def to_proto(self, export_scope=None): # pylint: disable=unused-argument
"""Converts a `HParams` object to a `HParamDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `HParamDef` protocol buffer.
"""
hparam_proto = hparam_pb2.HParamDef()
for name in self._hparam_types:
# Parse the values.
param_type, is_list = self._hparam_types.get(name, (None, None))
kind = HParams._get_kind_name(param_type, is_list)
if is_list:
if kind.startswith('bytes'):
v_list = [compat.as_bytes(v) for v in getattr(self, name)]
else:
v_list = [v for v in getattr(self, name)]
getattr(hparam_proto.hparam[name], kind).value.extend(v_list)
else:
v = getattr(self, name)
if kind.startswith('bytes'):
v = compat.as_bytes(getattr(self, name))
setattr(hparam_proto.hparam[name], kind, v)
return hparam_proto
@staticmethod
def from_proto(hparam_def, import_scope=None): # pylint: disable=unused-argument
return HParams(hparam_def=hparam_def)
ops.register_proto_function(
'hparams',
proto_type=hparam_pb2.HParamDef,
to_proto=HParams.to_proto,
from_proto=HParams.from_proto)
|
tensorflow-master
|
tensorflow/contrib/training/python/training/hparam.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.estimator.inputs.queues.feeding_functions import _enqueue_data as enqueue_data
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/training/python/training/feeding_queue_runner_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sampling functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import input as input_ops
__all__ = [
'rejection_sample',
'stratified_sample',
]
def rejection_sample(tensors,
accept_prob_fn,
batch_size,
queue_threads=1,
enqueue_many=False,
prebatch_capacity=16,
prebatch_threads=1,
runtime_checks=False,
name=None):
"""Stochastically creates batches by rejection sampling.
Each list of non-batched tensors is evaluated by `accept_prob_fn`, to produce
a scalar tensor between 0 and 1. This tensor corresponds to the probability of
being accepted. When `batch_size` tensor groups have been accepted, the batch
queue will return a mini-batch.
Args:
tensors: List of tensors for data. All tensors are either one item or a
batch, according to enqueue_many.
accept_prob_fn: A python lambda that takes a non-batch tensor from each
item in `tensors`, and produces a scalar tensor.
batch_size: Size of batch to be returned.
queue_threads: The number of threads for the queue that will hold the final
batch.
enqueue_many: Bool. If true, interpret input tensors as having a batch
dimension.
prebatch_capacity: Capacity for the large queue that is used to convert
batched tensors to single examples.
prebatch_threads: Number of threads for the large queue that is used to
convert batched tensors to single examples.
runtime_checks: Bool. If true, insert runtime checks on the output of
`accept_prob_fn`. Using `True` might have a performance impact.
name: Optional prefix for ops created by this function.
Raises:
ValueError: enqueue_many is True and labels doesn't have a batch
dimension, or if enqueue_many is False and labels isn't a scalar.
ValueError: enqueue_many is True, and batch dimension on data and labels
don't match.
ValueError: if a zero initial probability class has a nonzero target
probability.
Returns:
A list of tensors of the same length as `tensors`, with batch dimension
`batch_size`.
Example:
# Get tensor for a single data and label example.
data, label = data_provider.Get(['data', 'label'])
# Get stratified batch according to data tensor.
accept_prob_fn = lambda x: (tf.tanh(x[0]) + 1) / 2
data_batch = tf.contrib.training.rejection_sample(
[data, label], accept_prob_fn, 16)
# Run batch through network.
...
"""
with variable_scope.variable_scope(name, 'rejection_sample', tensors):
tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensors)
# Reduce the batched-input case to the single-example case by running the
# inputs through a queue with batch size one.
if enqueue_many:
# Validate that batch dimension of the input is consistent.
tensor_list = _verify_data_inputs(tensor_list)
# Make a single queue to hold input examples. Reshape output so examples
# don't have singleton batch dimension.
batched = input_ops.batch(
tensor_list,
batch_size=1,
num_threads=prebatch_threads,
capacity=prebatch_capacity,
enqueue_many=True)
tensor_list = [array_ops.squeeze(x, [0]) for x in batched]
# Set up a queue containing batches that have the distribution.
cur_prob = accept_prob_fn(tensor_list)
if runtime_checks:
cur_prob = array_ops.identity(
control_flow_ops.with_dependencies([
check_ops.assert_less_equal(0.0, cur_prob),
check_ops.assert_less_equal(cur_prob, 1.0)
], cur_prob),
name='prob_with_checks')
minibatch = input_ops.maybe_batch(
tensor_list,
keep_input=random_ops.random_uniform([]) < cur_prob,
batch_size=batch_size,
num_threads=queue_threads)
# Queues return a single tensor when only one tensor was enqueued. Since we
# want the return type to always be the same, always return a list.
if isinstance(minibatch, ops.Tensor):
minibatch = [minibatch]
return minibatch
def stratified_sample(tensors,
labels,
target_probs,
batch_size,
init_probs=None,
enqueue_many=False,
queue_capacity=16,
threads_per_queue=1,
name=None):
"""Stochastically creates batches based on per-class probabilities.
This method discards examples. Internally, it creates one queue to amortize
the cost of disk reads, and one queue to hold the properly-proportioned
batch.
Args:
tensors: List of tensors for data. All tensors are either one item or a
batch, according to enqueue_many.
labels: Tensor for label of data. Label is a single integer or a batch,
depending on `enqueue_many`. It is not a one-hot vector.
target_probs: Target class proportions in batch. An object whose type has a
registered Tensor conversion function.
batch_size: Size of batch to be returned.
init_probs: Class proportions in the data. An object whose type has a
registered Tensor conversion function, or `None` for estimating the
initial distribution.
enqueue_many: Bool. If true, interpret input tensors as having a batch
dimension.
queue_capacity: Capacity of the large queue that holds input examples.
threads_per_queue: Number of threads for the large queue that holds input
examples and for the final queue with the proper class proportions.
name: Optional prefix for ops created by this function.
Raises:
ValueError: If `tensors` isn't iterable.
ValueError: `enqueue_many` is True and labels doesn't have a batch
dimension, or if `enqueue_many` is False and labels isn't a scalar.
ValueError: `enqueue_many` is True, and batch dimension on data and labels
don't match.
ValueError: if probs don't sum to one.
ValueError: if a zero initial probability class has a nonzero target
probability.
TFAssertion: if labels aren't integers in [0, num classes).
Returns:
(data_batch, label_batch), where data_batch is a list of tensors of the same
length as `tensors`
Example:
# Get tensor for a single data and label example.
data, label = data_provider.Get(['data', 'label'])
# Get stratified batch according to per-class probabilities.
target_probs = [...distribution you want...]
[data_batch], labels = tf.contrib.training.stratified_sample(
[data], label, target_probs)
# Run batch through network.
...
"""
with ops.name_scope(name, 'stratified_sample', list(tensors) + [labels]):
tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensors)
labels = ops.convert_to_tensor(labels)
target_probs = ops.convert_to_tensor(target_probs, dtype=dtypes.float32)
# Reduce the case of a single example to that of a batch of size 1.
if not enqueue_many:
tensor_list = [array_ops.expand_dims(tensor, 0) for tensor in tensor_list]
labels = array_ops.expand_dims(labels, 0)
# If `init_probs` is `None`, set up online estimation of data distribution.
if init_probs is None:
# We use `target_probs` to get the number of classes, so its shape must be
# fully defined at graph construction time.
target_probs.get_shape().assert_is_fully_defined()
init_probs = _estimate_data_distribution(
labels, target_probs.get_shape().num_elements())
else:
init_probs = ops.convert_to_tensor(init_probs, dtype=dtypes.float32)
# Validate that input is consistent.
tensor_list, labels, [init_probs, target_probs] = _verify_input(
tensor_list, labels, [init_probs, target_probs])
# Check that all zero initial probabilities also have zero target
# probabilities.
assert_op = control_flow_ops.Assert(
math_ops.reduce_all(
math_ops.logical_or(
math_ops.not_equal(init_probs, 0),
math_ops.equal(target_probs, 0))),
['All classes with zero initial probability must also have zero target '
'probability: ', init_probs, target_probs
])
init_probs = control_flow_ops.with_dependencies([assert_op], init_probs)
# Calculate acceptance sampling probabilities.
accept_probs = _calculate_acceptance_probabilities(init_probs, target_probs)
proportion_rejected = math_ops.reduce_sum((1 - accept_probs) * init_probs)
accept_probs = control_flow_ops.cond(
math_ops.less(proportion_rejected, .5),
lambda: accept_probs,
lambda: logging_ops.Print( # pylint: disable=g-long-lambda
accept_probs, [accept_probs],
message='Proportion of examples rejected by sampler is high.',
first_n=10))
# Make a single queue to hold input examples. Reshape output so examples
# don't have singleton batch dimension.
batched = input_ops.batch(
tensor_list + [labels],
batch_size=1,
num_threads=threads_per_queue,
capacity=queue_capacity,
enqueue_many=True)
val_list = [array_ops.squeeze(x, [0]) for x in batched[:-1]]
label = array_ops.squeeze(batched[-1], [0])
# Set up second queue containing batches that have the desired class
# proportions.
cur_prob = array_ops.gather(accept_probs, label)
batched = input_ops.maybe_batch(
val_list + [label],
keep_input=random_ops.random_uniform([]) < cur_prob,
batch_size=batch_size,
num_threads=threads_per_queue)
return batched[:-1], batched[-1]
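# A minimal usage sketch for `stratified_sample` (illustration only). It
# assumes TF 1.x graph mode with `tf.contrib` available; the synthetic
# single-example "reader" below stands in for a real input pipeline, and the
# numbers are hypothetical. A ~90/10 label stream is rebalanced into 50/50
# batches of size 32 by rejection sampling.
def _stratified_sample_usage_sketch():
  """Runs one rebalanced batch through a session (illustration only)."""
  import tensorflow as tf  # local import: sketch only, not used by the module
  data = tf.random_uniform([4])                           # one feature vector
  label = tf.cast(tf.random_uniform([]) < 0.1, tf.int32)  # skewed scalar label
  [data_batch], label_batch = tf.contrib.training.stratified_sample(
      [data], label, target_probs=[0.5, 0.5], batch_size=32,
      init_probs=[0.9, 0.1])
  # MonitoredSession starts the queue runners created by stratified_sample.
  with tf.train.MonitoredSession() as sess:
    return sess.run([data_batch, label_batch])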
def _estimate_data_distribution(labels, num_classes, smoothing_constant=10):
"""Estimate data distribution as labels are seen."""
# Variable to track running count of classes. Smooth by a nonzero value to
# avoid division-by-zero. Higher values provide more stability at the cost of
# slower convergence.
  if smoothing_constant <= 0:
    raise ValueError('smoothing_constant must be positive.')
num_examples_per_class_seen = variable_scope.variable(
initial_value=[smoothing_constant] * num_classes,
trainable=False,
name='class_count',
dtype=dtypes.int64)
# Update the class-count based on what labels are seen in batch.
num_examples_per_class_seen = num_examples_per_class_seen.assign_add(
math_ops.reduce_sum(
array_ops.one_hot(
labels, num_classes, dtype=dtypes.int64), 0))
# Normalize count into a probability.
# NOTE: Without the `+= 0` line below, the test
# `testMultiThreadedEstimateDataDistribution` fails. The reason is that
# before this line, `num_examples_per_class_seen` is a Tensor that shares a
# buffer with an underlying `ref` object. When the `ref` is changed by another
# thread, `num_examples_per_class_seen` changes as well. Since this can happen
# in the middle of the normalization computation, we get probabilities that
# are very far from summing to one. Adding `+= 0` copies the contents of the
# tensor to a new buffer, which will be consistent from the start to the end
# of the normalization computation.
num_examples_per_class_seen += 0
init_prob_estimate = math_ops.truediv(
num_examples_per_class_seen,
math_ops.reduce_sum(num_examples_per_class_seen))
# Must return float32 (not float64) to agree with downstream `_verify_input`
# checks.
return math_ops.cast(init_prob_estimate, dtypes.float32)
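# Plain-NumPy sketch of the smoothing used above (illustration only; the class
# count, smoothing constant, and label stream are hypothetical). Each class
# count starts at the smoothing constant, so early estimates are pulled toward
# the uniform distribution and no class ever gets probability exactly zero.
def _estimate_data_distribution_sketch():
  """Returns the smoothed estimate after a few observed labels."""
  import numpy as np  # local import: sketch only
  counts = np.full(3, 10, dtype=np.int64)   # 3 classes, smoothing_constant=10
  for label in [0, 0, 1, 0, 2, 0]:          # labels seen so far
    counts[label] += 1
  return counts.astype(np.float32) / counts.sum()   # ~[0.39, 0.31, 0.31]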
def _verify_data_inputs(tensor_list):
"""Verify that batched data inputs are well-formed."""
for tensor in tensor_list:
# Data tensor should have a batch dimension.
shape = tensor.get_shape().with_rank_at_least(1)
# Data batch dimensions must be compatible.
tensor_shape.dimension_at_index(shape, 0).assert_is_compatible_with(
tensor_list[0].get_shape()[0])
return tensor_list
def _verify_input(tensor_list, labels, probs_list):
"""Verify that batched inputs are well-formed."""
checked_probs_list = []
for probs in probs_list:
# Since number of classes shouldn't change at runtime, probabilities shape
# should be fully defined.
probs.get_shape().assert_is_fully_defined()
# Probabilities must be 1D.
probs.get_shape().assert_has_rank(1)
# Probabilities must be nonnegative and sum to one.
tol = 1e-6
prob_sum = math_ops.reduce_sum(probs)
checked_probs = control_flow_ops.with_dependencies([
check_ops.assert_non_negative(probs),
check_ops.assert_less(prob_sum, 1.0 + tol),
check_ops.assert_less(1.0 - tol, prob_sum)
], probs)
checked_probs_list.append(checked_probs)
# All probabilities should be the same length.
prob_length = checked_probs_list[0].get_shape().num_elements()
for checked_prob in checked_probs_list:
if checked_prob.get_shape().num_elements() != prob_length:
raise ValueError('Probability parameters must have the same length.')
# Labels tensor should only have batch dimension.
labels.get_shape().assert_has_rank(1)
for tensor in tensor_list:
# Data tensor should have a batch dimension.
shape = tensor.get_shape().with_rank_at_least(1)
# Data and label batch dimensions must be compatible.
tensor_shape.dimension_at_index(shape, 0).assert_is_compatible_with(
labels.get_shape()[0])
# Data and labels must have the same, strictly positive batch size. Since we
# can't assume we know the batch size at graph creation, add runtime checks.
labels_batch_size = array_ops.shape(labels)[0]
lbl_assert = check_ops.assert_positive(labels_batch_size)
# Make each tensor depend on its own checks.
labels = control_flow_ops.with_dependencies([lbl_assert], labels)
tensor_list = [
control_flow_ops.with_dependencies([
lbl_assert,
check_ops.assert_equal(array_ops.shape(x)[0], labels_batch_size)
], x) for x in tensor_list
]
# Label's classes must be integers 0 <= x < num_classes.
labels = control_flow_ops.with_dependencies([
check_ops.assert_integer(labels), check_ops.assert_non_negative(labels),
check_ops.assert_less(labels, math_ops.cast(prob_length, labels.dtype))
], labels)
return tensor_list, labels, checked_probs_list
def _calculate_acceptance_probabilities(init_probs, target_probs):
"""Calculate the per-class acceptance rates.
Args:
init_probs: The class probabilities of the data.
target_probs: The desired class proportion in minibatches.
Returns:
A list of the per-class acceptance probabilities.
This method is based on solving the following analysis:
  Let F be the probability of a rejection (on any example).
  Let p_i be the proportion of examples in the data in class i (init_probs).
  Let a_i be the rate at which the rejection sampler should *accept* class i.
  Let t_i be the target proportion of class i in the minibatches (target_probs).
```
F = sum_i(p_i * (1-a_i))
= 1 - sum_i(p_i * a_i) using sum_i(p_i) = 1
```
  An example of class `i` ends up in a minibatch if, for some `k >= 0`, `k`
  rejections occur, then an example of class `i` reaches the rejector and is
  accepted. Summing over `k`:
```
t_i = sum_k=0^inf(F^k * p_i * a_i)
      = p_i * a_i / (1 - F)    using geometric series identity, since 0 <= F < 1
= p_i * a_i / sum_j(p_j * a_j) using F from above
```
Note that the following constraints hold:
```
0 <= p_i <= 1, sum_i(p_i) = 1
0 <= a_i <= 1
0 <= t_i <= 1, sum_i(t_i) = 1
```
A solution for a_i in terms of the other variables is the following:
```a_i = (t_i / p_i) / max_i[t_i / p_i]```
"""
# Make list of t_i / p_i.
ratio_l = target_probs / init_probs
# Replace NaNs with 0s.
ratio_l = array_ops.where(
math_ops.is_nan(ratio_l), array_ops.zeros_like(ratio_l), ratio_l)
# Calculate list of acceptance probabilities.
max_ratio = math_ops.reduce_max(ratio_l)
return ratio_l / max_ratio
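# Worked example for the function above (illustration only; the probabilities
# are hypothetical). With data proportions p = [0.9, 0.1] and targets
# t = [0.5, 0.5], the ratios t/p are [0.556, 5.0], so the acceptance rates are
# a = [0.111, 1.0]: the majority class is kept ~11% of the time, the minority
# class always, and the accepted mass p*a = [0.1, 0.1] renormalizes to the
# desired [0.5, 0.5].
def _acceptance_probabilities_sketch():
  """NumPy cross-check of the derivation (illustration only)."""
  import numpy as np  # local import: sketch only
  init_probs = np.array([0.9, 0.1])
  target_probs = np.array([0.5, 0.5])
  ratio = target_probs / init_probs
  accept = ratio / ratio.max()          # [0.111..., 1.0]
  realized = init_probs * accept        # unnormalized accepted mass
  return realized / realized.sum()      # [0.5, 0.5], matching target_probs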
|
tensorflow-master
|
tensorflow/contrib/training/python/training/sampling_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `QueueRunner` that takes a feed function as an argument."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.estimator.inputs.queues.feeding_queue_runner import _FeedingQueueRunner as FeedingQueueRunner
# pylint: enable=unused-import
|
tensorflow-master
|
tensorflow/contrib/training/python/training/feeding_queue_runner.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SGDR learning rate decay function."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops, control_flow_ops
def sgdr_decay(learning_rate, global_step, initial_period_steps,
t_mul=2.0, m_mul=1.0, name=None):
"""Implements Stochastic Gradient Descent with Warm Restarts (SGDR).
As described in "SGDR: Stochastic Gradient Descent
with Warm Restarts" by Ilya Loshchilov & Frank Hutter, Proceedings of
ICLR'2017, available at https://arxiv.org/pdf/1608.03983.pdf
The learning rate decreases according to cosine annealing:
```python
learning_rate * 0.5 * (1 + cos(x_val * pi)) # for x_val defined in [0, 1]
```
Thus, at the beginning (when the restart index i = 0),
the learning rate decreases for `initial_period_steps` steps from the initial
learning rate `learning_rate` (when `x_val=0`, we get `cos(0)=1`) to
0 (when `x_val=1`, we get `cos(pi)=-1`).
The decrease within the i-th period takes `t_i` steps,
where `t_0` = `initial_period_steps` is the user-defined number of batch
iterations (not epochs as in the paper) to be performed before the first
restart is launched.
Then, we perform the first restart (i=1) by setting the learning rate to
`learning_rate*(m_mul^i)`, where `m_mul in [0,1]` (set to 1 by default).
The i-th restart runs for `t_i=t_0*(t_mul^i)` steps, i.e., every new
restart runs `t_mul` times longer than the previous one.
  Importantly, when one has no access to a validation set, SGDR suggests
  reporting the best expected / recommended solution in the following way:
  during the initial run (i=0), every new solution is SGDR's recommended
  solution; when i>0, the recommended solution is the one obtained at the end
  of each restart.
  Note that the minimum learning rate is set to 0 for simplicity; you can
  adjust the code to deal with any positive minimum learning rate, as defined
  in the paper.
  `initial_period_steps` is the duration of the first period, measured as a
  number of minibatch updates. To work in epochs instead, compute the number
  of updates an epoch requires.
For example, assume the following parameters and intention:
Minibatch size: 100
Training dataset size: 10000
If the user wants the first decay period to span across 5 epochs, then
`initial_period_steps` = 5 * 10000/100 = 500
  As another example, suppose the intention is to train for 10000 batch
  iterations with the initial learning rate set to 0.1, then restart and run 2
  times longer, i.e., for 20000 batch iterations with an initial learning rate
  of 0.05, then restart again and again, doubling the runtime of each new
  period and halving its initial learning rate.
To accomplish the above, one would write:
```python
...
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.1
learning_rate = sgdr_decay(starter_learning_rate, global_step,
initial_period_steps=10000, t_mul=2, m_mul=0.5)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
# Step | 0 | 1000 | 5000 | 9000 | 9999 | 10000 | 11000 |
# LR | 0.1 | 0.097 | 0.05 | 0.002 | 0.00 | 0.05 | 0.0496 |
# Step | 20000 | 29000 | 29999 | 30000 |
# LR | 0.025 | 0.0003 | 0.00 | 0.025 |
```
Args:
learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
Global step to use for the decay computation. Must not be negative.
    initial_period_steps: Duration of the first period, measured as the number
      of minibatch updates. To work in epochs, compute the number of updates
      required for an epoch.
t_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
Must be positive.
Used to derive the number of iterations in the i-th period:
`initial_period_steps * (t_mul^i)`. Defaults to 2.0.
m_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
Must be positive.
Used to derive the initial learning rate of the i-th period:
`learning_rate * (m_mul^i)`. Defaults to 1.0
Returns:
A scalar `Tensor` of the same type as `learning_rate`.
The learning rate for a provided global_step.
Raises:
ValueError: if `global_step` is not supplied.
"""
if global_step is None:
raise ValueError("global_step is required for sgdr_decay.")
with ops.name_scope(name, "SGDRDecay",
[learning_rate, global_step,
initial_period_steps, t_mul, m_mul]) as name:
learning_rate = ops.convert_to_tensor(learning_rate,
name="initial_learning_rate")
dtype = learning_rate.dtype
global_step = math_ops.cast(global_step, dtype)
t_0 = math_ops.cast(initial_period_steps, dtype)
t_mul = math_ops.cast(t_mul, dtype)
m_mul = math_ops.cast(m_mul, dtype)
c_one = math_ops.cast(constant_op.constant(1.0), dtype)
c_half = math_ops.cast(constant_op.constant(0.5), dtype)
c_pi = math_ops.cast(constant_op.constant(math.pi), dtype)
# Find normalized value of the current step
x_val = math_ops.div(global_step, t_0)
def compute_step(x_val, geometric=False):
if geometric:
# Consider geometric series where t_mul != 1
# 1 + t_mul + t_mul^2 ... = (1 - t_mul^i_restart) / (1 - t_mul)
# First find how many restarts were performed for a given x_val
# Find maximal integer i_restart value for which this equation holds
# x_val >= (1 - t_mul^i_restart) / (1 - t_mul)
# x_val * (1 - t_mul) <= (1 - t_mul^i_restart)
# t_mul^i_restart <= (1 - x_val * (1 - t_mul))
        # TensorFlow provides only the natural log, so
        # i_restart <= log(1 - x_val * (1 - t_mul)) / log(t_mul)
# Find how many restarts were performed
i_restart = math_ops.floor(
math_ops.log(c_one - x_val * (c_one - t_mul)) / math_ops.log(t_mul))
# Compute the sum of all restarts before the current one
sum_r = (c_one - t_mul ** i_restart) / (c_one - t_mul)
# Compute our position within the current restart
x_val = (x_val - sum_r) / t_mul ** i_restart
else:
# Find how many restarts were performed
i_restart = math_ops.floor(x_val)
# Compute our position within the current restart
x_val = x_val - i_restart
return i_restart, x_val
i_restart, x_val = control_flow_ops.cond(
math_ops.equal(t_mul, c_one),
lambda: compute_step(x_val, geometric=False),
lambda: compute_step(x_val, geometric=True))
# If m_mul < 1, then the initial learning rate of every new restart will be
# smaller, i.e., by a factor of m_mul ** i_restart at i_restart-th restart
m_fac = learning_rate * (m_mul ** i_restart)
return math_ops.multiply(c_half * m_fac,
(math_ops.cos(x_val * c_pi) + c_one), name=name)
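# Plain-Python reference of the schedule above (illustration only, not used by
# `sgdr_decay`): it walks period by period instead of using the closed-form
# geometric sum, which makes it handy for sanity-checking the step/LR table in
# the docstring. The default arguments mirror that example.
def _sgdr_reference_sketch(step, learning_rate=0.1, t_0=10000,
                           t_mul=2.0, m_mul=0.5):
  """Returns the SGDR learning rate at `step` (illustration only)."""
  period, start, i_restart = float(t_0), 0.0, 0
  x = float(step)
  while x - start >= period:        # advance to the period containing `step`
    start += period
    period *= t_mul
    i_restart += 1
  x_val = (x - start) / period      # position within the current period
  return (learning_rate * (m_mul ** i_restart) * 0.5 *
          (1.0 + math.cos(x_val * math.pi)))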
|
tensorflow-master
|
tensorflow/contrib/training/python/training/sgdr_learning_rate_decay.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.training.training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.training.python.training import training
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as variables_lib2
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
# pylint: enable=g-import-not-at-top
def logistic_classifier(inputs):
return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)
def batchnorm_classifier(inputs):
inputs = layers.batch_norm(inputs, decay=0.1, fused=False)
return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)
class ClipGradsTest(test.TestCase):
def testClipGrads(self):
xs = variables_lib2.Variable(0.0)
ys = xs * 4.0
grads = gradients_impl.gradients([ys], [xs])
gradients_to_variables = list(zip(grads, [xs]))
clipped_gradients_to_variables = training.clip_gradient_norms(
gradients_to_variables, 3.0)
with self.cached_session() as session:
session.run(variables_lib2.global_variables_initializer())
self.assertAlmostEqual(4.0, gradients_to_variables[0][0].eval())
self.assertAlmostEqual(3.0, clipped_gradients_to_variables[0][0].eval())
def testClipGradsFn(self):
xs = variables_lib2.Variable(0.0)
ys = xs * 4.0
grads = gradients_impl.gradients([ys], [xs])
gradients_to_variables = list(zip(grads, [xs]))
clipped_gradients_to_variables = training.clip_gradient_norms_fn(3.0)(
gradients_to_variables)
with self.cached_session() as session:
session.run(variables_lib2.global_variables_initializer())
self.assertAlmostEqual(4.0, gradients_to_variables[0][0].eval())
self.assertAlmostEqual(3.0, clipped_gradients_to_variables[0][0].eval())
class CreateTrainOpTest(test.TestCase):
def setUp(self):
np.random.seed(0)
# Create an easy training set:
self._inputs = np.random.rand(16, 4).astype(np.float32)
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
def testTrainOpInCollection(self):
with ops.Graph().as_default():
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
loss = losses.log_loss(tf_labels, tf_predictions)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(loss, optimizer)
# Make sure the training op was recorded in the proper collection
self.assertTrue(train_op in ops.get_collection(ops.GraphKeys.TRAIN_OP))
def testUseUpdateOps(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
expected_mean = np.mean(self._inputs, axis=(0))
expected_var = np.var(self._inputs, axis=(0))
tf_predictions = batchnorm_classifier(tf_inputs)
loss = losses.log_loss(tf_labels, tf_predictions)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(loss, optimizer)
moving_mean = variables_lib.get_variables_by_name('moving_mean')[0]
moving_variance = variables_lib.get_variables_by_name('moving_variance')[
0]
with self.cached_session() as session:
# Initialize all variables
session.run(variables_lib2.global_variables_initializer())
mean, variance = session.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
for _ in range(10):
session.run(train_op)
mean = moving_mean.eval()
variance = moving_variance.eval()
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
def testEmptyUpdateOps(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
loss = losses.log_loss(tf_labels, tf_predictions)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(loss, optimizer, update_ops=[])
moving_mean = variables_lib.get_variables_by_name('moving_mean')[0]
moving_variance = variables_lib.get_variables_by_name('moving_variance')[
0]
with self.cached_session() as session:
# Initialize all variables
session.run(variables_lib2.global_variables_initializer())
mean, variance = session.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
for _ in range(10):
session.run(train_op)
mean = moving_mean.eval()
variance = moving_variance.eval()
# Since we skip update_ops the moving_vars are not updated.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
def testGlobalStepIsIncrementedByDefault(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
loss = losses.log_loss(tf_labels, tf_predictions)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(loss, optimizer)
global_step = variables_lib.get_or_create_global_step()
with self.cached_session() as session:
# Initialize all variables
session.run(variables_lib2.global_variables_initializer())
for _ in range(10):
session.run(train_op)
# After 10 updates global_step should be 10.
self.assertAllClose(global_step.eval(), 10)
def testGlobalStepNotIncrementedWhenSetToNone(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
loss = losses.log_loss(tf_labels, tf_predictions)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(loss, optimizer, global_step=None)
global_step = variables_lib.get_or_create_global_step()
with self.cached_session() as session:
# Initialize all variables
session.run(variables_lib2.global_variables_initializer())
for _ in range(10):
session.run(train_op)
        # Since the train_op doesn't use global_step, it shouldn't change.
self.assertAllClose(global_step.eval(), 0)
class TrainBatchNormClassifierTest(test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
losses.log_loss(tf_labels, tf_predictions)
total_loss = losses.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
loss = training.train(
train_op,
None,
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
save_summaries_steps=None,
save_checkpoint_secs=None)
self.assertLess(loss, .1)
class TrainTest(test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testCanAchieveZeroLoss(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
losses.log_loss(tf_labels, tf_predictions)
total_loss = losses.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
loss = training.train(
train_op,
None,
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
save_summaries_steps=None,
save_checkpoint_secs=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainWithLocalVariable(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
local_multiplier = variables_lib.local_variable(1.0)
tf_predictions = logistic_classifier(tf_inputs) * local_multiplier
losses.log_loss(tf_labels, tf_predictions)
total_loss = losses.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
loss = training.train(
train_op,
None,
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
save_summaries_steps=None,
save_checkpoint_secs=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testResumeTrainAchievesRoughlyTheSameLoss(self):
number_of_steps = [300, 1, 5]
logdir = os.path.join(self.get_temp_dir(), 'resume_train_same_loss')
for i in range(len(number_of_steps)):
with ops.Graph().as_default():
random_seed.set_random_seed(i)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
losses.log_loss(tf_labels, tf_predictions)
total_loss = losses.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir,
hooks=[
basic_session_run_hooks.StopAtStepHook(
num_steps=number_of_steps[i]),
basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=50, saver=saver),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
losses.log_loss(tf_labels, tf_predictions)
total_loss = losses.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(
learning_rate=learning_rate)
def transform_grads_fn(grads):
if gradient_multiplier != 1.0:
variables = variables_lib2.trainable_variables()
gradient_multipliers = {var: gradient_multiplier for var in variables}
with ops.name_scope('multiply_grads'):
return training.multiply_gradients(grads, gradient_multipliers)
else:
return grads
return training.create_train_op(
total_loss, optimizer, transform_grads_fn=transform_grads_fn)
def testTrainWithInitFromCheckpoint(self):
logdir1 = os.path.join(self.get_temp_dir(), 'tmp_logs1/')
logdir2 = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
if gfile.Exists(logdir1): # For running on jenkins.
gfile.DeleteRecursively(logdir1)
if gfile.Exists(logdir2): # For running on jenkins.
gfile.DeleteRecursively(logdir2)
# First, train the model one step (make sure the error is high).
with ops.Graph().as_default():
random_seed.set_random_seed(0)
train_op = self.create_train_op()
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir1,
hooks=[
basic_session_run_hooks.CheckpointSaverHook(
logdir1, save_steps=1, saver=saver),
basic_session_run_hooks.StopAtStepHook(num_steps=1),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertGreater(loss, .5)
# Next, train the model to convergence.
with ops.Graph().as_default():
random_seed.set_random_seed(1)
train_op = self.create_train_op()
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir1,
hooks=[
basic_session_run_hooks.CheckpointSaverHook(
logdir1, save_steps=300, saver=saver),
basic_session_run_hooks.StopAtStepHook(num_steps=300),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .02)
# Finally, advance the model a single step and validate that the loss is
# still low.
with ops.Graph().as_default():
random_seed.set_random_seed(2)
train_op = self.create_train_op()
model_variables = variables_lib2.global_variables()
model_path = checkpoint_management.latest_checkpoint(logdir1)
assign_fn = variables_lib.assign_from_checkpoint_fn(
model_path, model_variables)
def init_fn(_, session):
assign_fn(session)
loss = training.train(
train_op,
None,
scaffold=monitored_session.Scaffold(init_fn=init_fn),
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=1)],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .02)
def ModelLoss(self):
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
losses.log_loss(tf_labels, tf_predictions)
return losses.get_total_loss()
def testTrainAllVarsHasLowerLossThanTrainSubsetOfVars(self):
logdir = os.path.join(self.get_temp_dir(), 'tmp_logs3/')
if gfile.Exists(logdir): # For running on jenkins.
gfile.DeleteRecursively(logdir)
# First, train only the weights of the model.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
weights = variables_lib.get_variables_by_name('weights')
train_op = training.create_train_op(
total_loss, optimizer, variables_to_train=weights)
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir,
hooks=[
basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=200, saver=saver),
basic_session_run_hooks.StopAtStepHook(num_steps=200),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertGreater(loss, .015)
self.assertLess(loss, .05)
# Next, train the biases of the model.
with ops.Graph().as_default():
random_seed.set_random_seed(1)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
biases = variables_lib.get_variables_by_name('biases')
train_op = training.create_train_op(
total_loss, optimizer, variables_to_train=biases)
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir,
hooks=[
basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=300, saver=saver),
basic_session_run_hooks.StopAtStepHook(num_steps=300),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertGreater(loss, .015)
self.assertLess(loss, .05)
# Finally, train both weights and bias to get lower loss.
with ops.Graph().as_default():
random_seed.set_random_seed(2)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir,
hooks=[
basic_session_run_hooks.StopAtStepHook(num_steps=400),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainingSubsetsOfVariablesOnlyUpdatesThoseVariables(self):
# First, train only the weights of the model.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
weights, biases = variables_lib.get_variables()
train_op = training.create_train_op(total_loss, optimizer)
train_weights = training.create_train_op(
total_loss, optimizer, variables_to_train=[weights])
train_biases = training.create_train_op(
total_loss, optimizer, variables_to_train=[biases])
with self.cached_session() as session:
# Initialize the variables.
session.run(variables_lib2.global_variables_initializer())
# Get the initial weights and biases values.
weights_values, biases_values = session.run([weights, biases])
self.assertGreater(np.linalg.norm(weights_values), 0)
self.assertAlmostEqual(np.linalg.norm(biases_values), 0)
# Update weights and biases.
loss = session.run(train_op)
self.assertGreater(loss, .5)
new_weights, new_biases = session.run([weights, biases])
# Check that the weights and biases have been updated.
self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)
weights_values, biases_values = new_weights, new_biases
# Update only weights.
loss = session.run(train_weights)
self.assertGreater(loss, .5)
new_weights, new_biases = session.run([weights, biases])
# Check that the weights have been updated, but biases have not.
self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
self.assertAlmostEqual(np.linalg.norm(biases_values - new_biases), 0)
weights_values = new_weights
# Update only biases.
loss = session.run(train_biases)
self.assertGreater(loss, .5)
new_weights, new_biases = session.run([weights, biases])
# Check that the biases have been updated, but weights have not.
self.assertAlmostEqual(np.linalg.norm(weights_values - new_weights), 0)
self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)
def testTrainWithAlteredGradients(self):
# Use the same learning rate but different gradient multipliers
# to train two models. Model with equivalently larger learning
# rate (i.e., learning_rate * gradient_multiplier) has smaller
# training loss.
multipliers = [1., 1000.]
number_of_steps = 10
learning_rate = 0.001
# First, train the model with equivalently smaller learning rate.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
train_op = self.create_train_op(
learning_rate=learning_rate, gradient_multiplier=multipliers[0])
loss0 = training.train(
train_op,
None,
hooks=[
basic_session_run_hooks.StopAtStepHook(num_steps=number_of_steps),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertIsNotNone(loss0)
self.assertGreater(loss0, .5)
# Second, train the model with equivalently larger learning rate.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
train_op = self.create_train_op(
learning_rate=learning_rate, gradient_multiplier=multipliers[1])
loss1 = training.train(
train_op,
None,
hooks=[
basic_session_run_hooks.StopAtStepHook(num_steps=number_of_steps),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertIsNotNone(loss1)
self.assertLess(loss1, .5)
# The loss of the model trained with larger learning rate should
# be smaller.
self.assertGreater(loss0, loss1)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/training/python/training/training_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.training.evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import time
import numpy as np
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.contrib.training.python.training import training
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary import summary as summary_lib
from tensorflow.python.summary import summary_iterator
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import saver as saver_lib
class CheckpointIteratorTest(test.TestCase):
def testReturnsEmptyIfNoCheckpointsFound(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), 'no_checkpoints_found')
num_found = 0
for _ in evaluation.checkpoints_iterator(checkpoint_dir, timeout=0):
num_found += 1
self.assertEqual(num_found, 0)
def testReturnsSingleCheckpointIfOneCheckpointFound(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), 'one_checkpoint_found')
if not gfile.Exists(checkpoint_dir):
gfile.MakeDirs(checkpoint_dir)
global_step = variables.get_or_create_global_step()
saver = saver_lib.Saver() # Saves the global step.
with self.cached_session() as session:
session.run(variables_lib.global_variables_initializer())
save_path = os.path.join(checkpoint_dir, 'model.ckpt')
saver.save(session, save_path, global_step=global_step)
num_found = 0
for _ in evaluation.checkpoints_iterator(checkpoint_dir, timeout=0):
num_found += 1
self.assertEqual(num_found, 1)
def testReturnsSingleCheckpointIfOneShardedCheckpoint(self):
checkpoint_dir = os.path.join(self.get_temp_dir(),
'one_checkpoint_found_sharded')
if not gfile.Exists(checkpoint_dir):
gfile.MakeDirs(checkpoint_dir)
global_step = variables.get_or_create_global_step()
# This will result in 3 different checkpoint shard files.
with ops.device('/cpu:0'):
variables_lib.Variable(10, name='v0')
with ops.device('/cpu:1'):
variables_lib.Variable(20, name='v1')
saver = saver_lib.Saver(sharded=True)
with session_lib.Session(
target='',
config=config_pb2.ConfigProto(device_count={'CPU': 2})) as session:
session.run(variables_lib.global_variables_initializer())
save_path = os.path.join(checkpoint_dir, 'model.ckpt')
saver.save(session, save_path, global_step=global_step)
num_found = 0
for _ in evaluation.checkpoints_iterator(checkpoint_dir, timeout=0):
num_found += 1
self.assertEqual(num_found, 1)
def testTimeoutFn(self):
timeout_fn_calls = [0]
def timeout_fn():
timeout_fn_calls[0] += 1
return timeout_fn_calls[0] > 3
results = list(
evaluation.checkpoints_iterator(
'/non-existent-dir', timeout=0.1, timeout_fn=timeout_fn))
self.assertEqual([], results)
self.assertEqual(4, timeout_fn_calls[0])
class WaitForNewCheckpointTest(test.TestCase):
def testReturnsNoneAfterTimeout(self):
start = time.time()
ret = evaluation.wait_for_new_checkpoint(
'/non-existent-dir', 'foo', timeout=1.0, seconds_to_sleep=0.5)
end = time.time()
self.assertIsNone(ret)
# We've waited one second.
self.assertGreater(end, start + 0.5)
# The timeout kicked in.
self.assertLess(end, start + 1.1)
def logistic_classifier(inputs):
return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)
class EvaluateOnceTest(test.TestCase):
def setUp(self):
super(EvaluateOnceTest, self).setUp()
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def _train_model(self, checkpoint_dir, num_steps):
"""Trains a simple classification model.
Note that the data has been configured such that after around 300 steps,
    the model has memorized the dataset (i.e., we can expect 100% accuracy).
Args:
checkpoint_dir: The directory where the checkpoint is written to.
num_steps: The number of steps to train for.
"""
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
loss = loss_ops.log_loss(tf_predictions, tf_labels)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(loss, optimizer)
loss = training.train(
train_op,
checkpoint_dir,
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps)])
if num_steps >= 300:
assert loss < .015
def testEvaluatePerfectModel(self):
checkpoint_dir = os.path.join(self.get_temp_dir(),
'evaluate_perfect_model_once')
# Train a Model to completion:
self._train_model(checkpoint_dir, num_steps=300)
# Run
inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
labels = constant_op.constant(self._labels, dtype=dtypes.float32)
logits = logistic_classifier(inputs)
predictions = math_ops.round(logits)
accuracy, update_op = metrics.accuracy(
predictions=predictions, labels=labels)
checkpoint_path = evaluation.wait_for_new_checkpoint(checkpoint_dir)
final_ops_values = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
eval_ops=update_op,
final_ops={'accuracy': accuracy},
hooks=[
evaluation.StopAfterNEvalsHook(1),
])
self.assertTrue(final_ops_values['accuracy'] > .99)
def testEvalOpAndFinalOp(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), 'eval_ops_and_final_ops')
# Train a model for a single step to get a checkpoint.
self._train_model(checkpoint_dir, num_steps=1)
checkpoint_path = evaluation.wait_for_new_checkpoint(checkpoint_dir)
# Create the model so we have something to restore.
inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
logistic_classifier(inputs)
num_evals = 5
final_increment = 9.0
my_var = variables.local_variable(0.0, name='MyVar')
eval_ops = state_ops.assign_add(my_var, 1.0)
final_ops = array_ops.identity(my_var) + final_increment
final_ops_values = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
eval_ops=eval_ops,
final_ops={'value': final_ops},
hooks=[
evaluation.StopAfterNEvalsHook(num_evals),
])
self.assertEqual(final_ops_values['value'], num_evals + final_increment)
def testOnlyFinalOp(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), 'only_final_ops')
# Train a model for a single step to get a checkpoint.
self._train_model(checkpoint_dir, num_steps=1)
checkpoint_path = evaluation.wait_for_new_checkpoint(checkpoint_dir)
# Create the model so we have something to restore.
inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
logistic_classifier(inputs)
final_increment = 9.0
my_var = variables.local_variable(0.0, name='MyVar')
final_ops = array_ops.identity(my_var) + final_increment
final_ops_values = evaluation.evaluate_once(
checkpoint_path=checkpoint_path, final_ops={'value': final_ops})
self.assertEqual(final_ops_values['value'], final_increment)
class EvaluateRepeatedlyTest(test.TestCase):
def setUp(self):
super(EvaluateRepeatedlyTest, self).setUp()
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def _train_model(self, checkpoint_dir, num_steps):
"""Trains a simple classification model.
Note that the data has been configured such that after around 300 steps,
    the model has memorized the dataset (i.e., we can expect 100% accuracy).
Args:
checkpoint_dir: The directory where the checkpoint is written to.
num_steps: The number of steps to train for.
"""
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
loss = loss_ops.log_loss(tf_predictions, tf_labels)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(loss, optimizer)
loss = training.train(
train_op,
checkpoint_dir,
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps)])
def testEvaluatePerfectModel(self):
checkpoint_dir = os.path.join(self.get_temp_dir(),
'evaluate_perfect_model_repeated')
# Train a Model to completion:
self._train_model(checkpoint_dir, num_steps=300)
# Run
inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
labels = constant_op.constant(self._labels, dtype=dtypes.float32)
logits = logistic_classifier(inputs)
predictions = math_ops.round(logits)
accuracy, update_op = metrics.accuracy(
predictions=predictions, labels=labels)
final_values = evaluation.evaluate_repeatedly(
checkpoint_dir=checkpoint_dir,
eval_ops=update_op,
final_ops={'accuracy': accuracy},
hooks=[
evaluation.StopAfterNEvalsHook(1),
],
max_number_of_evaluations=1)
self.assertTrue(final_values['accuracy'] > .99)
def testEvaluationLoopTimeout(self):
checkpoint_dir = os.path.join(self.get_temp_dir(),
'evaluation_loop_timeout')
if not gfile.Exists(checkpoint_dir):
gfile.MakeDirs(checkpoint_dir)
# We need a variable that the saver will try to restore.
variables.get_or_create_global_step()
# Run with placeholders. If we actually try to evaluate this, we'd fail
# since we're not using a feed_dict.
cant_run_op = array_ops.placeholder(dtype=dtypes.float32)
start = time.time()
final_values = evaluation.evaluate_repeatedly(
checkpoint_dir=checkpoint_dir,
eval_ops=cant_run_op,
hooks=[evaluation.StopAfterNEvalsHook(10)],
timeout=6)
end = time.time()
self.assertFalse(final_values)
# Assert that we've waited for the duration of the timeout (minus the sleep
# time).
self.assertGreater(end - start, 5.0)
    # Then the timeout kicked in and stopped the loop.
self.assertLess(end - start, 7)
def testEvaluationLoopTimeoutWithTimeoutFn(self):
checkpoint_dir = os.path.join(self.get_temp_dir(),
'evaluation_loop_timeout_with_timeout_fn')
# Train a Model to completion:
self._train_model(checkpoint_dir, num_steps=300)
# Run
inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
labels = constant_op.constant(self._labels, dtype=dtypes.float32)
logits = logistic_classifier(inputs)
predictions = math_ops.round(logits)
accuracy, update_op = metrics.accuracy(
predictions=predictions, labels=labels)
timeout_fn_calls = [0]
def timeout_fn():
timeout_fn_calls[0] += 1
return timeout_fn_calls[0] > 3
final_values = evaluation.evaluate_repeatedly(
checkpoint_dir=checkpoint_dir,
eval_ops=update_op,
final_ops={'accuracy': accuracy},
hooks=[
evaluation.StopAfterNEvalsHook(1),
],
eval_interval_secs=1,
max_number_of_evaluations=2,
timeout=0.1,
timeout_fn=timeout_fn)
# We should have evaluated once.
self.assertTrue(final_values['accuracy'] > .99)
    # And the timeout_fn was called 4 times.
self.assertEqual(4, timeout_fn_calls[0])
def testEvaluateWithEvalFeedDict(self):
# Create a checkpoint.
checkpoint_dir = os.path.join(self.get_temp_dir(),
'evaluate_with_eval_feed_dict')
self._train_model(checkpoint_dir, num_steps=1)
# We need a variable that the saver will try to restore.
variables.get_or_create_global_step()
# Create a variable and an eval op that increments it with a placeholder.
my_var = variables.local_variable(0.0, name='my_var')
increment = array_ops.placeholder(dtype=dtypes.float32)
eval_ops = state_ops.assign_add(my_var, increment)
increment_value = 3
num_evals = 5
expected_value = increment_value * num_evals
final_values = evaluation.evaluate_repeatedly(
checkpoint_dir=checkpoint_dir,
eval_ops=eval_ops,
feed_dict={increment: 3},
final_ops={'my_var': array_ops.identity(my_var)},
hooks=[
evaluation.StopAfterNEvalsHook(num_evals),
],
max_number_of_evaluations=1)
self.assertEqual(final_values['my_var'], expected_value)
def _create_names_to_metrics(self, predictions, labels):
accuracy0, update_op0 = metrics.accuracy(labels, predictions)
accuracy1, update_op1 = metrics.accuracy(labels, predictions + 1)
names_to_values = {'Accuracy': accuracy0, 'Another_accuracy': accuracy1}
names_to_updates = {'Accuracy': update_op0, 'Another_accuracy': update_op1}
return names_to_values, names_to_updates
def _verify_events(self, output_dir, names_to_values):
"""Verifies that the given `names_to_values` are found in the summaries.
Also checks that a GraphDef was written out to the events file.
Args:
output_dir: An existing directory where summaries are found.
names_to_values: A dictionary of strings to values.
"""
# Check that the results were saved. The events file may have additional
# entries, e.g. the event version stamp, so have to parse things a bit.
output_filepath = glob.glob(os.path.join(output_dir, '*'))
self.assertEqual(len(output_filepath), 1)
events = summary_iterator.summary_iterator(output_filepath[0])
summaries = []
graph_def = None
for event in events:
if event.summary.value:
summaries.append(event.summary)
elif event.graph_def:
graph_def = event.graph_def
values = []
for summary in summaries:
for value in summary.value:
values.append(value)
saved_results = {v.tag: v.simple_value for v in values}
for name in names_to_values:
self.assertAlmostEqual(names_to_values[name], saved_results[name], 5)
self.assertIsNotNone(graph_def)
def testSummariesAreFlushedToDisk(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), 'summaries_are_flushed')
logdir = os.path.join(self.get_temp_dir(), 'summaries_are_flushed_eval')
if gfile.Exists(logdir):
gfile.DeleteRecursively(logdir)
# Train a Model to completion:
self._train_model(checkpoint_dir, num_steps=300)
# Create the model (which can be restored).
inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
logistic_classifier(inputs)
names_to_values = {'bread': 3.4, 'cheese': 4.5, 'tomato': 2.0}
for k in names_to_values:
v = names_to_values[k]
summary_lib.scalar(k, v)
evaluation.evaluate_repeatedly(
checkpoint_dir=checkpoint_dir,
hooks=[
evaluation.SummaryAtEndHook(log_dir=logdir),
],
max_number_of_evaluations=1)
self._verify_events(logdir, names_to_values)
def testSummaryAtEndHookWithoutSummaries(self):
logdir = os.path.join(self.get_temp_dir(),
                          'summary_at_end_hook_without_summaries')
if gfile.Exists(logdir):
gfile.DeleteRecursively(logdir)
with ops.Graph().as_default():
# Purposefully don't add any summaries. The hook will just dump the
# GraphDef event.
hook = evaluation.SummaryAtEndHook(log_dir=logdir)
hook.begin()
with self.cached_session() as session:
hook.after_create_session(session, None)
hook.end(session)
self._verify_events(logdir, {})
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/training/python/training/evaluation_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Tuner interface for hyper-parameters tuning."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.framework.python.framework import experimental
@six.add_metaclass(abc.ABCMeta)
class Tuner(object):
"""Tuner class is the interface for Experiment hyper-parameters tuning.
Example:
```
def _create_my_experiment(run_config, hparams):
hidden_units = [hparams.unit_per_layer] * hparams.num_hidden_layers
return tf.contrib.learn.Experiment(
estimator=DNNClassifier(config=run_config, hidden_units=hidden_units),
train_input_fn=my_train_input,
eval_input_fn=my_eval_input)
tuner = create_tuner(study_configuration, objective_key)
  learn_runner.tune(experiment_fn=_create_my_experiment, tuner=tuner)
  ```
  """
@experimental
@abc.abstractmethod
def next_trial(self):
"""Switch to the next trial.
Ask the tuning service for a new trial for hyper-parameters tuning.
Returns:
A boolean indicating if a trial was assigned to the tuner.
Raises:
      RuntimeError: If the tuner is not initialized correctly.
"""
raise NotImplementedError("Calling an abstract method.")
@experimental
@abc.abstractmethod
def run_experiment(self, experiment_fn):
"""Creates an Experiment by calling `experiment_fn` and executes it.
    It creates a `RunConfig`, which captures the current execution environment
    configuration, and retrieves the hyper-parameters for the current trial from
    the tuning service. Both are passed to the `experiment_fn` and used to create
    the Experiment for the current trial's execution. When finished, the measured
    objective is reported back to the tuning service.
    If the `RunConfig` does not include a task type, an exception is raised. The
    task type should be one of the types supported by the tuner. If the tuner
    does not support the task type directly, it may delegate the task to the
    Experiment, usually by calling a method of the Experiment. An exception is
    raised if neither the tuner nor the Experiment supports the task type.
Args:
experiment_fn: A function that creates an `Experiment`. It should accept
an argument `run_config` which should be used to create the `Estimator`
(passed as `config` to its constructor), and an argument `hparams`,
which should be used for hyper-parameters tuning. It must return an
`Experiment`.
"""
raise NotImplementedError("Calling an abstract method.")
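# Minimal sketch of a concrete tuner (illustration only): an in-memory "tuner"
# that walks a fixed list of hparams settings. The constructor arguments, the
# hparams objects, and the call to `train_and_evaluate()` are assumptions made
# for this example; a real tuner would obtain trials from a tuning service and
# report the measured objective back to it.
class _FixedGridTunerSketch(Tuner):
  """Iterates over a fixed list of hparams objects (illustration only)."""
  def __init__(self, run_config, hparams_list):
    self._run_config = run_config
    self._pending = list(hparams_list)
    self._current = None
  def next_trial(self):
    # A trial is "assigned" as long as untried hparams remain.
    if not self._pending:
      return False
    self._current = self._pending.pop(0)
    return True
  def run_experiment(self, experiment_fn):
    # Build the Experiment for the current trial and execute it; a real tuner
    # would also report the resulting measure to the tuning service.
    experiment = experiment_fn(run_config=self._run_config,
                               hparams=self._current)
    experiment.train_and_evaluate()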
|
tensorflow-master
|
tensorflow/contrib/training/python/training/tuner.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.training.python.training import sampling_ops
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
class SamplingOpsThreadingTest(test.TestCase):
def testMultiThreadedEstimateDataDistribution(self):
num_classes = 10
# Set up graph.
random_seed.set_random_seed(1234)
label = math_ops.cast(
math_ops.round(random_ops.random_uniform([1]) * num_classes),
dtypes_lib.int32)
prob_estimate = sampling_ops._estimate_data_distribution( # pylint: disable=protected-access
label, num_classes)
# Check that prob_estimate is well-behaved in a multithreaded context.
_, _, [prob_estimate] = sampling_ops._verify_input( # pylint: disable=protected-access
[], label, [prob_estimate])
# Use queues to run multiple threads over the graph, each of which
# fetches `prob_estimate`.
queue = data_flow_ops.FIFOQueue(
capacity=25,
dtypes=[prob_estimate.dtype],
shapes=[prob_estimate.get_shape()])
enqueue_op = queue.enqueue([prob_estimate])
queue_runner_impl.add_queue_runner(
queue_runner_impl.QueueRunner(queue, [enqueue_op] * 25))
out_tensor = queue.dequeue()
# Run the multi-threaded session.
with self.cached_session() as sess:
# Need to initialize variables that keep running total of classes seen.
variables.global_variables_initializer().run()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
for _ in range(25):
sess.run([out_tensor])
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/training/python/training/sampling_ops_threading_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for bucketing data into groups.
The classes and functions in this module are used to queue up data into
buckets conditional on side information (e.g. sequence length).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.summary import summary
from tensorflow.python.training import input as input_py
from tensorflow.python.training import queue_runner
# pylint: disable=protected-access
_as_original_type = input_py._as_original_type
_as_tensor_list = input_py._as_tensor_list
_restore_sparse_tensors = input_py._restore_sparse_tensors
_dtypes = input_py._dtypes
_store_sparse_tensors = input_py._store_sparse_tensors
_validate_keep_input = input_py._validate_keep_input
_shapes = input_py._shapes
_which_queue = input_py._which_queue
# pylint: enable=protected-access
def _validate_bucket(tensor_list):
tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
if not tensor_list:
raise ValueError("Expected at least one tensor in bucket().")
return tensor_list
def bucket(tensors,
which_bucket,
batch_size,
num_buckets,
num_threads=1,
capacity=32,
bucket_capacities=None,
shapes=None,
dynamic_pad=False,
allow_smaller_final_batch=False,
keep_input=True,
shared_name=None,
name=None):
"""Lazy bucketing of input tensors according to `which_bucket`.
The argument `tensors` can be a list or a dictionary of tensors.
The value returned by the function will be of the same type
as `tensors`.
The tensors entering this function are put into the bucket given by
`which_bucket`. Each bucket has its own queue. When a bucket contains
`batch_size` elements, this minibatch is pushed onto a top queue. The
tensors returned from this function are the result of dequeueing the
next minibatch from this top queue.
This function is implemented using several queues. A `QueueRunner` for the
queues is added to the current `Graph`'s `QUEUE_RUNNERS` collection.
As the returned tensors are the result of a dequeue operation, evaluating
them will throw a `tf.errors.OutOfRangeError` when the input queue is
exhausted. If these tensors are feeding another input queue, its queue runner
will catch this exception; however, if they are used in your main thread,
you are responsible for catching it yourself.
*N.B.:* If `dynamic_pad` is `False`, you must ensure that either
(i) the `shapes` argument is passed, or (ii) all of the tensors in
`tensors` must have fully-defined shapes. `ValueError` will be
raised if neither of these conditions holds.
If `dynamic_pad` is `True`, it is sufficient that the *rank* of the
tensors is known, but individual dimensions may have shape `None`.
In this case, for each enqueue the dimensions with value `None`
may have a variable length; upon dequeue, the output tensors will be padded
on the right to the maximum shape of the tensors in the current minibatch.
For numbers, this padding takes value 0. For strings, this padding is
the empty string. See `PaddingFIFOQueue` for more info.
If `allow_smaller_final_batch` is `True`, a smaller batch value than
`batch_size` is returned when the queues are closed and there are not enough
elements to fill the batch, otherwise the pending elements are discarded.
In addition, all output tensors' static shapes, as accessed via the
`get_shape()` method will have a 0th `Dimension` value of `None`, and
operations that depend on fixed batch_size would fail.
Args:
tensors: The list or dictionary of tensors, representing a single element,
to bucket. Nested lists are not supported.
which_bucket: An `int32` scalar Tensor taking a value in `[0, num_buckets)`.
batch_size: The new batch size pulled from the queue (all queues will have
the same size). If a list is passed in then each bucket will have a
different batch_size.
(python int, int32 scalar or iterable of integers of length num_buckets).
num_buckets: A python integer, the number of buckets.
num_threads: An integer. The number of threads enqueuing `tensors`.
capacity: An integer. The maximum number of minibatches in the top queue,
and also (by default) the maximum number of elements within each bucket.
bucket_capacities: (Optional) None or a list of integers, the capacities of
each bucket. If None, capacity is used (default). If specified, it must
be a list of integers of length num_buckets: the i-th element is used
as capacity for the i-th bucket queue.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensors`.
dynamic_pad: Boolean. Allow variable dimensions in input shapes.
The given dimensions are padded upon dequeue so that tensors within a
batch have the same shapes.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batches to be smaller if there are insufficient items left in the queues.
keep_input: A `bool` scalar Tensor. If provided, this tensor controls
whether the input is added to the queue or not. If it evaluates `True`,
then `tensors` are added to the bucket; otherwise they are dropped. This
tensor essentially acts as a filtering mechanism.
shared_name: (Optional). If set, the queues will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A tuple `(bucket, outputs)` where `bucket` is
an `int32` scalar tensor and `outputs` is a list or
dictionary of batched outputs corresponding to elements of `tensors`.
Every step will receive a new bucket of outputs.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensors` or if batch_size is a sequence
but its length != num_buckets. Also if bucket_capacities is not None but
its length != num_buckets.
"""
batch_size_per_bucket = False
if isinstance(batch_size, (list, tuple)):
batch_size_per_bucket = True
if len(batch_size) != num_buckets:
raise ValueError(
"If batch_size is a list it must have num_buckets elements")
else:
batch_size = [batch_size] * num_buckets
if bucket_capacities is None:
bucket_capacities = [capacity] * num_buckets
if len(bucket_capacities) != num_buckets:
raise ValueError(
"The list bucket_capacities (%s) must have exactly num_buckets (%d) "
"elements." % (str(bucket_capacities), num_buckets))
tensor_list = _as_tensor_list(tensors)
with ops.name_scope(name, "bucket", tensor_list) as name:
tensor_list = _validate_bucket(tensor_list)
keep_input = _validate_keep_input(keep_input, enqueue_many=False)
(tensor_list, sparse_info) = _store_sparse_tensors(
tensor_list, enqueue_many=False, keep_input=keep_input)
# Round-trip batch_size to a tensor, and possibly back
for i, bucket_batch_size in enumerate(batch_size):
bucket_batch_size = ops.convert_to_tensor(
bucket_batch_size, dtype=dtypes.int32, name="batch_size")
static_batch_size = tensor_util.constant_value(bucket_batch_size)
batch_size[i] = (static_batch_size if static_batch_size is not None else
bucket_batch_size)
types = _dtypes([tensor_list])
shapes = _shapes([tensor_list], shapes, enqueue_many=False)
which_bucket = ops.convert_to_tensor(
which_bucket, dtype=dtypes.int32, name="which_bucket")
queue_creator = _which_queue(dynamic_pad)
bucket_queues = []
for i in range(num_buckets):
shared_name_i = ("%s_%d" % (shared_name, i) if shared_name is not None
else None)
bucket_queues.append(
queue_creator(
capacity=bucket_capacities[i],
dtypes=types,
shapes=shapes,
shared_name=shared_name_i,
name="bucket_queue_%d" % i))
maybe_static_batch_size = (
None if (allow_smaller_final_batch or batch_size_per_bucket)
else static_batch_size)
bucket_shapes = [
tensor_shape.vector(maybe_static_batch_size).concatenate(s)
for s in bucket_queues[0].shapes
]
# top_queue is a PaddingFIFOQueue even if the bucket queues are regular FIFO
# queues because if we use allow_smaller_final_batch, shapes will
# contain Nones in their first entry; as a result, a regular
# FIFOQueue would die when being passed shapes that are not fully defined.
top_queue = data_flow_ops.PaddingFIFOQueue(
capacity=capacity,
dtypes=[dtypes.int32] + types,
shapes=[tensor_shape.scalar()] + bucket_shapes,
shared_name=shared_name,
name="top_queue")
def enqueue_which():
"""Return an op that enqueues conditionally in one of the queues."""
def enqueue_single(i):
return bucket_queues[i].enqueue(tensor_list)
enqueues = [
control_flow_ops.cond(
math_ops.equal(which_bucket, i),
functools.partial(enqueue_single, i), control_flow_ops.no_op)
for i in range(num_buckets)
]
return control_flow_ops.group(*enqueues, name="group_enqueues")
maybe_enqueue = utils.smart_cond(
keep_input,
enqueue_which,
control_flow_ops.no_op)
bucket_enqueue_ops = [maybe_enqueue] * num_threads
if allow_smaller_final_batch:
which_dequeue = lambda q: q.dequeue_up_to
else:
which_dequeue = lambda q: q.dequeue_many
def make_list(t):
if isinstance(t, (list, tuple)):
return t
else:
return [t]
enqueues_to_top = [
top_queue.enqueue(
[constant_op.constant(i)] + make_list(which_dequeue(q)(
bs, name="read_bucket_%d" % i)),
name="enqueue_from_bucket_%d" % i)
for i, (q, bs) in enumerate(zip(bucket_queues, batch_size))
]
queue_runner.add_queue_runner(
queue_runner.QueueRunner(
bucket_queues[0], enqueues_to_top,
close_op=top_queue.close(),
cancel_op=top_queue.close(cancel_pending_enqueues=True),
queue_closed_exception_types=(errors.OutOfRangeError,
errors.CancelledError)))
queue_runner.add_queue_runner(
queue_runner.QueueRunner(
top_queue,
bucket_enqueue_ops,
close_op=control_flow_ops.group(
*[q.close() for q in bucket_queues]),
cancel_op=control_flow_ops.group(
*[q.close(cancel_pending_enqueues=True)
for q in bucket_queues]),
queue_closed_exception_types=(errors.OutOfRangeError,
errors.CancelledError)))
for q in bucket_queues:
summary.scalar("bucket/%s/size" % q.name,
math_ops.cast(top_queue.size(), dtypes.float32))
summary.scalar("bucket/%s/fraction_of_%d_full" % (top_queue.name, capacity),
math_ops.cast(top_queue.size(), dtypes.float32) *
(1. / capacity))
dequeued = top_queue.dequeue(name="dequeue_top")
which_bucket_dequeued = dequeued[0]
dequeued = dequeued[1:]
if len(dequeued) == 1:
dequeued = dequeued[0]
dequeued = _restore_sparse_tensors(dequeued, sparse_info)
return (which_bucket_dequeued, _as_original_type(tensors, dequeued))
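# Illustrative sketch only (not part of the original file): one plausible way
# to call `bucket` directly with an externally computed bucket index. The
# placeholder names below are hypothetical.
def _example_bucket_usage():
  """Sketch: route a fixed-shape feature into one of four bucket queues."""
  # A single example with a fully defined shape, plus its int32 bucket index.
  feature = array_ops.placeholder(dtypes.float32, shape=[128])
  bucket_index = array_ops.placeholder(dtypes.int32, shape=[])
  # Each dequeue yields (bucket_id, [batched_feature]) for one full minibatch.
  bucket_id, batched = bucket(
      tensors=[feature],
      which_bucket=bucket_index,
      batch_size=16,
      num_buckets=4,
      num_threads=2)
  return bucket_id, batched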
def bucket_by_sequence_length(input_length,
tensors,
batch_size,
bucket_boundaries,
num_threads=1,
capacity=32,
bucket_capacities=None,
shapes=None,
dynamic_pad=False,
allow_smaller_final_batch=False,
keep_input=True,
shared_name=None,
name=None):
"""Lazy bucketing of inputs according to their length.
This method calls `tf.contrib.training.bucket` under the hood, after first
subdividing the bucket boundaries into separate buckets and identifying which
bucket the given `input_length` belongs to. See the documentation for
`bucket` for details of the other arguments.
Args:
input_length: `int32` scalar `Tensor`, the sequence length of tensors.
tensors: The list or dictionary of tensors, representing a single element,
to bucket. Nested lists are not supported.
batch_size: The new batch size pulled from the queue (all queues will have
the same size). If a list is passed in then each bucket will have a
different batch_size.
(python int, int32 scalar or iterable of integers of length num_buckets).
bucket_boundaries: int list, increasing non-negative numbers.
The edges of the buckets to use when bucketing tensors. Two extra buckets
are created, one for `input_length < bucket_boundaries[0]` and
one for `input_length >= bucket_boundaries[-1]`.
num_threads: An integer. The number of threads enqueuing `tensors`.
capacity: An integer. The maximum number of minibatches in the top queue,
and also the maximum number of elements within each bucket.
bucket_capacities: (Optional) None or a list of integers, the capacities of
each bucket. If None, capacity is used (default). If specified, it must
be a list of integers of length one larger than bucket_boundaries.
Its i-th element is used as capacity for the i-th bucket queue.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensors`.
dynamic_pad: Boolean. Allow variable dimensions in input shapes.
The given dimensions are padded upon dequeue so that tensors within a
batch have the same shapes.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batches to be smaller if there are insufficient items left in the queues.
keep_input: A `bool` scalar Tensor. If provided, this tensor controls
whether the input is added to the queue or not. If it evaluates `True`,
then `tensors` are added to the bucket; otherwise they are dropped. This
tensor essentially acts as a filtering mechanism.
shared_name: (Optional). If set, the queues will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A tuple `(sequence_length, outputs)` where `sequence_length` is
a 1-D `Tensor` of size `batch_size` and `outputs` is a list or dictionary
of batched, bucketed, outputs corresponding to elements of `tensors`.
Raises:
TypeError: if `bucket_boundaries` is not a list of python integers.
ValueError: if `bucket_boundaries` is empty or contains non-increasing
values or if batch_size is a list and its length doesn't equal the number
of buckets.
"""
tensor_list = _as_tensor_list(tensors)
if not isinstance(bucket_boundaries, (list, tuple)):
raise TypeError(
"bucket_boundaries must be a list or tuple, but received: %s" %
bucket_boundaries)
if not bucket_boundaries:
raise ValueError("bucket_boundaries must not be empty")
for (s, e) in zip(bucket_boundaries[:-1], bucket_boundaries[1:]):
if not isinstance(s, int) or not isinstance(e, int):
raise TypeError("bucket boundaries must be integers, but saw: %s and %s" %
(s, e))
if s >= e:
raise ValueError(
"Buckets must contain sequential increasing lengths, but saw: "
"%d before %d" % (s, e))
with ops.name_scope(name, "bucket_by_sequence_length",
[input_length] + tensor_list) as name:
input_length = ops.convert_to_tensor(
input_length, dtype=dtypes.int32, name="input_length")
# Bucketing conditions are:
# l < b[0]
# b[0] <= l < b[1]
# b[1] <= l < b[2]
# ...
# b[N-2] <= l < b[N-1]
# b[N-1] <= l
# Equivalent to:
# [-inf, b[0], b[1], ..., b[N-1]] <= l < [b[0], b[1], ..., b[N-1], inf]
buckets_min = [np.iinfo(np.int32).min] + list(bucket_boundaries)
buckets_max = list(bucket_boundaries) + [np.iinfo(np.int32).max]
conditions_c = math_ops.logical_and(
math_ops.less_equal(buckets_min, input_length),
math_ops.less(input_length, buckets_max))
which_bucket = math_ops.reduce_min(array_ops.where(conditions_c))
which_bucket = math_ops.cast(which_bucket, dtypes.int32)
if shapes is not None:
shapes = [tensor_shape.scalar()] + shapes
_, dequeued = bucket(
tensors=[input_length] + tensor_list,
which_bucket=which_bucket,
batch_size=batch_size,
num_buckets=len(bucket_boundaries) + 1,
num_threads=num_threads,
capacity=capacity,
bucket_capacities=bucket_capacities,
shapes=shapes,
dynamic_pad=dynamic_pad,
allow_smaller_final_batch=allow_smaller_final_batch,
keep_input=keep_input,
shared_name=shared_name)
return (dequeued[0], _as_original_type(tensors, dequeued[1:]))
__all__ = ["bucket", "bucket_by_sequence_length"]
|
tensorflow-master
|
tensorflow/contrib/training/python/training/bucket_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains various routines and helper functions for training models.
This script contains various functions for training models. These include
manipulating gradients, creating a `train_op` (an operation that computes the
loss and applies the gradients) and a training loop function. The training loop
allows the user to pass in the `train_op` and runs the optimization according
to user-specified arguments.
************************************
* A simple working training script *
************************************
# Load data and create the model:
images, labels = LoadData(...)
predictions = MyModel(images)
# Define the loss:
tf.contrib.losses.log_loss(predictions, labels)
total_loss = tf.contrib.losses.get_total_loss()
# Define the optimizer:
optimizer = tf.compat.v1.train.MomentumOptimizer(FLAGS.learning_rate,
FLAGS.momentum)
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
# Run training.
tf.contrib.training.train(train_op, my_log_dir)
*************************
* Creating the train_op *
*************************
In order to use the `train` function, one needs a train_op: an `Operation` that
(a) computes the loss, (b) applies the gradients to update the weights and
(c) returns the value of the loss. tf.contrib.training.create_train_op creates
such an `Operation`. This function also provides the ability to manipulate
the gradients using a few arguments:
# Create the train_op and clip the gradient norms:
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
transform_grads_fn=clip_gradient_norms_fn(3))
# Create the train_op and scale the gradients by providing a map from variable
# name (or variable) to a scaling coefficient:
def transform_grads_fn(grads):
gradient_multipliers = {
'conv0/weights': 1.2,
'fc8/weights': 3.4,
}
return tf.contrib.training.multiply_gradients(
grads, gradient_multipliers)
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
transform_grads_fn=transform_grads_fn)
****************************************************************
* Performing additional (non-gradient) updates during training *
****************************************************************
Many networks utilize modules, like BatchNorm, that require performing a series
of non-gradient updates during training. tf.contrib.training.create_train_op
allows a user to pass in a list of update_ops to call along with the gradient
updates.
train_op = tf.contrib.training.create_train_op(
total_loss, optimizer, update_ops)
By default, tf.contrib.training.create_train_op includes all update ops that are
part of the `tf.GraphKeys.UPDATE_OPS` collection. Additionally, the
tf.contrib.layers.batch_norm function adds the moving mean and moving variance
updates to this collection. Consequently, users who want to use
tf.contrib.layers.batch_norm will not need to take any additional steps in order
to have the moving mean and moving variance updates be computed.
However, users with additional, specialized updates can either override the
default update ops or simply add additional update ops to the
`tf.GraphKeys.UPDATE_OPS` collection:
# Force `create_train_op` to NOT use ANY update_ops:
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
update_ops=[])
# Use an alternative set of update ops:
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
update_ops=my_other_update_ops)
# Use a set of update ops in addition to the default updates:
tf.compat.v1.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update0)
tf.compat.v1.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update1)
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer)
# Which is the same as:
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
update_ops=tf.compat.v1.get_collection(tf.GraphKeys.UPDATE_OPS))
******************************************
* Initializing a model from a checkpoint *
******************************************
It is common to want to 'warm-start' a model from a pre-trained checkpoint.
One can use a tf.Scaffold and an initializing function to do so.
...
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
# Create the initial assignment op
checkpoint_path = '/path/to/old_model_checkpoint'
variables_to_restore = tf.contrib.framework.get_model_variables()
init_fn = tf.contrib.framework.assign_from_checkpoint_fn(
checkpoint_path, variables_to_restore)
# Run training.
scaffold = tf.Scaffold(init_fn=init_fn)
tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)
***************************************************************************
* Initializing a model from a checkpoint whose variable names don't match *
***************************************************************************
At times, a user may want to initialize a new model with values from a
checkpoint whose variable names do not match those of the current model. In this
case, one needs to create a mapping from the checkpoint variable names to the
current model variables. This requires only a small modification of the code
above:
...
# Creates a model with two variables, var0 and var1
predictions = MyModel(images)
...
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
checkpoint_path = '/path/to/old_model_checkpoint'
# Create the mapping:
variables_to_restore = {
'name_var_0_in_checkpoint':
tf.contrib.framework.get_unique_variable('var0'),
'name_var_1_in_checkpoint':
tf.contrib.framework.get_unique_variable('var1')
}
init_fn = tf.contrib.framework.assign_from_checkpoint_fn(
checkpoint_path, variables_to_restore)
scaffold = tf.Scaffold(init_fn=init_fn)
# Run training.
tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)
*************************************************
* Fine-Tuning Part of a model from a checkpoint *
*************************************************
Rather than initializing all of the weights of a given model, we sometimes
only want to restore some of the weights from a checkpoint. To do this, one
need only filter those variables to initialize as follows:
...
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
checkpoint_path = '/path/to/old_model_checkpoint'
# Specify the variables to restore via a list of inclusion or exclusion
# patterns:
variables_to_restore = tf.contrib.framework.get_variables_to_restore(
include=["conv"], exclude=["fc8", "fc9])
# or
variables_to_restore = tf.contrib.framework.get_variables_to_restore(
exclude=["conv"])
init_fn = tf.contrib.framework.assign_from_checkpoint_fn(
checkpoint_path, variables_to_restore)
scaffold = tf.Scaffold(init_fn=init_fn)
# Run training.
tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)
******************************************************
* Initializing model variables from values in memory *
******************************************************
One may want to initialize the weights of a model from values coming from an
arbitrary source (a text document, MATLAB file, etc.). While this is technically
feasible using assign operations, this strategy results in the values of your
weights being stored in the graph. For large models, this becomes prohibitively
large. However, it's possible to perform this initial assignment without having
to store the values of the initial model in the graph itself by using
placeholders and a feed dictionary:
...
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
# Create the mapping from variable names to values:
var0_initial_value = ReadFromDisk(...)
var1_initial_value = ReadFromDisk(...)
var_names_to_values = {
'var0': var0_initial_value,
'var1': var1_initial_value,
}
init_fn = tf.contrib.framework.assign_from_values_fn(var_names_to_values)
scaffold = tf.Scaffold(init_fn=init_fn)
# Run training.
tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import monitored_session
from tensorflow.python.training import optimizer as tf_optimizer
from tensorflow.python.training import training_util
# TODO(nsilberman): move add_gradients_summaries, clip_gradient_norms and
# multiply_gradients into contrib/summaries and contrib/optimizers.py
__all__ = [
'add_gradients_summaries',
'clip_gradient_norms',
'clip_gradient_norms_fn',
'create_train_op',
'multiply_gradients',
'train',
]
def add_gradients_summaries(grads_and_vars):
"""Add summaries to gradients.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
Returns:
The list of created summaries.
"""
summaries = []
for grad, var in grads_and_vars:
if grad is not None:
if isinstance(grad, ops.IndexedSlices):
grad_values = grad.values
else:
grad_values = grad
summaries.append(
summary.histogram(var.op.name + '_gradient', grad_values))
summaries.append(
summary.scalar(var.op.name + '_gradient_norm',
clip_ops.global_norm([grad_values])))
else:
logging.info('Var %s has no gradient', var.op.name)
return summaries
def clip_gradient_norms(gradients_to_variables, max_norm):
"""Clips the gradients by the given value.
Args:
gradients_to_variables: A list of gradient to variable pairs (tuples).
max_norm: the maximum norm value.
Returns:
A list of clipped gradient to variable pairs.
"""
clipped_grads_and_vars = []
for grad, var in gradients_to_variables:
if grad is not None:
if isinstance(grad, ops.IndexedSlices):
tmp = clip_ops.clip_by_norm(grad.values, max_norm)
grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
else:
grad = clip_ops.clip_by_norm(grad, max_norm)
clipped_grads_and_vars.append((grad, var))
return clipped_grads_and_vars
def clip_gradient_norms_fn(max_norm):
"""Returns a `transform_grads_fn` function for gradient clipping."""
def clip_norms(gradients_to_variables):
return clip_gradient_norms(gradients_to_variables, max_norm)
return clip_norms
def multiply_gradients(grads_and_vars, gradient_multipliers):
"""Multiply specified gradients.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
gradient_multipliers: A map from either `Variables` or `Variable` op names
to the coefficient by which the associated gradient should be scaled.
Returns:
The updated list of gradient to variable pairs.
Raises:
ValueError: If `grads_and_vars` is not a list or if `gradient_multipliers`
is empty or None or if `gradient_multipliers` is not a dictionary.
"""
if not isinstance(grads_and_vars, list):
raise ValueError('`grads_and_vars` must be a list.')
if not gradient_multipliers:
raise ValueError('`gradient_multipliers` is empty.')
if not isinstance(gradient_multipliers, dict):
raise ValueError('`gradient_multipliers` must be a dict.')
multiplied_grads_and_vars = []
for grad, var in grads_and_vars:
if var in gradient_multipliers or var.op.name in gradient_multipliers:
key = var if var in gradient_multipliers else var.op.name
if grad is None:
raise ValueError('Requested multiple of `None` gradient.')
if isinstance(grad, ops.IndexedSlices):
tmp = grad.values * ops.convert_to_tensor(
gradient_multipliers[key], dtype=grad.dtype)
grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
else:
grad *= ops.convert_to_tensor(
gradient_multipliers[key], dtype=grad.dtype)
multiplied_grads_and_vars.append((grad, var))
return multiplied_grads_and_vars
_USE_GLOBAL_STEP = 0
def create_train_op(total_loss,
optimizer,
global_step=_USE_GLOBAL_STEP,
update_ops=None,
variables_to_train=None,
transform_grads_fn=None,
summarize_gradients=False,
gate_gradients=tf_optimizer.Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
check_numerics=True):
"""Creates an `Operation` that evaluates the gradients and returns the loss.
Args:
total_loss: A `Tensor` representing the total loss.
optimizer: A tf.Optimizer to use for computing the gradients.
global_step: A `Tensor` representing the global step variable. If left as
`_USE_GLOBAL_STEP`, then tf.contrib.framework.global_step() is used.
update_ops: An optional list of updates to execute. If `update_ops` is
`None`, then the update ops are set to the contents of the
`tf.GraphKeys.UPDATE_OPS` collection. If `update_ops` is not `None`, but
it doesn't contain all of the update ops in `tf.GraphKeys.UPDATE_OPS`, a
warning will be displayed.
variables_to_train: an optional list of variables to train. If None, it will
default to all tf.compat.v1.trainable_variables().
transform_grads_fn: A function which takes a single argument, a list of
gradient to variable pairs (tuples), performs any requested gradient
updates, such as gradient clipping or multipliers, and returns the updated
list.
summarize_gradients: Whether or not to add summaries for each gradient.
gate_gradients: How to gate the computation of gradients. See tf.Optimizer.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: Whether or not to try colocating the gradients
with the ops that generated them.
check_numerics: Whether or not we apply check_numerics.
Returns:
A `Tensor` that, when evaluated, computes the gradients and returns the total
loss value.
"""
if global_step is _USE_GLOBAL_STEP:
global_step = training_util.get_or_create_global_step()
# Update ops use GraphKeys.UPDATE_OPS collection if update_ops is None.
global_update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
if update_ops is None:
update_ops = global_update_ops
else:
update_ops = set(update_ops)
if not global_update_ops.issubset(update_ops):
logging.warning('update_ops in create_train_op does not contain all the '
'update_ops in GraphKeys.UPDATE_OPS')
# Make sure update_ops are computed before total_loss.
if update_ops:
with ops.control_dependencies(update_ops):
barrier = control_flow_ops.no_op(name='update_barrier')
total_loss = control_flow_ops.with_dependencies([barrier], total_loss)
if variables_to_train is None:
# Default to tf.compat.v1.trainable_variables()
variables_to_train = tf_variables.trainable_variables()
else:
# Make sure that variables_to_train are in
# tf.compat.v1.trainable_variables()
for v in variables_to_train:
assert v.trainable or v in tf_variables.trainable_variables()
assert variables_to_train
# Create the gradients. Note that apply_gradients adds the gradient
# computation to the current graph.
grads = optimizer.compute_gradients(
total_loss,
variables_to_train,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops)
if transform_grads_fn:
grads = transform_grads_fn(grads)
# Summarize gradients.
if summarize_gradients:
with ops.name_scope('summarize_grads'):
add_gradients_summaries(grads)
# Create gradient updates.
grad_updates = optimizer.apply_gradients(grads, global_step=global_step)
with ops.name_scope('train_op'):
# Make sure total_loss is valid.
if check_numerics:
total_loss = array_ops.check_numerics(total_loss,
'LossTensor is inf or nan')
# Ensure the train_tensor computes grad_updates.
train_op = control_flow_ops.with_dependencies([grad_updates], total_loss)
# Add the operation used for training to the 'train_op' collection
train_ops = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
if train_op not in train_ops:
train_ops.append(train_op)
return train_op
def train(train_op,
logdir,
master='',
is_chief=True,
scaffold=None,
hooks=None,
chief_only_hooks=None,
save_checkpoint_secs=600,
save_summaries_steps=100,
config=None,
max_wait_secs=7200,
run_metadata=None):
"""Runs the training loop.
Args:
train_op: A `Tensor` that, when executed, will apply the gradients and
return the loss value.
logdir: The directory where the graph and checkpoints are saved.
master: The URL of the master.
is_chief: Specifies whether or not the training is being run by the primary
replica during replica training.
scaffold: An tf.compat.v1.train.Scaffold instance.
hooks: List of `tf.estimator.SessionRunHook` callbacks which are run inside
the training loop.
chief_only_hooks: List of `tf.estimator.SessionRunHook` instances which are
run inside the training loop for the chief trainer only.
save_checkpoint_secs: The frequency, in seconds, that a checkpoint is saved
using a default checkpoint saver. If `save_checkpoint_secs` is set to
`None`, then the default checkpoint saver isn't used.
save_summaries_steps: The frequency, in number of global steps, that the
summaries are written to disk using a default summary saver. If
`save_summaries_steps` is set to `None`, then the default summary saver
isn't used.
config: An instance of `tf.compat.v1.ConfigProto`.
max_wait_secs: Maximum time workers should wait for the session to become
available. This should be kept relatively short to help detect incorrect
code, but sometimes may need to be increased if the chief takes a while to
start up.
run_metadata: A [`RunMetadata`] protocol buffer.
Returns:
the value of the loss function after training.
Raises:
ValueError: if `logdir` is `None` and either `save_checkpoint_secs` or
`save_summaries_steps` is not `None`.
"""
if logdir is None and is_chief:
if save_summaries_steps:
raise ValueError(
'logdir cannot be None when save_summaries_steps is not None')
if save_checkpoint_secs:
raise ValueError(
'logdir cannot be None when save_checkpoint_secs is not None')
with monitored_session.MonitoredTrainingSession(
master=master,
is_chief=is_chief,
checkpoint_dir=logdir,
scaffold=scaffold,
hooks=hooks,
chief_only_hooks=chief_only_hooks,
save_checkpoint_secs=save_checkpoint_secs,
save_summaries_steps=save_summaries_steps,
config=config,
max_wait_secs=max_wait_secs) as session:
loss = None
while not session.should_stop():
loss = session.run(train_op, run_metadata=run_metadata)
return loss
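# Illustrative sketch only (not part of the original file): wiring the helpers
# above together with gradient-norm clipping. `my_total_loss` and `my_log_dir`
# are hypothetical placeholders supplied by the caller, and the optimizer
# choice is arbitrary.
def _example_train_with_clipping(my_total_loss, my_log_dir):
  """Sketch: clip gradients to norm 3 and run the monitored training loop."""
  # Local import so the sketch stays self-contained.
  from tensorflow.python.training import gradient_descent
  optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
  train_op = create_train_op(
      my_total_loss,
      optimizer,
      transform_grads_fn=clip_gradient_norms_fn(3.0))
  # Returns the final loss value once the session decides to stop.
  return train(train_op, my_log_dir)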
|
tensorflow-master
|
tensorflow/contrib/training/python/training/training.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy
from tensorflow.contrib.training.python.training import resample
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class ResampleTest(test.TestCase):
"""Tests that resampling runs and outputs are close to expected values."""
def testRepeatRange(self):
cases = [
([], []),
([0], []),
([1], [0]),
([1, 0], [0]),
([0, 1], [1]),
([3], [0, 0, 0]),
([0, 1, 2, 3], [1, 2, 2, 3, 3, 3]),
]
with self.cached_session() as sess:
for inputs, expected in cases:
array_inputs = numpy.array(inputs, dtype=numpy.int32)
actual = sess.run(resample._repeat_range(array_inputs))
self.assertAllEqual(actual, expected)
def testRoundtrip(self, rate=0.25, count=5, n=500):
"""Tests `resample(x, weights)` and resample(resample(x, rate), 1/rate)`."""
foo = self.get_values(count)
bar = self.get_values(count)
weights = self.get_weights(count)
resampled_in, rates = resample.weighted_resample(
[foo, bar], constant_op.constant(weights), rate, seed=123)
resampled_back_out = resample.resample_at_rate(
resampled_in, 1.0 / rates, seed=456)
init = control_flow_ops.group(variables.local_variables_initializer(),
variables.global_variables_initializer())
with self.cached_session() as s:
s.run(init) # initialize
# outputs
counts_resampled = collections.Counter()
counts_reresampled = collections.Counter()
for _ in range(n):
resampled_vs, reresampled_vs = s.run([resampled_in, resampled_back_out])
self.assertAllEqual(resampled_vs[0], resampled_vs[1])
self.assertAllEqual(reresampled_vs[0], reresampled_vs[1])
for v in resampled_vs[0]:
counts_resampled[v] += 1
for v in reresampled_vs[0]:
counts_reresampled[v] += 1
# assert that resampling worked as expected
self.assert_expected(weights, rate, counts_resampled, n)
# and that re-resampling gives the approx identity.
self.assert_expected(
[1.0 for _ in weights],
1.0,
counts_reresampled,
n,
abs_delta=0.1 * n * count)
def testCorrectRates(self, rate=0.25, count=10, n=500, rtol=0.1):
"""Tests that the rates returned by weighted_resample are correct."""
# The approach here is to verify that:
# - sum(1/rate) approximates the size of the original collection
# - sum(1/rate * value) approximates the sum of the original inputs,
# - sum(1/rate * value)/sum(1/rate) approximates the mean.
vals = self.get_values(count)
weights = self.get_weights(count)
resampled, rates = resample.weighted_resample([vals],
constant_op.constant(weights),
rate)
invrates = 1.0 / rates
init = control_flow_ops.group(variables.local_variables_initializer(),
variables.global_variables_initializer())
expected_sum_op = math_ops.reduce_sum(vals)
with self.cached_session() as s:
s.run(init)
expected_sum = n * s.run(expected_sum_op)
weight_sum = 0.0
weighted_value_sum = 0.0
for _ in range(n):
val, inv_rate = s.run([resampled[0], invrates])
weight_sum += sum(inv_rate)
weighted_value_sum += sum(val * inv_rate)
# sum(inv_rate) ~= N*count:
expected_count = count * n
self.assertAlmostEqual(
expected_count, weight_sum, delta=(rtol * expected_count))
# sum(vals) * n ~= weighted_sum(resampled, 1.0/weights)
self.assertAlmostEqual(
expected_sum, weighted_value_sum, delta=(rtol * expected_sum))
# Mean ~= weighted mean:
expected_mean = expected_sum / float(n * count)
self.assertAlmostEqual(
expected_mean,
weighted_value_sum / weight_sum,
delta=(rtol * expected_mean))
def testZeroRateUnknownShapes(self, count=10):
"""Tests that resampling runs with completely runtime shapes."""
# Use placeholders without shapes set:
vals = array_ops.placeholder(dtype=dtypes.int32)
rates = array_ops.placeholder(dtype=dtypes.float32)
resampled = resample.resample_at_rate([vals], rates)
with self.cached_session() as s:
rs, = s.run(resampled, {
vals: list(range(count)),
rates: numpy.zeros(
shape=[count], dtype=numpy.float32)
})
self.assertEqual(rs.shape, (0,))
def testDtypes(self, count=10):
"""Test that we can define the ops with float64 weights."""
vals = self.get_values(count)
weights = math_ops.cast(self.get_weights(count), dtypes.float64)
# should not error:
resample.resample_at_rate([vals], weights)
resample.weighted_resample(
[vals], weights, overall_rate=math_ops.cast(1.0, dtypes.float64))
def get_weights(self, n, mean=10.0, stddev=5):
"""Returns random positive weight values."""
assert mean > 0, 'Weights have to be positive.'
results = []
while len(results) < n:
v = numpy.random.normal(mean, stddev)
if v > 0:
results.append(v)
return results
def get_values(self, n):
return constant_op.constant(list(range(n)))
def assert_expected(self,
weights,
overall_rate,
counts,
n,
tol=2.0,
abs_delta=0):
# Overall, we expect sum(counts) to be `overall_rate` * n *
# len(weights)... with a stddev on that expectation equivalent to
# performing (n * len(weights)) trials each with probability of
# overall_rate.
expected_overall_count = len(weights) * n * overall_rate
actual_overall_count = sum(counts.values())
stddev = math.sqrt(len(weights) * n * overall_rate * (1 - overall_rate))
self.assertAlmostEqual(
expected_overall_count,
actual_overall_count,
delta=(stddev * tol + abs_delta))
# And we can form a similar expectation for each item -- it should
# appear in the results a number of times proportional to its
# weight, which is similar to performing `expected_overall_count`
# trials each with a probability of weight/weight_sum.
weight_sum = sum(weights)
fractions = [w / weight_sum for w in weights]
expected_counts = [expected_overall_count * f for f in fractions]
stddevs = [
math.sqrt(expected_overall_count * f * (1 - f)) for f in fractions
]
for i in range(len(expected_counts)):
expected_count = expected_counts[i]
actual_count = counts[i]
self.assertAlmostEqual(
expected_count, actual_count, delta=(stddevs[i] * tol + abs_delta))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/training/python/training/resample_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module that implements sparsemax and sparsemax loss, see [1].
[1]: https://arxiv.org/abs/1602.02068
## Sparsemax
@@sparsemax
@@sparsemax_loss
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.sparsemax.python.ops.sparsemax import sparsemax
from tensorflow.contrib.sparsemax.python.ops.sparsemax_loss \
import sparsemax_loss
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['sparsemax', 'sparsemax_loss']
remove_undocumented(__name__, _allowed_symbols)
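# Illustrative sketch only (not part of the original file): how `sparsemax`
# and `sparsemax_loss` compose on a small batch of logits. The constants
# below are hypothetical.
def _example_sparsemax_usage():
  """Sketch: sparse probabilities and their loss against one-hot labels."""
  from tensorflow.python.framework import constant_op  # local import for the sketch
  logits = constant_op.constant([[1.0, 2.0, 3.0]])
  labels = constant_op.constant([[0.0, 0.0, 1.0]])
  # Rows of `probabilities` sum to 1 and may contain exact zeros.
  probabilities = sparsemax(logits)
  loss = sparsemax_loss(logits, probabilities, labels)
  return probabilities, loss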
|
tensorflow-master
|
tensorflow/contrib/sparsemax/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparsemaxLossOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.sparsemax import sparsemax, sparsemax_loss
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
test_obs = 10
class SparsemaxLossTest(test.TestCase):
def _np_sparsemax(self, z):
z = z - np.mean(z, axis=1)[:, np.newaxis]
# sort z
z_sorted = np.sort(z, axis=1)[:, ::-1]
# calculate k(z)
z_cumsum = np.cumsum(z_sorted, axis=1)
k = np.arange(1, z.shape[1] + 1)
z_check = 1 + k * z_sorted > z_cumsum
# use argmax to get the index by row as .nonzero() doesn't
# take an axis argument. np.argmax returns the first index, but the last
# index is required here; use np.flip to get the last index and
# `z.shape[axis]` to compensate for np.flip afterwards.
k_z = z.shape[1] - np.argmax(z_check[:, ::-1], axis=1)
# calculate tau(z)
tau_sum = z_cumsum[np.arange(0, z.shape[0]), k_z - 1]
tau_z = ((tau_sum - 1) / k_z).reshape(-1, 1)
# calculate p
return np.maximum(0, z - tau_z)
def _np_sparsemax_loss(self, z, q):
z = z - np.mean(z, axis=1)[:, np.newaxis]
# Calculate q^T * z
z_k = np.sum(q * z, axis=1)
# calculate sum over S(z)
p = self._np_sparsemax(z)
s = p > 0
# z_i^2 - tau(z)^2 = p_i (2 * z_i - p_i) for i \in S(z)
S_sum = np.sum(s * p * (2 * z - p), axis=1)
# because q is binary, sum([q_1^2, q_2^2, ...]) is just sum(q)
q_norm = np.sum(q, axis=1)
return -z_k + 0.5 * S_sum + 0.5 * q_norm
def _np_sparsemax_loss_grad(self, z, q):
# chain rule
grad = 1
return grad * (-q + self._np_sparsemax(z))
def _tf_sparsemax(self, z, dtype, use_gpu):
with self.test_session(use_gpu=use_gpu):
tf_sparsemax_op = sparsemax(z.astype(dtype))
tf_sparsemax_out = tf_sparsemax_op.eval()
return tf_sparsemax_op, tf_sparsemax_out
def _tf_sparsemax_loss(self, z, q, dtype, use_gpu):
z = z.astype(dtype)
q = q.astype(dtype)
with self.test_session(use_gpu=use_gpu):
tf_sparsemax_op = sparsemax(z)
tf_loss_op = sparsemax_loss(z, tf_sparsemax_op, q)
tf_loss_out = tf_loss_op.eval()
return tf_loss_op, tf_loss_out
def _test_sparsemax_loss_against_numpy(self, dtype, random, use_gpu):
"""check sparsemax-loss kernel against numpy"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
q = np.zeros((test_obs, 10))
q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1
tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)
np_loss = self._np_sparsemax_loss(z, q).astype(dtype)
self.assertAllCloseAccordingToType(
np_loss, tf_loss_out, half_atol=1e-2, half_rtol=5e-3)
self.assertShapeEqual(np_loss, tf_loss_op)
def _test_sparsemax_loss_of_nan(self, dtype, random, use_gpu):
"""check sparsemax-loss transfers nan"""
q = np.asarray([[0, 0, 1], [0, 0, 1], [0, 0, 1]])
z_nan = np.asarray([[0, np.nan, 0], [0, np.nan, np.nan],
[np.nan, np.nan, np.nan]]).astype(dtype)
_, tf_loss_nan = self._tf_sparsemax_loss(z_nan, q, dtype, use_gpu)
self.assertAllCloseAccordingToType([np.nan, np.nan, np.nan], tf_loss_nan)
def _test_sparsemax_loss_of_inf(self, dtype, random, use_gpu):
"""check sparsemax-loss is infinity safe"""
q = np.asarray([[0, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1]])
z_neg = np.asarray([
[0, -np.inf, 0],
[0, -np.inf, -np.inf],
[-np.inf, -np.inf, 0],
[-np.inf, -np.inf, -np.inf],
]).astype(dtype)
z_pos = np.asarray([[0, np.inf, 0], [0, np.inf,
np.inf], [np.inf, np.inf, 0],
[np.inf, np.inf, np.inf]]).astype(dtype)
z_mix = np.asarray([[0, np.inf, 0], [0, np.inf, -np.inf],
[-np.inf, np.inf, 0], [-np.inf, np.inf,
-np.inf]]).astype(dtype)
_, tf_loss_neg = self._tf_sparsemax_loss(z_neg, q, dtype, use_gpu)
self.assertAllCloseAccordingToType([0.25, np.inf, 0, np.nan], tf_loss_neg)
_, tf_loss_pos = self._tf_sparsemax_loss(z_pos, q, dtype, use_gpu)
self.assertAllCloseAccordingToType([np.nan, np.nan, np.nan, np.nan],
tf_loss_pos)
_, tf_loss_mix = self._tf_sparsemax_loss(z_mix, q, dtype, use_gpu)
self.assertAllCloseAccordingToType([np.nan, np.nan, np.nan, np.nan],
tf_loss_mix)
def _test_constant_add(self, dtype, random, use_gpu):
"""check sparsemax-loss proposition 3"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
c = random.uniform(low=-3, high=3, size=(test_obs, 1))
q = np.zeros((test_obs, 10))
q[np.arange(0, test_obs), np.random.randint(0, 10, size=test_obs)] = 1
_, tf_loss_zpc = self._tf_sparsemax_loss(z + c, q, dtype, use_gpu)
_, tf_loss_z = self._tf_sparsemax_loss(z, q, dtype, use_gpu)
self.assertAllCloseAccordingToType(
tf_loss_zpc,
tf_loss_z,
float_atol=5e-6,
float_rtol=5e-6,
half_atol=1e-2,
half_rtol=1e-2)
def _test_sparsemax_loss_positive(self, dtype, random, use_gpu):
"""check sparsemax-loss proposition 4"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
q = np.zeros((test_obs, 10))
q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1
tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)
self.assertAllCloseAccordingToType(np.abs(tf_loss_out), tf_loss_out)
self.assertShapeEqual(np.zeros(test_obs), tf_loss_op)
def _test_sparsemax_loss_zero(self, dtype, random, use_gpu):
"""check sparsemax-loss proposition 5"""
# construct z and q, such that z_k >= 1 + max_{j!=k} z_j holds for
# delta_0 = 1.
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
z[:, 0] = np.max(z, axis=1) + 1.05
q = np.zeros((test_obs, 10))
q[:, 0] = 1
tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)
tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype, use_gpu)
self.assertAllCloseAccordingToType(np.zeros(test_obs), tf_loss_out)
self.assertShapeEqual(np.zeros(test_obs), tf_loss_op)
self.assertAllCloseAccordingToType(q, tf_sparsemax_out)
self.assertShapeEqual(q, tf_sparsemax_op)
def _test_gradient_against_estimate(self, dtype, random, use_gpu):
"""check sparsemax-loss Rop, against estimated-loss Rop"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
q = np.zeros((test_obs, 10)).astype(dtype)
q[np.arange(0, test_obs), np.random.randint(0, 10, size=test_obs)] = 1
logits = array_ops.placeholder(dtype, name='z')
sparsemax_op = sparsemax(logits)
loss_op = sparsemax_loss(logits, sparsemax_op, q)
with self.test_session(use_gpu=use_gpu):
err = gradient_checker.compute_gradient_error(
logits, z.shape, loss_op, (test_obs,), x_init_value=z, delta=1e-9)
self.assertLess(err, 1e-4)
def _test_gradient_against_numpy(self, dtype, random, use_gpu):
"""check sparsemax-loss Rop, against numpy Rop"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
q = np.zeros((test_obs, 10))
q[np.arange(0, test_obs), np.random.randint(0, 10, size=test_obs)] = 1
logits = constant_op.constant(z.astype(dtype), name='z')
sparsemax_op = sparsemax(logits)
loss_op = sparsemax_loss(logits, sparsemax_op, q.astype(dtype))
loss_grad_op = gradients_impl.gradients(loss_op, [logits])[0]
with self.test_session(use_gpu=use_gpu):
tf_grad = loss_grad_op.eval()
np_grad = self._np_sparsemax_loss_grad(z, q).astype(dtype)
self.assertAllCloseAccordingToType(
np_grad, tf_grad, half_atol=1e-2, half_rtol=5e-3)
self.assertShapeEqual(np_grad, loss_grad_op)
def _test_dtype(self, dtype):
random = np.random.RandomState(1)
self._test_sparsemax_loss_against_numpy(dtype, random, use_gpu=False)
self._test_sparsemax_loss_of_nan(dtype, random, use_gpu=False)
self._test_sparsemax_loss_of_inf(dtype, random, use_gpu=False)
self._test_constant_add(dtype, random, use_gpu=False)
self._test_sparsemax_loss_positive(dtype, random, use_gpu=False)
self._test_sparsemax_loss_zero(dtype, random, use_gpu=False)
# sparsemax is not a smooth function so gradient estimation is only
# possible for float64.
if dtype == 'float64':
self._test_gradient_against_estimate(dtype, random, use_gpu=False)
self._test_gradient_against_numpy(dtype, random, use_gpu=False)
def testFloat(self):
self._test_dtype('float32')
def testDouble(self):
self._test_dtype('float64')
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/sparsemax/python/kernel_tests/sparsemax_loss_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparsemaxOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.sparsemax import sparsemax
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
test_obs = 10
class SparsemaxTest(test.TestCase):
def _np_sparsemax(self, z):
z = z - np.mean(z, axis=1)[:, np.newaxis]
# sort z
z_sorted = np.sort(z, axis=1)[:, ::-1]
# calculate k(z)
z_cumsum = np.cumsum(z_sorted, axis=1)
k = np.arange(1, z.shape[1] + 1)
z_check = 1 + k * z_sorted > z_cumsum
# use argmax to get the index by row as .nonzero() doesn't
# take an axis argument. np.argmax returns the first index, but the last
# index is required here; use np.flip to get the last index and
# `z.shape[axis]` to compensate for np.flip afterwards.
k_z = z.shape[1] - np.argmax(z_check[:, ::-1], axis=1)
# calculate tau(z)
tau_sum = z_cumsum[np.arange(0, z.shape[0]), k_z - 1]
tau_z = ((tau_sum - 1) / k_z).reshape(-1, 1)
# calculate p
return np.maximum(0, z - tau_z)
def _np_sparsemax_grad(self, z):
# chain rule
grad = np.ones_like(z)
# Construct S(z)
probability = self._np_sparsemax(z)
support = probability > 0
# Calculate \hat{v}, which will be a vector (scalar for each z)
v_hat = np.sum(grad * support, axis=1) / np.sum(support, axis=1)
# Calculates J(z) * v
return support * (grad - v_hat[:, np.newaxis])
def _tf_sparsemax(self, z, dtype, use_gpu):
with self.test_session(use_gpu=use_gpu):
tf_sparsemax_op = sparsemax(z.astype(dtype))
tf_sparsemax_out = tf_sparsemax_op.eval()
return tf_sparsemax_op, tf_sparsemax_out
def _test_sparsemax_against_numpy(self, dtype, random, use_gpu):
"""check sparsemax kernel against numpy"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype, use_gpu)
p_sparemax = self._np_sparsemax(z).astype(dtype)
self.assertAllCloseAccordingToType(
p_sparemax, tf_sparsemax_out, half_atol=5e-3)
self.assertShapeEqual(p_sparemax, tf_sparsemax_op)
def _test_sparsemax_of_nan(self, dtype, random, use_gpu):
"""check sparsemax transfers nan"""
z_nan = np.asarray([
[0, np.nan, 0],
[0, np.nan, np.nan],
[np.nan, np.nan, np.nan],
]).astype(dtype)
_, tf_sparsemax_nan = self._tf_sparsemax(z_nan, dtype, use_gpu)
self.assertAllCloseAccordingToType(
[[np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]], tf_sparsemax_nan)
def _test_sparsemax_of_inf(self, dtype, random, use_gpu):
"""check sparsemax is infinity safe"""
z_neg = np.asarray([
[0, -np.inf, 0],
[0, -np.inf, -np.inf],
[-np.inf, -np.inf, -np.inf],
]).astype(dtype)
z_pos = np.asarray([[0, np.inf, 0], [0, np.inf, np.inf],
[np.inf, np.inf, np.inf]]).astype(dtype)
z_mix = np.asarray([[0, np.inf, 0], [0, np.inf, -np.inf],
[-np.inf, np.inf, -np.inf]]).astype(dtype)
_, tf_sparsemax_neg = self._tf_sparsemax(z_neg, dtype, use_gpu)
self.assertAllCloseAccordingToType(
[[0.5, 0, 0.5], [1, 0, 0], [np.nan, np.nan, np.nan]], tf_sparsemax_neg)
_, tf_sparsemax_pos = self._tf_sparsemax(z_pos, dtype, use_gpu)
self.assertAllCloseAccordingToType(
[[np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]], tf_sparsemax_pos)
_, tf_sparsemax_mix = self._tf_sparsemax(z_mix, dtype, use_gpu)
self.assertAllCloseAccordingToType(
[[np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]], tf_sparsemax_mix)
def _test_sparsemax_of_zero(self, dtype, random, use_gpu):
"""check sparsemax proposition 1, part 1"""
z = np.zeros((1, 10))
tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype, use_gpu)
    p_sparsemax = np.ones_like(z, dtype=dtype) / z.size
    self.assertAllCloseAccordingToType(p_sparsemax, tf_sparsemax_out)
    self.assertShapeEqual(p_sparsemax, tf_sparsemax_op)
def _test_sparsemax_of_to_inf(self, dtype, random, use_gpu):
"""check sparsemax proposition 1, part 2"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
    # assume |A(z)| = 1, as z is drawn from a continuous distribution
z_sort_arg = np.argsort(z, axis=1)[:, ::-1]
z_sort = np.sort(z, axis=-1)[:, ::-1]
gamma_z = z_sort[:, 0] - z_sort[:, 1]
epsilon = (0.99 * gamma_z * 1).reshape(-1, 1)
# construct the expected 1_A(z) array
p_expected = np.zeros((test_obs, 10), dtype=dtype)
p_expected[np.arange(0, test_obs), z_sort_arg[:, 0]] = 1
tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax((1 / epsilon) * z,
dtype, use_gpu)
self.assertAllCloseAccordingToType(p_expected, tf_sparsemax_out)
self.assertShapeEqual(p_expected, tf_sparsemax_op)
def _test_constant_add(self, dtype, random, use_gpu):
"""check sparsemax proposition 2"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
c = random.uniform(low=-3, high=3, size=(test_obs, 1)).astype(dtype)
_, tf_sparsemax_zpc = self._tf_sparsemax(z + c, dtype, use_gpu)
_, tf_sparsemax_z = self._tf_sparsemax(z, dtype, use_gpu)
self.assertAllCloseAccordingToType(
tf_sparsemax_zpc, tf_sparsemax_z, half_atol=5e-3)
def _test_permutation(self, dtype, random, use_gpu):
"""check sparsemax proposition 3"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
_, p = self._tf_sparsemax(z, dtype, use_gpu)
for i in range(test_obs):
per = random.permutation(10)
tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(
z[i, per].reshape(1, -1), dtype, use_gpu)
p_expected = p[i, per].reshape(1, -1)
self.assertAllCloseAccordingToType(
p_expected, tf_sparsemax_out, half_atol=5e-3)
self.assertShapeEqual(p_expected, tf_sparsemax_op)
  def _test_difference(self, dtype, random, use_gpu):
"""check sparsemax proposition 4"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
_, p = self._tf_sparsemax(z, dtype, use_gpu)
etol = {'float16': 1e-2, 'float32': 1e-6, 'float64': 1e-9}[dtype]
for val in range(0, test_obs):
for i in range(0, 10):
for j in range(0, 10):
          # check the condition; the opposite pair will be checked anyway
if z[val, i] > z[val, j]:
continue
self.assertTrue(
0 <= p[val, j] - p[val, i] <= z[val, j] - z[val, i] + etol,
'0 <= %.10f <= %.10f' % (p[val, j] - p[val, i],
z[val, j] - z[val, i] + etol))
  def _test_two_dimensional(self, dtype, random, use_gpu):
    """check the two-dimensional sparsemax case"""
t = np.linspace(-2, 2, test_obs, dtype=dtype)
z = np.vstack([t, np.zeros(test_obs, dtype=dtype)]).T
tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype, use_gpu)
p0_expected = np.select([t < -1, t <= 1, t > 1], [0, (t + 1) / 2, 1])
self.assertAllCloseAccordingToType(p0_expected, tf_sparsemax_out[:, 0])
self.assertAllCloseAccordingToType(1 - p0_expected, tf_sparsemax_out[:, 1])
self.assertShapeEqual(z, tf_sparsemax_op)
def _test_gradient_against_estimate(self, dtype, random, use_gpu):
"""check sparsemax Rop, against estimated Rop"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
logits = array_ops.placeholder(dtype, name='z')
sparsemax_op = sparsemax(logits)
with self.test_session(use_gpu=use_gpu):
err = gradient_checker.compute_gradient_error(
logits, z.shape, sparsemax_op, z.shape, x_init_value=z, delta=1e-9)
self.assertLess(err, 1e-4)
def _test_gradient_against_numpy(self, dtype, random, use_gpu):
"""check sparsemax Rop, against numpy Rop"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
logits = constant_op.constant(z, name='z')
sparsemax_op = sparsemax(logits)
sparsemax_grad_op = gradients_impl.gradients(sparsemax_op, [logits])[0]
with self.test_session(use_gpu=use_gpu):
tf_grad = sparsemax_grad_op.eval()
np_grad = self._np_sparsemax_grad(z)
self.assertAllCloseAccordingToType(np_grad, tf_grad)
self.assertShapeEqual(np_grad, sparsemax_grad_op)
def _test_dtype(self, dtype):
random = np.random.RandomState(1)
self._test_sparsemax_against_numpy(dtype, random, use_gpu=False)
self._test_sparsemax_of_nan(dtype, random, use_gpu=False)
self._test_sparsemax_of_inf(dtype, random, use_gpu=False)
self._test_sparsemax_of_zero(dtype, random, use_gpu=False)
self._test_sparsemax_of_to_inf(dtype, random, use_gpu=False)
self._test_constant_add(dtype, random, use_gpu=False)
self._test_permutation(dtype, random, use_gpu=False)
    self._test_difference(dtype, random, use_gpu=False)
    self._test_two_dimensional(dtype, random, use_gpu=False)
    # sparsemax is not a smooth function, so gradient estimation is only
    # possible for float64.
if dtype == 'float64':
self._test_gradient_against_estimate(dtype, random, use_gpu=False)
self._test_gradient_against_numpy(dtype, random, use_gpu=False)
def testFloat(self):
self._test_dtype('float32')
def testDouble(self):
self._test_dtype('float64')
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/sparsemax/python/kernel_tests/sparsemax_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sparsemax op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
__all__ = ["sparsemax"]
def sparsemax(logits, name=None):
"""Computes sparsemax activations [1].
For each batch `i` and class `j` we have
$$sparsemax[i, j] = max(logits[i, j] - tau(logits[i, :]), 0)$$
[1]: https://arxiv.org/abs/1602.02068
Args:
logits: A `Tensor`. Must be one of the following types: `half`, `float32`,
`float64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `logits`.
"""
with ops.name_scope(name, "sparsemax", [logits]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
obs = array_ops.shape(logits)[0]
dims = array_ops.shape(logits)[1]
# In the paper, they call the logits z.
    # The mean(logits) could be subtracted from logits to make the algorithm
    # more numerically stable. The instability in this algorithm comes mostly
    # from z_cumsum; subtracting the mean keeps z_cumsum close to zero.
    # However, in practice the numerical instability issues are very minor,
    # and subtracting the mean causes extra issues with inf and nan input.
z = logits
# sort z
z_sorted, _ = nn.top_k(z, k=dims)
# calculate k(z)
z_cumsum = math_ops.cumsum(z_sorted, axis=1)
k = math_ops.range(
1, math_ops.cast(dims, logits.dtype) + 1, dtype=logits.dtype)
z_check = 1 + k * z_sorted > z_cumsum
    # Because the z_check vector is always [1,1,...1,0,0,...0], finding the
    # (index + 1) of the last `1` is the same as summing the number of 1s.
k_z = math_ops.reduce_sum(math_ops.cast(z_check, dtypes.int32), axis=1)
# calculate tau(z)
    # If there are inf values or all values are -inf, k_z will be zero. This
    # is mathematically invalid and will also cause the gather_nd to fail.
    # Prevent this issue for now by setting k_z = 1 if k_z = 0; it is then
    # fixed later (see p_safe) by returning p = nan. This results in the same
    # behavior as softmax.
k_z_safe = math_ops.maximum(k_z, 1)
indices = array_ops.stack([math_ops.range(0, obs), k_z_safe - 1], axis=1)
tau_sum = array_ops.gather_nd(z_cumsum, indices)
tau_z = (tau_sum - 1) / math_ops.cast(k_z, logits.dtype)
# calculate p
p = math_ops.maximum(
math_ops.cast(0, logits.dtype), z - tau_z[:, array_ops.newaxis])
# If k_z = 0 or if z = nan, then the input is invalid
p_safe = array_ops.where(
math_ops.logical_or(
math_ops.equal(k_z, 0), math_ops.is_nan(z_cumsum[:, -1])),
array_ops.fill([obs, dims], math_ops.cast(float("nan"), logits.dtype)),
p)
return p_safe
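# A minimal usage sketch (added for illustration; not part of the original
# module). The helper name below is hypothetical and only demonstrates the
# call signature: sparsemax maps each row of logits to a sparse probability
# distribution, e.g. [[1.0, 0.5, -1.0]] -> approximately [[0.75, 0.25, 0.0]].
def _sparsemax_usage_example():
  # Local imports keep the sketch self-contained.
  import numpy as np
  from tensorflow.python.framework import constant_op
  logits = constant_op.constant(
      np.array([[1.0, 0.5, -1.0]], dtype=np.float32))
  # Returns a Tensor of shape [1, 3] whose rows are non-negative and sum to 1.
  return sparsemax(logits)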
|
tensorflow-master
|
tensorflow/contrib/sparsemax/python/ops/sparsemax.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sparsemax Loss op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
__all__ = ["sparsemax_loss"]
def sparsemax_loss(logits, sparsemax, labels, name=None):
"""Computes sparsemax loss function [1].
[1]: https://arxiv.org/abs/1602.02068
Args:
logits: A `Tensor`. Must be one of the following types: `half`, `float32`,
`float64`.
sparsemax: A `Tensor`. Must have the same type as `logits`.
labels: A `Tensor`. Must have the same type as `logits`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `logits`.
"""
with ops.name_scope(name, "sparsemax_loss",
[logits, sparsemax, labels]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
sparsemax = ops.convert_to_tensor(sparsemax, name="sparsemax")
labels = ops.convert_to_tensor(labels, name="labels")
# In the paper, they call the logits z.
    # A constant could be subtracted from logits to make the algorithm
    # more numerically stable in theory. However, there is no major source
    # of numerical instability in this algorithm.
z = logits
# sum over support
# Use a conditional where instead of a multiplication to support z = -inf.
# If z = -inf, and there is no support (sparsemax = 0), a multiplication
# would cause 0 * -inf = nan, which is not correct in this case.
sum_s = array_ops.where(
math_ops.logical_or(sparsemax > 0, math_ops.is_nan(sparsemax)),
sparsemax * (z - 0.5 * sparsemax), array_ops.zeros_like(sparsemax))
# - z_k + ||q||^2
q_part = labels * (0.5 * labels - z)
    # Fix the case where labels = 0 and z = -inf, where q_part would
    # otherwise be 0 * -inf = nan. Since labels = 0, no cost for z = -inf
    # should be incurred.
    # The code below also covers the case where z = inf. However, in that
    # case the sparsemax will be nan, which means sum_s will also be nan,
    # so this case doesn't need additional special treatment.
q_part_safe = array_ops.where(
math_ops.logical_and(math_ops.equal(labels, 0), math_ops.is_inf(z)),
array_ops.zeros_like(z), q_part)
return math_ops.reduce_sum(sum_s + q_part_safe, axis=1)
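# A minimal usage sketch (added for illustration; not part of the original
# module). The helper name below is hypothetical; it only shows how the three
# arguments fit together: the loss takes the logits, the sparsemax
# probabilities computed from those same logits, and labels of the same shape
# (typically one-hot), and returns one loss value per row.
def _sparsemax_loss_usage_example():
  # Local imports keep the sketch self-contained; importing lazily also
  # avoids a circular import with the contrib package at module-load time.
  import numpy as np
  from tensorflow.contrib.sparsemax import sparsemax
  from tensorflow.python.framework import constant_op
  logits = constant_op.constant(
      np.array([[1.0, 0.5, -1.0]], dtype=np.float32))
  labels = constant_op.constant(
      np.array([[1.0, 0.0, 0.0]], dtype=np.float32))
  # Returns a Tensor of shape [1].
  return sparsemax_loss(logits, sparsemax(logits), labels)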
|
tensorflow-master
|
tensorflow/contrib/sparsemax/python/ops/sparsemax_loss.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental utilities for tf.feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,wildcard-import
from tensorflow.contrib.feature_column.python.feature_column.sequence_feature_column import *
from tensorflow.python.util.all_util import remove_undocumented
# pylint: enable=unused-import,line-too-long,wildcard-import
_allowed_symbols = [
'sequence_categorical_column_with_hash_bucket',
'sequence_categorical_column_with_identity',
'sequence_categorical_column_with_vocabulary_list',
'sequence_categorical_column_with_vocabulary_file',
'sequence_input_layer',
'sequence_numeric_column',
]
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/feature_column/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration test for sequence feature columns with SequenceExamples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import string
import tempfile
from google.protobuf import text_format
from tensorflow.contrib.feature_column.python.feature_column import sequence_feature_column as sfc
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.feature_column import feature_column as fc
from tensorflow.python.keras.layers import recurrent
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class SequenceFeatureColumnIntegrationTest(test.TestCase):
def _make_sequence_example(self):
example = example_pb2.SequenceExample()
example.context.feature['int_ctx'].int64_list.value.extend([5])
example.context.feature['float_ctx'].float_list.value.extend([123.6])
for val in range(0, 10, 2):
feat = feature_pb2.Feature()
feat.int64_list.value.extend([val] * val)
example.feature_lists.feature_list['int_list'].feature.extend([feat])
for val in range(1, 11, 2):
feat = feature_pb2.Feature()
feat.bytes_list.value.extend([compat.as_bytes(str(val))] * val)
example.feature_lists.feature_list['str_list'].feature.extend([feat])
return example
def _build_feature_columns(self):
col = fc._categorical_column_with_identity('int_ctx', num_buckets=100)
ctx_cols = [
fc._embedding_column(col, dimension=10),
fc._numeric_column('float_ctx')
]
identity_col = sfc.sequence_categorical_column_with_identity(
'int_list', num_buckets=10)
bucket_col = sfc.sequence_categorical_column_with_hash_bucket(
'bytes_list', hash_bucket_size=100)
seq_cols = [
fc._embedding_column(identity_col, dimension=10),
fc._embedding_column(bucket_col, dimension=20)
]
return ctx_cols, seq_cols
def test_sequence_example_into_input_layer(self):
examples = [_make_sequence_example().SerializeToString()] * 100
ctx_cols, seq_cols = self._build_feature_columns()
def _parse_example(example):
ctx, seq = parsing_ops.parse_single_sequence_example(
example,
context_features=fc.make_parse_example_spec(ctx_cols),
sequence_features=fc.make_parse_example_spec(seq_cols))
ctx.update(seq)
return ctx
ds = dataset_ops.Dataset.from_tensor_slices(examples)
ds = ds.map(_parse_example)
ds = ds.batch(20)
# Test on a single batch
features = ds.make_one_shot_iterator().get_next()
# Tile the context features across the sequence features
seq_layer, _ = sfc.sequence_input_layer(features, seq_cols)
ctx_layer = fc.input_layer(features, ctx_cols)
input_layer = sfc.concatenate_context_input(ctx_layer, seq_layer)
rnn_layer = recurrent.RNN(recurrent.SimpleRNNCell(10))
output = rnn_layer(input_layer)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
features_r = sess.run(features)
self.assertAllEqual(features_r['int_list'].dense_shape, [20, 3, 6])
output_r = sess.run(output)
self.assertAllEqual(output_r.shape, [20, 10])
class SequenceExampleParsingTest(test.TestCase):
def test_seq_ex_in_sequence_categorical_column_with_identity(self):
self._test_parsed_sequence_example(
'int_list', sfc.sequence_categorical_column_with_identity,
10, [3, 6], [2, 4, 6])
def test_seq_ex_in_sequence_categorical_column_with_hash_bucket(self):
self._test_parsed_sequence_example(
'bytes_list', sfc.sequence_categorical_column_with_hash_bucket,
10, [3, 4], [compat.as_bytes(x) for x in 'acg'])
def test_seq_ex_in_sequence_categorical_column_with_vocabulary_list(self):
self._test_parsed_sequence_example(
'bytes_list', sfc.sequence_categorical_column_with_vocabulary_list,
list(string.ascii_lowercase), [3, 4],
[compat.as_bytes(x) for x in 'acg'])
def test_seq_ex_in_sequence_categorical_column_with_vocabulary_file(self):
_, fname = tempfile.mkstemp()
with open(fname, 'w') as f:
f.write(string.ascii_lowercase)
self._test_parsed_sequence_example(
'bytes_list', sfc.sequence_categorical_column_with_vocabulary_file,
fname, [3, 4], [compat.as_bytes(x) for x in 'acg'])
def _test_parsed_sequence_example(
self, col_name, col_fn, col_arg, shape, values):
"""Helper function to check that each FeatureColumn parses correctly.
Args:
col_name: string, name to give to the feature column. Should match
the name that the column will parse out of the features dict.
col_fn: function used to create the feature column. For example,
sequence_numeric_column.
col_arg: second arg that the target feature column is expecting.
shape: the expected dense_shape of the feature after parsing into
a SparseTensor.
values: the expected values at index [0, 2, 6] of the feature
after parsing into a SparseTensor.
"""
example = _make_sequence_example()
columns = [
fc._categorical_column_with_identity('int_ctx', num_buckets=100),
fc._numeric_column('float_ctx'),
col_fn(col_name, col_arg)
]
context, seq_features = parsing_ops.parse_single_sequence_example(
example.SerializeToString(),
context_features=fc.make_parse_example_spec(columns[:2]),
sequence_features=fc.make_parse_example_spec(columns[2:]))
with self.cached_session() as sess:
ctx_result, seq_result = sess.run([context, seq_features])
self.assertEqual(list(seq_result[col_name].dense_shape), shape)
self.assertEqual(
list(seq_result[col_name].values[[0, 2, 6]]), values)
self.assertEqual(list(ctx_result['int_ctx'].dense_shape), [1])
self.assertEqual(ctx_result['int_ctx'].values[0], 5)
self.assertEqual(list(ctx_result['float_ctx'].shape), [1])
self.assertAlmostEqual(ctx_result['float_ctx'][0], 123.6, places=1)
_SEQ_EX_PROTO = """
context {
feature {
key: "float_ctx"
value {
float_list {
value: 123.6
}
}
}
feature {
key: "int_ctx"
value {
int64_list {
value: 5
}
}
}
}
feature_lists {
feature_list {
key: "bytes_list"
value {
feature {
bytes_list {
value: "a"
}
}
feature {
bytes_list {
value: "b"
value: "c"
}
}
feature {
bytes_list {
value: "d"
value: "e"
value: "f"
value: "g"
}
}
}
}
feature_list {
key: "float_list"
value {
feature {
float_list {
value: 1.0
}
}
feature {
float_list {
value: 3.0
value: 3.0
value: 3.0
}
}
feature {
float_list {
value: 5.0
value: 5.0
value: 5.0
value: 5.0
value: 5.0
}
}
}
}
feature_list {
key: "int_list"
value {
feature {
int64_list {
value: 2
value: 2
}
}
feature {
int64_list {
value: 4
value: 4
value: 4
value: 4
}
}
feature {
int64_list {
value: 6
value: 6
value: 6
value: 6
value: 6
value: 6
}
}
}
}
}
"""
def _make_sequence_example():
example = example_pb2.SequenceExample()
return text_format.Parse(_SEQ_EX_PROTO, example)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/feature_column/python/feature_column/sequence_feature_column_integration_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sequential_feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.feature_column.python.feature_column import sequence_feature_column as sfc
from tensorflow.python.feature_column import feature_column as fc
from tensorflow.python.feature_column import feature_column_lib as fc_lib
from tensorflow.python.feature_column.feature_column import _LazyBuilder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
from tensorflow.python.training import monitored_session
class SequenceInputLayerTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args_a': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2, 0, 1),
'dense_shape': (2, 2)},
'sparse_input_args_b': {
# example 0, ids [1]
# example 1, ids [2, 0]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (1, 2, 0),
'dense_shape': (2, 2)},
'expected_input_layer': [
# example 0, ids_a [2], ids_b [1]
[[5., 6., 14., 15., 16.], [0., 0., 0., 0., 0.]],
# example 1, ids_a [0, 1], ids_b [2, 0]
[[1., 2., 17., 18., 19.], [3., 4., 11., 12., 13.]],],
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
'sparse_input_args_a': {
# feature 0, ids [[2], [0, 1]]
# feature 1, ids [[0, 0], [1]]
'indices': (
(0, 0, 0), (0, 1, 0), (0, 1, 1),
(1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2, 0, 1, 0, 0, 1),
'dense_shape': (2, 2, 2)},
'sparse_input_args_b': {
# feature 0, ids [[1, 1], [1]]
# feature 1, ids [[2], [0]]
'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
'values': (1, 1, 1, 2, 0),
'dense_shape': (2, 2, 2)},
'expected_input_layer': [
# feature 0, [a: 2, -, b: 1, 1], [a: 0, 1, b: 1, -]
[[5., 6., 14., 15., 16.], [2., 3., 14., 15., 16.]],
# feature 1, [a: 0, 0, b: 2, -], [a: 1, -, b: 0, -]
[[1., 2., 17., 18., 19.], [3., 4., 11., 12., 13.]]],
'expected_sequence_length': [2, 2]},
)
def test_embedding_column(
self, sparse_input_args_a, sparse_input_args_b, expected_input_layer,
expected_sequence_length):
sparse_input_a = sparse_tensor.SparseTensorValue(**sparse_input_args_a)
sparse_input_b = sparse_tensor.SparseTensorValue(**sparse_input_args_b)
vocabulary_size = 3
embedding_dimension_a = 2
embedding_values_a = (
(1., 2.), # id 0
(3., 4.), # id 1
(5., 6.) # id 2
)
embedding_dimension_b = 3
embedding_values_b = (
(11., 12., 13.), # id 0
(14., 15., 16.), # id 1
(17., 18., 19.) # id 2
)
def _get_initializer(embedding_dimension, embedding_values):
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
return _initializer
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column_a = fc._embedding_column(
categorical_column_a,
dimension=embedding_dimension_a,
initializer=_get_initializer(embedding_dimension_a, embedding_values_a))
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_column_b = fc._embedding_column(
categorical_column_b,
dimension=embedding_dimension_b,
initializer=_get_initializer(embedding_dimension_b, embedding_values_b))
input_layer, sequence_length = sfc.sequence_input_layer(
features={
'aaa': sparse_input_a,
'bbb': sparse_input_b,
},
# Test that columns are reordered alphabetically.
feature_columns=[embedding_column_b, embedding_column_a])
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(
('sequence_input_layer/aaa_embedding/embedding_weights:0',
'sequence_input_layer/bbb_embedding/embedding_weights:0'),
tuple([v.name for v in global_vars]))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(embedding_values_a, global_vars[0].eval(session=sess))
self.assertAllEqual(embedding_values_b, global_vars[1].eval(session=sess))
self.assertAllEqual(expected_input_layer, input_layer.eval(session=sess))
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
def test_embedding_column_with_non_sequence_categorical(self):
"""Tests that error is raised for non-sequence embedding column."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = fc._categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column_a = fc._embedding_column(categorical_column_a, dimension=2)
with self.assertRaisesRegexp(
ValueError,
r'In embedding_column: aaa_embedding\. categorical_column must be of '
r'type _SequenceCategoricalColumn to use sequence_input_layer\.'):
_, _ = sfc.sequence_input_layer(
features={'aaa': sparse_input},
feature_columns=[embedding_column_a])
def test_shared_embedding_column(self):
vocabulary_size = 3
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [1]
# example 1, ids [2, 0]
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 0),
dense_shape=(2, 2))
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 4.), # id 1
(5., 6.) # id 2
)
def _get_initializer(embedding_dimension, embedding_values):
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
return _initializer
expected_input_layer = [
# example 0, ids_a [2], ids_b [1]
[[5., 6., 3., 4.], [0., 0., 0., 0.]],
# example 1, ids_a [0, 1], ids_b [2, 0]
[[1., 2., 5., 6.], [3., 4., 1., 2.]],
]
expected_sequence_length = [1, 2]
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
# Test that columns are reordered alphabetically.
shared_embedding_columns = fc_lib.shared_embedding_columns(
[categorical_column_b, categorical_column_a],
dimension=embedding_dimension,
initializer=_get_initializer(embedding_dimension, embedding_values))
input_layer, sequence_length = sfc.sequence_input_layer(
features={
'aaa': sparse_input_a,
'bbb': sparse_input_b,
},
feature_columns=shared_embedding_columns)
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(
('sequence_input_layer/aaa_bbb_shared_embedding/embedding_weights:0',),
tuple([v.name for v in global_vars]))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(embedding_values, global_vars[0].eval(session=sess))
self.assertAllEqual(expected_input_layer, input_layer.eval(session=sess))
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
def test_shared_embedding_column_with_non_sequence_categorical(self):
"""Tests that error is raised for non-sequence shared embedding column."""
vocabulary_size = 3
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = fc._categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc._categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
shared_embedding_columns = fc_lib.shared_embedding_columns(
[categorical_column_a, categorical_column_b], dimension=2)
with self.assertRaisesRegexp(
ValueError,
r'In embedding_column: aaa_shared_embedding\. categorical_column must '
r'be of type _SequenceCategoricalColumn to use sequence_input_layer\.'):
_, _ = sfc.sequence_input_layer(
features={
'aaa': sparse_input_a,
'bbb': sparse_input_b
},
feature_columns=shared_embedding_columns)
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args_a': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2, 0, 1),
'dense_shape': (2, 2)},
'sparse_input_args_b': {
# example 0, ids [1]
# example 1, ids [1, 0]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (1, 1, 0),
'dense_shape': (2, 2)},
'expected_input_layer': [
# example 0, ids_a [2], ids_b [1]
[[0., 0., 1., 0., 1.], [0., 0., 0., 0., 0.]],
# example 1, ids_a [0, 1], ids_b [1, 0]
[[1., 0., 0., 0., 1.], [0., 1., 0., 1., 0.]]],
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
'sparse_input_args_a': {
# feature 0, ids [[2], [0, 1]]
# feature 1, ids [[0, 0], [1]]
'indices': (
(0, 0, 0), (0, 1, 0), (0, 1, 1),
(1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2, 0, 1, 0, 0, 1),
'dense_shape': (2, 2, 2)},
'sparse_input_args_b': {
# feature 0, ids [[1, 1], [1]]
# feature 1, ids [[1], [0]]
'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
'values': (1, 1, 1, 1, 0),
'dense_shape': (2, 2, 2)},
'expected_input_layer': [
# feature 0, [a: 2, -, b: 1, 1], [a: 0, 1, b: 1, -]
[[0., 0., 1., 0., 2.], [1., 1., 0., 0., 1.]],
# feature 1, [a: 0, 0, b: 1, -], [a: 1, -, b: 0, -]
[[2., 0., 0., 0., 1.], [0., 1., 0., 1., 0.]]],
'expected_sequence_length': [2, 2]},
)
def test_indicator_column(
self, sparse_input_args_a, sparse_input_args_b, expected_input_layer,
expected_sequence_length):
sparse_input_a = sparse_tensor.SparseTensorValue(**sparse_input_args_a)
sparse_input_b = sparse_tensor.SparseTensorValue(**sparse_input_args_b)
vocabulary_size_a = 3
vocabulary_size_b = 2
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size_a)
indicator_column_a = fc._indicator_column(categorical_column_a)
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size_b)
indicator_column_b = fc._indicator_column(categorical_column_b)
input_layer, sequence_length = sfc.sequence_input_layer(
features={
'aaa': sparse_input_a,
'bbb': sparse_input_b,
},
# Test that columns are reordered alphabetically.
feature_columns=[indicator_column_b, indicator_column_a])
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(expected_input_layer, input_layer.eval(session=sess))
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
def test_indicator_column_with_non_sequence_categorical(self):
"""Tests that error is raised for non-sequence categorical column."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = fc._categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column_a = fc._indicator_column(categorical_column_a)
with self.assertRaisesRegexp(
ValueError,
r'In indicator_column: aaa_indicator\. categorical_column must be of '
r'type _SequenceCategoricalColumn to use sequence_input_layer\.'):
_, _ = sfc.sequence_input_layer(
features={'aaa': sparse_input},
feature_columns=[indicator_column_a])
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, values [0., 1]
# example 1, [10.]
'indices': ((0, 0), (0, 1), (1, 0)),
'values': (0., 1., 10.),
'dense_shape': (2, 2)},
'expected_input_layer': [
[[0.], [1.]],
[[10.], [0.]]],
'expected_sequence_length': [2, 1]},
{'testcase_name': '3D',
'sparse_input_args': {
# feature 0, ids [[20, 3], [5]]
# feature 1, ids [[3], [8]]
'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
'values': (20, 3, 5., 3., 8.),
'dense_shape': (2, 2, 2)},
'expected_input_layer': [
[[20.], [3.], [5.], [0.]],
[[3.], [0.], [8.], [0.]]],
'expected_sequence_length': [2, 2]},
)
def test_numeric_column(
self, sparse_input_args, expected_input_layer, expected_sequence_length):
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
numeric_column = sfc.sequence_numeric_column('aaa')
input_layer, sequence_length = sfc.sequence_input_layer(
features={'aaa': sparse_input},
feature_columns=[numeric_column])
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(expected_input_layer, input_layer.eval(session=sess))
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, values [0., 1., 2., 3., 4., 5., 6., 7.]
# example 1, [10., 11., 12., 13.]
'indices': ((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6),
(0, 7), (1, 0), (1, 1), (1, 2), (1, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 8)},
'expected_input_layer': [
# The output of numeric_column._get_dense_tensor should be flattened.
[[0., 1., 2., 3.], [4., 5., 6., 7.]],
[[10., 11., 12., 13.], [0., 0., 0., 0.]]],
'expected_sequence_length': [2, 1]},
{'testcase_name': '3D',
'sparse_input_args': {
# example 0, values [[0., 1., 2., 3.]], [[4., 5., 6., 7.]]
# example 1, [[10., 11., 12., 13.], []]
'indices': ((0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 0, 3),
(0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 1, 3),
(1, 0, 0), (1, 0, 1), (1, 0, 2), (1, 0, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 2, 4)},
'expected_input_layer': [
# The output of numeric_column._get_dense_tensor should be flattened.
[[0., 1., 2., 3.], [4., 5., 6., 7.]],
[[10., 11., 12., 13.], [0., 0., 0., 0.]]],
'expected_sequence_length': [2, 1]},
)
def test_numeric_column_multi_dim(
self, sparse_input_args, expected_input_layer, expected_sequence_length):
"""Tests sequence_input_layer for multi-dimensional numeric_column."""
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
numeric_column = sfc.sequence_numeric_column('aaa', shape=(2, 2))
input_layer, sequence_length = sfc.sequence_input_layer(
features={'aaa': sparse_input},
feature_columns=[numeric_column])
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(expected_input_layer, input_layer.eval(session=sess))
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
def test_sequence_length_not_equal(self):
"""Tests that an error is raised when sequence lengths are not equal."""
# Input a with sequence_length = [2, 1]
sparse_input_a = sparse_tensor.SparseTensorValue(
indices=((0, 0), (0, 1), (1, 0)),
values=(0., 1., 10.),
dense_shape=(2, 2))
# Input b with sequence_length = [1, 1]
sparse_input_b = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0)),
values=(1., 10.),
dense_shape=(2, 2))
numeric_column_a = sfc.sequence_numeric_column('aaa')
numeric_column_b = sfc.sequence_numeric_column('bbb')
_, sequence_length = sfc.sequence_input_layer(
features={
'aaa': sparse_input_a,
'bbb': sparse_input_b,
},
feature_columns=[numeric_column_a, numeric_column_b])
with monitored_session.MonitoredSession() as sess:
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[Condition x == y did not hold element-wise:\] '
r'\[x \(sequence_input_layer/aaa/sequence_length:0\) = \] \[2 1\] '
r'\[y \(sequence_input_layer/bbb/sequence_length:0\) = \] \[1 1\]'):
sess.run(sequence_length)
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, values [[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]]
# example 1, [[[10., 11.], [12., 13.]]]
'indices': ((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6),
(0, 7), (1, 0), (1, 1), (1, 2), (1, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 8)},
'expected_shape': [2, 2, 4]},
{'testcase_name': '3D',
'sparse_input_args': {
# example 0, values [[0., 1., 2., 3.]], [[4., 5., 6., 7.]]
# example 1, [[10., 11., 12., 13.], []]
'indices': ((0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 0, 3),
                       (0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 1, 3),
(1, 0, 0), (1, 0, 1), (1, 0, 2), (1, 0, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 2, 4)},
'expected_shape': [2, 2, 4]},
)
def test_static_shape_from_tensors_numeric(
self, sparse_input_args, expected_shape):
"""Tests that we return a known static shape when we have one."""
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
numeric_column = sfc.sequence_numeric_column('aaa', shape=(2, 2))
input_layer, _ = sfc.sequence_input_layer(
features={'aaa': sparse_input},
feature_columns=[numeric_column])
shape = input_layer.get_shape()
self.assertEqual(shape, expected_shape)
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
'indices': ((0, 0), (1, 0), (1, 1), (3, 0)),
'values': (2, 0, 1, 1),
'dense_shape': (4, 2)},
'expected_shape': [4, 2, 3]},
{'testcase_name': '3D',
'sparse_input_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
# example 2, ids []
# example 3, ids [[1], [0, 2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0),
(3, 0, 0), (3, 1, 0), (3, 1, 1)),
'values': (2, 0, 1, 2, 1, 0, 2),
'dense_shape': (4, 2, 2)},
'expected_shape': [4, 2, 3]}
)
def test_static_shape_from_tensors_indicator(
self, sparse_input_args, expected_shape):
"""Tests that we return a known static shape when we have one."""
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=3)
indicator_column = fc._indicator_column(categorical_column)
input_layer, _ = sfc.sequence_input_layer(
features={'aaa': sparse_input}, feature_columns=[indicator_column])
shape = input_layer.get_shape()
self.assertEqual(shape, expected_shape)
class ConcatenateContextInputTest(test.TestCase, parameterized.TestCase):
"""Tests the utility fn concatenate_context_input."""
def test_concatenate_context_input(self):
seq_input = ops.convert_to_tensor(np.arange(12).reshape(2, 3, 2))
context_input = ops.convert_to_tensor(np.arange(10).reshape(2, 5))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
input_layer = sfc.concatenate_context_input(context_input, seq_input)
expected = np.array([
[[0, 1, 0, 1, 2, 3, 4], [2, 3, 0, 1, 2, 3, 4], [4, 5, 0, 1, 2, 3, 4]],
[[6, 7, 5, 6, 7, 8, 9], [8, 9, 5, 6, 7, 8, 9], [10, 11, 5, 6, 7, 8, 9]]
], dtype=np.float32)
with monitored_session.MonitoredSession() as sess:
output = sess.run(input_layer)
self.assertAllEqual(expected, output)
@parameterized.named_parameters(
{'testcase_name': 'rank_lt_3',
'seq_input_arg': np.arange(100).reshape(10, 10)},
{'testcase_name': 'rank_gt_3',
'seq_input_arg': np.arange(100).reshape(5, 5, 2, 2)}
)
def test_sequence_input_throws_error(self, seq_input_arg):
seq_input = ops.convert_to_tensor(seq_input_arg)
context_input = ops.convert_to_tensor(np.arange(100).reshape(10, 10))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError, 'sequence_input must have rank 3'):
sfc.concatenate_context_input(context_input, seq_input)
@parameterized.named_parameters(
{'testcase_name': 'rank_lt_2',
'context_input_arg': np.arange(100)},
{'testcase_name': 'rank_gt_2',
'context_input_arg': np.arange(100).reshape(5, 5, 4)}
)
def test_context_input_throws_error(self, context_input_arg):
context_input = ops.convert_to_tensor(context_input_arg)
seq_input = ops.convert_to_tensor(np.arange(100).reshape(5, 5, 4))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError, 'context_input must have rank 2'):
sfc.concatenate_context_input(context_input, seq_input)
def test_integer_seq_input_throws_error(self):
seq_input = ops.convert_to_tensor(np.arange(100).reshape(5, 5, 4))
context_input = ops.convert_to_tensor(np.arange(100).reshape(10, 10))
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
with self.assertRaisesRegexp(
TypeError, 'sequence_input must have dtype float32'):
sfc.concatenate_context_input(context_input, seq_input)
def test_integer_context_input_throws_error(self):
seq_input = ops.convert_to_tensor(np.arange(100).reshape(5, 5, 4))
context_input = ops.convert_to_tensor(np.arange(100).reshape(10, 10))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
with self.assertRaisesRegexp(
TypeError, 'context_input must have dtype float32'):
sfc.concatenate_context_input(context_input, seq_input)
class InputLayerTest(test.TestCase):
"""Tests input_layer with sequence feature columns."""
def test_embedding_column(self):
"""Tests that error is raised for sequence embedding column."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column_a = fc._embedding_column(categorical_column_a, dimension=2)
with self.assertRaisesRegexp(
ValueError,
r'In embedding_column: aaa_embedding\. categorical_column must not be '
r'of type _SequenceCategoricalColumn\.'):
_ = fc.input_layer(
features={'aaa': sparse_input},
feature_columns=[embedding_column_a])
def test_indicator_column(self):
"""Tests that error is raised for sequence indicator column."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column_a = fc._indicator_column(categorical_column_a)
with self.assertRaisesRegexp(
ValueError,
r'In indicator_column: aaa_indicator\. categorical_column must not be '
r'of type _SequenceCategoricalColumn\.'):
_ = fc.input_layer(
features={'aaa': sparse_input},
feature_columns=[indicator_column_a])
def _assert_sparse_tensor_value(test_case, expected, actual):
_assert_sparse_tensor_indices_shape(test_case, expected, actual)
test_case.assertEqual(
np.array(expected.values).dtype, np.array(actual.values).dtype)
test_case.assertAllEqual(expected.values, actual.values)
def _assert_sparse_tensor_indices_shape(test_case, expected, actual):
test_case.assertEqual(np.int64, np.array(actual.indices).dtype)
test_case.assertAllEqual(expected.indices, actual.indices)
test_case.assertEqual(np.int64, np.array(actual.dense_shape).dtype)
test_case.assertAllEqual(expected.dense_shape, actual.dense_shape)
class SequenceCategoricalColumnWithIdentityTest(
test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (1, 2, 0),
'dense_shape': (2, 2)},
'expected_args': {
'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
'values': np.array((1, 2, 0), dtype=np.int64),
'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
'inputs_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': (6, 7, 8),
'dense_shape': (2, 2, 2)},
'expected_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': (6, 7, 8),
'dense_shape': (2, 2, 2)}}
)
def test_get_sparse_tensors(self, inputs_args, expected_args):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_identity('aaa', num_buckets=9)
id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
self.assertIsNone(id_weight_pair.weight_tensor)
with monitored_session.MonitoredSession() as sess:
_assert_sparse_tensor_value(
self, expected, id_weight_pair.id_tensor.eval(session=sess))
class SequenceCategoricalColumnWithHashBucketTest(
test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
'indices': ((0, 0), (1, 0), (1, 1)),
'values': ('omar', 'stringer', 'marlo'),
'dense_shape': (2, 2)},
'expected_args': {
'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
# Ignored to avoid hash dependence in test.
'values': np.array((0, 0, 0), dtype=np.int64),
'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
'inputs_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': ('omar', 'stringer', 'marlo'),
'dense_shape': (2, 2, 2)},
'expected_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
# Ignored to avoid hash dependence in test.
'values': np.array((0, 0, 0), dtype=np.int64),
'dense_shape': (2, 2, 2)}}
)
def test_get_sparse_tensors(self, inputs_args, expected_args):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_hash_bucket(
'aaa', hash_bucket_size=10)
id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
self.assertIsNone(id_weight_pair.weight_tensor)
with monitored_session.MonitoredSession() as sess:
_assert_sparse_tensor_indices_shape(
self, expected, id_weight_pair.id_tensor.eval(session=sess))
class SequenceCategoricalColumnWithVocabularyFileTest(
test.TestCase, parameterized.TestCase):
def _write_vocab(self, vocab_strings, file_name):
vocab_file = os.path.join(self.get_temp_dir(), file_name)
with open(vocab_file, 'w') as f:
f.write('\n'.join(vocab_strings))
return vocab_file
def setUp(self):
super(SequenceCategoricalColumnWithVocabularyFileTest, self).setUp()
vocab_strings = ['omar', 'stringer', 'marlo']
self._wire_vocabulary_file_name = self._write_vocab(vocab_strings,
'wire_vocabulary.txt')
self._wire_vocabulary_size = 3
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
'indices': ((0, 0), (1, 0), (1, 1)),
'values': ('marlo', 'skywalker', 'omar'),
'dense_shape': (2, 2)},
'expected_args': {
'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
'values': np.array((2, -1, 0), dtype=np.int64),
'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
'inputs_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': ('omar', 'skywalker', 'marlo'),
'dense_shape': (2, 2, 2)},
'expected_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': np.array((0, -1, 2), dtype=np.int64),
'dense_shape': (2, 2, 2)}}
)
def test_get_sparse_tensors(self, inputs_args, expected_args):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size)
id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
self.assertIsNone(id_weight_pair.weight_tensor)
with monitored_session.MonitoredSession() as sess:
_assert_sparse_tensor_value(
self, expected, id_weight_pair.id_tensor.eval(session=sess))
def test_get_sparse_tensors_dynamic_zero_length(self):
"""Tests _get_sparse_tensors with a dynamic sequence length."""
inputs = sparse_tensor.SparseTensorValue(
indices=np.zeros((0, 2)), values=[], dense_shape=(2, 0))
expected = sparse_tensor.SparseTensorValue(
indices=np.zeros((0, 3)),
values=np.array((), dtype=np.int64),
dense_shape=(2, 0, 1))
column = sfc.sequence_categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size)
input_placeholder_shape = list(inputs.dense_shape)
# Make second dimension (sequence length) dynamic.
input_placeholder_shape[1] = None
input_placeholder = array_ops.sparse_placeholder(
dtypes.string, shape=input_placeholder_shape)
id_weight_pair = column._get_sparse_tensors(
_LazyBuilder({'aaa': input_placeholder}))
self.assertIsNone(id_weight_pair.weight_tensor)
with monitored_session.MonitoredSession() as sess:
result = id_weight_pair.id_tensor.eval(
session=sess, feed_dict={input_placeholder: inputs})
_assert_sparse_tensor_value(
self, expected, result)
class SequenceCategoricalColumnWithVocabularyListTest(
test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
'indices': ((0, 0), (1, 0), (1, 1)),
'values': ('marlo', 'skywalker', 'omar'),
'dense_shape': (2, 2)},
'expected_args': {
'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
'values': np.array((2, -1, 0), dtype=np.int64),
'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
'inputs_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': ('omar', 'skywalker', 'marlo'),
'dense_shape': (2, 2, 2)},
'expected_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': np.array((0, -1, 2), dtype=np.int64),
'dense_shape': (2, 2, 2)}}
)
def test_get_sparse_tensors(self, inputs_args, expected_args):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'))
id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
self.assertIsNone(id_weight_pair.weight_tensor)
with monitored_session.MonitoredSession() as sess:
_assert_sparse_tensor_value(
self, expected, id_weight_pair.id_tensor.eval(session=sess))
class SequenceEmbeddingColumnTest(
test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
'indices': ((0, 0), (1, 0), (1, 1), (3, 0)),
'values': (2, 0, 1, 1),
'dense_shape': (4, 2)},
'expected': [
# example 0, ids [2]
[[7., 11.], [0., 0.]],
# example 1, ids [0, 1]
[[1., 2.], [3., 5.]],
# example 2, ids []
[[0., 0.], [0., 0.]],
# example 3, ids [1]
[[3., 5.], [0., 0.]]]},
{'testcase_name': '3D',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
# example 2, ids []
# example 3, ids [[1], [0, 2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0),
(3, 0, 0), (3, 1, 0), (3, 1, 1)),
'values': (2, 0, 1, 2, 1, 0, 2),
'dense_shape': (4, 2, 2)},
'expected': [
# example 0, ids [[2]]
[[7., 11.], [0., 0.]],
# example 1, ids [[0, 1], [2]]
[[2, 3.5], [7., 11.]],
# example 2, ids []
[[0., 0.], [0., 0.]],
# example 3, ids [[1], [0, 2]]
[[3., 5.], [4., 6.5]]]}
)
def test_get_sequence_dense_tensor(self, inputs_args, expected):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
vocabulary_size = 3
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc._embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer)
embedding_lookup, _ = embedding_column._get_sequence_dense_tensor(
_LazyBuilder({'aaa': inputs}))
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(
('embedding_weights:0',), tuple([v.name for v in global_vars]))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(embedding_values, global_vars[0].eval(session=sess))
self.assertAllEqual(expected, embedding_lookup.eval(session=sess))
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2, 0, 1),
'dense_shape': (2, 2)},
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2, 0, 1, 2),
'dense_shape': (2, 2, 2)},
'expected_sequence_length': [1, 2]}
)
def test_sequence_length(self, inputs_args, expected_sequence_length):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
vocabulary_size = 3
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc._embedding_column(categorical_column, dimension=2)
_, sequence_length = embedding_column._get_sequence_dense_tensor(
_LazyBuilder({'aaa': inputs}))
with monitored_session.MonitoredSession() as sess:
sequence_length = sess.run(sequence_length)
self.assertAllEqual(expected_sequence_length, sequence_length)
self.assertEqual(np.int64, sequence_length.dtype)
def test_sequence_length_with_empty_rows(self):
"""Tests _sequence_length when some examples do not have ids."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids []
# example 1, ids [2]
# example 2, ids [0, 1]
# example 3, ids []
# example 4, ids [1]
# example 5, ids []
indices=((1, 0), (2, 0), (2, 1), (4, 0)),
values=(2, 0, 1, 1),
dense_shape=(6, 2))
expected_sequence_length = [0, 1, 2, 0, 1, 0]
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc._embedding_column(categorical_column, dimension=2)
_, sequence_length = embedding_column._get_sequence_dense_tensor(
_LazyBuilder({'aaa': sparse_input}))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
class SequenceSharedEmbeddingColumnTest(test.TestCase):
def test_get_sequence_dense_tensor(self):
vocabulary_size = 3
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 1), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 2))
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [1]
# example 1, ids [0, 2]
# example 2, ids [0]
# example 3, ids []
indices=((0, 0), (1, 0), (1, 1), (2, 0)),
values=(1, 0, 2, 0),
dense_shape=(4, 2))
expected_lookups_a = [
# example 0, ids [2]
[[7., 11.], [0., 0.]],
# example 1, ids [0, 1]
[[1., 2.], [3., 5.]],
# example 2, ids []
[[0., 0.], [0., 0.]],
# example 3, ids [1]
[[3., 5.], [0., 0.]],
]
expected_lookups_b = [
# example 0, ids [1]
[[3., 5.], [0., 0.]],
# example 1, ids [0, 2]
[[1., 2.], [7., 11.]],
# example 2, ids [0]
[[1., 2.], [0., 0.]],
# example 3, ids []
[[0., 0.], [0., 0.]],
]
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
shared_embedding_columns = fc_lib.shared_embedding_columns(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
initializer=_initializer)
embedding_lookup_a = shared_embedding_columns[0]._get_sequence_dense_tensor(
_LazyBuilder({
'aaa': sparse_input_a
}))[0]
embedding_lookup_b = shared_embedding_columns[1]._get_sequence_dense_tensor(
_LazyBuilder({
'bbb': sparse_input_b
}))[0]
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(('embedding_weights:0',),
tuple([v.name for v in global_vars]))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(embedding_values, global_vars[0].eval(session=sess))
self.assertAllEqual(
expected_lookups_a, embedding_lookup_a.eval(session=sess))
self.assertAllEqual(
expected_lookups_b, embedding_lookup_b.eval(session=sess))
def test_sequence_length(self):
vocabulary_size = 3
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
expected_sequence_length_a = [1, 2]
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [0, 2]
# example 1, ids [1]
indices=((0, 0), (0, 1), (1, 0)),
values=(0, 2, 1),
dense_shape=(2, 2))
expected_sequence_length_b = [2, 1]
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
shared_embedding_columns = fc_lib.shared_embedding_columns(
[categorical_column_a, categorical_column_b], dimension=2)
sequence_length_a = shared_embedding_columns[0]._get_sequence_dense_tensor(
_LazyBuilder({
'aaa': sparse_input_a
}))[1]
sequence_length_b = shared_embedding_columns[1]._get_sequence_dense_tensor(
_LazyBuilder({
'bbb': sparse_input_b
}))[1]
with monitored_session.MonitoredSession() as sess:
sequence_length_a = sess.run(sequence_length_a)
self.assertAllEqual(expected_sequence_length_a, sequence_length_a)
self.assertEqual(np.int64, sequence_length_a.dtype)
sequence_length_b = sess.run(sequence_length_b)
self.assertAllEqual(expected_sequence_length_b, sequence_length_b)
self.assertEqual(np.int64, sequence_length_b.dtype)
def test_sequence_length_with_empty_rows(self):
"""Tests _sequence_length when some examples do not have ids."""
vocabulary_size = 3
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids []
# example 1, ids [2]
# example 2, ids [0, 1]
# example 3, ids []
# example 4, ids [1]
# example 5, ids []
indices=((1, 0), (2, 0), (2, 1), (4, 0)),
values=(2, 0, 1, 1),
dense_shape=(6, 2))
expected_sequence_length_a = [0, 1, 2, 0, 1, 0]
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids []
# example 2, ids []
# example 3, ids []
# example 4, ids [1]
# example 5, ids [0, 1]
indices=((0, 0), (4, 0), (5, 0), (5, 1)),
values=(2, 1, 0, 1),
dense_shape=(6, 2))
expected_sequence_length_b = [1, 0, 0, 0, 1, 2]
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
shared_embedding_columns = fc_lib.shared_embedding_columns(
[categorical_column_a, categorical_column_b], dimension=2)
sequence_length_a = shared_embedding_columns[0]._get_sequence_dense_tensor(
_LazyBuilder({
'aaa': sparse_input_a
}))[1]
sequence_length_b = shared_embedding_columns[1]._get_sequence_dense_tensor(
_LazyBuilder({
'bbb': sparse_input_b
}))[1]
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(
expected_sequence_length_a, sequence_length_a.eval(session=sess))
self.assertAllEqual(
expected_sequence_length_b, sequence_length_b.eval(session=sess))
class SequenceIndicatorColumnTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
'indices': ((0, 0), (1, 0), (1, 1), (3, 0)),
'values': (2, 0, 1, 1),
'dense_shape': (4, 2)},
'expected': [
# example 0, ids [2]
[[0., 0., 1.], [0., 0., 0.]],
# example 1, ids [0, 1]
[[1., 0., 0.], [0., 1., 0.]],
# example 2, ids []
[[0., 0., 0.], [0., 0., 0.]],
# example 3, ids [1]
[[0., 1., 0.], [0., 0., 0.]]]},
{'testcase_name': '3D',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
# example 2, ids []
# example 3, ids [[1], [2, 2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0),
(3, 0, 0), (3, 1, 0), (3, 1, 1)),
'values': (2, 0, 1, 2, 1, 2, 2),
'dense_shape': (4, 2, 2)},
'expected': [
# example 0, ids [[2]]
[[0., 0., 1.], [0., 0., 0.]],
# example 1, ids [[0, 1], [2]]
[[1., 1., 0.], [0., 0., 1.]],
# example 2, ids []
[[0., 0., 0.], [0., 0., 0.]],
# example 3, ids [[1], [2, 2]]
[[0., 1., 0.], [0., 0., 2.]]]}
)
def test_get_sequence_dense_tensor(self, inputs_args, expected):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
vocabulary_size = 3
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column = fc._indicator_column(categorical_column)
indicator_tensor, _ = indicator_column._get_sequence_dense_tensor(
_LazyBuilder({'aaa': inputs}))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(expected, indicator_tensor.eval(session=sess))
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2, 0, 1),
'dense_shape': (2, 2)},
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2, 0, 1, 2),
'dense_shape': (2, 2, 2)},
'expected_sequence_length': [1, 2]}
)
def test_sequence_length(self, inputs_args, expected_sequence_length):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
vocabulary_size = 3
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column = fc._indicator_column(categorical_column)
_, sequence_length = indicator_column._get_sequence_dense_tensor(
_LazyBuilder({'aaa': inputs}))
with monitored_session.MonitoredSession() as sess:
sequence_length = sess.run(sequence_length)
self.assertAllEqual(expected_sequence_length, sequence_length)
self.assertEqual(np.int64, sequence_length.dtype)
def test_sequence_length_with_empty_rows(self):
"""Tests _sequence_length when some examples do not have ids."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids []
# example 1, ids [2]
# example 2, ids [0, 1]
# example 3, ids []
# example 4, ids [1]
# example 5, ids []
indices=((1, 0), (2, 0), (2, 1), (4, 0)),
values=(2, 0, 1, 1),
dense_shape=(6, 2))
expected_sequence_length = [0, 1, 2, 0, 1, 0]
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column = fc._indicator_column(categorical_column)
_, sequence_length = indicator_column._get_sequence_dense_tensor(
_LazyBuilder({'aaa': sparse_input}))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
class SequenceNumericColumnTest(test.TestCase, parameterized.TestCase):
def test_defaults(self):
a = sfc.sequence_numeric_column('aaa')
self.assertEqual('aaa', a.key)
self.assertEqual('aaa', a.name)
self.assertEqual('aaa', a._var_scope_name)
self.assertEqual((1,), a.shape)
self.assertEqual(0., a.default_value)
self.assertEqual(dtypes.float32, a.dtype)
self.assertIsNone(a.normalizer_fn)
def test_shape_saved_as_tuple(self):
a = sfc.sequence_numeric_column('aaa', shape=[1, 2])
self.assertEqual((1, 2), a.shape)
def test_shape_must_be_positive_integer(self):
with self.assertRaisesRegexp(TypeError, 'shape dimensions must be integer'):
sfc.sequence_numeric_column('aaa', shape=[1.0])
with self.assertRaisesRegexp(
ValueError, 'shape dimensions must be greater than 0'):
sfc.sequence_numeric_column('aaa', shape=[0])
def test_dtype_is_convertible_to_float(self):
with self.assertRaisesRegexp(
ValueError, 'dtype must be convertible to float'):
sfc.sequence_numeric_column('aaa', dtype=dtypes.string)
def test_normalizer_fn_must_be_callable(self):
with self.assertRaisesRegexp(TypeError, 'must be a callable'):
sfc.sequence_numeric_column('aaa', normalizer_fn='NotACallable')
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
           # example 0, values [0., 1.]
           # example 1, values [10.]
'indices': ((0, 0), (0, 1), (1, 0)),
'values': (0., 1., 10.),
'dense_shape': (2, 2)},
'expected': [
[[0.], [1.]],
[[10.], [0.]]]},
{'testcase_name': '3D',
'inputs_args': {
           # feature 0, values [[20, 3], [5]]
           # feature 1, values [[3], [8]]
'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
'values': (20, 3, 5., 3., 8.),
'dense_shape': (2, 2, 2)},
'expected': [
[[20.], [3.], [5.], [0.]],
[[3.], [0.], [8.], [0.]]]},
)
def test_get_sequence_dense_tensor(self, inputs_args, expected):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
numeric_column = sfc.sequence_numeric_column('aaa')
dense_tensor, _ = numeric_column._get_sequence_dense_tensor(
_LazyBuilder({'aaa': inputs}))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(expected, dense_tensor.eval(session=sess))
def test_get_sequence_dense_tensor_with_normalizer_fn(self):
def _increment_two(input_sparse_tensor):
return sparse_ops.sparse_add(
input_sparse_tensor,
sparse_tensor.SparseTensor(((0, 0), (1, 1)), (2.0, 2.0), (2, 2))
)
sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, values [[0.], [1.]]
        # example 1, values [[10.]]
indices=((0, 0), (0, 1), (1, 0)),
values=(0., 1., 10.),
dense_shape=(2, 2))
# Before _increment_two:
# [[0.], [1.]],
# [[10.], [0.]],
# After _increment_two:
# [[2.], [1.]],
# [[10.], [2.]],
expected_dense_tensor = [
[[2.], [1.]],
[[10.], [2.]],
]
numeric_column = sfc.sequence_numeric_column(
'aaa', normalizer_fn=_increment_two)
dense_tensor, _ = numeric_column._get_sequence_dense_tensor(
_LazyBuilder({'aaa': sparse_input}))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(
expected_dense_tensor, dense_tensor.eval(session=sess))
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, values [[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]]
# example 1, [[[10., 11.], [12., 13.]]]
'indices': ((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6),
(0, 7), (1, 0), (1, 1), (1, 2), (1, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 8)},
'expected_dense_tensor': [
[[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]],
[[[10., 11.], [12., 13.]], [[0., 0.], [0., 0.]]]]},
{'testcase_name': '3D',
'sparse_input_args': {
'indices': ((0, 0, 0), (0, 0, 2), (0, 0, 4), (0, 0, 6),
(0, 1, 0), (0, 1, 2), (0, 1, 4), (0, 1, 6),
(1, 0, 0), (1, 0, 2), (1, 0, 4), (1, 0, 6)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 2, 8)},
'expected_dense_tensor': [
[[[0., 0.], [1., 0.]], [[2., 0.], [3., 0.]],
[[4., 0.], [5., 0.]], [[6., 0.], [7., 0.]]],
[[[10., 0.], [11., 0.]], [[12., 0.], [13., 0.]],
[[0., 0.], [0., 0.]], [[0., 0.], [0., 0.]]]]},
)
def test_get_dense_tensor_multi_dim(
self, sparse_input_args, expected_dense_tensor):
"""Tests get_sequence_dense_tensor for multi-dim numeric_column."""
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
numeric_column = sfc.sequence_numeric_column('aaa', shape=(2, 2))
dense_tensor, _ = numeric_column._get_sequence_dense_tensor(
_LazyBuilder({'aaa': sparse_input}))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(
expected_dense_tensor, dense_tensor.eval(session=sess))
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
           # example 0, values [2.]
           # example 1, values [0., 1.]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2., 0., 1.),
'dense_shape': (2, 2)},
'expected_sequence_length': [1, 2],
'shape': (1,)},
{'testcase_name': '3D',
'inputs_args': {
           # example 0, values [[2.]]
           # example 1, values [[0., 1.], [2.]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2., 0., 1., 2.),
'dense_shape': (2, 2, 2)},
'expected_sequence_length': [1, 2],
'shape': (1,)},
{'testcase_name': '2D_with_shape',
'inputs_args': {
           # example 0, values [2.]
           # example 1, values [0., 1.]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2., 0., 1.),
'dense_shape': (2, 2)},
'expected_sequence_length': [1, 1],
'shape': (2,)},
{'testcase_name': '3D_with_shape',
'inputs_args': {
           # example 0, values [[2.]]
           # example 1, values [[0., 1.], [2.]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2., 0., 1., 2.),
'dense_shape': (2, 2, 2)},
'expected_sequence_length': [1, 2],
'shape': (2,)},
)
def test_sequence_length(self, inputs_args, expected_sequence_length, shape):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
numeric_column = sfc.sequence_numeric_column('aaa', shape=shape)
_, sequence_length = numeric_column._get_sequence_dense_tensor(
_LazyBuilder({'aaa': inputs}))
with monitored_session.MonitoredSession() as sess:
sequence_length = sess.run(sequence_length)
self.assertAllEqual(expected_sequence_length, sequence_length)
self.assertEqual(np.int64, sequence_length.dtype)
def test_sequence_length_with_empty_rows(self):
"""Tests _sequence_length when some examples do not have ids."""
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, values []
# example 1, values [[0.], [1.]]
# example 2, [[2.]]
# example 3, values []
# example 4, [[3.]]
# example 5, values []
indices=((1, 0), (1, 1), (2, 0), (4, 0)),
values=(0., 1., 2., 3.),
dense_shape=(6, 2))
expected_sequence_length = [0, 2, 1, 0, 1, 0]
numeric_column = sfc.sequence_numeric_column('aaa')
_, sequence_length = numeric_column._get_sequence_dense_tensor(
_LazyBuilder({'aaa': sparse_input}))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/feature_column/python/feature_column/sequence_feature_column_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental methods for tf.feature_column sequence input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.feature_column import feature_column as fc
from tensorflow.python.feature_column import utils as fc_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
# pylint: disable=protected-access
def sequence_input_layer(
features,
feature_columns,
weight_collections=None,
trainable=True):
""""Builds input layer for sequence input.
All `feature_columns` must be sequence dense columns with the same
`sequence_length`. The output of this method can be fed into sequence
networks, such as RNN.
The output of this method is a 3D `Tensor` of shape `[batch_size, T, D]`.
`T` is the maximum sequence length for this batch, which could differ from
batch to batch.
If multiple `feature_columns` are given with `Di` `num_elements` each, their
outputs are concatenated. So, the final `Tensor` has shape
`[batch_size, T, D0 + D1 + ... + Dn]`.
Example:
```python
rating = sequence_numeric_column('rating')
watches = sequence_categorical_column_with_identity(
'watches', num_buckets=1000)
watches_embedding = embedding_column(watches, dimension=10)
columns = [rating, watches]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
input_layer, sequence_length = sequence_input_layer(features, columns)
rnn_cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(hidden_size)
outputs, state = tf.compat.v1.nn.dynamic_rnn(
rnn_cell, inputs=input_layer, sequence_length=sequence_length)
```
Args:
features: A dict mapping keys to tensors.
feature_columns: An iterable of dense sequence columns. Valid columns are
- `embedding_column` that wraps a `sequence_categorical_column_with_*`
- `sequence_numeric_column`.
weight_collections: A list of collection names to which the Variable will be
added. Note that variables will also be added to collections
      `tf.GraphKeys.GLOBAL_VARIABLES` and `tf.GraphKeys.MODEL_VARIABLES`.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES`.
Returns:
An `(input_layer, sequence_length)` tuple where:
- input_layer: A float `Tensor` of shape `[batch_size, T, D]`.
`T` is the maximum sequence length for this batch, which could differ
from batch to batch. `D` is the sum of `num_elements` for all
`feature_columns`.
- sequence_length: An int `Tensor` of shape `[batch_size]`. The sequence
length for each example.
Raises:
ValueError: If any of the `feature_columns` is the wrong type.
"""
feature_columns = fc._normalize_feature_columns(feature_columns)
for c in feature_columns:
if not isinstance(c, fc._SequenceDenseColumn):
raise ValueError(
'All feature_columns must be of type _SequenceDenseColumn. '
'You can wrap a sequence_categorical_column with an embedding_column '
'or indicator_column. '
'Given (type {}): {}'.format(type(c), c))
with variable_scope.variable_scope(
None, default_name='sequence_input_layer', values=features.values()):
builder = fc._LazyBuilder(features)
output_tensors = []
sequence_lengths = []
ordered_columns = []
for column in sorted(feature_columns, key=lambda x: x.name):
ordered_columns.append(column)
with variable_scope.variable_scope(
None, default_name=column._var_scope_name):
dense_tensor, sequence_length = column._get_sequence_dense_tensor(
builder,
weight_collections=weight_collections,
trainable=trainable)
# Flattens the final dimension to produce a 3D Tensor.
num_elements = column._variable_shape.num_elements()
shape = array_ops.shape(dense_tensor)
target_shape = [shape[0], shape[1], num_elements]
output_tensors.append(
array_ops.reshape(dense_tensor, shape=target_shape))
sequence_lengths.append(sequence_length)
fc._verify_static_batch_size_equality(output_tensors, ordered_columns)
fc._verify_static_batch_size_equality(sequence_lengths, ordered_columns)
sequence_length = _assert_all_equal_and_return(sequence_lengths)
return array_ops.concat(output_tensors, -1), sequence_length
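# A minimal usage sketch (hypothetical, for illustration only; not part of this
# module's API). It assumes `features` is a dict of `SparseTensor`s keyed by
# 'rating' and 'watches', e.g. as produced by `tf.io.parse_example`. With a
# 1-element numeric column and a 10-dimensional embedding column, the
# per-column outputs are reshaped to 3D and concatenated on the last axis, so
# the returned input layer has shape [batch_size, T, 1 + 10].
def _example_sequence_input_layer(features):
  rating = sequence_numeric_column('rating')                        # D0 = 1
  watches = sequence_categorical_column_with_identity(
      'watches', num_buckets=1000)
  watches_embedding = fc._embedding_column(watches, dimension=10)   # D1 = 10
  # Returns ([batch_size, T, 11] inputs, [batch_size] sequence lengths).
  return sequence_input_layer(features, [rating, watches_embedding])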
def concatenate_context_input(context_input, sequence_input):
"""Replicates `context_input` across all timesteps of `sequence_input`.
Expands dimension 1 of `context_input` then tiles it `sequence_length` times.
This value is appended to `sequence_input` on dimension 2 and the result is
returned.
Args:
context_input: A `Tensor` of dtype `float32` and shape `[batch_size, d1]`.
sequence_input: A `Tensor` of dtype `float32` and shape `[batch_size,
padded_length, d0]`.
Returns:
A `Tensor` of dtype `float32` and shape `[batch_size, padded_length,
d0 + d1]`.
Raises:
ValueError: If `sequence_input` does not have rank 3 or `context_input` does
not have rank 2.
"""
seq_rank_check = check_ops.assert_rank(
sequence_input,
3,
message='sequence_input must have rank 3',
data=[array_ops.shape(sequence_input)])
seq_type_check = check_ops.assert_type(
sequence_input,
dtypes.float32,
message='sequence_input must have dtype float32; got {}.'.format(
sequence_input.dtype))
ctx_rank_check = check_ops.assert_rank(
context_input,
2,
message='context_input must have rank 2',
data=[array_ops.shape(context_input)])
ctx_type_check = check_ops.assert_type(
context_input,
dtypes.float32,
message='context_input must have dtype float32; got {}.'.format(
context_input.dtype))
with ops.control_dependencies(
[seq_rank_check, seq_type_check, ctx_rank_check, ctx_type_check]):
padded_length = array_ops.shape(sequence_input)[1]
tiled_context_input = array_ops.tile(
array_ops.expand_dims(context_input, 1),
array_ops.concat([[1], [padded_length], [1]], 0))
return array_ops.concat([sequence_input, tiled_context_input], 2)
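# A minimal sketch (hypothetical, for illustration only) of the shape contract:
# a per-example context vector of shape [batch_size, d1] is tiled across the
# padded_length timesteps of a [batch_size, padded_length, d0] sequence input,
# and the concatenated result has shape [batch_size, padded_length, d0 + d1].
def _example_concatenate_context_input():
  sequence_input = array_ops.ones([4, 7, 3], dtype=dtypes.float32)  # d0 = 3
  context_input = array_ops.zeros([4, 5], dtype=dtypes.float32)     # d1 = 5
  # The context vector is replicated across all 7 timesteps.
  return concatenate_context_input(context_input, sequence_input)   # [4, 7, 8]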
def sequence_categorical_column_with_identity(
key, num_buckets, default_value=None):
"""Returns a feature column that represents sequences of integers.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
watches = sequence_categorical_column_with_identity(
'watches', num_buckets=1000)
watches_embedding = embedding_column(watches, dimension=10)
columns = [watches_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
input_layer, sequence_length = sequence_input_layer(features, columns)
rnn_cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(hidden_size)
outputs, state = tf.compat.v1.nn.dynamic_rnn(
rnn_cell, inputs=input_layer, sequence_length=sequence_length)
```
Args:
key: A unique string identifying the input feature.
num_buckets: Range of inputs. Namely, inputs are expected to be in the
range `[0, num_buckets)`.
default_value: If `None`, this column's graph operations will fail for
out-of-range inputs. Otherwise, this value must be in the range
`[0, num_buckets)`, and will replace out-of-range inputs.
Returns:
A `_SequenceCategoricalColumn`.
Raises:
ValueError: if `num_buckets` is less than one.
ValueError: if `default_value` is not in range `[0, num_buckets)`.
"""
return fc._SequenceCategoricalColumn(
fc._categorical_column_with_identity(
key=key, num_buckets=num_buckets, default_value=default_value))
def sequence_categorical_column_with_hash_bucket(
key, hash_bucket_size, dtype=dtypes.string):
"""A sequence of categorical terms where ids are set by hashing.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
tokens = sequence_categorical_column_with_hash_bucket(
'tokens', hash_bucket_size=1000)
tokens_embedding = embedding_column(tokens, dimension=10)
columns = [tokens_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
input_layer, sequence_length = sequence_input_layer(features, columns)
rnn_cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(hidden_size)
outputs, state = tf.compat.v1.nn.dynamic_rnn(
rnn_cell, inputs=input_layer, sequence_length=sequence_length)
```
Args:
key: A unique string identifying the input feature.
hash_bucket_size: An int > 1. The number of buckets.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `_SequenceCategoricalColumn`.
Raises:
ValueError: `hash_bucket_size` is not greater than 1.
ValueError: `dtype` is neither string nor integer.
"""
return fc._SequenceCategoricalColumn(
fc._categorical_column_with_hash_bucket(
key=key, hash_bucket_size=hash_bucket_size, dtype=dtype))
def sequence_categorical_column_with_vocabulary_file(
key, vocabulary_file, vocabulary_size=None, num_oov_buckets=0,
default_value=None, dtype=dtypes.string):
"""A sequence of categorical terms where ids use a vocabulary file.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
states = sequence_categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
num_oov_buckets=5)
states_embedding = embedding_column(states, dimension=10)
columns = [states_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
input_layer, sequence_length = sequence_input_layer(features, columns)
rnn_cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(hidden_size)
outputs, state = tf.compat.v1.nn.dynamic_rnn(
rnn_cell, inputs=input_layer, sequence_length=sequence_length)
```
Args:
key: A unique string identifying the input feature.
vocabulary_file: The vocabulary file name.
    vocabulary_size: Number of elements in the vocabulary. This must be no
      greater than the length of `vocabulary_file`; if it is less, later values
      are ignored. If `None`, it is set to the length of `vocabulary_file`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
the input value. A positive `num_oov_buckets` can not be specified with
`default_value`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `_SequenceCategoricalColumn`.
Raises:
ValueError: `vocabulary_file` is missing or cannot be opened.
ValueError: `vocabulary_size` is missing or < 1.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: `dtype` is neither string nor integer.
"""
return fc._SequenceCategoricalColumn(
fc._categorical_column_with_vocabulary_file(
key=key,
vocabulary_file=vocabulary_file,
vocabulary_size=vocabulary_size,
num_oov_buckets=num_oov_buckets,
default_value=default_value,
dtype=dtype))
def sequence_categorical_column_with_vocabulary_list(
key, vocabulary_list, dtype=None, default_value=-1, num_oov_buckets=0):
"""A sequence of categorical terms where ids use an in-memory list.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
colors = sequence_categorical_column_with_vocabulary_list(
key='colors', vocabulary_list=('R', 'G', 'B', 'Y'),
num_oov_buckets=2)
colors_embedding = embedding_column(colors, dimension=3)
columns = [colors_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
input_layer, sequence_length = sequence_input_layer(features, columns)
rnn_cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(hidden_size)
outputs, state = tf.compat.v1.nn.dynamic_rnn(
rnn_cell, inputs=input_layer, sequence_length=sequence_length)
```
Args:
key: A unique string identifying the input feature.
vocabulary_list: An ordered iterable defining the vocabulary. Each feature
is mapped to the index of its value (if present) in `vocabulary_list`.
Must be castable to `dtype`.
dtype: The type of features. Only string and integer types are supported.
If `None`, it will be inferred from `vocabulary_list`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on a
hash of the input value. A positive `num_oov_buckets` can not be specified
with `default_value`.
Returns:
A `_SequenceCategoricalColumn`.
Raises:
ValueError: if `vocabulary_list` is empty, or contains duplicate keys.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: if `dtype` is not integer or string.
"""
return fc._SequenceCategoricalColumn(
fc._categorical_column_with_vocabulary_list(
key=key,
vocabulary_list=vocabulary_list,
dtype=dtype,
default_value=default_value,
num_oov_buckets=num_oov_buckets))
def sequence_numeric_column(
key,
shape=(1,),
default_value=0.,
dtype=dtypes.float32,
normalizer_fn=None):
"""Returns a feature column that represents sequences of numeric data.
Example:
```python
temperature = sequence_numeric_column('temperature')
columns = [temperature]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
input_layer, sequence_length = sequence_input_layer(features, columns)
rnn_cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(hidden_size)
outputs, state = tf.compat.v1.nn.dynamic_rnn(
rnn_cell, inputs=input_layer, sequence_length=sequence_length)
```
Args:
key: A unique string identifying the input features.
shape: The shape of the input data per sequence id. E.g. if `shape=(2,)`,
each example must contain `2 * sequence_length` values.
default_value: A single value compatible with `dtype` that is used for
padding the sparse data into a dense `Tensor`.
dtype: The type of values.
normalizer_fn: If not `None`, a function that can be used to normalize the
value of the tensor after `default_value` is applied for parsing.
Normalizer function takes the input `Tensor` as its argument, and returns
the output `Tensor`. (e.g. lambda x: (x - 3.0) / 4.2). Please note that
even though the most common use case of this function is normalization, it
      can be used for any kind of TensorFlow transformation.
Returns:
A `_SequenceNumericColumn`.
Raises:
TypeError: if any dimension in shape is not an int.
ValueError: if any dimension in shape is not a positive integer.
ValueError: if `dtype` is not convertible to `tf.float32`.
"""
shape = fc._check_shape(shape=shape, key=key)
if not (dtype.is_integer or dtype.is_floating):
raise ValueError('dtype must be convertible to float. '
'dtype: {}, key: {}'.format(dtype, key))
if normalizer_fn is not None and not callable(normalizer_fn):
raise TypeError(
'normalizer_fn must be a callable. Given: {}'.format(normalizer_fn))
return _SequenceNumericColumn(
key,
shape=shape,
default_value=default_value,
dtype=dtype,
normalizer_fn=normalizer_fn)
def _assert_all_equal_and_return(tensors, name=None):
"""Asserts that all tensors are equal and returns the first one."""
with ops.name_scope(name, 'assert_all_equal', values=tensors):
if len(tensors) == 1:
return tensors[0]
assert_equal_ops = []
for t in tensors[1:]:
assert_equal_ops.append(check_ops.assert_equal(tensors[0], t))
with ops.control_dependencies(assert_equal_ops):
return array_ops.identity(tensors[0])
class _SequenceNumericColumn(
fc._SequenceDenseColumn,
collections.namedtuple(
'_SequenceNumericColumn',
['key', 'shape', 'default_value', 'dtype', 'normalizer_fn'])):
"""Represents sequences of numeric data."""
@property
def name(self):
return self.key
@property
def _parse_example_spec(self):
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
def _transform_feature(self, inputs):
input_tensor = inputs.get(self.key)
if self.normalizer_fn is not None:
input_tensor = self.normalizer_fn(input_tensor)
return input_tensor
@property
def _variable_shape(self):
return tensor_shape.TensorShape(self.shape)
def _get_sequence_dense_tensor(
self, inputs, weight_collections=None, trainable=None):
# Do nothing with weight_collections and trainable since no variables are
# created in this function.
del weight_collections
del trainable
sp_tensor = inputs.get(self)
dense_tensor = sparse_ops.sparse_tensor_to_dense(
sp_tensor, default_value=self.default_value)
# Reshape into [batch_size, T, variable_shape].
dense_shape = array_ops.concat(
[array_ops.shape(dense_tensor)[:1], [-1], self._variable_shape],
axis=0)
dense_tensor = array_ops.reshape(dense_tensor, shape=dense_shape)
# Get the number of timesteps per example
# For the 2D case, the raw values are grouped according to num_elements;
# for the 3D case, the grouping happens in the third dimension, and
# sequence length is not affected.
num_elements = (self._variable_shape.num_elements()
if sp_tensor.shape.ndims == 2 else 1)
seq_length = fc_utils.sequence_length_from_sparse_tensor(
sp_tensor, num_elements=num_elements)
return fc._SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=seq_length)
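# A minimal sketch (hypothetical, for illustration only) of how `shape` groups
# the flat values of a `sequence_numeric_column`: with shape=(2,), every two
# consecutive values of an example form one timestep, so 2 * T values become a
# [batch_size, T, 2] dense tensor and sequence_length counts T, not 2 * T.
def _example_sequence_numeric_column_shape():
  # Local import used only by this illustration.
  from tensorflow.python.framework import sparse_tensor
  column = sequence_numeric_column('prices', shape=(2,))
  # example 0, values [[1., 2.], [3., 4.]]  (T = 2)
  # example 1, values [[5., 6.]]            (T = 1, padded with a zero timestep)
  sp = sparse_tensor.SparseTensor(
      indices=((0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1)),
      values=(1., 2., 3., 4., 5., 6.),
      dense_shape=(2, 4))
  dense, seq_length = column._get_sequence_dense_tensor(
      fc._LazyBuilder({'prices': sp}))
  return dense, seq_length  # dense shape [2, 2, 2], seq_length [2, 1]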
# pylint: enable=protected-access
|
tensorflow-master
|
tensorflow/contrib/feature_column/python/feature_column/sequence_feature_column.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""estimator python module.
Importing from tensorflow.python.estimator
is unsupported and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Importing from tensorflow.python.estimator
# is unsupported and will soon break!
from tensorflow_estimator.contrib import estimator
# Fixes remove_undocumented not working as intended.
#
# Problem is that when the below import happens (for first time,
# Python only imports things once), Python sets attribute named
# 'python' to this package. If this first import happens
# after the call to remove_undocumented, then the 'python'
# attribute won't be removed.
import tensorflow.contrib.estimator.python
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
estimator.__all__ = [s for s in dir(estimator) if not s.startswith('__')]
from tensorflow_estimator.contrib.estimator import *
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'add_metrics',
'binary_classification_head',
'clip_gradients_by_norm',
'forward_features',
'InMemoryEvaluatorHook',
'make_stop_at_checkpoint_step_hook',
'logistic_regression_head',
'multi_class_head',
'multi_head',
'multi_label_head',
'poisson_regression_head',
'regression_head',
'boosted_trees_classifier_train_in_memory',
'boosted_trees_regressor_train_in_memory',
'call_logit_fn',
'dnn_logit_fn_builder',
'linear_logit_fn_builder',
'replicate_model_fn',
'TowerOptimizer',
'RNNClassifier',
'RNNEstimator',
'export_saved_model_for_mode',
'export_all_saved_models',
'make_early_stopping_hook',
'read_eval_metrics',
'stop_if_lower_hook',
'stop_if_higher_hook',
'stop_if_no_increase_hook',
'stop_if_no_decrease_hook',
'build_raw_supervised_input_receiver_fn',
'build_supervised_input_receiver_fn_from_input_fn',
'SavedModelEstimator',
'DNNClassifierWithLayerAnnotations',
'DNNRegressorWithLayerAnnotations',
]
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/estimator/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""dnn_with_layer_annotations python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import dnn_with_layer_annotations
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
dnn_with_layer_annotations.__all__ = [
s for s in dir(dnn_with_layer_annotations) if not s.startswith('__')
]
from tensorflow_estimator.contrib.estimator.python.estimator.dnn_with_layer_annotations import *
|
tensorflow-master
|
tensorflow/contrib/estimator/python/estimator/dnn_with_layer_annotations.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""hooks python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import hooks
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
hooks.__all__ = [s for s in dir(hooks) if not s.startswith('__')]
from tensorflow_estimator.contrib.estimator.python.estimator.hooks import *
|
tensorflow-master
|
tensorflow/contrib/estimator/python/estimator/hooks.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""logit_fns python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import logit_fns
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
logit_fns.__all__ = [s for s in dir(logit_fns) if not s.startswith('__')]
from tensorflow_estimator.contrib.estimator.python.estimator.logit_fns import *
|
tensorflow-master
|
tensorflow/contrib/estimator/python/estimator/logit_fns.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""exporter python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import exporter
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
exporter.__all__ = [s for s in dir(exporter) if not s.startswith('__')]
from tensorflow_estimator.contrib.estimator.python.estimator.exporter import *
|
tensorflow-master
|
tensorflow/contrib/estimator/python/estimator/exporter.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""export python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import export
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
export.__all__ = [s for s in dir(export) if not s.startswith('__')]
from tensorflow_estimator.contrib.estimator.python.estimator.export import *
|
tensorflow-master
|
tensorflow/contrib/estimator/python/estimator/export.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""boosted_trees python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import boosted_trees
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
boosted_trees.__all__ = [
s for s in dir(boosted_trees) if not s.startswith('__')
]
from tensorflow_estimator.contrib.estimator.python.estimator.boosted_trees import *
|
tensorflow-master
|
tensorflow/contrib/estimator/python/estimator/boosted_trees.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""multi_head python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import multi_head
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
multi_head.__all__ = [s for s in dir(multi_head) if not s.startswith('__')]
from tensorflow_estimator.contrib.estimator.python.estimator.multi_head import *
|
tensorflow-master
|
tensorflow/contrib/estimator/python/estimator/multi_head.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""saved_model_estimator python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import saved_model_estimator
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
saved_model_estimator.__all__ = [
s for s in dir(saved_model_estimator) if not s.startswith('__')
]
from tensorflow_estimator.contrib.estimator.python.estimator.saved_model_estimator import *
|
tensorflow-master
|
tensorflow/contrib/estimator/python/estimator/saved_model_estimator.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""head python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import head
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
head.__all__ = [s for s in dir(head) if not s.startswith('__')]
from tensorflow_estimator.contrib.estimator.python.estimator.head import *
|
tensorflow-master
|
tensorflow/contrib/estimator/python/estimator/head.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""extenders python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import extenders
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
extenders.__all__ = [s for s in dir(extenders) if not s.startswith('__')]
from tensorflow_estimator.contrib.estimator.python.estimator.extenders import *
|
tensorflow-master
|
tensorflow/contrib/estimator/python/estimator/extenders.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""replicate_model_fn python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import replicate_model_fn
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
replicate_model_fn.__all__ = [
s for s in dir(replicate_model_fn) if not s.startswith('__')
]
from tensorflow_estimator.contrib.estimator.python.estimator.replicate_model_fn import *
|
tensorflow-master
|
tensorflow/contrib/estimator/python/estimator/replicate_model_fn.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""early_stopping python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.python.estimator import early_stopping
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
early_stopping.__all__ = [
s for s in dir(early_stopping) if not s.startswith('__')
]
from tensorflow_estimator.python.estimator.early_stopping import *
|
tensorflow-master
|
tensorflow/contrib/estimator/python/estimator/early_stopping.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""rnn python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import rnn
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
rnn.__all__ = [s for s in dir(rnn) if not s.startswith('__')]
from tensorflow_estimator.contrib.estimator.python.estimator.rnn import *
|
tensorflow-master
|
tensorflow/contrib/estimator/python/estimator/rnn.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""reduce by slice
@@reduce_slice_sum
@@reduce_slice_prod
@@reduce_slice_min
@@reduce_slice_max
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.reduce_slice_ops.python.ops import *
|
tensorflow-master
|
tensorflow/contrib/reduce_slice_ops/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.reduce_slice_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.reduce_slice_ops.python.ops import reduce_slice_ops
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.platform import googletest
class ReduceSliceTest(TensorFlowTestCase):
def testReduceSliceSum1D(self):
x = np.array([1, 40, 700], dtype=np.int32)
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.array([1, 741, 40, 740, 41], dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceSum2D(self):
x = np.array([[1, 2, 3], [40, 50, 60], [700, 800, 900]], dtype=np.int32)
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.array([[1, 2, 3], [741, 852, 963], [40, 50, 60],
[740, 850, 960], [41, 52, 63]], dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceSum3D(self):
x = np.array([[[1, 2], [3, 4]], [[50, 60], [70, 80]],
[[600, 700], [800, 900]]], dtype=np.int32)
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.array([[[1, 2], [3, 4]],
[[651, 762], [873, 984]],
[[50, 60], [70, 80]],
[[650, 760], [870, 980]],
[[51, 62], [73, 84]]], dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceSumAxis1(self):
x = np.transpose(np.array([[1, 2, 3], [40, 50, 60],
[700, 800, 900]], dtype=np.int32))
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.transpose(np.array([[1, 2, 3],
[741, 852, 963],
[40, 50, 60],
[740, 850, 960],
[41, 52, 63]], dtype=np.int32))
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 1).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceSum1DIndices(self):
x = np.array([[1, 2, 3], [40, 50, 60], [700, 800, 900],
[1000, 2000, 3000], [40000, 50000, 60000]], dtype=np.int32)
indices = np.array([0, 0, 2, 5], dtype=np.int32)
result = np.array([[0, 0, 0], [41, 52, 63],
[41700, 52800, 63900]], dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceProd(self):
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.array([[1, 2, 3], [28, 80, 162], [4, 5, 6],
[28, 40, 54], [4, 10, 18]], dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_prod(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceMax(self):
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.array([[1, 2, 3], [7, 8, 9], [4, 5, 6],
[7, 8, 9], [4, 5, 6]], dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_max(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceMin(self):
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.array([[1, 2, 3], [1, 2, 3], [4, 5, 6],
[4, 5, 6], [1, 2, 3]], dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_min(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceEmptyDataRows(self):
x = np.empty((0, 1, 2, 3, 4, 5, 6), dtype=np.int32)
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.zeros((5, 1, 2, 3, 4, 5, 6), dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceEmptyDataCols(self):
x = np.empty((100, 0, 2, 3, 4, 5, 6), dtype=np.int32)
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.empty((5, 0, 2, 3, 4, 5, 6), dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceEmptyIndicesRows(self):
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
indices = np.empty((0, 2), dtype=np.int32)
result = np.empty((0, 3), dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceEmpty0Indices1D(self):
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
indices = np.empty((0,), dtype=np.int32)
result = np.empty((0, 3), dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceEmpty1Indices1D(self):
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
indices = np.array([0], dtype=np.int32)
result = np.empty((0, 3), dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/reduce_slice_ops/python/kernel_tests/reduce_slice_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for the reduce slice operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.reduce_slice_ops.ops import gen_reduce_slice_ops
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
_reduce_slice_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_reduce_slice_ops.so"))
reduce_slice_sum = gen_reduce_slice_ops.reduce_slice_sum
reduce_slice_prod = gen_reduce_slice_ops.reduce_slice_prod
reduce_slice_max = gen_reduce_slice_ops.reduce_slice_max
reduce_slice_min = gen_reduce_slice_ops.reduce_slice_min
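# A minimal, illustrative usage sketch (an assumption-laden example, not part
# of the public API): it presumes the compiled `_reduce_slice_ops.so` kernel
# loaded above is available and runs only when this file is executed directly.
# Each row of `indices` is a half-open range [begin, end); the op reduces
# data[begin:end] along the given axis, matching reduce_slice_ops_test.py.
if __name__ == "__main__":
  import numpy as np
  from tensorflow.python.client import session

  data = np.array([[1, 2, 3], [40, 50, 60], [700, 800, 900]], dtype=np.int32)
  indices = np.array([[0, 1], [0, 3], [1, 2]], dtype=np.int32)
  with session.Session() as sess:
    # Rows 0:1, 0:3 and 1:2 are summed, giving
    # [[1, 2, 3], [741, 852, 963], [40, 50, 60]].
    print(sess.run(reduce_slice_sum(data, indices, 0)))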
|
tensorflow-master
|
tensorflow/contrib/reduce_slice_ops/python/ops/reduce_slice_ops.py
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for mixed precision training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.mixed_precision.python.loss_scale_manager import *
from tensorflow.contrib.mixed_precision.python.loss_scale_optimizer import *
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
"LossScaleManager",
"FixedLossScaleManager",
"ExponentialUpdateLossScaleManager",
"LossScaleOptimizer",
]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/mixed_precision/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LossScaleManager classes.."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.mixed_precision.python import loss_scale_manager as lsm_lib
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _GetExampleIter(inputs):
dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
return dataset_ops.make_one_shot_iterator(dataset)
class FixedLossScaleManagerTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_basic(self):
itr = _GetExampleIter([True] * 10 + [False] * 10)
loss_scale = 1000
lsm = lsm_lib.FixedLossScaleManager(loss_scale)
update_fn = lambda: lsm.update_loss_scale(itr.get_next())
self.evaluate(variables.global_variables_initializer())
if not context.executing_eagerly():
update_op = update_fn()
for _ in range(10):
if context.executing_eagerly():
update_fn()
else:
self.evaluate(update_op)
self.assertEqual(loss_scale, self.evaluate(lsm.get_loss_scale()))
class ExponentialUpdateLossScaleManagerTest(test.TestCase):
def _test_helper(self,
inputs,
expected_outputs,
init_loss_scale=1,
incr_every_n_step=2,
decr_every_n_nan_or_inf=2):
ratio = 2
lsm = lsm_lib.ExponentialUpdateLossScaleManager(
init_loss_scale=init_loss_scale,
incr_every_n_steps=incr_every_n_step,
decr_every_n_nan_or_inf=decr_every_n_nan_or_inf,
incr_ratio=ratio,
decr_ratio=1. / ratio)
itr = _GetExampleIter(inputs)
update_fn = lambda: lsm.update_loss_scale(itr.get_next())
self.evaluate(variables.global_variables_initializer())
actual_outputs = []
if not context.executing_eagerly():
update_op = update_fn()
for _ in range(len(inputs)):
if context.executing_eagerly():
update_fn()
else:
self.evaluate(update_op)
actual_outputs.append(self.evaluate(lsm.get_loss_scale()))
self.assertEqual(actual_outputs, expected_outputs)
@test_util.run_in_graph_and_eager_modes
def test_increase_every_n_steps(self):
inputs = [True] * 6
expected_outputs = [1, 2, 2, 4, 4, 8]
self._test_helper(inputs, expected_outputs)
@test_util.run_in_graph_and_eager_modes
def test_keep_increasing_until_capped(self):
init_loss_scale = np.finfo(np.float32).max / 4 + 10
max_float = np.finfo(np.float32).max
inputs = [True] * 6
# Output is capped the 2nd time it doubles.
expected_outputs = [
init_loss_scale, init_loss_scale * 2, init_loss_scale * 2, max_float,
max_float, max_float
]
self._test_helper(inputs, expected_outputs, init_loss_scale)
@test_util.run_in_graph_and_eager_modes
def test_decrease_every_n_steps(self):
inputs = [False] * 6
init_loss_scale = 1024
expected_outputs = [1024, 512, 512, 256, 256, 128]
self._test_helper(inputs, expected_outputs, init_loss_scale)
@test_util.run_in_graph_and_eager_modes
def test_keep_decreasing_until_one(self):
inputs = [False] * 10
init_loss_scale = 16
expected_outputs = [16, 8, 8, 4, 4, 2, 2, 1, 1, 1]
self._test_helper(inputs, expected_outputs, init_loss_scale)
@test_util.run_in_graph_and_eager_modes
def test_incr_bad_step_clear_good_step(self):
inputs = [True, True, True, False, True]
expected_outputs = [1, 2, 2, 2, 2]
self._test_helper(inputs, expected_outputs)
@test_util.run_in_graph_and_eager_modes
def test_incr_good_step_does_not_clear_bad_step(self):
inputs = [True, True, True, False, True, False]
expected_outputs = [1, 2, 2, 2, 2, 1]
self._test_helper(inputs, expected_outputs)
@test_util.run_in_graph_and_eager_modes
def test_trigger_loss_scale_update_each_step(self):
"""Test when incr_every_n_step and decr_every_n_nan_or_inf is 1."""
init_loss_scale = 1
incr_every_n_step = 1
decr_every_n_nan_or_inf = 1
inputs = [True] * 3 + [False, True, True]
expected_outputs = [2, 4, 8, 4, 8, 16]
self._test_helper(inputs, expected_outputs, init_loss_scale,
incr_every_n_step, decr_every_n_nan_or_inf)
@test_util.run_in_graph_and_eager_modes
def test_alternating_good_and_bad_gradients_trigger_each_step(self):
init_loss_scale = 1
incr_every_n_step = 1
decr_every_n_nan_or_inf = 1
inputs = [True, False] * 4 + [True]
expected_outputs = [2, 1, 2, 1, 2, 1, 2, 1, 2]
self._test_helper(inputs, expected_outputs, init_loss_scale,
incr_every_n_step, decr_every_n_nan_or_inf)
@test_util.run_in_graph_and_eager_modes
def test_alternating_good_and_bad_gradients_trigger_incr_every_2steps(self):
init_loss_scale = 32
incr_every_n_step = 2
decr_every_n_nan_or_inf = 1
inputs = [True, False] * 3 + [True]
expected_outputs = [32, 16, 16, 8, 8, 4, 4]
self._test_helper(inputs, expected_outputs, init_loss_scale,
incr_every_n_step, decr_every_n_nan_or_inf)
@test_util.run_in_graph_and_eager_modes
def test_random_mix_good_and_bad_gradients(self):
init_loss_scale = 4
inputs = [
False, False, True, True, True, False, True, False, True, True, True,
False
]
expected_outputs = [4, 2, 2, 4, 4, 4, 4, 2, 2, 4, 4, 4]
self._test_helper(inputs, expected_outputs, init_loss_scale)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/mixed_precision/python/loss_scale_manager_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LossScaleManager classes for mixed precision training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
@six.add_metaclass(abc.ABCMeta)
class LossScaleManager(object):
"""Abstract loss scale manager class.
Loss scale managers with different strategies should subclass this class.
Loss scaling is a process that:
1) Applies a multiplier on the loss before computing gradients, and
2) Applies the reciprocal of the multiplier on the gradients before they are
applied to variables.
This class is used together with
`tf.contrib.mixed_precision.LossScaleOptimizer` for mixed precision training
(float32 variables and float16 ops) on Nvidia GPUs in order to achieve the
same model quality as single precision training, with the benefit of
potentially higher throughput.
See `tf.contrib.mixed_precision.LossScaleOptimizer` for more details.
"""
@abc.abstractmethod
def get_loss_scale(self):
"""Returns the loss scale as a scalar `float32` tensor."""
pass
@abc.abstractmethod
def update_loss_scale(self, finite_grads):
"""Updates loss scale based on if gradients are finite in current step.
Args:
finite_grads: bool scalar tensor indicating if all gradients are
finite (i.e., not inf or nan).
Returns:
An op that, when executed, updates the loss scale. If eager execution is
enabled, returns nothing.
"""
del finite_grads
return
class FixedLossScaleManager(LossScaleManager):
"""Loss scale manager with a fixed loss scale.
The loss scale is not updated for the lifetime of the class.
"""
def __init__(self, loss_scale):
"""Creates the fixed loss scale manager.
Args:
loss_scale: A Python float. Its ideal value varies from model to model. A
loss_scale that is too small might degrade model quality; one that is too
large might cause inf or nan gradients. There is no single right loss_scale
to apply; choosing a relatively large value is harmless as long as no nan
or inf is encountered in training.
Raises:
ValueError: If loss_scale is less than 1.
"""
if loss_scale < 1:
raise ValueError("loss scale must be at least 1.")
self._loss_scale = ops.convert_to_tensor(loss_scale, dtype=dtypes.float32)
def get_loss_scale(self):
return self._loss_scale
def update_loss_scale(self, finite_grads):
del finite_grads
return gen_control_flow_ops.no_op()
class ExponentialUpdateLossScaleManager(LossScaleManager):
"""Loss scale manager uses an exponential update strategy.
In general, the strategy increases loss scale by a greater-than-one factor
after encountering a consecutive series of steps with finite gradients;
Similarly, it decreases the loss scale by a factor when the accumulated number
of steps with non-finite (nan or inf) gradients are met. An update is not
applied if its result is less than 1 or overflows the float32 dynamic range.
The number of finite and non-finite steps are cleared every time the loss
scale is changed. The condition to decrease the loss scale is looser than to
increase it since the former does not require the steps to be consecutive.
"""
def __init__(self,
init_loss_scale,
incr_every_n_steps,
decr_every_n_nan_or_inf=2,
incr_ratio=2,
decr_ratio=0.8):
"""Constructor of exponential-update loss scale manager.
Args:
init_loss_scale: A Python float. The loss scale to use at the beginning.
incr_every_n_steps: Increases loss scale every n consecutive steps with
finite gradients.
decr_every_n_nan_or_inf: Decreases loss scale every n accumulated steps
with nan or inf gradients.
incr_ratio: The multiplier to use when increasing the loss scale.
decr_ratio: The less-than-one multiplier to use when decreasing the loss
scale.
"""
self._incr_every_n_steps = incr_every_n_steps
self._decr_every_n_nan_or_inf = decr_every_n_nan_or_inf
self._incr_ratio = incr_ratio
self._decr_ratio = decr_ratio
self._loss_scale = variable_scope.variable(
name="loss_scale",
initial_value=ops.convert_to_tensor(init_loss_scale, dtypes.float32),
dtype=dtypes.float32,
trainable=False)
self._num_good_steps = variable_scope.variable(
name="good_steps", initial_value=0, dtype=dtypes.int32, trainable=False)
self._num_bad_steps = variable_scope.variable(
name="bad_steps", initial_value=0, dtype=dtypes.int32, trainable=False)
def _reset_stats(self):
return control_flow_ops.group(
state_ops.assign(self._num_good_steps, 0),
state_ops.assign(self._num_bad_steps, 0))
def get_loss_scale(self):
"""Returns the loss scale."""
return self._loss_scale
def update_loss_scale(self, finite_grads):
"""Updates loss scale based on if gradients are finite in current step."""
def update_if_finite_grads():
"""Branch function when grads are all finite."""
def incr_loss_scale():
new_loss_scale = control_flow_ops.cond(
gen_math_ops.is_finite(self._loss_scale * self._incr_ratio),
lambda: self._loss_scale * self._incr_ratio,
lambda: self._loss_scale)
update_op = state_ops.assign(self._loss_scale, new_loss_scale)
# When loss_scale is updated, both good and bad steps are reset.
return control_flow_ops.group(update_op, self._reset_stats())
return control_flow_ops.cond(
self._num_good_steps + 1 >= self._incr_every_n_steps,
incr_loss_scale,
lambda: state_ops.assign_add(self._num_good_steps, 1).op)
def update_if_not_finite_grads():
"""Branch function when any grad is not finite."""
def decr_loss_scale():
update_op = state_ops.assign(
self._loss_scale,
gen_math_ops.maximum(1., self._loss_scale * self._decr_ratio))
# When loss_scale is updated, both good and bad steps are reset.
return control_flow_ops.group(update_op, self._reset_stats())
def just_update_steps():
# When bad_steps is incremented, good_step is reset.
return control_flow_ops.group(
state_ops.assign_add(self._num_bad_steps, 1),
state_ops.assign(self._num_good_steps, 0))
return control_flow_ops.cond(
self._num_bad_steps + 1 >= self._decr_every_n_nan_or_inf,
decr_loss_scale, just_update_steps)
return control_flow_ops.cond(finite_grads, update_if_finite_grads,
update_if_not_finite_grads)
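# A minimal, illustrative sketch of the exponential update behavior (assumes
# graph mode and substitutes a constant `True` for the boolean tensor that
# would normally come from checking the gradients); it mirrors the pattern
# exercised in loss_scale_manager_test.py and runs only when this file is
# executed directly.
if __name__ == "__main__":
  from tensorflow.python.client import session
  from tensorflow.python.framework import constant_op
  from tensorflow.python.ops import variables

  manager = ExponentialUpdateLossScaleManager(
      init_loss_scale=2**15, incr_every_n_steps=2, decr_every_n_nan_or_inf=1)
  update_op = manager.update_loss_scale(constant_op.constant(True))
  with session.Session() as sess:
    sess.run(variables.global_variables_initializer())
    for _ in range(4):
      sess.run(update_op)
    # Four consecutive finite steps with incr_every_n_steps=2 double the loss
    # scale twice: 2**15 -> 2**16 -> 2**17.
    print(sess.run(manager.get_loss_scale()))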
|
tensorflow-master
|
tensorflow/contrib/mixed_precision/python/loss_scale_manager.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LossScaleOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.mixed_precision.python import loss_scale_manager as lsm_lib
from tensorflow.contrib.mixed_precision.python import loss_scale_optimizer as lso
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent as gd
class LossScaleOptimizerTest(test.TestCase):
def _build_graph(self, lr, init_val, loss_scale_opt_fn=None):
x = variable_scope.get_variable(
"x", initializer=init_val, dtype=dtypes.float32)
c1 = constant_op.constant(1e4, dtype=dtypes.float16)
c2 = constant_op.constant(1e-4, dtype=dtypes.float16)
c3 = constant_op.constant(1e-4, dtype=dtypes.float16)
if context.executing_eagerly():
loss = lambda: math_ops.cast(x, dtypes.float16) * c1 * c2 * c3
else:
loss = math_ops.cast(x, dtypes.float16) * c1 * c2 * c3
opt = gd.GradientDescentOptimizer(lr)
if loss_scale_opt_fn:
opt = loss_scale_opt_fn(opt)
return x, loss, opt
@test_util.run_in_graph_and_eager_modes
def test_float16_underflow_without_loss_scale(self):
lr = 1
init_val = 1.
x, loss, opt = self._build_graph(lr, init_val)
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt.minimize(loss, var_list=[x]))
# Symbolic grad is c1 * c2 * c3 = 1e-4 and actual grad is 0, since in
# backprop, c2 * c3 underflows in fp16 range. So variable isn't updated.
expected_update = 0
symbolic_update = 1e-4 * lr
self.assertAllClose(
init_val - expected_update,
self.evaluate(x),
rtol=0,
atol=min(symbolic_update, 1e-6))
@test_util.run_in_graph_and_eager_modes
def test_float16_with_loss_scale(self):
lr = 1.
init_val = 1.
def loss_scale_opt_fn(opt):
return lso.LossScaleOptimizer(opt, lsm_lib.FixedLossScaleManager(1e4))
x, loss, opt = self._build_graph(lr, init_val, loss_scale_opt_fn)
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt.minimize(loss, var_list=[x]))
# Symbolic grad is c1 * c2 * c3 = 1e-4 and actual grad is the same, due to
# up-scaled loss before backprop starts.
expected_update = 1.e-4 * lr
self.assertAllClose(
init_val - expected_update,
self.evaluate(x),
rtol=0,
atol=min(expected_update, 1e-6))
@test_util.run_in_graph_and_eager_modes
def test_compute_gradients_with_loss_scale(self):
lr = 1
init_val = 1.
def loss_scale_opt_fn(opt):
return lso.LossScaleOptimizer(opt, lsm_lib.FixedLossScaleManager(1e4))
x, loss, opt = self._build_graph(lr, init_val, loss_scale_opt_fn)
grads_and_vars = opt.compute_gradients(loss, var_list=[x])
self.assertEqual(len(grads_and_vars), 1)
self.evaluate(variables.global_variables_initializer())
g_v = self.evaluate(grads_and_vars[0][0])
self.assertAllClose(g_v, 1e-4)
self.assertIs(grads_and_vars[0][1], x)
# Gradients aren't applied.
self.assertAllClose(init_val, self.evaluate(x), rtol=0, atol=1e-6)
@test_util.run_in_graph_and_eager_modes
def test_compute_gradients_without_loss_scale(self):
lr = 1
init_val = 1.
x, loss, opt = self._build_graph(lr, init_val)
grads_and_vars = opt.compute_gradients(loss, var_list=[x])
self.assertEqual(len(grads_and_vars), 1)
self.evaluate(variables.global_variables_initializer())
g_v = self.evaluate(grads_and_vars[0][0])
self.assertAllClose(g_v, 0)
@test_util.run_in_graph_and_eager_modes
def test_apply_gradients(self):
x = variable_scope.get_variable("x", initializer=1., dtype=dtypes.float32)
dataset = dataset_ops.Dataset.from_tensor_slices([np.nan, np.inf, 0.1])
itr = dataset_ops.make_one_shot_iterator(dataset)
lr = 1
opt = gd.GradientDescentOptimizer(lr)
lsm = lsm_lib.FixedLossScaleManager(1.e4)
opt = lso.LossScaleOptimizer(opt, lsm)
train_fn = lambda: opt.apply_gradients([(itr.get_next(), x)])
if not context.executing_eagerly():
train_op = train_fn()
expected_output = [1, 1, 1 - 0.1]
actual_output = []
self.evaluate(variables.global_variables_initializer())
for _ in range(3):
# nan or inf is not applied.
if context.executing_eagerly():
train_fn()
else:
self.evaluate(train_op)
actual_output.append(self.evaluate(x))
self.assertAllClose(expected_output, actual_output)
@test_util.run_in_graph_and_eager_modes
def test_apply_gradients_loss_scale_is_updated(self):
class SimpleLossScaleManager(lsm_lib.LossScaleManager):
"""A simple loss scale manager for easier testing.
It increments the loss scale by 1 if grads are finite, and decreases the
loss scale by 1 otherwise.
"""
def __init__(self, loss_scale):
self._loss_scale = variable_scope.variable(
name="loss_scale",
initial_value=loss_scale,
dtype=dtypes.float32,
trainable=False)
def get_loss_scale(self):
return self._loss_scale
def update_loss_scale(self, if_finite_grads):
return control_flow_ops.cond(
if_finite_grads, lambda: state_ops.assign_add(self._loss_scale, 1),
lambda: state_ops.assign_sub(self._loss_scale, 1))
x = variable_scope.get_variable("x", initializer=1., dtype=dtypes.float32)
dataset = dataset_ops.Dataset.from_tensor_slices([np.nan, np.inf, 0.1])
itr = dataset_ops.make_one_shot_iterator(dataset)
lr = 1
init_loss_scale = 8
opt = gd.GradientDescentOptimizer(lr)
lsm = SimpleLossScaleManager(init_loss_scale)
opt = lso.LossScaleOptimizer(opt, lsm)
train_fn = lambda: opt.apply_gradients([(itr.get_next(), x)])
if not context.executing_eagerly():
train_op = train_fn()
self.evaluate(variables.global_variables_initializer())
expected_loss_scale = [
init_loss_scale - 1, init_loss_scale - 2, init_loss_scale - 2 + 1
]
expected_output = [1, 1, 1 - 0.1]
actual_output = []
for i in range(3):
# nan or inf is not applied.
if context.executing_eagerly():
train_fn()
else:
self.evaluate(train_op)
actual_output.append(self.evaluate(x))
self.assertAllClose(expected_loss_scale[i],
self.evaluate(lsm._loss_scale))
self.assertAllClose(expected_output, actual_output)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/mixed_precision/python/loss_scale_optimizer_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loss scaling optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
class LossScaleOptimizer(optimizer.Optimizer):
# TODO(jamesqin): move mixed precision training explanation to __init__
# docstring.
"""An optimizer that applies loss scaling in backprop.
This class is useful for "mixed precision training" on GPUs (or other
potential accelerators), an approach to improve compute throughput without
compromising model quality.
The canonical way to perform mixed precision training is the following:
* Model variables are kept in high precision (e.g. float32).
* Computations are done in lower precision (e.g. float16), which enjoys a
performance speedup by virtue of hardware support. Variables are cast to the
lower precision before they are used.
* Final gradients are cast back to the high-precision dtype, then used to
update variables.
The side effect of performing computation in lower precision is a smaller
numerical range. During backpropagation, small gradients might underflow in
the reduced range, causing the model to converge at a suboptimal level.
To prevent underflow, this optimizer multiplies the loss by a factor before
backprop starts. Consequently, the gradients are linearly scaled up by the
same factor and thus do not fall into the underflow zone. After that, to
preserve the correctness of backprop, the gradients are down-scaled by the
same factor, cast to the (higher) variable precision, and then applied to the
variables.
See [Nvidia's manual on mixed precision training](
https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html)
for more details.
To use the loss scale optimizer, one only needs to choose a loss scale
strategy and wrap a regular optimizer. See the example below.
```
loss = loss_fn()
opt = tf.train.AdamOptimizer(learning_rate=...)
# Choose a loss scale manager which decides how to pick the right loss scale
# throughout the training process.
loss_scale_manager = tf.contrib.mixed_precision.FixedLossScaleManager(5000)
# Wraps the original optimizer in a LossScaleOptimizer.
loss_scale_optimizer = tf.contrib.mixed_precision.LossScaleOptimizer(
    opt, loss_scale_manager)
# Call minimize() on the loss scale optimizer.
train_op = loss_scale_optimizer.minimize(loss)
```
If gradient clipping is applied, one can call
`optimizer.compute_gradients()` and `optimizer.apply_gradients()`
separately (a minimal sketch of this pattern appears at the end of this
module).
Note that the following way of using LossScaleOptimizer is not intended. Always
use `loss_scale_optimizer.compute_gradients()` to compute gradients instead of
`tf.gradients()` when doing mixed precision training.
```
# The following is a wrong way to use LossScaleOptimizer along with
# tf.gradients().
# Always use loss_scale_optimizer.compute_gradients() to compute grads, or
# loss scale is not correctly applied.
grads = tf.gradients(loss, ...)
# Do some custom grad clipping.
grads = clip_grads(grads, ...)
loss_scale_optimizer.apply_gradients(grads_and_vars)
```
"""
def __init__(self, opt, loss_scale_manager):
"""Construct a loss scaling optimizer.
Args:
opt: The actual optimizer that will be used to compute and apply the
gradients. Must be an implementation of the
`tf.compat.v1.train.Optimizer` interface.
loss_scale_manager: A LossScaleManager object.
"""
self._opt = opt
self._loss_scale_manager = loss_scale_manager
def compute_gradients(self,
loss,
var_list=None,
gate_gradients=optimizer.Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
grad_loss=None):
"""Compute gradients. See base class `tf.compat.v1.train.Optimizer`."""
loss_scale = self._loss_scale_manager.get_loss_scale()
if context.executing_eagerly():
def scaled_loss():
loss_val = loss()
return loss_val * math_ops.cast(loss_scale, loss_val.dtype.base_dtype)
else:
if callable(loss):
loss_val = loss()
else:
loss_val = loss
scaled_loss = loss_val * math_ops.cast(loss_scale,
loss_val.dtype.base_dtype)
grads_and_vars = self._opt.compute_gradients(
scaled_loss,
var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss)
return self._down_scale(grads_and_vars, loss_scale)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients. See base class `tf.compat.v1.train.Optimizer`."""
grads = [g for (g, _) in grads_and_vars]
is_finite_grad = []
for g in grads:
is_finite_grad.append(math_ops.reduce_all(gen_math_ops.is_finite(g)))
is_overall_finite = math_ops.reduce_all(is_finite_grad)
# Only update gradients when all grads are finite.
def true_apply_gradients_fn():
return self._opt.apply_gradients(grads_and_vars, global_step, name)
update_vars = control_flow_ops.cond(is_overall_finite,
true_apply_gradients_fn,
gen_control_flow_ops.no_op)
# Let the loss scale manager update the loss scale based on whether all
# gradients were finite.
return control_flow_ops.group(
update_vars,
self._loss_scale_manager.update_loss_scale(is_overall_finite))
def _down_scale(self, grads_vars, loss_scale):
# Down scale grads by the loss_scale.
gv = []
inv_loss_scale = gen_math_ops.reciprocal(loss_scale)
for g, v in grads_vars:
if g is not None:
gv.append((g * math_ops.cast(inv_loss_scale, g.dtype.base_dtype), v))
else:
gv.append((g, v))
return gv
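# A minimal, illustrative sketch of the gradient-clipping pattern described in
# the LossScaleOptimizer docstring (assumes graph mode; the toy loss and the
# clipping threshold of 1.0 are arbitrary placeholders chosen for this
# sketch); it runs only when this file is executed directly.
if __name__ == "__main__":
  from tensorflow.contrib.mixed_precision.python import loss_scale_manager as lsm
  from tensorflow.python.framework import dtypes
  from tensorflow.python.ops import clip_ops
  from tensorflow.python.ops import variable_scope
  from tensorflow.python.training import gradient_descent

  x = variable_scope.get_variable("x", initializer=1., dtype=dtypes.float32)
  loss = math_ops.cast(x, dtypes.float16) * 1e-4  # Toy float16 loss.
  opt = LossScaleOptimizer(
      gradient_descent.GradientDescentOptimizer(1.0),
      lsm.FixedLossScaleManager(2**12))
  # compute_gradients() scales the loss up and the gradients back down, so the
  # values being clipped here are already in the true gradient scale.
  grads_and_vars = opt.compute_gradients(loss, var_list=[x])
  clipped = [(clip_ops.clip_by_norm(g, 1.0), v) for g, v in grads_and_vars]
  # Running train_op in a session (after initializing variables) would apply
  # the clipped update only when all gradients are finite.
  train_op = opt.apply_gradients(clipped)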
|
tensorflow-master
|
tensorflow/contrib/mixed_precision/python/loss_scale_optimizer.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""All-reduce implementations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,wildcard-import
from tensorflow.contrib.all_reduce.python.all_reduce import *
from tensorflow.python.util.all_util import remove_undocumented
# pylint: enable=unused-import,line-too-long,wildcard-import
_allowed_symbols = [
'build_ring_all_reduce',
'build_recursive_hd_all_reduce',
'build_shuffle_all_reduce',
'build_nccl_all_reduce',
'build_nccl_then_ring',
'build_nccl_then_recursive_hd',
'build_nccl_then_shuffle',
'build_shuffle_then_ring',
'build_shuffle_then_shuffle'
]
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/all_reduce/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to construct a TF subgraph implementing distributed All-Reduce."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.python.distribute.all_reduce import *
|
tensorflow-master
|
tensorflow/contrib/all_reduce/python/all_reduce.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for representing Bayesian computation.
## This package provides classes for Bayesian computation with TensorFlow.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,line-too-long
from tensorflow.contrib.solvers.python.ops import lanczos
from tensorflow.contrib.solvers.python.ops import least_squares
from tensorflow.contrib.solvers.python.ops import linear_equations
from tensorflow.contrib.solvers.python.ops import util
|
tensorflow-master
|
tensorflow/contrib/solvers/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ops module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/solvers/python/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import tf2
from tensorflow.contrib.solvers.python.ops import lanczos
from tensorflow.contrib.solvers.python.ops import util
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test as test_lib
def _add_test(test, test_name, fn):
test_name = "_".join(["test", test_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, fn)
class LanczosBidiagTest(test_lib.TestCase):
pass # Filled in below.
def _get_lanczos_tests(dtype_, use_static_shape_, shape_, orthogonalize_,
steps_):
def test_lanczos_bidiag(self):
np.random.seed(1)
a_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
tol = 1e-12 if dtype_ == np.float64 else 1e-5
with self.cached_session() as sess:
if use_static_shape_:
a = constant_op.constant(a_np)
else:
a = array_ops.placeholder(dtype_)
operator = util.create_operator(a)
lbd = lanczos.lanczos_bidiag(
operator, steps_, orthogonalize=orthogonalize_)
# The computed factorization should satisfy the equations
# A * V = U * B
# A' * U[:, :-1] = V * B[:-1, :]'
av = math_ops.matmul(a, lbd.v)
ub = lanczos.bidiag_matmul(lbd.u, lbd.alpha, lbd.beta, adjoint_b=False)
atu = math_ops.matmul(a, lbd.u[:, :-1], adjoint_a=True)
vbt = lanczos.bidiag_matmul(lbd.v, lbd.alpha, lbd.beta, adjoint_b=True)
if use_static_shape_:
av_val, ub_val, atu_val, vbt_val = sess.run([av, ub, atu, vbt])
else:
av_val, ub_val, atu_val, vbt_val = sess.run([av, ub, atu, vbt],
feed_dict={a: a_np})
self.assertAllClose(av_val, ub_val, atol=tol, rtol=tol)
self.assertAllClose(atu_val, vbt_val, atol=tol, rtol=tol)
return [test_lanczos_bidiag]
if __name__ == "__main__":
for dtype in np.float32, np.float64:
for shape in [[4, 4], [7, 4], [5, 8]]:
for orthogonalize in True, False:
for steps in range(1, min(shape) + 1):
# TF2 does not support placeholders so we skip it
for use_static_shape in set([True, tf2.enabled()]):
arg_string = "%s_%s_%s_%s_staticshape_%s" % (
dtype.__name__, "_".join(map(str, shape)), orthogonalize, steps,
use_static_shape)
for test_fn in _get_lanczos_tests(dtype, use_static_shape, shape,
orthogonalize, steps):
name = "_".join(["Lanczos", test_fn.__name__, arg_string])
_add_test(LanczosBidiagTest, name, test_fn)
test_lib.main()
|
tensorflow-master
|
tensorflow/contrib/solvers/python/kernel_tests/lanczos_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import tf2
from tensorflow.contrib.solvers.python.ops import linear_equations
from tensorflow.contrib.solvers.python.ops import util
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test as test_lib
def _add_test(test, test_name, fn):
test_name = "_".join(["test", test_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, fn)
class LinearEquationsTest(test_lib.TestCase):
pass # Filled in below.
def _get_linear_equations_tests(dtype_, use_static_shape_, shape_):
def test_conjugate_gradient(self):
np.random.seed(1)
a_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
# Make a self-adjoint, positive-definite matrix.
a_np = np.dot(a_np.T, a_np)
# jacobi preconditioner
jacobi_np = np.zeros_like(a_np)
jacobi_np[range(a_np.shape[0]), range(a_np.shape[1])] = (
1.0 / a_np.diagonal())
rhs_np = np.random.uniform(
low=-1.0, high=1.0, size=shape_[0]).astype(dtype_)
x_np = np.zeros_like(rhs_np)
tol = 1e-6 if dtype_ == np.float64 else 1e-3
max_iter = 20
with self.cached_session() as sess:
if use_static_shape_:
a = constant_op.constant(a_np)
rhs = constant_op.constant(rhs_np)
x = constant_op.constant(x_np)
jacobi = constant_op.constant(jacobi_np)
else:
a = array_ops.placeholder(dtype_)
rhs = array_ops.placeholder(dtype_)
x = array_ops.placeholder(dtype_)
jacobi = array_ops.placeholder(dtype_)
operator = util.create_operator(a)
preconditioners = [
None, util.identity_operator(a),
util.create_operator(jacobi)
]
cg_results = []
for preconditioner in preconditioners:
cg_graph = linear_equations.conjugate_gradient(
operator,
rhs,
preconditioner=preconditioner,
x=x,
tol=tol,
max_iter=max_iter)
if use_static_shape_:
cg_val = sess.run(cg_graph)
else:
cg_val = sess.run(
cg_graph,
feed_dict={
a: a_np,
rhs: rhs_np,
x: x_np,
jacobi: jacobi_np
})
norm_r0 = np.linalg.norm(rhs_np)
norm_r = np.linalg.norm(cg_val.r)
self.assertLessEqual(norm_r, tol * norm_r0)
# Validate that we get an equally small residual norm with numpy
# using the computed solution.
r_np = rhs_np - np.dot(a_np, cg_val.x)
norm_r_np = np.linalg.norm(r_np)
self.assertLessEqual(norm_r_np, tol * norm_r0)
cg_results.append(cg_val)
# Validate that we get same results using identity_preconditioner
# and None
self.assertEqual(cg_results[0].i, cg_results[1].i)
self.assertAlmostEqual(cg_results[0].gamma, cg_results[1].gamma)
self.assertAllClose(cg_results[0].r, cg_results[1].r, rtol=tol)
self.assertAllClose(cg_results[0].x, cg_results[1].x, rtol=tol)
self.assertAllClose(cg_results[0].p, cg_results[1].p, rtol=tol)
return [test_conjugate_gradient]
if __name__ == "__main__":
for dtype in np.float32, np.float64:
for size in 1, 4, 10:
# TF2 does not support placeholders under eager so we skip it
for use_static_shape in set([True, tf2.enabled()]):
shape = [size, size]
arg_string = "%s_%s_staticshape_%s" % (dtype.__name__, size,
use_static_shape)
for test_fn in _get_linear_equations_tests(dtype, use_static_shape,
shape):
name = "_".join(["LinearEquations", test_fn.__name__, arg_string])
_add_test(LinearEquationsTest, name, test_fn)
test_lib.main()
|
tensorflow-master
|
tensorflow/contrib/solvers/python/kernel_tests/linear_equations_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.solvers.python.ops import util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class UtilTest(test.TestCase):
def _testCreateOperator(self, use_static_shape_):
for dtype in np.float32, np.float64:
a_np = np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=dtype)
x_np = np.array([[2.], [-3.]], dtype=dtype)
y_np = np.array([[2], [-3.], [5.]], dtype=dtype)
with self.cached_session() as sess:
if use_static_shape_:
a = constant_op.constant(a_np, dtype=dtype)
x = constant_op.constant(x_np, dtype=dtype)
y = constant_op.constant(y_np, dtype=dtype)
else:
a = array_ops.placeholder(dtype)
x = array_ops.placeholder(dtype)
y = array_ops.placeholder(dtype)
op = util.create_operator(a)
ax = op.apply(x)
aty = op.apply_adjoint(y)
op_shape = ops.convert_to_tensor(op.shape)
if use_static_shape_:
op_shape_val, ax_val, aty_val = sess.run([op_shape, ax, aty])
else:
op_shape_val, ax_val, aty_val = sess.run(
[op_shape, ax, aty], feed_dict={a: a_np,
x: x_np,
y: y_np})
self.assertAllEqual(op_shape_val, [3, 2])
self.assertAllClose(ax_val, np.dot(a_np, x_np))
self.assertAllClose(aty_val, np.dot(a_np.T, y_np))
def testCreateOperator(self):
self._testCreateOperator(True)
def testCreateOperatorUnknownShape(self):
self._testCreateOperator(False)
def _testIdentityOperator(self, use_static_shape_):
for dtype in np.float32, np.float64:
a_np = np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=dtype)
x_np = np.array([[2.], [-3.]], dtype=dtype)
y_np = np.array([[2], [-3.], [5.]], dtype=dtype)
with self.cached_session() as sess:
if use_static_shape_:
a = constant_op.constant(a_np, dtype=dtype)
x = constant_op.constant(x_np, dtype=dtype)
y = constant_op.constant(y_np, dtype=dtype)
else:
a = array_ops.placeholder(dtype)
x = array_ops.placeholder(dtype)
y = array_ops.placeholder(dtype)
id_op = util.identity_operator(a)
ax = id_op.apply(x)
aty = id_op.apply_adjoint(y)
op_shape = ops.convert_to_tensor(id_op.shape)
if use_static_shape_:
op_shape_val, ax_val, aty_val = sess.run([op_shape, ax, aty])
else:
op_shape_val, ax_val, aty_val = sess.run(
[op_shape, ax, aty], feed_dict={
a: a_np,
x: x_np,
y: y_np
})
self.assertAllEqual(op_shape_val, [3, 2])
self.assertAllClose(ax_val, x_np)
self.assertAllClose(aty_val, y_np)
def testIdentityOperator(self):
self._testIdentityOperator(True)
def testIdentityOperatorUnknownShape(self):
self._testIdentityOperator(False)
def testL2Norm(self):
with self.cached_session():
x_np = np.array([[2], [-3.], [5.]])
x_norm_np = np.linalg.norm(x_np)
x_normalized_np = x_np / x_norm_np
x = constant_op.constant(x_np)
l2norm = util.l2norm(x)
l2norm_squared = util.l2norm_squared(x)
x_normalized, x_norm = util.l2normalize(x)
self.assertAllClose(l2norm.eval(), x_norm_np)
self.assertAllClose(l2norm_squared.eval(), np.square(x_norm_np))
self.assertAllClose(x_norm.eval(), x_norm_np)
self.assertAllClose(x_normalized.eval(), x_normalized_np)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/solvers/python/kernel_tests/util_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import tf2
from tensorflow.contrib.solvers.python.ops import least_squares
from tensorflow.contrib.solvers.python.ops import util
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test as test_lib
def _add_test(test, test_name, fn):
test_name = "_".join(["test", test_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, fn)
class LeastSquaresTest(test_lib.TestCase):
pass # Filled in below.
def _get_least_squares_tests(dtype_, use_static_shape_, shape_):
def test_cgls(self):
np.random.seed(1)
a_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
rhs_np = np.random.uniform(
low=-1.0, high=1.0, size=shape_[0]).astype(dtype_)
tol = 1e-12 if dtype_ == np.float64 else 1e-6
max_iter = 20
with self.cached_session() as sess:
if use_static_shape_:
a = constant_op.constant(a_np)
rhs = constant_op.constant(rhs_np)
else:
a = array_ops.placeholder(dtype_)
rhs = array_ops.placeholder(dtype_)
operator = util.create_operator(a)
cgls_graph = least_squares.cgls(operator, rhs, tol=tol, max_iter=max_iter)
if use_static_shape_:
cgls_val = sess.run(cgls_graph)
else:
cgls_val = sess.run(cgls_graph, feed_dict={a: a_np, rhs: rhs_np})
# Below we use s = A^* (rhs - A x), s0 = A^* rhs
norm_s0 = np.linalg.norm(np.dot(a_np.T, rhs_np))
norm_s = np.sqrt(cgls_val.gamma)
self.assertLessEqual(norm_s, tol * norm_s0)
# Validate that we get an equally small residual norm with numpy
# using the computed solution.
r_np = rhs_np - np.dot(a_np, cgls_val.x)
norm_s_np = np.linalg.norm(np.dot(a_np.T, r_np))
self.assertLessEqual(norm_s_np, tol * norm_s0)
return [test_cgls]
if __name__ == "__main__":
for dtype in np.float32, np.float64:
for shape in [[4, 4], [8, 5], [3, 7]]:
# TF2 does not support placeholders under eager so we skip it
for use_static_shape in set([True, tf2.enabled()]):
arg_string = "%s_%s_staticshape_%s" % (dtype.__name__,
"_".join(map(str, shape)),
use_static_shape)
for test_fn in _get_least_squares_tests(dtype, use_static_shape, shape):
name = "_".join(["LeastSquares", test_fn.__name__, arg_string])
_add_test(LeastSquaresTest, name, test_fn)
test_lib.main()
|
tensorflow-master
|
tensorflow/contrib/solvers/python/kernel_tests/least_squares_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Solvers for linear equations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.solvers.python.ops import util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
def conjugate_gradient(operator,
rhs,
preconditioner=None,
x=None,
tol=1e-4,
max_iter=20,
name="conjugate_gradient"):
r"""Conjugate gradient solver.
Solves a linear system of equations `A*x = rhs` for selfadjoint, positive
definite matrix `A` and right-hand side vector `rhs`, using an iterative,
matrix-free algorithm where the action of the matrix A is represented by
`operator`. The iteration terminates when either the number of iterations
exceeds `max_iter` or when the residual norm has been reduced to `tol`
times its initial value, i.e. \\(||rhs - A x_k|| <= tol ||rhs||\\).
Args:
operator: An object representing a linear operator with attributes:
- shape: Either a list of integers or a 1-D `Tensor` of type `int32` of
length 2. `shape[0]` is the dimension of the domain of the operator,
`shape[1]` is the dimension of the co-domain of the operator. In other
words, if `operator` represents an N x N matrix A, `shape` must contain
`[N, N]`.
- dtype: The datatype of input to and output from `apply`.
- apply: Callable object taking a vector `x` as input and returning a
vector with the result of applying the operator to `x`, i.e. if
`operator` represents matrix `A`, `apply` should return `A * x`.
rhs: A rank-1 `Tensor` of shape `[N]` containing the right-hand side vector.
preconditioner: An object representing a linear operator, see `operator`
for detail. The preconditioner should approximate the inverse of `A`.
An efficient preconditioner could dramatically improve the rate of
convergence. If `preconditioner` represents matrix `M` (`M` approximates
`A^{-1}`), the algorithm uses `preconditioner.apply(x)` to estimate
`A^{-1}x`. For this to be useful, the cost of applying `M` should be
much lower than computing `A^{-1}` directly.
x: A rank-1 `Tensor` of shape `[N]` containing the initial guess for the
solution.
tol: A float scalar convergence tolerance.
max_iter: An integer giving the maximum number of iterations.
name: A name scope for the operation.
Returns:
output: A namedtuple representing the final state with fields:
- i: A scalar `int32` `Tensor`. Number of iterations executed.
- x: A rank-1 `Tensor` of shape `[N]` containing the computed solution.
- r: A rank-1 `Tensor` of shape `[M]` containing the residual vector.
- p: A rank-1 `Tensor` of shape `[N]`. `A`-conjugate basis vector.
- gamma: \\(r \cdot M \cdot r\\), equivalent to \\(||r||_2^2\\) when
`preconditioner=None`.
"""
# ephemeral class holding CG state.
cg_state = collections.namedtuple("CGState", ["i", "x", "r", "p", "gamma"])
def stopping_criterion(i, state):
return math_ops.logical_and(i < max_iter, linalg_ops.norm(state.r) > tol)
def cg_step(i, state): # pylint: disable=missing-docstring
z = operator.apply(state.p)
alpha = state.gamma / util.dot(state.p, z)
x = state.x + alpha * state.p
r = state.r - alpha * z
if preconditioner is None:
gamma = util.dot(r, r)
beta = gamma / state.gamma
p = r + beta * state.p
else:
q = preconditioner.apply(r)
gamma = util.dot(r, q)
beta = gamma / state.gamma
p = q + beta * state.p
return i + 1, cg_state(i + 1, x, r, p, gamma)
with ops.name_scope(name):
n = operator.shape[1:]
rhs = array_ops.expand_dims(rhs, -1)
if x is None:
x = array_ops.expand_dims(
array_ops.zeros(n, dtype=rhs.dtype.base_dtype), -1)
r0 = rhs
else:
x = array_ops.expand_dims(x, -1)
r0 = rhs - operator.apply(x)
if preconditioner is None:
p0 = r0
else:
p0 = preconditioner.apply(r0)
gamma0 = util.dot(r0, p0)
tol *= linalg_ops.norm(r0)
i = constant_op.constant(0, dtype=dtypes.int32)
state = cg_state(i=i, x=x, r=r0, p=p0, gamma=gamma0)
_, state = control_flow_ops.while_loop(stopping_criterion, cg_step,
[i, state])
return cg_state(
state.i,
x=array_ops.squeeze(state.x),
r=array_ops.squeeze(state.r),
p=array_ops.squeeze(state.p),
gamma=state.gamma)
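# A minimal usage sketch, assuming a TF 1.x graph/session environment; the
# 2x2 SPD matrix and right-hand side below are made up for illustration. It
# wraps a dense matrix with `util.create_operator` and solves A*x = rhs with
# `conjugate_gradient` defined above.
if __name__ == "__main__":
  import numpy as np
  from tensorflow.python.client import session as session_lib
  a_np = np.array([[4., 1.], [1., 3.]], dtype=np.float32)  # SPD matrix.
  rhs_np = np.array([1., 2.], dtype=np.float32)
  operator = util.create_operator(constant_op.constant(a_np))
  cg = conjugate_gradient(
      operator, constant_op.constant(rhs_np), tol=1e-6, max_iter=50)
  with session_lib.Session() as sess:
    result = sess.run(cg)
    # result.x should be close to np.linalg.solve(a_np, rhs_np).
    print("iterations:", result.i, "solution:", result.x)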
|
tensorflow-master
|
tensorflow/contrib/solvers/python/ops/linear_equations.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Solvers for linear least-squares."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.solvers.python.ops import util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
def cgls(operator, rhs, tol=1e-6, max_iter=20, name="cgls"):
r"""Conjugate gradient least squares solver.
Solves a linear least squares problem \\(||A x - rhs||_2\\) for a single
right-hand side, using an iterative, matrix-free algorithm where the action of
the matrix A is represented by `operator`. The CGLS algorithm implicitly
applies the symmetric conjugate gradient algorithm to the normal equations
\\(A^* A x = A^* rhs\\). The iteration terminates when either
the number of iterations exceeds `max_iter` or when the norm of the conjugate
  residual (residual of the normal equations) has been reduced to `tol` times
  its initial value, i.e.
\\(||A^* (rhs - A x_k)|| <= tol ||A^* rhs||\\).
Args:
operator: An object representing a linear operator with attributes:
- shape: Either a list of integers or a 1-D `Tensor` of type `int32` of
        length 2. `shape[0]` is the dimension of the co-domain of the
        operator and `shape[1]` is the dimension of its domain. In other
        words, if `operator` represents an M x N matrix A, `shape` must
        contain `[M, N]`.
- dtype: The datatype of input to and output from `apply` and
`apply_adjoint`.
- apply: Callable object taking a vector `x` as input and returning a
vector with the result of applying the operator to `x`, i.e. if
`operator` represents matrix `A`, `apply` should return `A * x`.
- apply_adjoint: Callable object taking a vector `x` as input and
returning a vector with the result of applying the adjoint operator
to `x`, i.e. if `operator` represents matrix `A`, `apply_adjoint` should
return `conj(transpose(A)) * x`.
    rhs: A rank-1 `Tensor` of shape `[M]` containing the right-hand side vector.
tol: A float scalar convergence tolerance.
max_iter: An integer giving the maximum number of iterations.
name: A name scope for the operation.
Returns:
output: A namedtuple representing the final state with fields:
- i: A scalar `int32` `Tensor`. Number of iterations executed.
- x: A rank-1 `Tensor` of shape `[N]` containing the computed solution.
- r: A rank-1 `Tensor` of shape `[M]` containing the residual vector.
- p: A rank-1 `Tensor` of shape `[N]`. The next descent direction.
- gamma: \\(||A^* r||_2^2\\)
"""
# ephemeral class holding CGLS state.
cgls_state = collections.namedtuple("CGLSState",
["i", "x", "r", "p", "gamma"])
def stopping_criterion(i, state):
return math_ops.logical_and(i < max_iter, state.gamma > tol)
# TODO(rmlarsen): add preconditioning
def cgls_step(i, state):
q = operator.apply(state.p)
alpha = state.gamma / util.l2norm_squared(q)
x = state.x + alpha * state.p
r = state.r - alpha * q
s = operator.apply_adjoint(r)
gamma = util.l2norm_squared(s)
beta = gamma / state.gamma
p = s + beta * state.p
return i + 1, cgls_state(i + 1, x, r, p, gamma)
with ops.name_scope(name):
n = operator.shape[1:]
rhs = array_ops.expand_dims(rhs, -1)
s0 = operator.apply_adjoint(rhs)
gamma0 = util.l2norm_squared(s0)
tol = tol * tol * gamma0
x = array_ops.expand_dims(
array_ops.zeros(
n, dtype=rhs.dtype.base_dtype), -1)
i = constant_op.constant(0, dtype=dtypes.int32)
state = cgls_state(i=i, x=x, r=rhs, p=s0, gamma=gamma0)
_, state = control_flow_ops.while_loop(stopping_criterion, cgls_step,
[i, state])
return cgls_state(
state.i,
x=array_ops.squeeze(state.x),
r=array_ops.squeeze(state.r),
p=array_ops.squeeze(state.p),
gamma=state.gamma)
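# A minimal usage sketch, assuming a TF 1.x graph/session environment; the
# 3x2 matrix and right-hand side below are made up for illustration. It wraps
# a dense matrix with `util.create_operator` and minimizes ||A*x - rhs||_2
# with `cgls` defined above.
if __name__ == "__main__":
  import numpy as np
  from tensorflow.python.client import session as session_lib
  a_np = np.array([[1., 0.], [0., 1.], [1., 1.]], dtype=np.float32)
  rhs_np = np.array([1., 2., 2.], dtype=np.float32)
  operator = util.create_operator(constant_op.constant(a_np))
  lsq = cgls(operator, constant_op.constant(rhs_np), tol=1e-6, max_iter=50)
  with session_lib.Session() as sess:
    result = sess.run(lsq)
    # result.x should be close to np.linalg.lstsq(a_np, rhs_np, rcond=None)[0].
    print("iterations:", result.i, "solution:", result.x)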
|
tensorflow-master
|
tensorflow/contrib/solvers/python/ops/least_squares.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for solvers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
def create_operator(matrix):
"""Creates a linear operator from a rank-2 tensor."""
linear_operator = collections.namedtuple(
"LinearOperator", ["shape", "dtype", "apply", "apply_adjoint"])
# TODO(rmlarsen): Handle SparseTensor.
shape = matrix.get_shape()
if shape.is_fully_defined():
shape = shape.as_list()
else:
shape = array_ops.shape(matrix)
return linear_operator(
shape=shape,
dtype=matrix.dtype,
apply=lambda v: math_ops.matmul(matrix, v, adjoint_a=False),
apply_adjoint=lambda v: math_ops.matmul(matrix, v, adjoint_a=True))
def identity_operator(matrix):
"""Creates a linear operator from a rank-2 identity tensor."""
linear_operator = collections.namedtuple(
"LinearOperator", ["shape", "dtype", "apply", "apply_adjoint"])
shape = matrix.get_shape()
if shape.is_fully_defined():
shape = shape.as_list()
else:
shape = array_ops.shape(matrix)
return linear_operator(
shape=shape,
dtype=matrix.dtype,
apply=lambda v: v,
apply_adjoint=lambda v: v)
# TODO(rmlarsen): Measure if we should just call matmul.
def dot(x, y):
return math_ops.reduce_sum(math_ops.conj(x) * y)
# TODO(rmlarsen): Implement matrix/vector norm op in C++ in core.
# We need 1-norm, inf-norm, and Frobenius norm.
def l2norm_squared(v):
return constant_op.constant(2, dtype=v.dtype.base_dtype) * nn_ops.l2_loss(v)
def l2norm(v):
return math_ops.sqrt(l2norm_squared(v))
def l2normalize(v):
norm = l2norm(v)
return v / norm, norm
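# A minimal usage sketch, assuming a TF 1.x graph/session environment; the
# 2x2 matrix and vector below are made up for illustration. It shows what
# `create_operator` and the norm helpers above compute.
if __name__ == "__main__":
  from tensorflow.python.client import session as session_lib
  matrix = constant_op.constant([[1., 2.], [3., 4.]])
  vec = constant_op.constant([[1.], [1.]])
  op = create_operator(matrix)
  with session_lib.Session() as sess:
    print(sess.run(op.apply(vec)))          # A * v   -> [[3.], [7.]]
    print(sess.run(op.apply_adjoint(vec)))  # A^H * v -> [[4.], [6.]]
    print(sess.run(dot(vec, vec)))          # <v, v>  -> 2.0
    print(sess.run(l2norm(vec)))            # ||v||_2 -> ~1.4142
    _, vec_norm = l2normalize(vec)
    print(sess.run(vec_norm))               # same value as l2norm(vec)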
|
tensorflow-master
|
tensorflow/contrib/solvers/python/ops/util.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lanczos algorithms."""
# TODO(rmlarsen): Add implementation of symmetric Lanczos algorithm.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.solvers.python.ops import util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
def lanczos_bidiag(operator,
k,
orthogonalize=True,
starting_vector=None,
name="lanczos_bidiag"):
"""Computes a Lanczos bidiagonalization for a linear operator.
Computes matrices `U` of shape `[m, k+1]`, `V` of shape `[n, k]` and lower
bidiagonal matrix `B` of shape `[k+1, k]`, that satisfy the equations
`A * V = U * B` and `A' * U[:, :-1] = V * B[:-1, :]'`.
The columns of `U` are orthonormal and form a basis for the Krylov subspace
`K(A*A', U[:,0])`.
The columns of `V` are orthonormal and form a basis for the Krylov subspace
`K(A'*A, A' U[:,0])`.
Args:
operator: An object representing a linear operator with attributes:
- shape: Either a list of integers or a 1-D `Tensor` of type `int32` of
        length 2. `shape[0]` is the dimension of the co-domain of the
        operator and `shape[1]` is the dimension of its domain. In other
        words, if `operator` represents an M x N matrix A, `shape` must
        contain `[M, N]`.
- dtype: The datatype of input to and output from `apply` and
`apply_adjoint`.
- apply: Callable object taking a vector `x` as input and returning a
vector with the result of applying the operator to `x`, i.e. if
`operator` represents matrix `A`, `apply` should return `A * x`.
- apply_adjoint: Callable object taking a vector `x` as input and
returning a vector with the result of applying the adjoint operator
to `x`, i.e. if `operator` represents matrix `A`, `apply_adjoint` should
return `conj(transpose(A)) * x`.
k: An integer or a scalar Tensor of type `int32`. Determines the maximum
number of steps to run. If an invariant subspace is found, the algorithm
may terminate before `k` steps have been run.
orthogonalize: If `True`, perform full orthogonalization. If `False` no
orthogonalization is performed.
    starting_vector: If not None, must be a `Tensor` of shape `[m]`.
name: A name scope for the operation.
Returns:
output: A namedtuple representing a Lanczos bidiagonalization of
`operator` with attributes:
u: A rank-2 `Tensor` of type `operator.dtype` and shape
`[operator.shape[0], k_actual+1]`, where `k_actual` is the number of
steps run.
v: A rank-2 `Tensor` of type `operator.dtype` and shape
`[operator.shape[1], k_actual]`, where `k_actual` is the number of steps
run.
alpha: A rank-1 `Tensor` of type `operator.dtype` and shape `[k]`.
beta: A rank-1 `Tensor` of type `operator.dtype` and shape `[k]`.
"""
def tarray(size, dtype, name):
return tensor_array_ops.TensorArray(
dtype=dtype, size=size, tensor_array_name=name, clear_after_read=False)
# Reads a row-vector at location i in tarray and returns it as a
# column-vector.
def read_colvec(tarray, i):
return array_ops.expand_dims(tarray.read(i), -1)
  # Writes a column-vector as a row-vector at location i in tarray.
def write_colvec(tarray, colvec, i):
return tarray.write(i, array_ops.squeeze(colvec))
# Ephemeral class holding Lanczos bidiagonalization state:
# u = left Lanczos vectors
# v = right Lanczos vectors
# alpha = diagonal of B_k.
# beta = subdiagonal of B_k.
# Notice that we store the left and right Lanczos vectors as the _rows_
# of u and v. This is done because tensors are stored row-major and
# TensorArray only supports packing along dimension 0.
lanzcos_bidiag_state = collections.namedtuple("LanczosBidiagState",
["u", "v", "alpha", "beta"])
def update_state(old, i, u, v, alpha, beta):
return lanzcos_bidiag_state(
write_colvec(old.u, u, i + 1),
write_colvec(old.v, v, i),
old.alpha.write(i, alpha), old.beta.write(i, beta))
def gram_schmidt_step(j, basis, v):
"""Makes v orthogonal to the j'th vector in basis."""
v_shape = v.get_shape()
basis_vec = read_colvec(basis, j)
v -= math_ops.matmul(basis_vec, v, adjoint_a=True) * basis_vec
v.set_shape(v_shape)
return j + 1, basis, v
def orthogonalize_once(i, basis, v):
j = constant_op.constant(0, dtype=dtypes.int32)
_, _, v = control_flow_ops.while_loop(lambda j, basis, v: j < i,
gram_schmidt_step, [j, basis, v])
return util.l2normalize(v)
# Iterated modified Gram-Schmidt orthogonalization adapted from PROPACK.
# TODO(rmlarsen): This is possibly the slowest implementation of
# iterated Gram-Schmidt orthogonalization since the abacus. Move to C++.
def orthogonalize_(i, basis, v):
v_norm = util.l2norm(v)
v_new, v_new_norm = orthogonalize_once(i, basis, v)
# If the norm decreases more than 1/sqrt(2), run a second
# round of MGS. See proof in:
# B. N. Parlett, ``The Symmetric Eigenvalue Problem'',
# Prentice-Hall, Englewood Cliffs, NJ, 1980. pp. 105-109
return control_flow_ops.cond(v_new_norm < 0.7071 * v_norm,
lambda: orthogonalize_once(i, basis, v),
lambda: (v_new, v_new_norm))
def stopping_criterion(i, _):
# TODO(rmlarsen): Stop if an invariant subspace is detected.
return i < k
def lanczos_bidiag_step(i, ls):
"""Extends the Lanczos bidiagonalization ls by one step."""
u = read_colvec(ls.u, i)
r = operator.apply_adjoint(u)
# The shape inference doesn't work across cond, save and reapply the shape.
r_shape = r.get_shape()
r = control_flow_ops.cond(
i > 0, lambda: r - ls.beta.read(i - 1) * read_colvec(ls.v, i - 1),
lambda: r)
r.set_shape(r_shape)
if orthogonalize:
v, alpha = orthogonalize_(i - 1, ls.v, r)
else:
v, alpha = util.l2normalize(r)
p = operator.apply(v) - alpha * u
if orthogonalize:
u, beta = orthogonalize_(i, ls.u, p)
else:
u, beta = util.l2normalize(p)
return i + 1, update_state(ls, i, u, v, alpha, beta)
with ops.name_scope(name):
dtype = operator.dtype
if starting_vector is None:
starting_vector = random_ops.random_uniform(
operator.shape[:1], -1, 1, dtype=dtype)
u0, _ = util.l2normalize(starting_vector)
ls = lanzcos_bidiag_state(
u=write_colvec(tarray(k + 1, dtype, "u"), u0, 0),
v=tarray(k, dtype, "v"),
alpha=tarray(k, dtype, "alpha"),
beta=tarray(k, dtype, "beta"))
i = constant_op.constant(0, dtype=dtypes.int32)
_, ls = control_flow_ops.while_loop(stopping_criterion, lanczos_bidiag_step,
[i, ls])
return lanzcos_bidiag_state(
array_ops.matrix_transpose(ls.u.stack()),
array_ops.matrix_transpose(ls.v.stack()),
ls.alpha.stack(), ls.beta.stack())
# TODO(rmlarsen): Implement C++ ops for handling bidiagonal matrices
# efficiently. Such a module should provide
# - multiplication,
# - linear system solution by back-substitution,
# - QR factorization,
# - SVD.
def bidiag_matmul(matrix, alpha, beta, adjoint_b=False, name="bidiag_matmul"):
"""Multiplies a matrix by a bidiagonal matrix.
  alpha and beta are length k vectors representing the diagonal and first lower
  subdiagonal of the (k+1) x k matrix B.
If adjoint_b is False, computes A * B as follows:
A * B = A[:, :-1] * diag(alpha) + A[:, 1:] * diag(beta)
  If adjoint_b is True, computes A * B[:-1, :]' as follows:
A * B[:-1, :]' =
A * diag(alpha) + [zeros(m,1), A[:, :-1] * diag(beta[:-1])]
Args:
matrix: A rank-2 `Tensor` representing matrix A.
alpha: A rank-1 `Tensor` representing the diagonal of B.
    beta: A rank-1 `Tensor` representing the first lower subdiagonal of B.
adjoint_b: `bool` determining what to compute.
name: A name scope for the operation.
Returns:
    If `adjoint_b` is False, `A * B` is returned.
    If `adjoint_b` is True, `A * B[:-1, :]'` is returned.
"""
with ops.name_scope(name):
alpha = array_ops.expand_dims(alpha, 0)
if adjoint_b is False:
beta = array_ops.expand_dims(beta, 0)
return matrix[:, :-1] * alpha + matrix[:, 1:] * beta
else:
beta = array_ops.expand_dims(beta[:-1], 0)
shape = array_ops.shape(matrix)
zero_column = array_ops.expand_dims(
array_ops.zeros(
shape[:1], dtype=matrix.dtype), 1)
return matrix * alpha + array_ops.concat(
[zero_column, matrix[:, :-1] * beta], 1)
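# A minimal usage sketch, assuming a TF 1.x graph/session environment; the
# 6x4 random matrix and k=4 below are made up for illustration. It runs
# `lanczos_bidiag` on a dense operator and uses `bidiag_matmul` to check the
# defining relation A * V ~= U * B up to floating point error.
if __name__ == "__main__":
  import numpy as np
  from tensorflow.python.client import session as session_lib
  np.random.seed(0)
  a_np = np.random.randn(6, 4).astype(np.float32)
  a = constant_op.constant(a_np)
  lbd = lanczos_bidiag(util.create_operator(a), k=4, orthogonalize=True)
  # U * B, where B is the (k+1) x k lower bidiagonal matrix given by
  # (alpha, beta).
  u_times_b = bidiag_matmul(lbd.u, lbd.alpha, lbd.beta, adjoint_b=False)
  a_times_v = math_ops.matmul(a, lbd.v)
  with session_lib.Session() as sess:
    a_v, u_b = sess.run([a_times_v, u_times_b])
    print("max |A*V - U*B| =", np.max(np.abs(a_v - u_b)))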
|
tensorflow-master
|
tensorflow/contrib/solvers/python/ops/lanczos.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lookup table operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_lookup_ops
from tensorflow.python.ops import lookup_ops
# pylint: disable=unused-import
from tensorflow.python.ops.lookup_ops import FastHashSpec
from tensorflow.python.ops.lookup_ops import HasherSpec
from tensorflow.python.ops.lookup_ops import IdTableWithHashBuckets
from tensorflow.python.ops.lookup_ops import index_table_from_file
from tensorflow.python.ops.lookup_ops import index_to_string_table_from_file
from tensorflow.python.ops.lookup_ops import InitializableLookupTableBase
from tensorflow.python.ops.lookup_ops import InitializableLookupTableBaseV1
from tensorflow.python.ops.lookup_ops import KeyValueTensorInitializer
from tensorflow.python.ops.lookup_ops import LookupInterface
from tensorflow.python.ops.lookup_ops import StrongHashSpec
from tensorflow.python.ops.lookup_ops import TableInitializerBase
from tensorflow.python.ops.lookup_ops import TextFileIdTableInitializer
from tensorflow.python.ops.lookup_ops import TextFileIndex
from tensorflow.python.ops.lookup_ops import TextFileInitializer
from tensorflow.python.ops.lookup_ops import TextFileStringTableInitializer
# pylint: enable=unused-import
from tensorflow.python.util.deprecation import deprecated
@deprecated("2017-04-10", "Use `index_table_from_file`.")
def string_to_index_table_from_file(vocabulary_file=None,
num_oov_buckets=0,
vocab_size=None,
default_value=-1,
hasher_spec=FastHashSpec,
name=None):
return index_table_from_file(
vocabulary_file,
num_oov_buckets,
vocab_size,
default_value,
hasher_spec,
key_dtype=dtypes.string,
name=name)
@deprecated("2017-04-10", "Use `index_table_from_tensor`.")
def string_to_index_table_from_tensor(mapping,
num_oov_buckets=0,
default_value=-1,
hasher_spec=FastHashSpec,
name=None):
with ops.name_scope(name, "string_to_index") as scope:
mapping = ops.convert_to_tensor(mapping)
if dtypes.string != mapping.dtype.base_dtype:
raise ValueError("string_to_index_table_from_tensor requires string.")
return index_table_from_tensor(
mapping, num_oov_buckets, default_value, hasher_spec, name=scope)
def index_table_from_tensor(mapping,
num_oov_buckets=0,
default_value=-1,
hasher_spec=FastHashSpec,
dtype=dtypes.string,
name=None):
"""Returns a lookup table that converts a string tensor into int64 IDs.
  This operation constructs a lookup table to convert a tensor of strings into
  int64 IDs. The mapping can be initialized from a string `mapping` 1-D tensor
  where each element is a key and the corresponding index within the tensor is
  the value.
Any lookup of an out-of-vocabulary token will return a bucket ID based on its
hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the
`default_value`.
The bucket ID range is `[mapping size, mapping size + num_oov_buckets - 1]`.
The underlying table must be initialized by calling
`session.run(tf.compat.v1.tables_initializer)` or `session.run(table.init)`
once.
  Elements in `mapping` cannot have duplicates; otherwise, executing the
  table initializer op will throw a `FailedPreconditionError`.
Sample Usages:
```python
mapping_strings = tf.constant(["emerson", "lake", "palmer"])
table = tf.contrib.lookup.index_table_from_tensor(
mapping=mapping_strings, num_oov_buckets=1, default_value=-1)
features = tf.constant(["emerson", "lake", "and", "palmer"])
ids = table.lookup(features)
...
tf.compat.v1.tables_initializer().run()
ids.eval() ==> [0, 1, 3, 2]
```
Args:
mapping: A 1-D `Tensor` that specifies the mapping of keys to indices. The
type of this object must be castable to `dtype`.
num_oov_buckets: The number of out-of-vocabulary buckets.
default_value: The value to use for out-of-vocabulary feature values.
Defaults to -1.
hasher_spec: A `HasherSpec` to specify the hash function to use for
assignment of out-of-vocabulary buckets.
dtype: The type of values passed to `lookup`. Only string and integers are
supported.
name: A name for this op (optional).
Returns:
    The lookup table that maps an input `Tensor` to an `int64` index `Tensor`.
Raises:
ValueError: If `mapping` is invalid.
ValueError: If `num_oov_buckets` is negative.
"""
if mapping is None:
raise ValueError("mapping must be specified.")
return lookup_ops.index_table_from_tensor(
vocabulary_list=mapping,
num_oov_buckets=num_oov_buckets,
default_value=default_value,
hasher_spec=hasher_spec,
dtype=dtype,
name=name)
@deprecated("2017-01-07", "This op will be removed after the deprecation date. "
"Please switch to index_table_from_tensor and call the lookup "
"method of the returned table.")
def string_to_index(tensor, mapping, default_value=-1, name=None):
"""Maps `tensor` of strings into `int64` indices based on `mapping`.
This operation converts `tensor` of strings into `int64` indices.
  The mapping is initialized from a string `mapping` tensor where each element
  is a key and the corresponding index within the tensor is the value.
  Any entry in the input which does not have a corresponding entry in 'mapping'
  (an out-of-vocabulary entry) is assigned the `default_value`.
Elements in `mapping` cannot be duplicated, otherwise the initialization
will throw a FailedPreconditionError.
The underlying table must be initialized by calling
`session.run(tf.compat.v1.tables_initializer)` once.
For example:
```python
mapping_strings = tf.constant(["emerson", "lake", "palmer"])
feats = tf.constant(["emerson", "lake", "and", "palmer"])
ids = tf.contrib.lookup.string_to_index(
feats, mapping=mapping_strings, default_value=-1)
...
tf.compat.v1.tables_initializer().run()
ids.eval() ==> [0, 1, -1, 2]
```
Args:
tensor: A 1-D input `Tensor` with the strings to map to indices.
mapping: A 1-D string `Tensor` that specifies the mapping of strings to
indices.
default_value: The `int64` value to use for out-of-vocabulary strings.
Defaults to -1.
name: A name for this op (optional).
Returns:
The mapped indices. It has the same shape and tensor type (dense or sparse)
as `tensor`.
"""
table = index_table_from_tensor(
mapping=mapping, default_value=default_value, name=name)
return table.lookup(tensor)
def index_to_string_table_from_tensor(mapping, default_value="UNK", name=None):
"""Returns a lookup table that maps a `Tensor` of indices into strings.
This operation constructs a lookup table to map int64 indices into string
values. The mapping is initialized from a string `mapping` 1-D `Tensor` where
each element is a value and the corresponding index within the tensor is the
key.
Any input which does not have a corresponding index in 'mapping'
  (an out-of-vocabulary entry) is assigned the `default_value`.
The underlying table must be initialized by calling
`session.run(tf.compat.v1.tables_initializer)` or `session.run(table.init)`
once.
  Elements in `mapping` cannot have duplicates; otherwise, executing the
  table initializer op will throw a `FailedPreconditionError`.
Sample Usages:
```python
mapping_string = tf.constant(["emerson", "lake", "palmer"])
indices = tf.constant([1, 5], tf.int64)
table = tf.contrib.lookup.index_to_string_table_from_tensor(
mapping_string, default_value="UNKNOWN")
values = table.lookup(indices)
...
tf.compat.v1.tables_initializer().run()
values.eval() ==> ["lake", "UNKNOWN"]
```
Args:
mapping: A 1-D string `Tensor` that specifies the strings to map from
indices.
default_value: The value to use for out-of-vocabulary indices.
name: A name for this op (optional).
Returns:
    The lookup table mapping an `int64` index `Tensor` to its associated string
    values.
Raises:
ValueError: when `mapping` is not set.
"""
if mapping is None:
raise ValueError("mapping must be specified.")
return lookup_ops.index_to_string_table_from_tensor(
vocabulary_list=mapping, default_value=default_value, name=name)
@deprecated(
"2017-01-07", "This op will be removed after the deprecation date. "
"Please switch to index_to_string_table_from_tensor and call the lookup "
"method of the returned table.")
def index_to_string(tensor, mapping, default_value="UNK", name=None):
"""Maps `tensor` of indices into string values based on `mapping`.
This operation converts `int64` indices into string values. The mapping is
initialized from a string `mapping` tensor where each element is a value and
the corresponding index within the tensor is the key.
Any input which does not have a corresponding index in 'mapping'
  (an out-of-vocabulary entry) is assigned the `default_value`.
The underlying table must be initialized by calling
`session.run(tf.compat.v1.tables_initializer)` once.
For example:
```python
mapping_string = tf.constant(["emerson", "lake", "palmer"])
indices = tf.constant([1, 5], tf.int64)
values = tf.contrib.lookup.index_to_string(
indices, mapping=mapping_string, default_value="UNKNOWN")
...
tf.compat.v1.tables_initializer().run()
values.eval() ==> ["lake", "UNKNOWN"]
```
Args:
tensor: A `int64` `Tensor` with the indices to map to strings.
mapping: A 1-D string `Tensor` that specifies the strings to map from
indices.
default_value: The string value to use for out-of-vocabulary indices.
name: A name for this op (optional).
Returns:
    The string values associated with the indices. The resulting dense
    feature value tensor has the same shape as the corresponding `indices`.
"""
table = index_to_string_table_from_tensor(
mapping=mapping, default_value=default_value, name=name)
return table.lookup(tensor)
class HashTable(InitializableLookupTableBaseV1):
"""A generic hash table implementation.
Example usage:
```python
  table = tf.contrib.lookup.HashTable(
      tf.contrib.lookup.KeyValueTensorInitializer(keys, values), -1)
out = table.lookup(input_tensor)
table.init.run()
print(out.eval())
```
"""
def __init__(self, initializer, default_value, shared_name=None, name=None):
"""Creates a non-initialized `HashTable` object.
    Creates a table whose key and value types are specified by the
    initializer.
Before using the table you will have to initialize it. After initialization
the table will be immutable.
Args:
initializer: The table initializer to use. See `HashTable` kernel for
supported key and value types.
default_value: The value to use if a key is missing in the table.
shared_name: If non-empty, this table will be shared under the given name
across multiple sessions.
name: A name for the operation (optional).
Returns:
A `HashTable` object.
"""
self._initializer = initializer
self._default_value = default_value
self._shared_name = shared_name
self._name = name or "hash_table"
self._table_name = None
super(HashTable, self).__init__(default_value, initializer)
self._value_shape = self._default_value.get_shape()
def _create_resource(self):
table_ref = gen_lookup_ops.hash_table_v2(
shared_name=self._shared_name,
key_dtype=self._initializer.key_dtype,
value_dtype=self._initializer.value_dtype,
name=self._name)
if context.executing_eagerly():
self._table_name = None
else:
self._table_name = table_ref.op.name.split("/")[-1]
return table_ref
@property
def init(self):
return self.initializer
@property
def name(self):
return self._table_name
def export(self, name=None):
"""Returns tensors of all keys and values in the table.
Args:
name: A name for the operation (optional).
Returns:
      A pair of tensors with the first tensor containing all keys and the
      second tensor containing all values in the table.
"""
with ops.name_scope(name, "%s_Export" % self.name,
[self.resource_handle]) as name:
exported_keys, exported_values = gen_lookup_ops.lookup_table_export_v2(
self.resource_handle, self._key_dtype, self._value_dtype, name=name)
exported_values.set_shape(exported_keys.get_shape().concatenate(
self._value_shape))
return exported_keys, exported_values
MutableHashTable = lookup_ops.MutableHashTable
MutableDenseHashTable = lookup_ops.DenseHashTable
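# A minimal usage sketch, assuming a TF 1.x graph/session environment; the
# vocabulary and lookup keys below are made up for illustration. It builds a
# `HashTable` from in-graph constants and looks up a batch of keys, with the
# out-of-vocabulary key mapping to the default value.
if __name__ == "__main__":
  from tensorflow.python.client import session as session_lib
  from tensorflow.python.framework import constant_op
  keys = constant_op.constant(["emerson", "lake", "palmer"])
  values = constant_op.constant([0, 1, 2], dtypes.int64)
  table = HashTable(KeyValueTensorInitializer(keys, values), default_value=-1)
  ids = table.lookup(constant_op.constant(["lake", "tarkus"]))
  with session_lib.Session() as sess:
    sess.run(table.initializer)
    print(sess.run(ids))  # -> [ 1 -1]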
|
tensorflow-master
|
tensorflow/contrib/lookup/lookup_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for lookup operations.
@@string_to_index
@@string_to_index_table_from_file
@@string_to_index_table_from_tensor
@@index_table_from_file
@@index_table_from_tensor
@@index_to_string
@@index_to_string_table_from_file
@@index_to_string_table_from_tensor
@@LookupInterface
@@InitializableLookupTableBase
@@IdTableWithHashBuckets
@@HashTable
@@MutableHashTable
@@MutableDenseHashTable
@@TableInitializerBase
@@KeyValueTensorInitializer
@@TextFileIndex
@@TextFileInitializer
@@TextFileIdTableInitializer
@@TextFileStringTableInitializer
@@HasherSpec
@@StrongHashSpec
@@FastHashSpec
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.lookup.lookup_ops import *
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
tensorflow-master
|
tensorflow/contrib/lookup/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.lookup.lookup."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.contrib import lookup
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class HashTableOpTest(test.TestCase):
def testHashTable(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
exported_keys_tensor, exported_values_tensor = table.export()
self.assertItemsEqual([b"brain", b"salad", b"surgery"],
exported_keys_tensor.eval())
self.assertItemsEqual([0, 1, 2], exported_values_tensor.eval())
def testHashTableFindHighRank(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(
[["brain", "salad"], ["tank", "tarkus"]])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([[0, 1], [-1, -1]], result)
def testHashTableInitWithPythonArrays(self):
with self.cached_session():
default_val = -1
keys = ["brain", "salad", "surgery"]
values = [0, 1, 2]
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(
keys, values, value_dtype=dtypes.int64),
default_val)
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testHashTableInitWithNumPyArrays(self):
with self.cached_session():
default_val = -1
keys = np.array(["brain", "salad", "surgery"], dtype=np.str)
values = np.array([0, 1, 2], dtype=np.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testMultipleHashTables(self):
with self.cached_session() as sess:
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table1 = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table2 = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table3 = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
lookup_ops.tables_initializer().run()
self.assertAllEqual(3, table1.size().eval())
self.assertAllEqual(3, table2.size().eval())
self.assertAllEqual(3, table3.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output1 = table1.lookup(input_string)
output2 = table2.lookup(input_string)
output3 = table3.lookup(input_string)
out1, out2, out3 = sess.run([output1, output2, output3])
self.assertAllEqual([0, 1, -1], out1)
self.assertAllEqual([0, 1, -1], out2)
self.assertAllEqual([0, 1, -1], out3)
def testHashTableWithTensorDefault(self):
with self.cached_session():
default_val = constant_op.constant(-1, dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testHashTableWithSparseTensorInput(self):
with self.cached_session() as sess:
default_val = constant_op.constant(-1, dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
sp_indices = [[0, 0], [0, 1], [1, 0]]
sp_shape = [2, 2]
input_tensor = sparse_tensor.SparseTensor(
constant_op.constant(sp_indices, dtypes.int64),
constant_op.constant(["brain", "salad", "tank"]),
constant_op.constant(sp_shape, dtypes.int64))
output = table.lookup(input_tensor)
out_indices, out_values, out_shape = sess.run(output)
self.assertAllEqual([0, 1, -1], out_values)
self.assertAllEqual(sp_indices, out_indices)
self.assertAllEqual(sp_shape, out_shape)
def testSignatureMismatch(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
# Ref types do not produce a lookup signature mismatch.
input_string_ref = variables.Variable("brain")
variables.global_variables_initializer().run()
self.assertEqual(0, table.lookup(input_string_ref).eval())
input_string = constant_op.constant([1, 2, 3], dtypes.int64)
with self.assertRaises(TypeError):
table.lookup(input_string)
with self.assertRaises(TypeError):
lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), "UNK")
def testDTypes(self):
with self.cached_session():
default_val = -1
with self.assertRaises(TypeError):
lookup.HashTable(
lookup.KeyValueTensorInitializer(["a"], [1], [dtypes.string],
dtypes.int64), default_val)
def testNotInitialized(self):
with self.cached_session():
default_val = -1
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(
["a"], [1], value_dtype=dtypes.int64),
default_val)
input_string = constant_op.constant(["brain", "salad", "surgery"])
output = table.lookup(input_string)
with self.assertRaisesOpError("Table not initialized"):
output.eval()
def testInitializeTwice(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
# Re-initializing should not throw an error.
table.initializer.run()
def testInitializationWithInvalidDimensions(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2, 3, 4], dtypes.int64)
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
def testMultipleSessions(self):
# Start a server
server = server_lib.Server(
{
"local0": ["localhost:0"]
}, protocol="grpc", start=True)
# Create two sessions sharing the same state
session1 = session.Session(server.target)
session2 = session.Session(server.target)
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values),
default_val,
name="t1")
# Init the table in the first session.
with session1:
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
# Init the table in the second session and verify that we do not get a
# "Table already initialized" error.
with session2:
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
def testHashTableInt32String(self):
with self.cached_session():
default_val = "n/a"
keys = constant_op.constant([0, 1, 2], dtypes.int32)
values = constant_op.constant(["brain", "salad", "surgery"])
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
input_tensor = constant_op.constant([0, 1, -1])
output = table.lookup(input_tensor)
result = output.eval()
self.assertAllEqual([b"brain", b"salad", b"n/a"], result)
class IndexTableFromFile(test.TestCase):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
def test_string_index_table_from_file(self):
vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_string_index_table_from_file_tensor_filename(self):
vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
with self.cached_session():
vocabulary_file = constant_op.constant(vocabulary_file)
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
self.assertEqual(1,
len(ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))
def test_string_index_table_from_file_placeholder_filename(self):
vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
with self.cached_session():
vocabulary_placeholder = array_ops.placeholder(dtypes.string, [])
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_placeholder, num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
feed_dict = {vocabulary_placeholder.name: vocabulary_file}
lookup_ops.tables_initializer().run(feed_dict=feed_dict)
self.assertAllEqual((1, 2, 3), ids.eval())
self.assertEqual(0,
len(ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))
def test_int32_index_table_from_file(self):
vocabulary_file = self._createVocabFile(
"f2i_vocab2.txt", values=("42", "1", "-1000"))
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1,
key_dtype=dtypes.int32)
ids = table.lookup(
constant_op.constant((1, -1000, 11), dtype=dtypes.int32))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_int64_index_table_from_file(self):
vocabulary_file = self._createVocabFile(
"f2i_vocab3.txt", values=("42", "1", "-1000"))
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1,
key_dtype=dtypes.int64)
ids = table.lookup(
constant_op.constant((1, -1000, 11), dtype=dtypes.int64))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_index_table_from_file_with_default_value(self):
default_value = -42
vocabulary_file = self._createVocabFile("f2i_vocab4.txt")
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, default_value=default_value)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, default_value), ids.eval())
def test_index_table_from_file_with_oov_buckets(self):
vocabulary_file = self._createVocabFile("f2i_vocab5.txt")
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1000)
ids = table.lookup(
constant_op.constant(["salad", "surgery", "tarkus", "toccata"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual(
(
1, # From vocabulary file.
2, # From vocabulary file.
              867,  # 3 + fingerprint("tarkus") mod 1000.
              860),  # 3 + fingerprint("toccata") mod 1000.
ids.eval())
def test_index_table_from_file_fails_with_empty_vocabulary_file_name(self):
self.assertRaises(
ValueError,
lookup.index_table_from_file,
vocabulary_file="")
def test_index_table_from_file_fails_with_empty_vocabulary(self):
self.assertRaises(
ValueError,
lookup.index_table_from_file,
vocabulary_file=None)
def test_index_table_from_file_with_vocab_size_too_small(self):
vocabulary_file = self._createVocabFile("f2i_vocab6.txt")
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=2)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, -1, -1), ids.eval())
self.assertEqual(2, table.size().eval())
def test_index_table_from_file_with_vocab_size_too_large(self):
vocabulary_file = self._createVocabFile("f2i_vocab7.txt")
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=4)
self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Invalid vocab_size", table.initializer.run)
def test_index_table_from_file_with_vocab_size(self):
vocabulary_file = self._createVocabFile("f2i_vocab8.txt")
self.assertRaises(
ValueError,
lookup.index_table_from_file,
vocabulary_file=vocabulary_file,
vocab_size=0)
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=3)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, -1), ids.eval())
self.assertEqual(3, table.size().eval())
def test_index_table_from_file_with_invalid_hashers(self):
vocabulary_file = self._createVocabFile("invalid_hasher.txt")
with self.cached_session():
with self.assertRaises(TypeError):
lookup.index_table_from_file(
vocabulary_file=vocabulary_file,
vocab_size=3,
num_oov_buckets=1,
hasher_spec=1)
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file,
vocab_size=3,
num_oov_buckets=1,
hasher_spec=lookup.HasherSpec("my-awesome-hash", None))
self.assertRaises(ValueError, table.lookup,
constant_op.constant(["salad", "surgery", "tarkus"]))
class KeyValueTensorInitializerTest(test.TestCase):
def test_string(self):
with ops.Graph().as_default(), self.cached_session():
init = lookup.KeyValueTensorInitializer(
("brain", "salad", "surgery"), (0, 1, 2), dtypes.string, dtypes.int64)
table = lookup.HashTable(init, default_value=-1)
table.initializer.run()
def test_int64(self):
with ops.Graph().as_default(), self.cached_session():
init = lookup.KeyValueTensorInitializer(
(42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64)
table = lookup.HashTable(init, default_value=-1)
table.initializer.run()
def test_int32(self):
with ops.Graph().as_default(), self.cached_session():
init = lookup.KeyValueTensorInitializer(
(42, 1, -1000), (0, 1, 2), dtypes.int32, dtypes.int64)
table = lookup.HashTable(init, default_value=-1)
with self.assertRaisesRegexp(
errors_impl.OpError, "No OpKernel was registered"):
table.initializer.run()
class IndexTableFromTensor(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_index_table_from_tensor_with_tensor_init(self):
table = lookup.index_table_from_tensor(
mapping=("brain", "salad", "surgery"), num_oov_buckets=1)
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(table.lookup(
constant_op.constant(("salad", "surgery", "tarkus"))))
else:
# Reinitializing a table in eager should work.
table = lookup.index_table_from_tensor(
mapping=("brain", "salad", "surgery"), num_oov_buckets=1)
self.evaluate(lookup_ops.tables_initializer())
ids = table.lookup(constant_op.constant(("salad", "surgery", "tarkus")))
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
def test_int32_index_table_from_tensor_with_tensor_init(self):
with self.cached_session():
table = lookup.index_table_from_tensor(
mapping=(42, 1, -1000), num_oov_buckets=1, dtype=dtypes.int32)
ids = table.lookup(
constant_op.constant((1, -1000, 11), dtype=dtypes.int32))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_int64_index_table_from_tensor_with_tensor_init(self):
with self.cached_session():
table = lookup.index_table_from_tensor(
mapping=(42, 1, -1000), num_oov_buckets=1, dtype=dtypes.int64)
ids = table.lookup(
constant_op.constant((1, -1000, 11), dtype=dtypes.int64))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_index_table_from_tensor_with_default_value(self):
default_value = -42
with self.cached_session():
table = lookup.index_table_from_tensor(
mapping=["brain", "salad", "surgery"], default_value=default_value)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, default_value), ids.eval())
def test_index_table_from_tensor_missing_mapping(self):
with self.cached_session():
with self.assertRaisesRegexp(ValueError, "mapping must be specified"):
lookup.index_table_from_tensor(mapping=None, num_oov_buckets=1)
def test_index_table_from_tensor_empty_mapping(self):
with self.cached_session():
table = lookup.index_table_from_tensor(
mapping=np.array([], dtype=np.str_), num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "brain"]))
self.assertRaises(errors_impl.OpError, ids.eval)
with self.assertRaisesRegexp(
errors_impl.OpError, "keys and values cannot be empty"):
lookup_ops.tables_initializer().run()
def test_index_table_from_tensor_with_invalid_hashers(self):
with self.cached_session():
with self.assertRaises(TypeError):
lookup.index_table_from_tensor(
mapping=["brain", "salad", "surgery"],
num_oov_buckets=1,
hasher_spec=1)
table = lookup.index_table_from_tensor(
mapping=["brain", "salad", "surgery"],
num_oov_buckets=1,
hasher_spec=lookup.HasherSpec("my-awesome-hash", None))
self.assertRaises(ValueError, table.lookup,
constant_op.constant(["salad", "surgery", "tarkus"]))
class StringToIndexTest(test.TestCase):
def test_string_to_index(self):
with self.cached_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
feats = constant_op.constant(["salad", "surgery", "tarkus"])
indices = lookup.string_to_index(feats, mapping=mapping_strings)
self.assertRaises(errors_impl.OpError, indices.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, -1), indices.eval())
def test_duplicate_entries(self):
with self.cached_session():
mapping_strings = constant_op.constant(["hello", "hello"])
feats = constant_op.constant(["hello", "hola"])
_ = lookup.string_to_index(feats, mapping=mapping_strings)
self.assertRaises(errors_impl.OpError,
lookup_ops.tables_initializer().run)
def test_string_to_index_with_default_value(self):
default_value = -42
with self.cached_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
feats = constant_op.constant(["salad", "surgery", "tarkus"])
indices = lookup.string_to_index(
feats, mapping=mapping_strings, default_value=default_value)
self.assertRaises(errors_impl.OpError, indices.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, default_value), indices.eval())
class IndexToStringTableFromFileTest(test.TestCase):
def _createVocabFile(self, basename):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(["brain", "salad", "surgery"]) + "\n")
return vocabulary_file
def test_index_to_string_table(self):
vocabulary_file = self._createVocabFile("i2f_vocab1.txt")
with self.cached_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file)
features = table.lookup(constant_op.constant([0, 1, 2, 3], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
features.eval())
def test_index_to_string_table_with_default_value(self):
default_value = b"NONE"
vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
with self.cached_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file, default_value=default_value)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", b"surgery", default_value),
features.eval())
def test_index_to_string_table_with_vocab_size_too_small(self):
default_value = b"NONE"
vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
with self.cached_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file,
vocab_size=2,
default_value=default_value)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", default_value, default_value),
features.eval())
def test_index_to_string_table_with_vocab_size_too_large(self):
vocabulary_file = self._createVocabFile("f2i_vocab6.txt")
with self.cached_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=4)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
init = lookup_ops.tables_initializer()
self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Invalid vocab_size", init.run)
def test_index_to_string_table_with_vocab_size(self):
vocabulary_file = self._createVocabFile("f2i_vocab7.txt")
with self.cached_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=3)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", b"surgery", b"UNK"), features.eval())
class IndexToStringTableFromTensorTest(test.TestCase):
def test_index_to_string_table_from_tensor(self):
with self.cached_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
table = lookup.index_to_string_table_from_tensor(
mapping=mapping_strings)
indices = constant_op.constant([0, 1, 2, 3], dtypes.int64)
features = table.lookup(indices)
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
features.eval())
def test_duplicate_entries(self):
with self.cached_session():
mapping_strings = constant_op.constant(["hello", "hello"])
table = lookup.index_to_string_table_from_tensor(
mapping=mapping_strings)
indices = constant_op.constant([0, 1, 4], dtypes.int64)
features = table.lookup(indices)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"hello", b"hello", b"UNK"), features.eval())
def test_index_to_string_with_default_value(self):
default_value = b"NONE"
with self.cached_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
table = lookup.index_to_string_table_from_tensor(
mapping=mapping_strings, default_value=default_value)
indices = constant_op.constant([1, 2, 4], dtypes.int64)
features = table.lookup(indices)
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", b"surgery", default_value),
features.eval())
class IndexToStringTest(test.TestCase):
def test_index_to_string(self):
with self.cached_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
indices = constant_op.constant([0, 1, 2, 3], dtypes.int64)
feats = lookup.index_to_string(indices, mapping=mapping_strings)
self.assertRaises(errors_impl.OpError, feats.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
feats.eval())
def test_duplicate_entries(self):
with self.cached_session():
mapping_strings = constant_op.constant(["hello", "hello"])
indices = constant_op.constant([0, 1, 4], dtypes.int64)
feats = lookup.index_to_string(indices, mapping=mapping_strings)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"hello", b"hello", b"UNK"), feats.eval())
def test_index_to_string_with_default_value(self):
default_value = b"NONE"
with self.cached_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
indices = constant_op.constant([1, 2, 4], dtypes.int64)
feats = lookup.index_to_string(
indices, mapping=mapping_strings, default_value=default_value)
self.assertRaises(errors_impl.OpError, feats.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", b"surgery", default_value), feats.eval())
class InitializeTableFromFileOpTest(test.TestCase):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
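  # The helper writes one token per line ("brain\nsalad\nsurgery\n").
  # TextFileIndex.WHOLE_LINE refers to the full line and
  # TextFileIndex.LINE_NUMBER to its zero-based line index, so the tests
  # below build either a token->id table or an id->token table from the
  # same file, depending on which index is used for keys and values.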
@test_util.run_in_graph_and_eager_modes
def testInitializeStringTable(self):
vocabulary_file = self._createVocabFile("one_column_1.txt")
default_value = -1
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value)
self.evaluate(table.initializer)
output = table.lookup(constant_op.constant(["brain", "salad", "tank"]))
result = self.evaluate(output)
self.assertAllEqual([0, 1, -1], result)
def testInitializeInt64Table(self):
vocabulary_file = self._createVocabFile(
"one_column_int64.txt", values=("42", "1", "-1000"))
with self.cached_session():
default_value = -1
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.int64,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value)
table.initializer.run()
output = table.lookup(
constant_op.constant((42, 1, 11), dtype=dtypes.int64))
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testInitializeIndexTable(self):
vocabulary_file = self._createVocabFile("one_column_2.txt")
with self.cached_session():
default_value = "UNK"
key_index = lookup.TextFileIndex.LINE_NUMBER
value_index = lookup.TextFileIndex.WHOLE_LINE
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.int64,
key_index, dtypes.string, value_index),
default_value)
table.initializer.run()
input_values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
output = table.lookup(input_values)
result = output.eval()
self.assertAllEqual([b"brain", b"salad", b"surgery", b"UNK"], result)
def testMultiColumn(self):
vocabulary_file = os.path.join(self.get_temp_dir(), "three_columns.txt")
with open(vocabulary_file, "w") as f:
f.write("\n".join(["0\tbrain\t1", "1\tsalad\t5", "2\tsurgery\t6"]) + "\n")
with self.cached_session():
default_value = -1
key_index = 1
value_index = 2
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
key_index, dtypes.int64, value_index),
default_value)
table.initializer.run()
input_string = constant_op.constant(["brain", "salad", "surgery"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([1, 5, 6], result)
def testInvalidDataTypeInMultiColumn(self):
vocabulary_file = os.path.join(self.get_temp_dir(), "three_columns.txt")
with open(vocabulary_file, "w") as f:
f.write("\n".join(["0\tbrain\t1", "1\tsalad\t5", "2\tsurgery\t6"]) + "\n")
with self.cached_session():
default_value = -1
key_index = 2
value_index = 1
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
key_index, dtypes.int64, value_index),
default_value)
with self.assertRaisesOpError("is not a valid"):
table.initializer.run()
def testInvalidDataType(self):
vocabulary_file = self._createVocabFile("one_column_3.txt")
with self.cached_session():
default_value = "UNK"
key_index = lookup.TextFileIndex.WHOLE_LINE
value_index = lookup.TextFileIndex.LINE_NUMBER
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.int64,
key_index, dtypes.string,
value_index), default_value)
def testInvalidIndex(self):
vocabulary_file = self._createVocabFile("one_column_4.txt")
with self.cached_session():
default_value = -1
key_index = 1 # second column of the line
value_index = lookup.TextFileIndex.LINE_NUMBER
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
key_index, dtypes.int64, value_index),
default_value)
with self.assertRaisesOpError("Invalid number of columns"):
table.initializer.run()
def testInitializeSameTableWithMultipleNodes(self):
vocabulary_file = self._createVocabFile("one_column_5.txt")
with self.cached_session() as sess:
shared_name = "shared-one-columm"
default_value = -1
table1 = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value,
shared_name=shared_name)
table2 = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value,
shared_name=shared_name)
table3 = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value,
shared_name=shared_name)
lookup_ops.tables_initializer().run()
input_string = constant_op.constant(["brain", "salad", "tank"])
output1 = table1.lookup(input_string)
output2 = table2.lookup(input_string)
output3 = table3.lookup(input_string)
out1, out2, out3 = sess.run([output1, output2, output3])
self.assertAllEqual([0, 1, -1], out1)
self.assertAllEqual([0, 1, -1], out2)
self.assertAllEqual([0, 1, -1], out3)
def testInitializeTableWithNoFilename(self):
with self.cached_session():
default_value = -1
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.TextFileInitializer(
"", dtypes.string, lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup.TextFileIndex.LINE_NUMBER),
default_value)
def testInitializeWithVocabSize(self):
with self.cached_session():
default_value = -1
vocab_size = 3
vocabulary_file1 = self._createVocabFile("one_column6.txt")
table1 = lookup.HashTable(
lookup.TextFileInitializer(
vocabulary_file1,
dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER,
vocab_size=vocab_size),
default_value)
# Initialize from file.
table1.initializer.run()
self.assertEquals(vocab_size, table1.size().eval())
vocabulary_file2 = self._createVocabFile("one_column7.txt")
vocab_size = 5
table2 = lookup.HashTable(
lookup.TextFileInitializer(
vocabulary_file2,
dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER,
vocab_size=vocab_size),
default_value)
with self.assertRaisesOpError("Invalid vocab_size"):
table2.initializer.run()
vocab_size = 1
vocabulary_file3 = self._createVocabFile("one_column3.txt")
table3 = lookup.HashTable(
lookup.TextFileInitializer(
vocabulary_file3,
dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER,
vocab_size=vocab_size),
default_value)
      # A smaller vocab_size reads only the first vocab_size records.
table3.initializer.run()
self.assertEquals(vocab_size, table3.size().eval())
def testFeedVocabularyName(self):
vocabulary_file = self._createVocabFile("feed_vocabulary.txt")
with self.cached_session():
default_value = -1
table = lookup.HashTable(
lookup.TextFileInitializer("old_file.txt", dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value)
      # Initializing with a non-existent file (old_file.txt) should fail.
# TODO(yleon): Update message, which might change per FileSystem.
with self.assertRaisesOpError("old_file.txt"):
table.initializer.run()
# Initialize the model feeding the vocabulary file.
filenames = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
table.initializer.run(feed_dict={filenames[0]: vocabulary_file})
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testInvalidFilenames(self):
vocabulary_file = self._createVocabFile("filename_shape.txt")
with self.cached_session():
default_value = -1
# Invalid data type
other_type = constant_op.constant(1)
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.TextFileInitializer(
other_type, dtypes.string, lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup.TextFileIndex.LINE_NUMBER),
default_value)
# Non-scalar filename
filenames = constant_op.constant([vocabulary_file, vocabulary_file])
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.TextFileInitializer(
filenames, dtypes.string, lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup.TextFileIndex.LINE_NUMBER),
default_value)
def testIdToStringTable(self):
vocab_file = self._createVocabFile("feat_to_id_1.txt")
with self.cached_session():
default_value = "UNK"
vocab_size = 3
table = lookup.HashTable(
lookup.TextFileStringTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value)
table.initializer.run()
input_values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
out = table.lookup(input_values)
self.assertAllEqual([b"brain", b"salad", b"surgery", b"UNK"], out.eval())
self.assertEquals(vocab_size, table.size().eval())
def testStringToIdTable(self):
vocab_file = self._createVocabFile("feat_to_id_2.txt")
with self.cached_session():
default_value = -1
vocab_size = 3
table = lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value)
table.initializer.run()
input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
out = table.lookup(input_string)
self.assertAllEqual([0, 1, 2, -1], out.eval())
self.assertEquals(vocab_size, table.size().eval())
def testInt64ToIdTable(self):
vocab_file = self._createVocabFile(
"feat_to_id_3.txt", values=("42", "1", "-1000"))
with self.cached_session():
default_value = -1
vocab_size = 3
table = lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
default_value)
table.initializer.run()
out = table.lookup(
constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64))
self.assertAllEqual((0, 1, 2, -1), out.eval())
self.assertEquals(vocab_size, table.size().eval())
class IdTableWithHashBucketsTest(test.TestCase):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
def testStringIdTableWithHashBuckets(self):
vocab_file = self._createVocabFile("feat_to_id_1.txt")
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value),
oov_buckets)
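      # In-vocabulary terms keep their file ids in [0, vocab_size); any
      # out-of-vocabulary term ("UNK" below) is hashed into the range
      # [vocab_size, vocab_size + oov_buckets), which is why the expected
      # ids are [0, 1, 2, 3] with a single OOV bucket.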
table.initializer.run()
input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
out = table.lookup(input_string)
self.assertAllEqual([0, 1, 2, 3], out.eval())
self.assertEquals(vocab_size + oov_buckets, table.size().eval())
def testInt32IdTableWithHashBuckets(self):
vocab_file = self._createVocabFile("feat_to_id_2.txt", ("42", "1", "-1000"))
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
default_value),
oov_buckets,
key_dtype=dtypes.int32)
table.initializer.run()
values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int32)
out = table.lookup(values)
self.assertAllEqual([0, 1, 2, 3], out.eval())
self.assertEquals(vocab_size + oov_buckets, table.size().eval())
def testInt64IdTableWithHashBuckets(self):
vocab_file = self._createVocabFile("feat_to_id_3.txt", ("42", "1", "-1000"))
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
default_value),
oov_buckets)
table.initializer.run()
values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64)
out = table.lookup(values)
self.assertAllEqual([0, 1, 2, 3], out.eval())
self.assertEquals(vocab_size + oov_buckets, table.size().eval())
def testStringIdTableWithOnlyHashBucket(self):
with self.cached_session():
oov_buckets = 5
      # Create a table that only uses hash buckets; for each input value it
      # returns an id computed as fingerprint(input) mod oov_buckets.
table = lookup.IdTableWithHashBuckets(None, oov_buckets)
table.initializer.run()
values = constant_op.constant(("brain", "salad", "surgery"))
out = table.lookup(values)
self.assertAllEqual(
[
3, # fingerprint("brain") mod 5.
1, # fingerprint("salad") mod 5.
4 # fingerprint("surgery") mod 5
],
out.eval())
self.assertEquals(oov_buckets, table.size().eval())
def testInt32IdTableWithOnlyHashBucket(self):
with self.cached_session():
oov_buckets = 5
      # Create a table that only uses hash buckets; for each input value it
      # returns an id computed as fingerprint(input) mod oov_buckets.
table = lookup.IdTableWithHashBuckets(
None, oov_buckets, key_dtype=dtypes.int32)
table.initializer.run()
input_string = constant_op.constant([42, 1, -1000], dtype=dtypes.int32)
out = table.lookup(input_string)
self.assertAllEqual(
[
1, # fingerprint("42") mod 5.
4, # fingerprint("1") mod 5.
2 # fingerprint("-1000") mod 5
],
out.eval())
self.assertEquals(oov_buckets, table.size().eval())
def testFloat64IdTableWithOnlyHashBucket(self):
with self.cached_session():
with self.assertRaisesRegexp(TypeError, "Invalid key_dtype"):
lookup.IdTableWithHashBuckets(
None, num_oov_buckets=5, key_dtype=dtypes.float64)
def testBoolIdTableWithOnlyHashBucket(self):
with self.cached_session():
with self.assertRaisesRegexp(TypeError, "Invalid key_dtype"):
lookup.IdTableWithHashBuckets(
None, num_oov_buckets=5, key_dtype=dtypes.bool)
def testIdTableWithHashBucketsWithMultipleInitializers(self):
vocab_file = self._createVocabFile("feat_to_id_4.txt")
with self.cached_session() as sess:
default_value = -1
vocab_size = 3
oov_buckets = 3
vocab_table = lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value)
table1 = lookup.IdTableWithHashBuckets(
vocab_table,
oov_buckets,
hasher_spec=lookup.FastHashSpec,
name="table1")
table2 = lookup.IdTableWithHashBuckets(
vocab_table,
oov_buckets,
hasher_spec=lookup.StrongHashSpec((1, 2)),
name="table2")
lookup_ops.tables_initializer().run()
input_string = constant_op.constant(
["fruit", "brain", "salad", "surgery", "UNK"])
out1 = table1.lookup(input_string)
out2 = table2.lookup(input_string)
out1, out2 = sess.run([out1, out2])
self.assertAllEqual([5, 0, 1, 2, 5], out1)
self.assertAllEqual([5, 0, 1, 2, 3], out2)
self.assertEquals(vocab_size + oov_buckets, table1.size().eval())
self.assertEquals(vocab_size + oov_buckets, table2.size().eval())
test_util.assert_ops_in_graph({
"table1_Lookup/hash_bucket": "StringToHashBucketFast",
"table2_Lookup/hash_bucket": "StringToHashBucketStrong",
}, sess.graph)
def testIdTableWithHashBucketsInitializationAcrossSessions(self):
vocab_file = self._createVocabFile("feat_to_id_5.txt")
shared_name = "across-sessions"
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table1 = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value,
shared_name=shared_name),
oov_buckets)
table1.initializer.run()
input_string_1 = constant_op.constant(
["brain", "salad", "surgery", "UNK"])
out1 = table1.lookup(input_string_1)
self.assertAllEqual([0, 1, 2, 3], out1.eval())
self.assertEquals(vocab_size + oov_buckets, table1.size().eval())
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
# Underlying lookup table already initialized in previous session.
# No need to call table2.initializer.run()
table2 = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value,
shared_name=shared_name),
oov_buckets)
input_string_2 = constant_op.constant(["fruit", "salad", "UNK"])
out2 = table2.lookup(input_string_2)
self.assertAllEqual([3, 1, 3], out2.eval())
self.assertEquals(vocab_size + oov_buckets, table2.size().eval())
def testIdTableWithHashBucketsWithMultipleInitializersDifferentDefault(self):
vocab_file = self._createVocabFile("feat_to_id_6.txt")
with self.cached_session() as sess:
default_value1 = -1
vocab_size = 3
oov_buckets = 0
table1 = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value1),
oov_buckets)
default_value2 = -2
table2 = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value2),
oov_buckets)
lookup_ops.tables_initializer().run()
input_string_1 = constant_op.constant(
["brain", "salad", "surgery", "UNK"])
input_string_2 = constant_op.constant(["fruit", "salad", "UNK"])
out1 = table1.lookup(input_string_1)
out2 = table2.lookup(input_string_2)
out1, out2 = sess.run([out1, out2])
self.assertAllEqual([0, 1, 2, -1], out1)
self.assertAllEqual([-2, 1, -2], out2)
self.assertEquals(vocab_size + oov_buckets, table1.size().eval())
self.assertEquals(vocab_size + oov_buckets, table2.size().eval())
def testSparseTensor(self):
vocab_file = self._createVocabFile("feat_to_id_7.txt")
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
with self.cached_session() as sess:
sp_features = sparse_tensor.SparseTensor(
constant_op.constant(input_indices, dtypes.int64),
constant_op.constant(["brain", "salad", "brain", "surgery", "tarkus"],
dtypes.string),
constant_op.constant(input_shape, dtypes.int64))
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=3),
-1),
1)
table.initializer.run()
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
def testInt32SparseTensor(self):
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
with self.cached_session() as sess:
sp_features = sparse_tensor.SparseTensor(
constant_op.constant(input_indices, dtypes.int64),
constant_op.constant([42, 1, 42, -1000, 11], dtypes.int32),
constant_op.constant(input_shape, dtypes.int64))
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.KeyValueTensorInitializer(
(42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64),
-1),
1,
key_dtype=dtypes.int32)
table.initializer.run()
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
def testInt64SparseTensor(self):
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
with self.cached_session() as sess:
sp_features = sparse_tensor.SparseTensor(
constant_op.constant(input_indices, dtypes.int64),
constant_op.constant([42, 1, 42, -1000, 11], dtypes.int64),
constant_op.constant(input_shape, dtypes.int64))
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.KeyValueTensorInitializer(
(42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64),
-1),
1,
key_dtype=dtypes.int64)
table.initializer.run()
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
def testIdTableWithHashBucketsWithInvalidHashers(self):
vocab_file = self._createVocabFile("feat_to_id_4.txt")
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
lookup_table = lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value)
with self.assertRaises(TypeError):
lookup.IdTableWithHashBuckets(
lookup_table, oov_buckets, hasher_spec=1)
table = lookup.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=lookup.HasherSpec("my-awesome-hash", None))
input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
with self.assertRaises(ValueError):
table.lookup(input_string)
with self.assertRaises(ValueError):
table = lookup.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=lookup.StrongHashSpec([]))
with self.assertRaises(ValueError):
table = lookup.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=lookup.StrongHashSpec([1, 2, 3]))
with self.assertRaises(TypeError):
table = lookup.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=lookup.StrongHashSpec([None, 2]))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/lookup/lookup_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops and modules related to resampler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.resampler.python.ops.resampler_ops import *
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__, ["resampler"])
|
tensorflow-master
|
tensorflow/contrib/resampler/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for resampler ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.contrib import resampler
from tensorflow.contrib.resampler.ops import gen_resampler_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ResamplerOpsTest(xla_test.XLATestCase):
def _assertForwardOpMatchesExpected(self, image_np, warp_np, expected):
with self.session() as sess, self.test_scope():
input_image = array_ops.placeholder(image_np.dtype)
warp = array_ops.placeholder(warp_np.dtype)
resampled = resampler.resampler(input_image, warp, name='resampler')
out = sess.run(resampled, {input_image: image_np, warp: warp_np})
self.assertAllCloseAccordingToType(
expected, out, rtol=5e-3, half_rtol=1e-2, bfloat16_rtol=3e-2)
def _assertBackwardOpMatchesExpected(self, input_np, warp_np, grad_output_np,
expected_grad_data, expected_grad_warp):
with self.session() as sess, self.test_scope():
input_image = array_ops.placeholder(input_np.dtype)
warp = array_ops.placeholder(warp_np.dtype)
grad_output = array_ops.placeholder(grad_output_np.dtype)
grad_data, grad_warp = gen_resampler_ops.resampler_grad(
input_image, warp, grad_output)
grad_data_tf, grad_warp_tf = sess.run([grad_data, grad_warp], {
input_image: input_np,
warp: warp_np,
grad_output: grad_output_np
})
self.assertAllCloseAccordingToType(
expected_grad_warp, grad_warp_tf, half_rtol=1e-2, bfloat16_rtol=3e-2)
self.assertAllCloseAccordingToType(
expected_grad_data, grad_data_tf, half_rtol=1e-2, bfloat16_rtol=3e-2)
def testSimple(self):
for dtype in self.float_types:
input_shape = [1, 2, 2, 1]
input_data = [0, 5, 13, 54]
input_np = np.array(input_data, dtype=dtype).reshape(input_shape)
warp_shape = [1, 2]
warp_data = [0.7, 0.6]
warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
expected = [[26.42]]
self._assertForwardOpMatchesExpected(input_np, warp_np, expected)
grad_output = np.ones([1, 1], dtype=dtype)
expected_grad_data = [[[[0.12], [0.27999997]], [[0.18000001],
[0.42000002]]]]
expected_grad_warp = [[26.60000038, 38.20000076]]
self._assertBackwardOpMatchesExpected(input_np, warp_np, grad_output,
expected_grad_data,
expected_grad_warp)
def testMultiChannel(self):
for dtype in self.float_types:
input_shape = [1, 2, 2, 3]
input_rgb_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
input_np = np.array(input_rgb_data, dtype=dtype).reshape(input_shape)
warp_shape = [1, 2]
warp_data = [0.7, 0.6]
warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
expected = [[59.58000183, 146.94000244, 107.37999725]]
self._assertForwardOpMatchesExpected(input_np, warp_np, expected)
grad_output = np.ones([1, 3], dtype=dtype)
expected_grad_data = [[[[0.12, 0.12, 0.12],
[0.27999997, 0.27999997, 0.27999997]],
[[0.18000001, 0.18000001, 0.18000001],
[0.42000002, 0.42000002, 0.42000002]]]]
expected_grad_warp = [[199, 30]]
self._assertBackwardOpMatchesExpected(input_np, warp_np, grad_output,
expected_grad_data,
expected_grad_warp)
def testBatch2Height3byWidth3RGB(self):
for dtype in self.float_types:
input_shape = [2, 3, 3, 3]
input_rgb_data = [
0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1, 30, 105, 2, 40, 115,
3, 50, 125, 4, 60, 135, 5, 70, 145, 6, 0, 5, 13, 54, 135, 226, 37, 8,
234, 90, 255, 1, 30, 105, 2, 40, 115, 3, 50, 125, 4, 60, 135, 5, 70,
145, 6
]
input_np = np.array(input_rgb_data, dtype=dtype).reshape(input_shape)
# 2 batches and 2 samples for each batch.
warp_shape = [2, 2, 2]
warp_data = [0.7, 0.6, 1, 0.7, 0.9, 1.2, 1.3, 1.6]
warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
expected_forward = [[[43.92, 128.4, 65.86], [37.2, 114., 69.2]],
[[40.6, 122.8, 2.5], [51., 126, 4.1]]]
self._assertForwardOpMatchesExpected(input_np, warp_np, expected_forward)
expected_grad_data = [[[[0.12, 0.12, 0.12],
[0.57999998, 0.57999998, 0.57999998],
[0., 0., 0.]],
[[0.18000001, 0.18000001, 0.18000001],
[1.12, 1.12, 1.12], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0.08000001, 0.08000001, 0.08000001],
[0.99999988, 0.99999988, 0.99999988],
[0.11999997, 0.11999997, 0.11999997]],
[[0.02000001, 0.02000001, 0.02000001],
[0.60000008, 0.60000008, 0.60000008],
[0.17999998, 0.17999998, 0.17999998]]]]
expected_grad_warp = [[[33.39999008, -96.20000458], [-26.10000229,
-278.]],
[[-162.99998474, 39.99999619], [21., 63.]]]
grad_output = np.ones([2, 2, 3], dtype=dtype)
self._assertBackwardOpMatchesExpected(input_np, warp_np, grad_output,
expected_grad_data,
expected_grad_warp)
def testOutOfBoundWarps(self):
# (x, y) are both less than 0.
for dtype in self.float_types:
input_shape = [1, 2, 2, 1]
input_data = [10, 5, 13, 54]
input_np = np.array(input_data, dtype=dtype).reshape(input_shape)
warp_shape = [1, 2, 2]
warp_data = [-1, -1, 0.7, 0.6]
warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
expected = [[[0.0], [27.62]]]
self._assertForwardOpMatchesExpected(input_np, warp_np, expected)
expected_grad_data = [[[[0.12], [0.27999997]], [[0.18000001],
[0.42000002]]]]
expected_grad_warp = [[[0., 0.], [22.60000038, 35.20000076]]]
grad_output = np.ones([1, 2, 1], dtype=dtype)
self._assertBackwardOpMatchesExpected(input_np, warp_np, grad_output,
expected_grad_data,
expected_grad_warp)
# One of (x, y) is less than 0.
for dtype in self.float_types:
input_shape = [1, 2, 2, 1]
input_data = [10, 5, 13, 54]
input_np = np.array(input_data, dtype=dtype).reshape(input_shape)
warp_shape = [1, 2, 2]
# -1 is out of bound for grad_warp.
warp_data = [-1, 0.1, 0.7, 0.6]
warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
expected = [[[0.0], [27.62]]]
self._assertForwardOpMatchesExpected(input_np, warp_np, expected)
expected_grad_data = [[[[0.12], [0.27999997]], [[0.18000001],
[0.42000002]]]]
expected_grad_warp = [[[0., 0.], [22.60000038, 35.20000076]]]
grad_output = np.ones([1, 2, 1], dtype=dtype)
self._assertBackwardOpMatchesExpected(input_np, warp_np, grad_output,
expected_grad_data,
expected_grad_warp)
# Both of (x, y) are greater than image size.
for dtype in self.float_types:
input_shape = [1, 2, 2, 1]
input_data = [10, 5, 13, 54]
input_np = np.array(input_data, dtype=dtype).reshape(input_shape)
warp_shape = [1, 2, 2]
      # -0.1 is *in bounds* for grad_warp and grad_data; 2.1 is out of bounds.
warp_data = [-0.1, 0.1, 1.2, 2.1]
warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
expected = [[[0.0], [0.0]]]
self._assertForwardOpMatchesExpected(input_np, warp_np, expected)
expected_grad_data = [[[[0.81], [0.0]], [[0.09], [0.0]]]]
expected_grad_warp = [[[10.30, 2.7], [0.0, 0.0]]]
grad_output = np.ones([1, 2, 1], dtype=dtype)
self._assertBackwardOpMatchesExpected(input_np, warp_np, grad_output,
expected_grad_data,
expected_grad_warp)
# One of (x, y) is greater than image size.
for dtype in self.float_types:
input_shape = [1, 2, 2, 1]
input_data = [10, 5, 13, 54]
input_np = np.array(input_data, dtype=dtype).reshape(input_shape)
warp_shape = [1, 2, 2]
warp_data = [0.1, -0.1, 1.2, 0.1]
warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
expected = [[[0.0], [0.0]]]
self._assertForwardOpMatchesExpected(input_np, warp_np, expected)
expected_grad_data = [[[[0.81], [0.81]], [[0.0], [0.08]]]]
expected_grad_warp = [[[-4.5, 9.5], [-9.9, 39.20]]]
grad_output = np.ones([1, 2, 1], dtype=dtype)
self._assertBackwardOpMatchesExpected(input_np, warp_np, grad_output,
expected_grad_data,
expected_grad_warp)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/resampler/xla/resampler_ops_xla_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ops module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/resampler/python/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tensorflow op performing differentiable resampling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.resampler.ops import gen_resampler_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
_resampler_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_resampler_ops.so"))
def resampler(data, warp, name="resampler"):
"""Resamples input data at user defined coordinates.
The resampler currently only supports bilinear interpolation of 2D data.
Args:
data: Tensor of shape `[batch_size, data_height, data_width,
data_num_channels]` containing 2D data that will be resampled.
warp: Tensor of minimum rank 2 containing the coordinates at which
resampling will be performed. Since only bilinear interpolation is
currently supported, the last dimension of the `warp` tensor must be 2,
representing the (x, y) coordinate where x is the index for width and y is
the index for height.
name: Optional name of the op.
Returns:
Tensor of resampled values from `data`. The output tensor shape is
determined by the shape of the warp tensor. For example, if `data` is of
shape `[batch_size, data_height, data_width, data_num_channels]` and warp of
shape `[batch_size, dim_0, ... , dim_n, 2]` the output will be of shape
`[batch_size, dim_0, ... , dim_n, data_num_channels]`.
Raises:
ImportError: if the wrapper generated during compilation is not present when
the function is called.
"""
with ops.name_scope(name, "resampler", [data, warp]):
data_tensor = ops.convert_to_tensor(data, name="data")
warp_tensor = ops.convert_to_tensor(warp, name="warp")
return gen_resampler_ops.resampler(data_tensor, warp_tensor)
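# A minimal usage sketch (shapes and names below are illustrative only, not
# taken from any real model):
#
#   data: a float tensor of shape [batch, height, width, channels]
#   warp: a float tensor of shape [batch, num_points, 2] holding (x, y)
#   output = resampler(data, warp)  # shape [batch, num_points, channels]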
@ops.RegisterGradient("Resampler")
def _resampler_grad(op, grad_output):
data, warp = op.inputs
grad_output_tensor = ops.convert_to_tensor(grad_output, name="grad_output")
return gen_resampler_ops.resampler_grad(data, warp, grad_output_tensor)
ops.NotDifferentiable("ResamplerGrad")
|
tensorflow-master
|
tensorflow/contrib/resampler/python/ops/resampler_ops.py
|
# pylint: disable=g-bad-file-header
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for contrib.resampler.python.ops.resampler_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import resampler
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
def _bilinearly_interpolate(data, x, y):
"""Performs bilinenar interpolation of grid data at user defined coordinates.
This interpolation function:
a) implicitly pads the input data with 0s.
b) returns 0 when sampling outside the (padded) image.
The effect is that the sampled signal smoothly goes to 0 outside the original
input domain, rather than producing a jump discontinuity at the image
boundaries.
Args:
data: numpy array of shape `[data_height, data_width]` containing data
samples assumed to be defined at the corresponding pixel coordinates.
x: numpy array of shape `[warp_height, warp_width]` containing x coordinates
at which interpolation will be performed.
y: numpy array of shape `[warp_height, warp_width]` containing y coordinates
at which interpolation will be performed.
Returns:
Numpy array of shape `[warp_height, warp_width]` containing interpolated
values.
"""
shape = x.shape
x = np.asarray(x) + 1
y = np.asarray(y) + 1
data = np.lib.pad(data, 1, "constant", constant_values=0)
x_0 = np.floor(x).astype(int)
x_1 = x_0 + 1
y_0 = np.floor(y).astype(int)
y_1 = y_0 + 1
x_0 = np.clip(x_0, 0, data.shape[1] - 1)
x_1 = np.clip(x_1, 0, data.shape[1] - 1)
y_0 = np.clip(y_0, 0, data.shape[0] - 1)
y_1 = np.clip(y_1, 0, data.shape[0] - 1)
i_a = data[y_0, x_0]
i_b = data[y_1, x_0]
i_c = data[y_0, x_1]
i_d = data[y_1, x_1]
w_a = (x_1 - x) * (y_1 - y)
w_b = (x_1 - x) * (y - y_0)
w_c = (x - x_0) * (y_1 - y)
w_d = (x - x_0) * (y - y_0)
samples = (w_a * i_a + w_b * i_b + w_c * i_c + w_d * i_d)
  samples = samples.reshape(shape)
return samples
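# For example (values chosen for illustration): with data = [[1., 2.],
# [3., 4.]], sampling at (x, y) = (0.5, 0.5) lands exactly between the four
# samples and returns (1 + 2 + 3 + 4) / 4 = 2.5, while sampling at
# (x, y) = (-1.5, 0.) falls outside the zero-padded image and returns 0.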
def _make_warp(batch_size, warp_height, warp_width, dtype):
"""Creates batch of warping coordinates."""
x, y = np.meshgrid(np.linspace(0, warp_width - 1, warp_width),
np.linspace(0, warp_height - 1, warp_height))
warp = np.concatenate((x.reshape([warp_height, warp_width, 1]),
y.reshape([warp_height, warp_width, 1])), 2)
warp = np.tile(warp.reshape([1, warp_height, warp_width, 2]),
[batch_size, 1, 1, 1])
warp += np.random.randn(*warp.shape)
return warp.astype(dtype)
class ResamplerTest(test.TestCase):
def test_op_forward_pass_gpu_float32(self):
self._test_op_forward_pass(True, dtypes.float32, 1e-4)
def test_op_forward_pass_gpu_float64(self):
self._test_op_forward_pass(True, dtypes.float64, 1e-5)
def test_op_forward_pass_cpu_float16(self):
self._test_op_forward_pass(False, dtypes.float16, 1e-2)
def test_op_forward_pass_cpu_float32(self):
self._test_op_forward_pass(False, dtypes.float32, 1e-4)
def test_op_forward_pass_cpu_float64(self):
self._test_op_forward_pass(False, dtypes.float64, 1e-5)
def test_op_backward_pass_gpu_float32(self):
self._test_op_backward_pass(True, dtypes.float32, 1e-3)
def test_op_backward_pass_cpu_float16(self):
self._test_op_backward_pass(False, dtypes.float16, 1e-3)
def test_op_backward_pass_cpu_float32(self):
self._test_op_backward_pass(False, dtypes.float32, 1e-4)
def test_op_backward_pass_cpu_float64(self):
self._test_op_backward_pass(False, dtypes.float64, 1e-6)
def _test_op_forward_pass(self, on_gpu, dtype, tol):
np.random.seed(0)
data_width = 7
data_height = 9
data_channels = 5
warp_width = 4
warp_height = 8
batch_size = 10
warp = _make_warp(batch_size, warp_height, warp_width, dtype.as_numpy_dtype)
data_shape = (batch_size, data_height, data_width, data_channels)
data = np.random.rand(*data_shape).astype(dtype.as_numpy_dtype)
with self.test_session(use_gpu=on_gpu, force_gpu=False) as sess:
data_ph = array_ops.placeholder(dtype, shape=(None,) + data.shape[1:])
warp_ph = array_ops.placeholder(dtype, shape=(None,) + warp.shape[1:])
outputs = resampler.resampler(data=data_ph, warp=warp_ph)
self.assertEqual(outputs.get_shape().as_list(),
[None, warp_height, warp_width, data_channels])
out = sess.run(outputs, feed_dict={data_ph: data, warp_ph: warp})
# Generate reference output via bilinear interpolation in numpy
reference_output = np.zeros_like(out)
for batch in xrange(batch_size):
for c in xrange(data_channels):
reference_output[batch, :, :, c] = _bilinearly_interpolate(
data[batch, :, :, c],
warp[batch, :, :, 0],
warp[batch, :, :, 1])
self.assertAllClose(out, reference_output, rtol=tol, atol=tol)
def _test_op_backward_pass(self, on_gpu, dtype, tol):
np.random.seed(13)
data_width = 5
data_height = 4
data_channels = 3
warp_width = 2
warp_height = 6
batch_size = 3
warp = _make_warp(batch_size, warp_height, warp_width, dtype.as_numpy_dtype)
data_shape = (batch_size, data_height, data_width, data_channels)
data = np.random.rand(*data_shape).astype(dtype.as_numpy_dtype)
with self.test_session(use_gpu=on_gpu, force_gpu=False):
data_tensor = constant_op.constant(data)
warp_tensor = constant_op.constant(warp)
output_tensor = resampler.resampler(data=data_tensor, warp=warp_tensor)
grads = test.compute_gradient([data_tensor, warp_tensor], [
data_tensor.get_shape().as_list(),
warp_tensor.get_shape().as_list()
], output_tensor, output_tensor.get_shape().as_list(), [data, warp])
if not on_gpu:
        # On CPU we perform numerical differentiation at the best available
        # precision, and compare against that. This is necessary for the test
        # to pass for float16.
data_tensor_64 = constant_op.constant(data, dtype=dtypes.float64)
warp_tensor_64 = constant_op.constant(warp, dtype=dtypes.float64)
output_tensor_64 = resampler.resampler(data=data_tensor_64,
warp=warp_tensor_64)
grads_64 = test.compute_gradient([data_tensor_64, warp_tensor_64], [
data_tensor.get_shape().as_list(),
warp_tensor.get_shape().as_list()
], output_tensor_64, output_tensor.get_shape().as_list(), [data, warp])
for g, g_64 in zip(grads, grads_64):
self.assertLess(np.fabs(g[0] - g_64[1]).max(), tol)
else:
for g in grads:
self.assertLess(np.fabs(g[0] - g[1]).max(), tol)
def test_op_errors(self):
data_width = 7
data_height = 9
data_depth = 3
data_channels = 5
warp_width = 4
warp_height = 8
batch_size = 10
# Input data shape is not defined over a 2D grid, i.e. its shape is not like
# (batch_size, data_height, data_width, data_channels).
with self.cached_session() as sess:
data_shape = (batch_size, data_height, data_width, data_depth,
data_channels)
data = np.zeros(data_shape)
warp_shape = (batch_size, warp_height, warp_width, 2)
warp = np.zeros(warp_shape)
outputs = resampler.resampler(constant_op.constant(data),
constant_op.constant(warp))
with self.assertRaisesRegexp(errors_impl.UnimplementedError,
"Only bilinear interpolation is currently "
"supported."):
sess.run(outputs)
# Warp tensor must be at least a matrix, with shape [batch_size, 2].
with self.cached_session() as sess:
data_shape = (batch_size, data_height, data_width, data_channels)
data = np.zeros(data_shape)
warp_shape = (batch_size,)
warp = np.zeros(warp_shape)
outputs = resampler.resampler(constant_op.constant(data),
constant_op.constant(warp))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"warp should be at least a matrix"):
sess.run(outputs)
# The batch size of the data and warp tensors must be the same.
with self.cached_session() as sess:
data_shape = (batch_size, data_height, data_width, data_channels)
data = np.zeros(data_shape)
warp_shape = (batch_size+1, warp_height, warp_width, 2)
warp = np.zeros(warp_shape)
outputs = resampler.resampler(constant_op.constant(data),
constant_op.constant(warp))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Batch size of data and warp tensor"):
sess.run(outputs)
    # The warp tensor must contain 2D coordinates, i.e. the last dimension of
    # its shape must be 2.
with self.cached_session() as sess:
data_shape = (batch_size, data_height, data_width, data_channels)
data = np.zeros(data_shape)
warp_shape = (batch_size, warp_height, warp_width, 3)
warp = np.zeros(warp_shape)
outputs = resampler.resampler(constant_op.constant(data),
constant_op.constant(warp))
with self.assertRaisesRegexp(errors_impl.UnimplementedError,
"Only bilinear interpolation is supported, "
"warping"):
sess.run(outputs)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/resampler/python/ops/resampler_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for memory statistics.
@@BytesInUse
@@BytesLimit
@@MaxBytesInUse
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.memory_stats.python.ops.memory_stats_ops import BytesInUse
from tensorflow.contrib.memory_stats.python.ops.memory_stats_ops import BytesLimit
from tensorflow.contrib.memory_stats.python.ops.memory_stats_ops import MaxBytesInUse
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
tensorflow-master
|
tensorflow/contrib/memory_stats/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for memory statistics ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.memory_stats.python.ops import memory_stats_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
class MemoryStatsOpsTest(test_util.TensorFlowTestCase):
def testBytesLimit(self):
# AllocatorStats.bytes_limit is set to zero for CPU allocators, so we skip
# the check.
if not test.is_gpu_available():
return
with self.test_session(use_gpu=True) as sess:
bytes_limit = sess.run(memory_stats_ops.BytesLimit())
self.assertLess(0, bytes_limit)
# Tests the peak memory usage of the following computation.
# a b
# | / |
# c |
# \ |
# \ |
# d
  # The memory for matrix "a" can be reused for matrix "d". Therefore, this
  # computation needs space for only three matrices plus some small overhead.
def testChainOfMatmul(self):
# MaxBytesInUse is registered on GPU only. See kernels/memory_stats_ops.cc.
if not test.is_gpu_available():
return
with self.test_session(use_gpu=True) as sess:
matrix_size = 64
matrix_shape = tensor_shape.TensorShape([matrix_size, matrix_size])
dtype = dtypes.float32
matrix_size_in_bytes = matrix_shape.num_elements() * dtype.size
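      # With the values above this is 64 * 64 * 4 bytes = 16384 bytes per
      # float32 matrix, so the peak-usage assertions below bound the peak
      # between 3 * 16384 and 4 * 16384 bytes.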
a = random_ops.random_uniform(matrix_shape, dtype=dtype)
b = random_ops.random_uniform(matrix_shape, dtype=dtype)
c = math_ops.matmul(a, b)
d = math_ops.matmul(c, b)
sess.run(d)
max_bytes_in_use_op = memory_stats_ops.MaxBytesInUse()
max_bytes_in_use = sess.run(max_bytes_in_use_op)
self.assertGreaterEqual(max_bytes_in_use, matrix_size_in_bytes * 3)
self.assertLess(max_bytes_in_use, matrix_size_in_bytes * 4)
      # Run a chain with 2 ops and make sure BytesInUse captures the
      # intermediate memory usage.
a = random_ops.random_uniform(matrix_shape, dtype=dtype)
with ops.control_dependencies([a]):
bytes_in_use_op = memory_stats_ops.BytesInUse()
with ops.control_dependencies([bytes_in_use_op]):
b = random_ops.random_uniform(matrix_shape, dtype=dtype)
c = math_ops.matmul(a, b)
_, bytes_in_use, max_bytes_in_use = sess.run([c, bytes_in_use_op,
max_bytes_in_use_op])
# intermediate result allocates 1 matrix, max usage is at least 2
self.assertGreaterEqual(bytes_in_use, matrix_size_in_bytes * 1)
self.assertLess(bytes_in_use, matrix_size_in_bytes * 2)
      # Max usage is still 3 matrices because it reflects the maximum from
      # the previous .run call.
self.assertGreaterEqual(max_bytes_in_use, matrix_size_in_bytes * 3)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/memory_stats/python/kernel_tests/memory_stats_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for memory statistics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.memory_stats.ops import gen_memory_stats_ops
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
_memory_stats_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_memory_stats_ops.so"))
def BytesInUse():
"""Generates an op that computes the current memory of a device."""
return gen_memory_stats_ops.bytes_in_use()
def BytesLimit():
"""Generates an op that measures the total memory (in bytes) of a device."""
return gen_memory_stats_ops.bytes_limit()
def MaxBytesInUse():
"""Generates an op that computes the peak memory of a device."""
return gen_memory_stats_ops.max_bytes_in_use()
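# A minimal usage sketch (assumes a GPU device is available, since the CPU
# allocator reports a zero bytes limit and MaxBytesInUse is registered on
# GPU only):
#
#   with tf.Session() as sess:
#     limit, in_use, peak = sess.run(
#         [BytesLimit(), BytesInUse(), MaxBytesInUse()])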
|
tensorflow-master
|
tensorflow/contrib/memory_stats/python/ops/memory_stats_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Recurrent computations library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.recurrent.python.ops.functional_rnn import bidirectional_functional_rnn
from tensorflow.contrib.recurrent.python.ops.functional_rnn import functional_rnn
from tensorflow.contrib.recurrent.python.ops.recurrent import Recurrent
# pylint: enable=unused-import
del absolute_import
del division
del print_function
|
tensorflow-master
|
tensorflow/contrib/recurrent/python/recurrent_api.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Recurrent ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.recurrent.python.ops import recurrent
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test as test_lib
from tensorflow.python.platform import tf_logging as logging
_ElmanState = collections.namedtuple('ElmanState', ('h'))
_ElmanTheta = collections.namedtuple('ElmanTheta', ('w', 'b'))
_ElmanInputs = collections.namedtuple('ElmanInputs', ('x'))
# TODO(drpng): add test for max length computation.
class RecurrentTest(test_util.TensorFlowTestCase):
def testBasic(self):
# pylint:disable=invalid-name
_PolyState = collections.namedtuple('PolyState', ('value', 'x_power'))
_PolyTheta = collections.namedtuple('PolyTheta', ('x'))
_PolyInputs = collections.namedtuple('PolyInputs', ('coeff'))
# pylint:enable=invalid-name
def Poly(theta, state, inputs):
next_state = _PolyState(
value=state.value + inputs.coeff * state.x_power,
x_power=state.x_power * theta.x)
return next_state, []
with self.cached_session() as sess:
theta = _PolyTheta(x=array_ops.constant(2.0))
state = _PolyState(
value=array_ops.constant(0.0),
x_power=array_ops.constant(1.0))
inputs = _PolyInputs(coeff=array_ops.constant([1., 2., 3.]))
# x = 2
# 1 + 2*x + 3*x^2
ret = recurrent.Recurrent(theta, state, inputs, Poly)
acc, state = sess.run(ret)
self.assertAllClose(acc.value, [1., 5., 17.])
self.assertAllClose(acc.x_power, [2., 4., 8.])
self.assertAllClose(state.value, 17.)
self.assertAllClose(state.x_power, 8.)
y = ret[1].value
dx, d_coeff = gradients_impl.gradients(ys=[y], xs=[theta.x, inputs.coeff])
dx_val, d_coeff_val = sess.run([dx, d_coeff])
# 2 + 6*x
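      # At x = 2 this evaluates to 2 + 6 * 2 = 14, and dy/d_coeff is
      # [x^0, x^1, x^2] = [1, 2, 4].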
self.assertAllClose(dx_val, 14.)
self.assertAllClose(d_coeff_val, [1., 2., 4.])
# acc = [1, 1+2x, 1+2x+3x^2]
# sum(acc) = 3 + 4x + 3x^2
acc = ret[0].value
dx, d_coeff = gradients_impl.gradients(
ys=[math_ops.reduce_sum(acc)], xs=[theta.x, inputs.coeff])
dx_val, d_coeff_val = sess.run([dx, d_coeff])
# 4 + 6*x
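      # At x = 2 this evaluates to 4 + 6 * 2 = 16, and d sum(acc)/d_coeff is
      # [3, x, x^2] = [3, 2, 4]... summed over the terms it appears in:
      # [3, 2 * x, x^2] = [3, 4, 4].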
self.assertAllClose(dx_val, 16.)
self.assertAllClose(d_coeff_val, [3., 4., 4.])
@staticmethod
def Rand(shape):
return random_ops.random_uniform(
shape, minval=-0.2, maxval=0.2, dtype=dtypes.float64)
@staticmethod
def Elman(theta, state0, inputs):
h0, w, b, x = state0.h, theta.w, theta.b, inputs.x
xw = math_ops.matmul(array_ops.concat([x, h0], axis=1), w)
h1 = math_ops.sigmoid(xw + b)
state1 = _ElmanState(h=h1)
return (state1, state1)
@staticmethod
def ElmanGrad(theta, state0, inputs, extras, dstate1):
@function.Defun()
def Grad(h0, w, b, x, h1, dh1):
del b
# We hand-roll the gradient for the 2nd half of the cell as a demo.
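      # Since h1 = sigmoid(xw + b), dh1/d(xw + b) = h1 * (1 - h1), so the
      # incoming gradient dh1 is scaled by h1 * (1 - h1) below.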
dxwb = (dh1 * (1 - h1) * h1)
dxw, db = dxwb, math_ops.reduce_sum(dxwb, axis=0)
      # Uses tf.gradients for the 1st half of the cell as a demo.
xw = math_ops.matmul(array_ops.concat([x, h0], axis=1), w)
dh0, dx, dw = gradients_impl.gradients(
ys=[xw], xs=[h0, x, w], grad_ys=[dxw])
return dh0, dx, dw, db
dh0, dx, dw, db = Grad(state0.h, theta.w, theta.b, inputs.x,
extras.h, dstate1.h)
dstate0 = _ElmanState(h=dh0)
dinputs = _ElmanInputs(x=dx)
return (_ElmanTheta(w=dw, b=db), dstate0, dinputs)
@staticmethod
def ElmanOut(state1):
return _ElmanState(x=state1.h)
@staticmethod
def ElmanOutGrad(dout):
return _ElmanState(h=dout.x)
def testElman(self):
for seqlen, use_grad in [(1, False), (1, True), (7, False), (7, True)]:
logging.info('== Elman: seqlen=%s, use_grad=%s', seqlen, use_grad)
self._ParameterizedTestElman(seqlen, use_grad)
def _ParameterizedTestElman(self, seqlen, use_grad):
with self.cached_session() as sess:
random_seed.set_random_seed(342462)
batch = 3
dims = 4
theta = _ElmanTheta(w=RecurrentTest.Rand([2 * dims, dims]),
b=RecurrentTest.Rand([dims]))
state0 = _ElmanState(h=RecurrentTest.Rand([batch, dims]))
inputs = _ElmanInputs(x=RecurrentTest.Rand([seqlen, batch, dims]))
# Statically unrolled.
s = state0
out = []
for i in xrange(seqlen):
inp = _ElmanInputs(x=inputs.x[i, :])
s, _ = RecurrentTest.Elman(theta, s, inp)
out += [s.h]
acc0, final0 = array_ops.stack(out), s.h
loss0 = math_ops.reduce_sum(acc0) + math_ops.reduce_sum(final0)
(dw0, db0, dh0, di0) = gradients_impl.gradients(
loss0, [theta.w, theta.b, state0.h, inputs.x])
acc1, final1 = recurrent.Recurrent(
theta=theta,
state0=state0,
inputs=inputs,
cell_fn=RecurrentTest.Elman,
cell_grad=RecurrentTest.ElmanGrad if use_grad else None)
assert isinstance(acc1, _ElmanState)
assert isinstance(final1, _ElmanState)
acc1, final1 = acc1.h, final1.h
loss1 = math_ops.reduce_sum(acc1) + math_ops.reduce_sum(final1)
(dw1, db1, dh1, di1) = gradients_impl.gradients(
loss1, [theta.w, theta.b, state0.h, inputs.x])
      # Fetches a few values and compares them.
(acc0, acc1, final0, final1, dw0, dw1, db0, db1, dh0, dh1, di0,
di1) = sess.run(
[acc0, acc1, final0, final1, dw0, dw1, db0, db1, dh0, dh1, di0, di1])
self.assertAllClose(acc0, acc1)
self.assertAllClose(final0, final1)
self.assertAllClose(dw0, dw1)
self.assertAllClose(db0, db1)
self.assertAllClose(dh0, dh1)
self.assertAllClose(di0, di1)
if __name__ == '__main__':
test_lib.main()
|
tensorflow-master
|
tensorflow/contrib/recurrent/python/kernel_tests/recurrent_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Functional RNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.recurrent.python.ops import functional_rnn
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import rnn as rnn_lib
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import test as test_lib
from tensorflow.python.platform import tf_logging as logging
def _CreateStackedLstmCell(*cell_sizes):
subcells = [rnn_cell_impl.LSTMCell(cell_size) for cell_size in cell_sizes]
return rnn_cell_impl.MultiRNNCell(subcells)
class FunctionalRnnTest(test_util.TensorFlowTestCase):
_BATCH_SIZE = 3
_TOTAL_TIME = 5
_INPUT_SIZE = 11
_NUM_UNITS = 7
  # Set this to a file path to have testRunLstm write out the LSTM graph def.
_LSTM_GRAPH_DEF_FILEPATH = None
_CELLDEFS = {
'gru': (rnn_cell_impl.GRUCell, [_NUM_UNITS]),
'lstm': (rnn_cell_impl.LSTMCell, [_NUM_UNITS]),
'stacked_lstm': (_CreateStackedLstmCell, [_NUM_UNITS] * 3)
}
def _CreateCell(self, celldef_name):
func, args = self._CELLDEFS[celldef_name]
return func(*args)
def _CreateInputs(self, time_major=False):
if time_major:
inputs = np.random.random([
FunctionalRnnTest._TOTAL_TIME, FunctionalRnnTest._BATCH_SIZE,
FunctionalRnnTest._INPUT_SIZE
])
else:
inputs = np.random.random([
FunctionalRnnTest._BATCH_SIZE, FunctionalRnnTest._TOTAL_TIME,
FunctionalRnnTest._INPUT_SIZE
])
# Always leave one time slot empty, to check max_length behavior.
sequence_length = np.random.randint(
0, high=FunctionalRnnTest._TOTAL_TIME - 1,
size=FunctionalRnnTest._BATCH_SIZE,
dtype=np.int)
return (inputs, sequence_length)
def _CreateSymmetricInputs(self):
# total time = batch size
inputs = np.zeros(
(FunctionalRnnTest._BATCH_SIZE, FunctionalRnnTest._BATCH_SIZE,
FunctionalRnnTest._INPUT_SIZE))
for i in range(FunctionalRnnTest._BATCH_SIZE):
for j in range(i, FunctionalRnnTest._BATCH_SIZE):
inputs[i][j] = np.random.random([FunctionalRnnTest._INPUT_SIZE])
inputs[j][i] = inputs[i][j]
# Always leave one time slot empty, to check max_length behavior.
sequence_length = np.random.randint(
0,
high=FunctionalRnnTest._BATCH_SIZE - 1,
size=FunctionalRnnTest._BATCH_SIZE,
dtype=np.int)
return (inputs, sequence_length)
def _CreateRnnGraph(self,
create_rnn_computation_func,
cell,
tf_inputs,
tf_sequence_length,
is_bidirectional,
initial_state=None,
time_major=None,
scope=None):
if is_bidirectional:
tf_result = create_rnn_computation_func(
cell_fw=cell,
cell_bw=cell,
inputs=tf_inputs,
sequence_length=tf_sequence_length,
dtype=dtypes.float32,
time_major=time_major,
scope=scope)
else:
tf_result = create_rnn_computation_func(
cell=cell,
inputs=tf_inputs,
sequence_length=tf_sequence_length,
initial_state=initial_state,
dtype=dtypes.float32,
time_major=time_major,
scope=scope)
grad = gradients_impl.gradients(tf_result, variables.trainable_variables())
return {'inference': tf_result, 'grad': grad}
def _MaybeResetVariables(self, variable_cache, sess, var_list):
"""Possibly resets the variables to a previously seen value."""
reset_ops = []
fetches = []
for var in var_list:
if var.name in variable_cache:
reset_ops += [var.assign(variable_cache[var.name])]
else:
fetches += [(var.name, var)]
if reset_ops:
sess.run(reset_ops)
if fetches:
val = sess.run(dict(fetches))
for n, v in val.items():
assert n not in variable_cache
variable_cache[n] = v
def _RunRnn(self, numpy_inputs, numpy_slen, cell_name, variable_cache,
is_dynamic, time_major=None, is_bidirectional=False):
with ops.Graph().as_default() as graph:
tf_inputs = array_ops.placeholder(
dtypes.float32, shape=numpy_inputs.shape)
tf_slen = array_ops.placeholder(dtypes.int32)
feeds = {tf_inputs: numpy_inputs, tf_slen: numpy_slen}
cell = self._CreateCell(cell_name)
if is_dynamic:
if is_bidirectional:
fn = rnn_lib.bidirectional_dynamic_rnn
else:
fn = rnn_lib.dynamic_rnn
else:
if is_bidirectional:
fn = functional_rnn.bidirectional_functional_rnn
else:
fn = functional_rnn.functional_rnn
fetches = self._CreateRnnGraph(
fn, cell, tf_inputs, tf_slen, is_bidirectional, time_major=time_major)
with self.session(graph=graph) as sess:
sess.run(variables.global_variables_initializer())
        # Note that cell.trainable_variables is not always set.
self._MaybeResetVariables(variable_cache, sess,
variables.trainable_variables())
val = sess.run(fetches, feed_dict=feeds)
graph_def = graph.as_graph_def()
return graph_def, val
def testRunLstm(self):
"""Runs a simple LSTM. Does not check output."""
np_inputs, np_slen = self._CreateInputs()
var_cache = {}
graphdef, _ = self._RunRnn(np_inputs, np_slen, 'lstm', var_cache, False)
logging.info('graphdef: %s', graphdef)
if self._LSTM_GRAPH_DEF_FILEPATH:
with open(self._LSTM_GRAPH_DEF_FILEPATH, 'w') as f:
f.write(str(graphdef))
def testLstm(self):
"""Checks an LSTM against the reference implementation."""
np_inputs, np_slen = self._CreateInputs()
var_cache = {}
_, func_rnn = self._RunRnn(np_inputs, np_slen, 'lstm', var_cache, False)
_, dyn_rnn = self._RunRnn(np_inputs, np_slen, 'lstm', var_cache, True)
self.assertAllClose(dyn_rnn['inference'], func_rnn['inference'])
self.assertAllClose(dyn_rnn['grad'], func_rnn['grad'])
def testGru(self):
"""Checks a GRU cell against the reference implementation."""
np_inputs, np_slen = self._CreateInputs()
var_cache = {}
_, func_rnn = self._RunRnn(np_inputs, np_slen, 'gru', var_cache, False)
_, dyn_rnn = self._RunRnn(np_inputs, np_slen, 'gru', var_cache, True)
self.assertAllClose(dyn_rnn['inference'], func_rnn['inference'])
self.assertAllClose(dyn_rnn['grad'], func_rnn['grad'])
def testStackedLstm(self):
"""Checks a stacked LSTM cell against the reference implementation."""
np_inputs, np_slen = self._CreateInputs()
var_cache = {}
args = [np_inputs, np_slen, 'stacked_lstm', var_cache]
_, func_rnn = self._RunRnn(*(args + [False]))
_, dyn_rnn = self._RunRnn(*(args + [True]))
self.assertAllClose(dyn_rnn['inference'], func_rnn['inference'])
self.assertAllClose(dyn_rnn['grad'], func_rnn['grad'])
def testLstmWithTimeMajorInputs(self):
"""Checks an LSTM against the reference implementation, with time_major."""
time_major = True
np_inputs, np_slen = self._CreateInputs(time_major=True)
var_cache = {}
args = [np_inputs, np_slen, 'lstm', var_cache]
_, func_rnn = self._RunRnn(*(args + [False]), time_major=time_major)
_, dyn_rnn = self._RunRnn(*(args + [True]), time_major=time_major)
self.assertAllClose(dyn_rnn['inference'], func_rnn['inference'])
self.assertAllClose(dyn_rnn['grad'], func_rnn['grad'])
def testBidirectionalLstmWithTimeMajorInputs(self):
"""Checks a bi-directional LSTM with time-major inputs."""
time_major = True
np_inputs, np_slen = self._CreateInputs(time_major)
var_cache = {}
args = [np_inputs, np_slen, 'lstm', var_cache]
_, func_rnn = self._RunRnn(
*(args + [False]), time_major=time_major, is_bidirectional=True)
_, dyn_rnn = self._RunRnn(
*(args + [True]), time_major=time_major, is_bidirectional=True)
self.assertAllClose(dyn_rnn['inference'], func_rnn['inference'])
# TODO(b/112170761): comment out this line after the bug is fixed.
# self.assertAllClose(dyn_rnn['grad'], func_rnn['grad'])
def testBidirectionalLstm(self):
"""Checks time-major and batch-major rnn produce consistent results."""
time_major_inputs, np_slen = self._CreateInputs(True)
batch_major_inputs = np.transpose(time_major_inputs, [1, 0, 2])
var_cache = {}
args = [np_slen, 'lstm', var_cache, False]
_, time_major_rnn = self._RunRnn(
*([time_major_inputs] + args), time_major=True, is_bidirectional=True)
_, batch_major_rnn = self._RunRnn(
*([batch_major_inputs]+ args), time_major=False, is_bidirectional=True)
    # Convert the batch-major outputs to be time-major before the comparison.
outputs, state = batch_major_rnn['inference']
outputs = [np.transpose(x, [1, 0, 2]) for x in outputs]
batch_major_rnn['inference'] = [outputs, state]
self.assertAllClose(time_major_rnn['inference'],
batch_major_rnn['inference'])
self.assertAllClose(time_major_rnn['grad'], batch_major_rnn['grad'])
def testBidirectionalLstmWithSymmetricInputs(self):
"""Checks a bi-directional LSTM with symmetric inputs.
    Time-major and batch-major RNNs produce the same result with symmetric
    inputs.
"""
np_inputs, np_slen = self._CreateSymmetricInputs()
var_cache = {}
args = [np_inputs, np_slen, 'lstm', var_cache]
_, time_major_func_rnn = self._RunRnn(
*(args + [False]), time_major=True, is_bidirectional=True)
_, batch_major_func_rnn = self._RunRnn(
*(args + [False]), time_major=False, is_bidirectional=True)
_, time_major_dyn_rnn = self._RunRnn(
*(args + [True]), time_major=True, is_bidirectional=True)
_, batch_major_dyn_rnn = self._RunRnn(
*(args + [True]), time_major=False, is_bidirectional=True)
self.assertAllClose(time_major_func_rnn['inference'],
batch_major_func_rnn['inference'])
self.assertAllClose(time_major_func_rnn['grad'],
batch_major_func_rnn['grad'])
self.assertAllClose(time_major_dyn_rnn['inference'],
batch_major_dyn_rnn['inference'])
self.assertAllClose(time_major_dyn_rnn['grad'], batch_major_dyn_rnn['grad'])
self.assertAllClose(time_major_func_rnn['inference'],
batch_major_dyn_rnn['inference'])
self.assertAllClose(time_major_func_rnn['grad'],
batch_major_dyn_rnn['grad'])
if __name__ == '__main__':
test_lib.main()
|
tensorflow-master
|
tensorflow/contrib/recurrent/python/kernel_tests/functional_rnn_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Recurrent computation.
The main interface of this module is Recurrent().
A recurrent computation describes an auto-regressive process, where outputs
of one time step are fed to the input of the next time step.
This module uses:
theta: the "weights" each RNN uses.
state0: the initial state of each RNN.
  cell_fn: A python function describing RNN cell. It must have the following
signature:
cell_fn: (theta, state0, inputs) -> (state1, extras)
state1 is the next RNN state, extras are computed by cell_fn
and the library forwards extras to cell_fn's gradient function.
cell_grad: A python function describing the backprop gradient function
    for the RNN cell. It must have the following signature:
cell_grad: (theta, state0, inputs, extras, dstate1) -> (
dtheta, dstate0, dinputs)
dstate1 is what the backprop algorithm provides representing
gradients of state1 w.r.t. the final loss.
In this module, we handle structures of tensors for theta, state0, inputs,
and extras. The structure is an arbitrarily nested python structure, such
as a dictionary of named tuples.
Because the computation is a left-to-right chain, a single in-place accumulator
can be used rather than a stack. Thus a special gradient was written to reduce
unnecessary memory usage.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import inplace_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.inplace_ops import alias_inplace_update
from tensorflow.python.util import nest
def _AssertIsCompatible(a, b):
"""Checks that `a` and `b` are nested structures of the same type."""
# TODO(drpng): implement.
del a
del b
def _Index(struct, index):
"""Returns a structure with `x[index]` for each tensor `x` in the structure.
Args:
struct: A structure of tensors.
index: A scalar integer tensor. Performance is better if `index` is
on the host memory.
Returns:
A structure of tensors congruent to `struct`.
For each key in `ret`, `rets[key] = struct[key][index]`.
"""
index = ops.convert_to_tensor(index)
index.get_shape().assert_has_rank(0)
return nest.map_structure(lambda x: array_ops.gather(x, index), struct)
def _Update(struct_acc, struct_x, t):
"""Updates t-th row in accumulators.
Args:
struct_acc: The accumulators. A structure of tensors.
struct_x: The new values. A structure of tensors congruent to `struct_acc`.
t: A scalar integer. Performance is better if `t` is on the device
memory.
Returns:
A structure of tensors. Say, ret is a returned dictionary. Then, for
each key, we have:
ret[key] = struct_acc[key];
ret[key][t, :] = struct_x[key]
"""
to_skip_update = set()
acc_lst = nest.flatten(struct_acc)
x_lst = nest.flatten(struct_x)
t = math_ops.cast(
[t], dtypes.int32) # tf.compat.v1.to_int32 casts on-device tensors.
lst = []
for acc, x in zip(acc_lst, x_lst):
if acc in to_skip_update:
# Until b/62105730 is fixed, we need to avoid inplace update for tensors
# of rank 1. could reshape to handle it, but we don't really need the
# values applied to these, so just skip their modification.
lst += [acc]
else:
lst += [alias_inplace_update(acc, t, array_ops.expand_dims(x, 0))]
return nest.pack_sequence_as(struct_acc, lst)
def _SeqLenDim(struct):
"""Returns the 0-th dim size of tensors in a structure of tensors.
This is the max sequence length according to the shape of the inputs.
Args:
struct: A structure of tensors. Every tensor's 0-th dim has the same size.
Returns:
A scalar tensor which is the size of 0-th dim of every tensors in struct.
"""
xs = nest.flatten(struct)
assert xs
dim0 = array_ops.shape(xs[0])[0]
return dim0
def _Flatten(struct):
"""Flattens a structure."""
return nest.flatten(struct)
def _Pack(elements, struct_template):
"""Packs the list of tensors according to the structure.
In the event that `elements` should be a scalar, `struct_template` must
contain exactly one non-trivial element (for instance, `[[], {'x':elt}]`).
Args:
elements: Elements to be packed. A list of tensor, or a single tensor.
struct_template: The container structure in which to pack them.
Returns:
A python structure of the same type as `struct_template`, containing
`elements` as its contained elements.
"""
if not nest.is_sequence(elements):
return nest.pack_sequence_as(struct_template, [elements])
return nest.pack_sequence_as(struct_template, elements)
def _EmptyAcc(slen, struct_template):
"""Creates a set of accumulators for tensors in structure.
Args:
slen: The sequence length. A scalar tensor.
struct_template: A structure of tensors.
Returns:
A structure congruent to `struct_template`. Say ret is a returned
dictionary. Then, `ret.key`, a tensor, has the same dtype as
`struct_template.key`. The tensor's shape has 1 more dimension
than the tensor `struct_template.key`. The extra 0-th dimension is of size
`slen`. E.g., if `slen=10` and `struct_template.key`'s shape is `[3, 5]`,
then, `ret.key`'s shape is `[10, 3, 5]`.
"""
def _EmptyAccForTensor(tensor):
return inplace_ops.empty(
array_ops.concat([[slen], array_ops.shape(tensor)], axis=0),
tensor.dtype,
init=True)
return nest.map_structure(_EmptyAccForTensor, struct_template)
def _EmptyLike(struct):
"""Creates a set of empty initialized tensors.
Args:
struct: A structure of tensors.
Returns:
A struct of tensors. Each tensor has the same shape and dtype as
its corresponding tensor in `struct`. And each tensor is initialized.
"""
return nest.map_structure(
lambda x: inplace_ops.empty_like(x, init=True), struct)
def _Add(struct_x, struct_y):
"""Adds tensors in `struct_x` with respective tensors in `struct_y`.
Args:
struct_x: A struct of tensors.
struct_y: A struct of tensors congruent to `struct_x`.
Returns:
A struct of tensors. Each element of the returned value
equals `x + y`, with corresponding values in `struct_x` and `struct_y`.
"""
list_x = nest.flatten(struct_x)
list_y = nest.flatten(struct_y)
z = []
for x, y in zip(list_x, list_y):
z += [math_ops.add(x, y)]
return nest.pack_sequence_as(struct_x, z)
def _Dtypes(struct):
"""Returns all tensors' data types in a list."""
return [x.dtype for x in nest.flatten(struct)]
def _ConvertNoneGradientToZeros(xs, dxs):
"""Sanitize dxs so that None becomes zeros appropriately.
Args:
xs: A list of tensors.
dxs: A list of tensors. dxs[i] corresponds to xs[i]'s gradient.
Returns:
A structure same as `dxs` with `None` replaced by a zero tensor.
"""
list_xs = nest.flatten(xs)
list_dxs = nest.flatten(dxs)
# If x does not get any backprop-ed gradient, propagate zeros.
rets = []
for (x, dx) in zip(list_xs, list_dxs):
if dx is None:
rets.append(array_ops.zeros_like(x))
else:
rets.append(dx)
return nest.pack_sequence_as(dxs, rets)
# All structures are flattened for use internally. This is for simplicity
# and also to use the Defun construct.
# In the forward pass (inference), the computation is structured as follows.
# Forward: [gradient = _Recurrent.Grad]
# Flatten structures, create accumulators.
# for t = 0..max_input_length:
# Defun ForwardLoopBody:
# Defun Fwd: flatten/pack around cell_fn
# state1 = Fwd(inputs[t], state0)
# acc_state += [state1]
# Pack structures.
# During the backward pass (backpropping the gradient from the last time
# step to the first, through the structure), the computation is structured
# as follows.
# Grad:
# Flatten structures.
# Defun Backward:
#     Create accumulated derivatives: d_theta, d_inputs, d_acc_state.
# Regarding the note at the top of the file, there is only one accumulator
# for d_theta accumulated over the whole sequence.
# for t = max_input_length -1..0:
# Defun BackwardLoopBody:
# Retrieve acc_state[t] computed in the forward pass.
#       Defun Bak: flatten/pack around cell_fn_grad.
# d_state1 is d_state0 from previous step (ie next time).
# d_acc_state[dev_t] += d_state1
# d_theta_t, d_state0, d_inputs_t, = Bak()
# d_inputs[dev_t] += d_inputs
# d_theta += d_theta_t
# d_acc_state[t] += d_state1
# Pack structures and return.
class _Recurrent(object):
"""A helper class to construct a recurrent neural net."""
def __init__(self,
cell_fn,
cell_grad,
theta,
state0,
inputs,
max_input_length,
extras,
use_tpu,
aligned_end=False):
"""RNN helper class.
Args:
cell_fn: A python function, which computes:
state1, extras = cell_fn(theta, state0, inputs[t, :])
cell_grad: A python function which computes:
dtheta, dstate0, dinputs[t, :] = cell_grad(
theta, state0, inputs[t, :], extras, dstate1)
theta: weights. A structure of tensors.
state0: initial state. A structure of tensors.
inputs: inputs. A structure of tensors.
max_input_length: None, or the maximum effective length of the input over
all batches. A scalar tensor.
extras: A structure of tensors. The 2nd return value of every
invocation of cell_fn is a structure of tensors with matching keys
and shapes of this `extras`.
      use_tpu: A boolean indicating whether the computation is meant to
run on a TPU.
aligned_end: A boolean indicating whether the sequence is aligned at
the end.
"""
self._theta = theta
self._state = state0
self._inputs = inputs
self._max_input_length = self._MaybeComputeMaxInputLength(
inputs, max_input_length)
self._cell_fn = cell_fn
self._cell_grad = cell_grad
self._extras = extras
self._aligned_end = aligned_end
# pylint: disable=unbalanced-tuple-unpacking
# NOTE: TF Function (Fwd, Bak, ForwardLoopBody, BackwardLoopBody,
# Forward and Backward defined below) simply takes a list of
# Tensors and returns a list of Tensors. When we pass in a
# structure (a list of structures of Tensors), we use _Flatten to
# convert the structure into a list of tensor. Conversely, the
# following code often uses _Pack to formulate a structure from a
# list of tensors based on a "template".
# Wraps cell_fn in a TF Function:
# state1 = cell_fn(theta, state0, inputs)
fwd_sig = [self._theta, self._state, self._inputs]
compiled = use_tpu
noinline = not compiled
dev_t_type = dtypes.int32 if use_tpu else dtypes.int64
@function.Defun(*_Dtypes(fwd_sig))
def Fwd(*args):
(theta, state0, inputs) = _Pack(args, fwd_sig)
state1, extras = self._cell_fn(theta, state0, inputs)
assert not function.get_extra_args(), (
'cell_fn is not pure with extra args: %s.' %
(function.get_extra_args()))
_AssertIsCompatible(state1, self._state)
_AssertIsCompatible(extras, self._extras)
return _Flatten([state1, extras])
# Wraps cell_fn in a TF Function as a for-loop's body.
#
# The loop state is composed of:
# t: The loop variable. Timestep id.
# dev_t: The loop variable mirrored on the device.
# theta: the recurrent net's weights.
# state0: the previous recurrent state.
# inputs: inputs to the recurrent net. inputs[t, :] are for the timestep t.
# acc_state: Each timestep's computed new state is also stashed into
# acc_state.
# acc_extras: Each timestep's computed extras is stashed into acc_extras
fwdloop_sig = [
self._theta, self._state, self._inputs, self._state, self._extras
]
@function.Defun(dtypes.int32, dev_t_type, *_Dtypes(fwdloop_sig))
def ForwardLoopBody(*args):
"""The body of forward loop."""
t, dev_t = args[0], args[1]
(theta, state0, inputs, acc_state, acc_extras) = _Pack(
args[2:], fwdloop_sig)
inputs_t = _Index(inputs, t) # external input at time step t.
fwd = Fwd(*_Flatten([theta, state0, inputs_t]))
state1, extras = _Pack(fwd, [self._state, self._extras])
# Saves state1 and extras in their accumulators.
acc_state = _Update(acc_state, state1, dev_t)
acc_extras = _Update(acc_extras, extras, dev_t)
return [math_ops.add(dev_t, 1)] + _Flatten(
[theta, state1, inputs, acc_state, acc_extras])
def Grad(op, *args):
"""The python grad function for the Forward function."""
# NOTE: tf.gradient backprops None for int32/int64 while zeros
# for float32/float64. For consistency, we always backprop
# zeros.
args = list(args)
for i, dy in enumerate(args):
if dy is None:
args[i] = array_ops.zeros_like(op.outputs[i])
# TODO(drpng): getting the extra state here?
op_inputs = [x for x in op.inputs]
op_struct = [
self._theta, self._state, self._inputs, self._max_input_length,
self._extras
]
(theta, state0, inputs, max_input_length, _) = _Pack(op_inputs, op_struct)
# acc_state and acc_extras are computed by the Forward pass and
# needed by the Backward pass.
acc_state, _, acc_extras = _Pack([x for x in op.outputs],
[self._state, self._state, self._extras])
# Forward computes acc_state, the final state and
# acc_extras. tf.gradients gives us their gradients w.r.t. the
# final loss. Because acc_extras are not exposed by Compute(),
# it has no gradients w.r.t. the final loss (i.e., by
# construction, it must be zeros).
d_acc_state, d_state1, _ = _Pack(args,
[self._state, self._state, self._extras])
return Backward(*_Flatten([
theta, state0, inputs, max_input_length, acc_state, acc_extras,
d_acc_state, d_state1
]))
# Forward calls ForwardLoopBody n times. Each time computes one
# time step of the recurrent net.
forward_sig = [
self._theta, self._state, self._inputs, self._max_input_length,
self._extras
]
@function.Defun(
*_Dtypes(forward_sig), python_grad_func=Grad, noinline=noinline)
def Forward(*args):
"""Forward pass of the recurrent net."""
theta, state0, inputs, max_input_length, extras = _Pack(args, forward_sig)
slen_dim = _SeqLenDim(inputs)
# Creates accumulators for state0 and extras.
acc_state = _EmptyAcc(slen_dim, state0)
acc_extras = _EmptyAcc(slen_dim, extras)
t = slen_dim - max_input_length if self._aligned_end else 0
dev_t = math_ops.cast(t, dtypes.int32) if use_tpu else math_ops.cast(
t, dtypes.int64)
run = functional_ops.For(
start=t,
limit=slen_dim if self._aligned_end else max_input_length,
delta=1,
inputs=[dev_t] + _Flatten(
[theta, state0, inputs, acc_state, acc_extras]),
body=ForwardLoopBody,
rewrite_with_while=compiled)
_, state1, _, acc_state, acc_extras = _Pack(
run[1:],
[self._theta, self._state, self._inputs, self._state, self._extras])
return _Flatten([acc_state, state1, acc_extras])
# The per-step backward computes:
# d_theta, d_state0, d_inputs = cell_grad(
# theta, state0, inputs, extras, d_state1)
# where d_state1 is the backprop-ed gradient for state1, and
    # extras is computed by the forward step to facilitate the
# backward step.
bak_sig = [
self._theta, self._state, self._inputs, self._extras, self._state
]
@function.Defun(*_Dtypes(bak_sig))
def Bak(*args):
"""Backward step."""
(theta, state0, inputs, extras, d_state1) = _Pack(args, bak_sig)
(dtheta, dstate0, dinputs) = self._cell_grad(theta, state0, inputs,
extras, d_state1)
assert not function.get_extra_args(), (
'cell_grad is not pure with extra args: %s.' %
(function.get_extra_args()))
_AssertIsCompatible(dtheta, self._theta)
_AssertIsCompatible(dstate0, self._state)
_AssertIsCompatible(dinputs, self._inputs)
return _Flatten(
_ConvertNoneGradientToZeros([theta, state0, inputs],
[dtheta, dstate0, dinputs]))
# Define defuns used by a functional_ops.If in BackwardLoopBody.
state_if_sig = [self._state, self._state]
@function.Defun(*_Dtypes(state_if_sig))
def ReturnOrigState0(*args):
"""Returns original state0 from inputs."""
(_, orig_state0) = _Pack(args, state_if_sig)
return nest.flatten(orig_state0)
@function.Defun(*_Dtypes(state_if_sig))
def ReturnAccState(*args):
"""Returns acc_state[t-1] from inputs."""
(acc_state, _) = _Pack(args, state_if_sig)
return nest.flatten(acc_state)
# Wraps cell_grad gradient function in a TF Function as a
# for-loop's body for the Backward pass.
#
# The loop state is composed of:
# t: The loop variable. Timestep id.
# state0: the initial state for the entire backward loop.
# dev_t: The loop variable mirrored on the device.
# theta: the recurrent net's weights.
# inputs: inputs to the recurrent net. inputs[t, :] are for the timestep t.
# acc_state: Each timestep's computed new state was stashed into
# acc_state by the Forward pass.
# acc_extras: Each timestep's computed extras was stashed into
# acc_extras by the Forward pass.
# d_theta: All timestep's gradient for theta is accumulated (added) into
# d_theta.
# d_state1: The backprop-ed gradient for the new stated computed by
# timestep t.
# d_inputs: d_inputs[t, :] is populated by the backward time step t.
# d_acc_state: The backprop-ed gradient for acc_state.
bakloop_sig = [
self._theta, self._state, self._inputs, self._state, self._extras,
self._theta, self._state, self._inputs, self._state
]
@function.Defun(dtypes.int32, dev_t_type, *_Dtypes(bakloop_sig))
def BackwardLoopBody(*args):
"""Backward loop body function."""
t, dev_t = args[0], args[1]
(theta, orig_state0, inputs, acc_state, acc_extras, d_theta, d_state1,
d_inputs, d_acc_state) = _Pack(args[2:], bakloop_sig)
# The input recurrent state for time step t is previous time step's
# output, or the original state0 when on time step 0.
state_from_acc = _Index(acc_state, math_ops.maximum(0, t - 1))
state0 = functional_ops.If(
math_ops.equal(t, array_ops.constant(0, dtypes.int32)),
_Flatten([state_from_acc, orig_state0]), ReturnOrigState0,
ReturnAccState)
state0 = nest.pack_sequence_as(orig_state0, state0)
# The external inputs for time step t.
inputs_t = _Index(inputs, t)
# The extras for time step t.
extras_t = _Index(acc_extras, t)
d_state1 = _Add(_Index(d_acc_state, t), d_state1)
(d_theta_t, d_state0, d_inputs_t) = _Pack(
Bak(*_Flatten([theta, state0, inputs_t, extras_t, d_state1])),
[self._theta, self._state, self._inputs])
d_theta = _Add(d_theta, d_theta_t)
d_inputs = _Update(d_inputs, d_inputs_t, dev_t)
return [math_ops.subtract(dev_t, 1)] + _Flatten([
theta, orig_state0, inputs, acc_state, acc_extras, d_theta, d_state0,
d_inputs, d_acc_state
])
# Backward calls BackwardLoopBody n times. Each time computes the backprop
# for one time step of the recurrent net.
backward_sig = [
self._theta, self._state, self._inputs, self._max_input_length,
self._state, self._extras, self._state, self._state
]
@function.Defun(*_Dtypes(backward_sig), noinline=noinline)
def Backward(*args):
"""Backward pass for the recurrent net."""
# theta, state0, inputs are Forward's inputs.
# acc_state is the accumulated 1st output of Forward.
# acc_extras is the accumulated 2nd output of Forward.
# d_acc_state is the gradient for acc_state.
# d_state1 is the gradient for the final state computed by Forward.
(theta, state0, inputs, max_input_length, acc_state, acc_extras,
d_acc_state, d_state1) = _Pack(args, backward_sig)
# Accumulators for gradients.
d_theta = _EmptyLike(theta)
d_inputs = _EmptyLike(inputs)
slen_dim = _SeqLenDim(inputs)
      # Loop backwards. Note the loop's limit is open-ended, so it goes through
# t=0.
t = slen_dim - 1 if self._aligned_end else max_input_length - 1
dev_t = math_ops.cast(t, dtypes.int32) if use_tpu else math_ops.cast(
t, dtypes.int64)
limit = slen_dim - max_input_length - 1 if self._aligned_end else -1
run = functional_ops.For(
start=t,
limit=limit,
delta=-1,
inputs=[dev_t] + _Flatten([
theta, state0, inputs, acc_state, acc_extras, d_theta, d_state1,
d_inputs, d_acc_state
]),
body=BackwardLoopBody,
rewrite_with_while=compiled)
(theta, state0, inputs, acc_state, acc_extras, d_theta, d_state0,
d_inputs, d_acc_state) = _Pack(run[1:], bakloop_sig)
d_max_input_length = array_ops.constant(0, dtype=max_input_length.dtype)
return _Flatten(
[d_theta, d_state0, d_inputs, d_max_input_length, acc_extras])
self._forward = Forward
def _MaybeComputeMaxInputLength(self, inputs, max_input_length):
if max_input_length is not None:
return max_input_length
return math_ops.reduce_max(array_ops.shape(nest.flatten(inputs)[0])[0])
def Compute(self):
return _Pack(
self._forward(*_Flatten([
self._theta, self._state, self._inputs, self._max_input_length,
self._extras
])), [self._state, self._state, self._extras])[:2]
def _GetCellGrad(cell_fn, cell_grad):
"""Returns the gradient function for cell_fn.
Args:
cell_fn: The recurrent neural net's cell function.
cell_grad: If not None, cell_fn's gradient function.
Returns:
Returns cell_grad if not None. Otherwise, assume cell_fn is a python
function representing the recurrent neural net's cell function, i.e.,
cell_fn: (theta, state0, inputs) -> (state1, extra)
returns its default gradient python function, i.e.,
cell_grad: (theta, state0, inputs, extras, dstate1) -> (
dtheta, dstate0, dinputs)
"""
if cell_grad:
return cell_grad
def CellGrad(theta, state0, inputs, extras, dstate1):
"""Default gradient function for cell_fn."""
# NOTE: The default grad function recomputes the forward
# function and does not take advantage of 'extras' returned by
# the forward function.
del extras
state1, extras = cell_fn(theta, state0, inputs)
ys = _Flatten([state1])
xs = _Flatten([theta, state0, inputs])
grad_ys = _Flatten([dstate1])
grads = gradients_impl.gradients(ys=ys, xs=xs, grad_ys=grad_ys)
return _ConvertNoneGradientToZeros([theta, state0, inputs],
_Pack(grads, [theta, state0, inputs]))
return CellGrad
def _IsSingleTimeStep(inputs, max_input_length):
"""Returns True only if the time dimension of inputs is 1."""
if not isinstance(max_input_length, ops.Tensor):
return max_input_length == 1
for x in nest.flatten(inputs):
if x.shape.dims is None or x.shape[0].value != 1:
return False
return True
def Recurrent(theta,
state0,
inputs,
cell_fn,
cell_grad=None,
extras=None,
max_input_length=None,
use_tpu=False,
aligned_end=False):
"""Compute a recurrent neural net.
Roughly, Recurrent() computes the following:
state = state0
for t in inputs' sequence length:
state = cell_fn(theta, state, inputs[t, :])
accumulate_state[t, :] = state
return accumulate_state, state
theta, state, inputs are all structures of tensors.
inputs[t, :] means taking a slice out from every tensor in the inputs.
accumulate_state[t, :] = state means that we stash every tensor in
'state' into a slice of the corresponding tensor in
accumulate_state.
cell_fn is a python callable computing (building up a TensorFlow
graph) the recurrent neural network's one forward step. Two calls of
cell_fn must describe two identical computations.
By construction, Recurrent()'s backward computation does not access
any intermediate values computed by cell_fn during forward
computation. We may extend Recurrent() to support that by taking a
customized backward function of cell_fn.
Args:
theta: weights. A structure of tensors.
state0: initial state. A structure of tensors.
inputs: inputs. A structure of tensors.
cell_fn: A python function, which computes:
state1, extras = cell_fn(theta, state0, inputs[t, :])
cell_grad: A python function which computes:
dtheta, dstate0, dinputs[t, :] = cell_grad(
theta, state0, inputs[t, :], extras, dstate1)
extras: A structure of tensors. The 2nd return value of every
invocation of cell_fn is a structure of tensors with matching keys
and shapes of this `extras`.
max_input_length: maximum length of effective input. This is used to
truncate the computation if the inputs have been allocated to a
larger size. A scalar tensor.
use_tpu: whether or not we are on TPU.
aligned_end: A boolean indicating whether the sequence is aligned at
the end.
Returns:
accumulate_state and the final state.
"""
if cell_grad is None and _IsSingleTimeStep(inputs, max_input_length):
  # The sequence length is statically known to be 1. Hence, we just need to
# call cell_fn once without putting it into a loop.
inputs = nest.map_structure(lambda x: array_ops.squeeze(x, axis=0), inputs)
state1, _ = cell_fn(theta, state0, inputs)
acc_state = nest.map_structure(lambda x: array_ops.expand_dims(x, axis=0),
state1)
return acc_state, state1
# If cell_grad is not given, derives the gradient function from
# cell_fn.
cell_grad = _GetCellGrad(cell_fn, cell_grad)
if extras is None:
# Derives 'extras' so that we can allocate extras' accumulator.
_, extras = cell_fn(theta, state0, _Index(inputs, 0))
extras = nest.map_structure(array_ops.zeros_like, extras)
else:
_, actual = cell_fn(theta, state0, _Index(inputs, 0))
_AssertIsCompatible(extras, actual)
return _Recurrent(
cell_fn=cell_fn,
cell_grad=cell_grad,
theta=theta,
state0=state0,
inputs=inputs,
max_input_length=max_input_length,
extras=extras,
use_tpu=use_tpu,
aligned_end=aligned_end).Compute()
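# The sketch below is illustrative only and not part of this module's API: it
# shows how a simple running-sum cell can be driven through Recurrent(),
# mirroring the cell_fn signature documented above. The helper name
# _RecurrentUsageSketch and its namedtuples are hypothetical.
def _RecurrentUsageSketch():
  """Builds a toy graph whose state accumulates a scaled running sum."""
  import collections  # Local import; only needed for this illustration.
  _State = collections.namedtuple('State', ['total'])
  _Theta = collections.namedtuple('Theta', ['scale'])
  _Inputs = collections.namedtuple('Inputs', ['x'])
  def _SumCell(theta, state0, inputs):
    # state1.total = state0.total + scale * x_t; no extras are produced.
    return _State(total=state0.total + theta.scale * inputs.x), []
  theta = _Theta(scale=array_ops.constant(1.0))
  state0 = _State(total=array_ops.constant(0.0))
  inputs = _Inputs(x=array_ops.constant([1., 2., 3.]))
  # acc.total accumulates [1., 3., 6.]; final.total is 6.
  acc, final = Recurrent(theta, state0, inputs, cell_fn=_SumCell)
  return acc, final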
|
tensorflow-master
|
tensorflow/contrib/recurrent/python/ops/recurrent.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A tf.compat.v1.nn.dynamic_rnn variant, built on the Recurrent class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.contrib.recurrent.python.ops import recurrent
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
def _GetDTypesFromStructure(struct):
dtypes_list = []
for x in nest.flatten(struct):
x = ops.convert_to_tensor(x)
dtypes_list.append(x.dtype)
return dtypes_list
def _SetShapeFromTemplate(struct, struct_template):
as_list = nest.flatten(struct)
template_as_list = nest.flatten(struct_template)
for element, template in zip(as_list, template_as_list):
element.set_shape(template.shape)
class _FunctionalRnnCell(object):
"""Wrapper around RNNCell which separates state from computation.
This class accomplishes the following:
* Turn the cell's `__call__` function into a pure function. The global
side effects are separated as `theta`. They are the variables created
for the weights of the computation.
* Unless the output is aliased as part of the state, extend the state to
contain the output so that we store the history in `Recurrent`.
* Set static shapes as required.
"""
def __init__(self, rnn_cell, seq_inputs, initial_state):
assert initial_state is not None
# TODO(drpng): Dtype needs to be configurable.
input_dtypes = [seq_inputs.dtype] + _GetDTypesFromStructure(initial_state)
# See _index.
like_inputs_t = nest.map_structure(
lambda x: array_ops.stop_gradient(array_ops.gather(x, 0)), seq_inputs)
input_structure = (like_inputs_t, initial_state)
@function.Defun(*input_dtypes)
def FlatCellStep(*flat_inputs):
"""The flattened version of `rnn_cell`."""
inputs_t, state0 = nest.pack_sequence_as(input_structure, flat_inputs)
_SetShapeFromTemplate(state0, initial_state)
_SetShapeFromTemplate(inputs_t, like_inputs_t)
outputs_t, state1 = rnn_cell(inputs_t, state0)
state_list = nest.flatten(state1)
self._output_shape = outputs_t.shape
if outputs_t in state_list:
output_index_in_state = state_list.index(outputs_t)
else:
output_index_in_state = None
if output_index_in_state is None:
self._prepend_output = True
self._output_state_idx = 0
return [outputs_t] + state_list
else:
self._output_state_idx = output_index_in_state
self._prepend_output = False
        # To save memory, we don't return the output separately
# from the state list, since we know it's the same.
return state_list
def _ToPureFunction(func):
# NOTE: This forces the creating of the function.
if func.captured_inputs:
pure_func = copy.copy(func)
# pylint: disable=protected-access
pure_func._extra_inputs = []
return pure_func
return func
pure_flat_cell_step = _ToPureFunction(FlatCellStep)
def CellStep(theta, extended_state0, inputs_t):
"""Performs one time steps on structured inputs.
The purpose of this function is to turn the parameters into flattened
versions, and to resolve the parameter order difference between
`Recurrent` and `RNNCell`.
In the event the cell returns a transformed output that is not aliased
within its state, the `extended_state0` also contains the output as its
first element.
Args:
theta: Weights required for the computation. A structure of tensors.
extended_state0: the state0, and possibly the output at the previous
time step. A structure of tensors.
inputs_t: the inputs at time t.
Returns:
A pair of the next state (inclusive of the output), and an empty list
(unused `extras`).
The next state is congruent to state0.
"""
extended_state0_flat = nest.flatten(extended_state0)
state0_flat = self.MaybeRemoveOutputFromState(extended_state0_flat)
full_inputs = [inputs_t] + state0_flat + theta
      # Note that the thetas are additional inputs appended as extra
# parameters.
cell_out = pure_flat_cell_step(*full_inputs)
return cell_out, []
self._cell_step = CellStep
self._theta = FlatCellStep.captured_inputs
self._zero_state = rnn_cell.zero_state
self._state_template = initial_state
self._output_size = rnn_cell.output_size
@property
def extended_initial_state(self):
if self._prepend_output:
return [
array_ops.zeros(
self._output_shape,
dtype=_GetDTypesFromStructure(self._state_template)[0]),
self._state_template
]
else:
# The base case, where the output is just the hidden state.
return self._state_template
@property
def cell_step(self):
return self._cell_step
@property
def theta(self):
return self._theta
@property
def state_template(self):
return self._state_template
@property
def output_shape(self):
return self._output_shape
def GetOutputFromState(self, state):
return nest.flatten(state)[self._output_state_idx]
def MaybeRemoveOutputFromState(self, flat_state):
if self._prepend_output:
return flat_state[1:]
return flat_state
def _ApplyLengthsToBatch(sequence_lengths, tf_output):
# TODO(drpng): just use Update so that we don't carry over the gradients?
"""Sets the output to be zero at the end of the sequence."""
# output is batch major.
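  # E.g., with max_time = 3 and sequence_lengths = [1, 3], the first batch
  # element keeps only time step 0 and the second keeps all three steps.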
shape = array_ops.shape(tf_output)
batch_size, max_time, vector_size = shape[0], shape[1], shape[2]
output_time = array_ops.tile(math_ops.range(0, max_time), [batch_size])
output_time = array_ops.reshape(output_time, [batch_size, max_time])
lengths = array_ops.tile(
array_ops.reshape(sequence_lengths, [-1, 1]), [1, max_time])
is_less = math_ops.cast(
math_ops.less(output_time, lengths), dtype=tf_output.dtype)
keep_mask = array_ops.tile(
array_ops.expand_dims(is_less, -1), [1, 1, vector_size])
final_output = keep_mask * tf_output
return final_output
def _PickFinalStateFromHistory(acc_state, sequence_length):
"""Implements acc_state[sequence_length - 1]."""
# This will work on all platforms, unlike the regular slice.
last_value = []
for state_var in nest.flatten(acc_state):
# We compute the following with matrix operations:
# last_var = state_var[sequence_length - 1]
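    # E.g., with max_time = 3 and sequence_length = [2, 3], time indices 1
    # and 2 are picked for the two batch elements respectively.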
shape = array_ops.shape(state_var)
max_time, batch_size = shape[0], shape[1]
output_time = array_ops.tile(math_ops.range(0, max_time), [batch_size])
output_time = array_ops.reshape(output_time, [batch_size, max_time])
lengths = array_ops.tile(
array_ops.reshape(sequence_length, [-1, 1]), [1, max_time])
last_idx = math_ops.cast(
math_ops.equal(output_time, lengths - 1), dtype=state_var.dtype)
last_idx = array_ops.transpose(last_idx)
last_idx_for_bcast = array_ops.expand_dims(last_idx, -1)
sliced = math_ops.multiply(last_idx_for_bcast, state_var)
last_var = math_ops.reduce_sum(sliced, 0)
last_value += [last_var]
return nest.pack_sequence_as(acc_state, last_value)
def _PostProcessOutput(extended_acc_state, extended_final_state, func_cell,
total_time, inputs_lengths, is_reversed):
"""Post-process output of recurrent.
This function takes the accumulated extended state and extracts the requested
state and output.
When `inputs_lengths` has been set, it extracts the output from the
  accumulated state. It also zeroes out outputs past each sequence's length.
When `is_reversed` is true, the output will be reversed in this function.
It also sets the static shape information.
Args:
extended_acc_state: A structure containing the accumulated state at each
time. It may contain the output at each time as well.
extended_final_state: A structure containing the final state. It may contain
the output at the final time.
func_cell: The functional wrapper around the cell.
total_time: A scalar integer tensor.
inputs_lengths: An integer tensor with one entry per input.
is_reversed: A boolean to indicate if the sequence is reversed.
Returns:
A tuple with the outputs at each time, and the final state.
"""
if inputs_lengths is None or is_reversed:
flat_final_state = func_cell.MaybeRemoveOutputFromState(
nest.flatten(extended_final_state))
tf_state = nest.pack_sequence_as(func_cell.state_template, flat_final_state)
else:
# The accumulated state is over the entire sequence, so we pick it
# out from the acc_state sequence.
flat_acc_state = func_cell.MaybeRemoveOutputFromState(
nest.flatten(extended_acc_state))
acc_state = nest.pack_sequence_as(func_cell.state_template, flat_acc_state)
tf_state = _PickFinalStateFromHistory(acc_state, inputs_lengths)
output_from_state = func_cell.GetOutputFromState(extended_acc_state)
if is_reversed:
output_from_state = array_ops.reverse(output_from_state, [0])
tf_output = array_ops.transpose(output_from_state, [1, 0, 2])
tf_output.set_shape(
[func_cell.output_shape[0], total_time, func_cell.output_shape[1]])
if inputs_lengths is not None:
    # Need to set the outputs past the sequence length to zero.
tf_output = _ApplyLengthsToBatch(inputs_lengths, tf_output)
_SetShapeFromTemplate(tf_state, func_cell.state_template)
return tf_output, tf_state
# pylint: disable=invalid-name
def functional_rnn(cell,
inputs,
sequence_length=None,
initial_state=None,
dtype=None,
time_major=False,
scope=None,
use_tpu=False,
reverse=False):
"""Same interface as `tf.compat.v1.nn.dynamic_rnn`."""
with variable_scope.variable_scope(scope or 'rnn'):
if not time_major:
inputs = nest.map_structure(lambda t: array_ops.transpose(t, [1, 0, 2]),
inputs)
inputs_flat = nest.flatten(inputs)
batch_size = array_ops.shape(inputs_flat[0])[1]
if initial_state is None:
initial_state = cell.zero_state(batch_size, dtype)
func_cell = _FunctionalRnnCell(cell, inputs, initial_state)
if sequence_length is not None:
max_length = math_ops.reduce_max(sequence_length)
else:
max_length = None
if reverse:
inputs = array_ops.reverse(inputs, [0])
extended_acc_state, extended_final_state = recurrent.Recurrent(
theta=func_cell.theta,
state0=func_cell.extended_initial_state,
inputs=inputs,
cell_fn=func_cell.cell_step,
max_input_length=max_length,
use_tpu=use_tpu,
aligned_end=reverse)
tf_output, tf_state = _PostProcessOutput(
extended_acc_state,
extended_final_state,
func_cell,
inputs_flat[0].shape[0],
sequence_length,
is_reversed=reverse)
if time_major:
tf_output = array_ops.transpose(tf_output, [1, 0, 2])
return tf_output, tf_state
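# Illustrative usage sketch only, not part of this module's API: it drives
# functional_rnn() the same way tf.compat.v1.nn.dynamic_rnn would be used.
# The helper name _FunctionalRnnUsageSketch and the GRU cell choice are
# assumptions made for the example.
def _FunctionalRnnUsageSketch():
  """Builds a batch-major GRU over placeholder inputs."""
  # Local imports; only needed for this illustration.
  from tensorflow.python.framework import dtypes
  from tensorflow.python.ops import rnn_cell_impl
  # Batch-major inputs of shape [batch=8, time=10, depth=16].
  inputs = array_ops.placeholder(dtypes.float32, shape=[8, 10, 16])
  lengths = array_ops.placeholder(dtypes.int32, shape=[8])
  cell = rnn_cell_impl.GRUCell(32)
  # Returns batch-major outputs [8, 10, 32] and the final GRU state [8, 32].
  return functional_rnn(
      cell, inputs, sequence_length=lengths, dtype=dtypes.float32)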
def bidirectional_functional_rnn(cell_fw,
cell_bw,
inputs,
initial_state_fw=None,
initial_state_bw=None,
dtype=None,
sequence_length=None,
time_major=False,
use_tpu=False,
fast_reverse=False,
scope=None):
"""Creates a bidirectional recurrent neural network.
Performs fully dynamic unrolling of inputs in both directions. Built to be API
  compatible with `tf.compat.v1.nn.bidirectional_dynamic_rnn`, but implemented
  with functional control flow for TPU compatibility.
Args:
cell_fw: An instance of `tf.compat.v1.nn.rnn_cell.RNNCell`.
cell_bw: An instance of `tf.compat.v1.nn.rnn_cell.RNNCell`.
inputs: The RNN inputs. If time_major == False (default), this must be a
Tensor (or hierarchical structure of Tensors) of shape [batch_size,
max_time, ...]. If time_major == True, this must be a Tensor
(or hierarchical structure of Tensors) of shape: [max_time, batch_size,
...]. The first two dimensions must match across all the inputs, but
otherwise the ranks and other shape components may differ.
initial_state_fw: An optional initial state for `cell_fw`. Should match
`cell_fw.zero_state` in structure and type.
initial_state_bw: An optional initial state for `cell_bw`. Should match
`cell_bw.zero_state` in structure and type.
dtype: (optional) The data type for the initial state and expected output.
Required if initial_states are not provided or RNN state has a
heterogeneous dtype.
sequence_length: An optional int32/int64 vector sized [batch_size]. Used to
copy-through state and zero-out outputs when past a batch element's
sequence length. So it's more for correctness than performance.
time_major: Whether the `inputs` tensor is in "time major" format.
use_tpu: Whether to enable TPU-compatible operation. If True, does not truly
reverse `inputs` in the backwards RNN. Once b/69305369 is fixed, we can
remove this flag.
fast_reverse: Whether to use fast tf.reverse to replace tf.reverse_sequence.
This is only possible when either all sequence lengths are the same inside
the batch, or when the cell function does not change the state on padded
input.
scope: An optional scope name for the dynamic RNN.
Returns:
outputs: A tuple of `(output_fw, output_bw)`. The output of the forward and
backward RNN. If time_major == False (default), these will
be Tensors shaped: [batch_size, max_time, cell.output_size]. If
time_major == True, these will be Tensors shaped:
[max_time, batch_size, cell.output_size]. Note, if cell.output_size is a
(possibly nested) tuple of integers or TensorShape objects, then the
output for that direction will be a tuple having the same structure as
cell.output_size, containing Tensors having shapes corresponding to the
shape data in cell.output_size.
final_states: A tuple of `(final_state_fw, final_state_bw)`. A Tensor or
hierarchical structure of Tensors indicating the final cell state in each
direction. Must have the same structure and shape as cell.zero_state.
Raises:
ValueError: If `initial_state_fw` is None or `initial_state_bw` is None and
`dtype` is not provided.
"""
# Keep this code in sync with tf.compat.v1.nn.dynamic_rnn for compatibility.
with variable_scope.variable_scope(scope or 'bidirectional_rnn'):
# Forward direction
with variable_scope.variable_scope('fw') as fw_scope:
output_fw, output_state_fw = functional_rnn(
cell=cell_fw,
inputs=inputs,
sequence_length=sequence_length,
initial_state=initial_state_fw,
dtype=dtype,
time_major=time_major,
scope=fw_scope,
use_tpu=use_tpu)
# Backward direction
if not time_major:
time_dim = 1
batch_dim = 0
else:
time_dim = 0
batch_dim = 1
def _reverse(input_, seq_lengths, seq_dim, batch_dim):
if seq_lengths is not None:
return array_ops.reverse_sequence(
input=input_,
seq_lengths=seq_lengths,
seq_dim=seq_dim,
batch_dim=batch_dim)
else:
# See b/69305369.
assert not use_tpu, (
'Bidirectional with variable sequence lengths unsupported on TPU')
return array_ops.reverse(input_, axis=[seq_dim])
with variable_scope.variable_scope('bw') as bw_scope:
if not fast_reverse:
inputs = _reverse(
inputs,
seq_lengths=sequence_length,
seq_dim=time_dim,
batch_dim=batch_dim)
output_bw, output_state_bw = functional_rnn(
cell=cell_bw,
inputs=inputs,
sequence_length=sequence_length,
initial_state=initial_state_bw,
dtype=dtype,
time_major=time_major,
scope=bw_scope,
use_tpu=use_tpu,
reverse=fast_reverse)
if not fast_reverse:
output_bw = _reverse(
output_bw,
seq_lengths=sequence_length,
seq_dim=time_dim,
batch_dim=batch_dim)
outputs = (output_fw, output_bw)
output_states = (output_state_fw, output_state_bw)
return (outputs, output_states)
# pylint: enable=invalid-name
|
tensorflow-master
|
tensorflow/contrib/recurrent/python/ops/functional_rnn.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deprecated library for creating sequence-to-sequence models in TensorFlow.
@@attention_decoder
@@basic_rnn_seq2seq
@@embedding_attention_decoder
@@embedding_attention_seq2seq
@@embedding_rnn_decoder
@@embedding_rnn_seq2seq
@@embedding_tied_rnn_seq2seq
@@model_with_buckets
@@one2many_rnn_seq2seq
@@rnn_decoder
@@sequence_loss
@@sequence_loss_by_example
@@tied_rnn_seq2seq
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import attention_decoder
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import basic_rnn_seq2seq
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_attention_decoder
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_attention_seq2seq
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_rnn_decoder
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_rnn_seq2seq
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_tied_rnn_seq2seq
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import model_with_buckets
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import one2many_rnn_seq2seq
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import rnn_decoder
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import sequence_loss
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import sequence_loss_by_example
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import tied_rnn_seq2seq
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = []
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/legacy_seq2seq/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/legacy_seq2seq/python/__init__.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for functional style sequence-to-sequence models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import random
import numpy as np
from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as seq2seq_lib
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
class Seq2SeqTest(test.TestCase):
def testRNNDecoder(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
_, enc_state = rnn.static_rnn(
rnn_cell.GRUCell(2), inp, dtype=dtypes.float32)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
cell = core_rnn_cell.OutputProjectionWrapper(rnn_cell.GRUCell(2), 4)
dec, mem = seq2seq_lib.rnn_decoder(dec_inp, enc_state, cell)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testBasicRNNSeq2Seq(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
cell = core_rnn_cell.OutputProjectionWrapper(rnn_cell.GRUCell(2), 4)
dec, mem = seq2seq_lib.basic_rnn_seq2seq(inp, dec_inp, cell)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testTiedRNNSeq2Seq(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
cell = core_rnn_cell.OutputProjectionWrapper(rnn_cell.GRUCell(2), 4)
dec, mem = seq2seq_lib.tied_rnn_seq2seq(inp, dec_inp, cell)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(1, len(res))
self.assertEqual((2, 2), res[0].shape)
def testEmbeddingRNNDecoder(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
cell_fn = lambda: rnn_cell.BasicLSTMCell(2)
cell = cell_fn()
_, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
dec_inp = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
        # Use a new cell instance since the decoder builds its variables in a
        # different variable scope than the encoder cell above.
dec, mem = seq2seq_lib.embedding_rnn_decoder(
dec_inp, enc_state, cell_fn(), num_symbols=4, embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
res = sess.run([mem])
self.assertEqual(1, len(res))
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
def testEmbeddingRNNSeq2Seq(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
enc_inp = [
constant_op.constant(
1, dtypes.int32, shape=[2]) for i in range(2)
]
dec_inp = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
cell_fn = lambda: rnn_cell.BasicLSTMCell(2)
cell = cell_fn()
dec, mem = seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test with state_is_tuple=False.
with variable_scope.variable_scope("no_tuple"):
cell_nt = rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
dec, mem = seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp,
cell_nt,
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 4), res[0].shape)
# Test externally provided output projection.
w = variable_scope.get_variable("proj_w", [2, 5])
b = variable_scope.get_variable("proj_b", [5])
with variable_scope.variable_scope("proj_seq2seq"):
dec, _ = seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp,
cell_fn(),
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2,
output_projection=(w, b))
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
# Test that previous-feeding model ignores inputs after the first.
dec_inp2 = [
constant_op.constant(
0, dtypes.int32, shape=[2]) for _ in range(3)
]
with variable_scope.variable_scope("other"):
d3, _ = seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp2,
cell_fn(),
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2,
feed_previous=constant_op.constant(True))
with variable_scope.variable_scope("other_2"):
d1, _ = seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp,
cell_fn(),
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2,
feed_previous=True)
with variable_scope.variable_scope("other_3"):
d2, _ = seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp2,
cell_fn(),
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2,
feed_previous=True)
sess.run([variables.global_variables_initializer()])
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testEmbeddingTiedRNNSeq2Seq(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
enc_inp = [
constant_op.constant(
1, dtypes.int32, shape=[2]) for i in range(2)
]
dec_inp = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
cell = functools.partial(rnn_cell.BasicLSTMCell, 2, state_is_tuple=True)
dec, mem = seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell(), num_symbols=5, embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
        # Test that when num_decoder_symbols is provided, the size of the
        # decoder output is num_decoder_symbols.
with variable_scope.variable_scope("decoder_symbols_seq2seq"):
dec, mem = seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp,
cell(),
num_symbols=5,
num_decoder_symbols=3,
embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 3), res[0].shape)
# Test externally provided output projection.
w = variable_scope.get_variable("proj_w", [2, 5])
b = variable_scope.get_variable("proj_b", [5])
with variable_scope.variable_scope("proj_seq2seq"):
dec, _ = seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp,
cell(),
num_symbols=5,
embedding_size=2,
output_projection=(w, b))
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
# Test that previous-feeding model ignores inputs after the first.
dec_inp2 = [constant_op.constant(0, dtypes.int32, shape=[2])] * 3
with variable_scope.variable_scope("other"):
d3, _ = seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp2,
cell(),
num_symbols=5,
embedding_size=2,
feed_previous=constant_op.constant(True))
with variable_scope.variable_scope("other_2"):
d1, _ = seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp,
cell(),
num_symbols=5,
embedding_size=2,
feed_previous=True)
with variable_scope.variable_scope("other_3"):
d2, _ = seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp2,
cell(),
num_symbols=5,
embedding_size=2,
feed_previous=True)
sess.run([variables.global_variables_initializer()])
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testAttentionDecoder1(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: rnn_cell.GRUCell(2)
cell = cell_fn()
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
], 1)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Create a new cell instance for the decoder, since it uses a
# different variable scope
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(), output_size=4)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testAttentionDecoder2(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: rnn_cell.GRUCell(2)
cell = cell_fn()
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
], 1)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(),
output_size=4, num_heads=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testDynamicAttentionDecoder1(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: rnn_cell.GRUCell(2)
cell = cell_fn()
inp = constant_op.constant(0.5, shape=[2, 2, 2])
enc_outputs, enc_state = rnn.dynamic_rnn(
cell, inp, dtype=dtypes.float32)
attn_states = enc_outputs
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(), output_size=4)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testDynamicAttentionDecoder2(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: rnn_cell.GRUCell(2)
cell = cell_fn()
inp = constant_op.constant(0.5, shape=[2, 2, 2])
enc_outputs, enc_state = rnn.dynamic_rnn(
cell, inp, dtype=dtypes.float32)
attn_states = enc_outputs
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(),
output_size=4, num_heads=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testAttentionDecoderStateIsTuple(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
single_cell = lambda: rnn_cell.BasicLSTMCell( # pylint: disable=g-long-lambda
2, state_is_tuple=True)
cell_fn = lambda: rnn_cell.MultiRNNCell( # pylint: disable=g-long-lambda
cells=[single_cell() for _ in range(2)], state_is_tuple=True)
cell = cell_fn()
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
], 1)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(), output_size=4)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(2, len(res[0]))
self.assertEqual((2, 2), res[0][0].c.shape)
self.assertEqual((2, 2), res[0][0].h.shape)
self.assertEqual((2, 2), res[0][1].c.shape)
self.assertEqual((2, 2), res[0][1].h.shape)
def testDynamicAttentionDecoderStateIsTuple(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: rnn_cell.MultiRNNCell( # pylint: disable=g-long-lambda
cells=[rnn_cell.BasicLSTMCell(2) for _ in range(2)])
cell = cell_fn()
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs
], 1)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(), output_size=4)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(2, len(res[0]))
self.assertEqual((2, 2), res[0][0].c.shape)
self.assertEqual((2, 2), res[0][0].h.shape)
self.assertEqual((2, 2), res[0][1].c.shape)
self.assertEqual((2, 2), res[0][1].h.shape)
def testEmbeddingAttentionDecoder(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
cell_fn = lambda: rnn_cell.GRUCell(2)
cell = cell_fn()
enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
], 1)
dec_inp = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.embedding_attention_decoder(
dec_inp,
enc_state,
attn_states,
cell_fn(),
num_symbols=4,
embedding_size=2,
output_size=3)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 3), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testEmbeddingAttentionSeq2Seq(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
enc_inp = [
constant_op.constant(
1, dtypes.int32, shape=[2]) for i in range(2)
]
dec_inp = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
cell_fn = lambda: rnn_cell.BasicLSTMCell(2)
cell = cell_fn()
dec, mem = seq2seq_lib.embedding_attention_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test with state_is_tuple=False.
with variable_scope.variable_scope("no_tuple"):
cell_fn = functools.partial(
rnn_cell.BasicLSTMCell, 2, state_is_tuple=False)
cell_nt = cell_fn()
dec, mem = seq2seq_lib.embedding_attention_seq2seq(
enc_inp,
dec_inp,
cell_nt,
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 4), res[0].shape)
# Test externally provided output projection.
w = variable_scope.get_variable("proj_w", [2, 5])
b = variable_scope.get_variable("proj_b", [5])
with variable_scope.variable_scope("proj_seq2seq"):
dec, _ = seq2seq_lib.embedding_attention_seq2seq(
enc_inp,
dec_inp,
cell_fn(),
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2,
output_projection=(w, b))
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
# TODO(ebrevdo, lukaszkaiser): Re-enable once RNNCells allow reuse
# within a variable scope that already has a weights tensor.
#
# # Test that previous-feeding model ignores inputs after the first.
# dec_inp2 = [
# constant_op.constant(
# 0, dtypes.int32, shape=[2]) for _ in range(3)
# ]
# with variable_scope.variable_scope("other"):
# d3, _ = seq2seq_lib.embedding_attention_seq2seq(
# enc_inp,
# dec_inp2,
# cell_fn(),
# num_encoder_symbols=2,
# num_decoder_symbols=5,
# embedding_size=2,
# feed_previous=constant_op.constant(True))
# sess.run([variables.global_variables_initializer()])
# variable_scope.get_variable_scope().reuse_variables()
# cell = cell_fn()
# d1, _ = seq2seq_lib.embedding_attention_seq2seq(
# enc_inp,
# dec_inp,
# cell,
# num_encoder_symbols=2,
# num_decoder_symbols=5,
# embedding_size=2,
# feed_previous=True)
# d2, _ = seq2seq_lib.embedding_attention_seq2seq(
# enc_inp,
# dec_inp2,
# cell,
# num_encoder_symbols=2,
# num_decoder_symbols=5,
# embedding_size=2,
# feed_previous=True)
# res1 = sess.run(d1)
# res2 = sess.run(d2)
# res3 = sess.run(d3)
# self.assertAllClose(res1, res2)
# self.assertAllClose(res1, res3)
def testOne2ManyRNNSeq2Seq(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
enc_inp = [
constant_op.constant(
1, dtypes.int32, shape=[2]) for i in range(2)
]
dec_inp_dict = {}
dec_inp_dict["0"] = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
dec_inp_dict["1"] = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(4)
]
dec_symbols_dict = {"0": 5, "1": 6}
def EncCellFn():
return rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
def DecCellsFn():
return dict((k, rnn_cell.BasicLSTMCell(2, state_is_tuple=True))
for k in dec_symbols_dict)
outputs_dict, state_dict = (seq2seq_lib.one2many_rnn_seq2seq(
enc_inp, dec_inp_dict, EncCellFn(), DecCellsFn(),
2, dec_symbols_dict, embedding_size=2))
sess.run([variables.global_variables_initializer()])
res = sess.run(outputs_dict["0"])
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run(outputs_dict["1"])
self.assertEqual(4, len(res))
self.assertEqual((2, 6), res[0].shape)
res = sess.run([state_dict["0"]])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
res = sess.run([state_dict["1"]])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test that previous-feeding model ignores inputs after the first, i.e.
# dec_inp_dict2 has different inputs from dec_inp_dict after the first
# time-step.
dec_inp_dict2 = {}
dec_inp_dict2["0"] = [
constant_op.constant(
0, dtypes.int32, shape=[2]) for _ in range(3)
]
dec_inp_dict2["1"] = [
constant_op.constant(
0, dtypes.int32, shape=[2]) for _ in range(4)
]
with variable_scope.variable_scope("other"):
outputs_dict3, _ = seq2seq_lib.one2many_rnn_seq2seq(
enc_inp,
dec_inp_dict2,
EncCellFn(),
DecCellsFn(),
2,
dec_symbols_dict,
embedding_size=2,
feed_previous=constant_op.constant(True))
with variable_scope.variable_scope("other_2"):
outputs_dict1, _ = seq2seq_lib.one2many_rnn_seq2seq(
enc_inp,
dec_inp_dict,
EncCellFn(),
DecCellsFn(),
2,
dec_symbols_dict,
embedding_size=2,
feed_previous=True)
with variable_scope.variable_scope("other_3"):
outputs_dict2, _ = seq2seq_lib.one2many_rnn_seq2seq(
enc_inp,
dec_inp_dict2,
EncCellFn(),
DecCellsFn(),
2,
dec_symbols_dict,
embedding_size=2,
feed_previous=True)
sess.run([variables.global_variables_initializer()])
res1 = sess.run(outputs_dict1["0"])
res2 = sess.run(outputs_dict2["0"])
res3 = sess.run(outputs_dict3["0"])
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testSequenceLoss(self):
with self.cached_session() as sess:
logits = [constant_op.constant(i + 0.5, shape=[2, 5]) for i in range(3)]
targets = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
weights = [constant_op.constant(1.0, shape=[2]) for i in range(3)]
average_loss_per_example = seq2seq_lib.sequence_loss(
logits,
targets,
weights,
average_across_timesteps=True,
average_across_batch=True)
res = sess.run(average_loss_per_example)
self.assertAllClose(1.60944, res)
average_loss_per_sequence = seq2seq_lib.sequence_loss(
logits,
targets,
weights,
average_across_timesteps=False,
average_across_batch=True)
res = sess.run(average_loss_per_sequence)
self.assertAllClose(4.828314, res)
total_loss = seq2seq_lib.sequence_loss(
logits,
targets,
weights,
average_across_timesteps=False,
average_across_batch=False)
res = sess.run(total_loss)
self.assertAllClose(9.656628, res)
def testSequenceLossByExample(self):
with self.cached_session() as sess:
output_classes = 5
logits = [
constant_op.constant(
i + 0.5, shape=[2, output_classes]) for i in range(3)
]
targets = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
weights = [constant_op.constant(1.0, shape=[2]) for i in range(3)]
average_loss_per_example = (seq2seq_lib.sequence_loss_by_example(
logits, targets, weights, average_across_timesteps=True))
res = sess.run(average_loss_per_example)
self.assertAllClose(np.asarray([1.609438, 1.609438]), res)
loss_per_sequence = seq2seq_lib.sequence_loss_by_example(
logits, targets, weights, average_across_timesteps=False)
res = sess.run(loss_per_sequence)
self.assertAllClose(np.asarray([4.828314, 4.828314]), res)
# TODO(ebrevdo, lukaszkaiser): Re-enable once RNNCells allow reuse
# within a variable scope that already has a weights tensor.
#
# def testModelWithBucketsScopeAndLoss(self):
# """Test variable scope reuse is not reset after model_with_buckets."""
# classes = 10
# buckets = [(4, 4), (8, 8)]
# with self.cached_session():
# # Here comes a sample Seq2Seq model using GRU cells.
# def SampleGRUSeq2Seq(enc_inp, dec_inp, weights, per_example_loss):
# """Example sequence-to-sequence model that uses GRU cells."""
# def GRUSeq2Seq(enc_inp, dec_inp):
# cell = rnn_cell.MultiRNNCell(
# [rnn_cell.GRUCell(24) for _ in range(2)])
# return seq2seq_lib.embedding_attention_seq2seq(
# enc_inp,
# dec_inp,
# cell,
# num_encoder_symbols=classes,
# num_decoder_symbols=classes,
# embedding_size=24)
# targets = [dec_inp[i + 1] for i in range(len(dec_inp) - 1)] + [0]
# return seq2seq_lib.model_with_buckets(
# enc_inp,
# dec_inp,
# targets,
# weights,
# buckets,
# GRUSeq2Seq,
# per_example_loss=per_example_loss)
# # Now we construct the copy model.
# inp = [
# array_ops.placeholder(
# dtypes.int32, shape=[None]) for _ in range(8)
# ]
# out = [
# array_ops.placeholder(
# dtypes.int32, shape=[None]) for _ in range(8)
# ]
# weights = [
# array_ops.ones_like(
# inp[0], dtype=dtypes.float32) for _ in range(8)
# ]
# with variable_scope.variable_scope("root"):
# _, losses1 = SampleGRUSeq2Seq(
# inp, out, weights, per_example_loss=False)
# # Now check that we did not accidentally set reuse.
# self.assertEqual(False, variable_scope.get_variable_scope().reuse)
# with variable_scope.variable_scope("new"):
  #       _, losses2 = SampleGRUSeq2Seq(
  #           inp, out, weights, per_example_loss=True)
# # First loss is scalar, the second one is a 1-dimensional tensor.
# self.assertEqual([], losses1[0].get_shape().as_list())
# self.assertEqual([None], losses2[0].get_shape().as_list())
def testModelWithBuckets(self):
"""Larger tests that does full sequence-to-sequence model training."""
# We learn to copy 10 symbols in 2 buckets: length 4 and length 8.
classes = 10
buckets = [(4, 4), (8, 8)]
perplexities = [[], []] # Results for each bucket.
random_seed.set_random_seed(111)
random.seed(111)
np.random.seed(111)
with self.cached_session() as sess:
# We use sampled softmax so we keep output projection separate.
w = variable_scope.get_variable("proj_w", [24, classes])
w_t = array_ops.transpose(w)
b = variable_scope.get_variable("proj_b", [classes])
# Here comes a sample Seq2Seq model using GRU cells.
def SampleGRUSeq2Seq(enc_inp, dec_inp, weights):
"""Example sequence-to-sequence model that uses GRU cells."""
def GRUSeq2Seq(enc_inp, dec_inp):
cell = rnn_cell.MultiRNNCell(
[rnn_cell.GRUCell(24) for _ in range(2)], state_is_tuple=True)
return seq2seq_lib.embedding_attention_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols=classes,
num_decoder_symbols=classes,
embedding_size=24,
output_projection=(w, b))
targets = [dec_inp[i + 1] for i in range(len(dec_inp) - 1)] + [0]
def SampledLoss(labels, logits):
labels = array_ops.reshape(labels, [-1, 1])
return nn_impl.sampled_softmax_loss(
weights=w_t,
biases=b,
labels=labels,
inputs=logits,
num_sampled=8,
num_classes=classes)
return seq2seq_lib.model_with_buckets(
enc_inp,
dec_inp,
targets,
weights,
buckets,
GRUSeq2Seq,
softmax_loss_function=SampledLoss)
# Now we construct the copy model.
batch_size = 8
inp = [
array_ops.placeholder(
dtypes.int32, shape=[None]) for _ in range(8)
]
out = [
array_ops.placeholder(
dtypes.int32, shape=[None]) for _ in range(8)
]
weights = [
array_ops.ones_like(
inp[0], dtype=dtypes.float32) for _ in range(8)
]
with variable_scope.variable_scope("root"):
_, losses = SampleGRUSeq2Seq(inp, out, weights)
updates = []
params = variables.global_variables()
optimizer = adam.AdamOptimizer(0.03, epsilon=1e-5)
for i in range(len(buckets)):
full_grads = gradients_impl.gradients(losses[i], params)
grads, _ = clip_ops.clip_by_global_norm(full_grads, 30.0)
update = optimizer.apply_gradients(zip(grads, params))
updates.append(update)
sess.run([variables.global_variables_initializer()])
steps = 6
for _ in range(steps):
bucket = random.choice(np.arange(len(buckets)))
length = buckets[bucket][0]
i = [
np.array(
[np.random.randint(9) + 1 for _ in range(batch_size)],
dtype=np.int32) for _ in range(length)
]
# 0 is our "GO" symbol here.
o = [np.array([0] * batch_size, dtype=np.int32)] + i
feed = {}
for i1, i2, o1, o2 in zip(inp[:length], i[:length], out[:length],
o[:length]):
feed[i1.name] = i2
feed[o1.name] = o2
if length < 8: # For the 4-bucket, we need the 5th as target.
feed[out[length].name] = o[length]
res = sess.run([updates[bucket], losses[bucket]], feed)
perplexities[bucket].append(math.exp(float(res[1])))
for bucket in range(len(buckets)):
if len(perplexities[bucket]) > 1: # Assert that perplexity went down.
self.assertLess(perplexities[bucket][-1], # 20% margin of error.
1.2 * perplexities[bucket][0])
def testModelWithBooleanFeedPrevious(self):
"""Test the model behavior when feed_previous is True.
For example, the following two cases have the same effect:
- Train `embedding_rnn_seq2seq` with `feed_previous=True`, which contains
        an `embedding_rnn_decoder` with `feed_previous=True` and
`update_embedding_for_previous=True`. The decoder is fed with "<Go>"
and outputs "A, B, C".
- Train `embedding_rnn_seq2seq` with `feed_previous=False`. The decoder
is fed with "<Go>, A, B".
"""
num_encoder_symbols = 3
num_decoder_symbols = 5
batch_size = 2
num_enc_timesteps = 2
num_dec_timesteps = 3
def TestModel(seq2seq):
with self.session(graph=ops.Graph()) as sess:
random_seed.set_random_seed(111)
random.seed(111)
np.random.seed(111)
enc_inp = [
constant_op.constant(
i + 1, dtypes.int32, shape=[batch_size])
for i in range(num_enc_timesteps)
]
dec_inp_fp_true = [
constant_op.constant(
i, dtypes.int32, shape=[batch_size])
for i in range(num_dec_timesteps)
]
dec_inp_holder_fp_false = [
array_ops.placeholder(
dtypes.int32, shape=[batch_size])
for _ in range(num_dec_timesteps)
]
targets = [
constant_op.constant(
i + 1, dtypes.int32, shape=[batch_size])
for i in range(num_dec_timesteps)
]
weights = [
constant_op.constant(
1.0, shape=[batch_size]) for i in range(num_dec_timesteps)
]
def ForwardBackward(enc_inp, dec_inp, feed_previous):
scope_name = "fp_{}".format(feed_previous)
with variable_scope.variable_scope(scope_name):
dec_op, _ = seq2seq(enc_inp, dec_inp, feed_previous=feed_previous)
net_variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
scope_name)
optimizer = adam.AdamOptimizer(0.03, epsilon=1e-5)
update_op = optimizer.minimize(
seq2seq_lib.sequence_loss(dec_op, targets, weights),
var_list=net_variables)
return dec_op, update_op, net_variables
dec_op_fp_true, update_fp_true, variables_fp_true = ForwardBackward(
enc_inp, dec_inp_fp_true, feed_previous=True)
_, update_fp_false, variables_fp_false = ForwardBackward(
enc_inp, dec_inp_holder_fp_false, feed_previous=False)
sess.run(variables.global_variables_initializer())
# We only check consistencies between the variables existing in both
# the models with True and False feed_previous. Variables created by
# the loop_function in the model with True feed_previous are ignored.
v_false_name_dict = {
v.name.split("/", 1)[-1]: v
for v in variables_fp_false
}
matched_variables = [(v, v_false_name_dict[v.name.split("/", 1)[-1]])
for v in variables_fp_true]
for v_true, v_false in matched_variables:
sess.run(state_ops.assign(v_false, v_true))
# Take the symbols generated by the decoder with feed_previous=True as
# the true input symbols for the decoder with feed_previous=False.
dec_fp_true = sess.run(dec_op_fp_true)
output_symbols_fp_true = np.argmax(dec_fp_true, axis=2)
dec_inp_fp_false = np.vstack((dec_inp_fp_true[0].eval(),
output_symbols_fp_true[:-1]))
sess.run(update_fp_true)
sess.run(update_fp_false, {
holder: inp
for holder, inp in zip(dec_inp_holder_fp_false, dec_inp_fp_false)
})
for v_true, v_false in matched_variables:
self.assertAllClose(v_true.eval(), v_false.eval())
def EmbeddingRNNSeq2SeqF(enc_inp, dec_inp, feed_previous):
cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
return seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size=2,
feed_previous=feed_previous)
def EmbeddingRNNSeq2SeqNoTupleF(enc_inp, dec_inp, feed_previous):
cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
return seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size=2,
feed_previous=feed_previous)
def EmbeddingTiedRNNSeq2Seq(enc_inp, dec_inp, feed_previous):
cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
return seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp,
cell,
num_decoder_symbols,
embedding_size=2,
feed_previous=feed_previous)
def EmbeddingTiedRNNSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
return seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp,
cell,
num_decoder_symbols,
embedding_size=2,
feed_previous=feed_previous)
def EmbeddingAttentionSeq2Seq(enc_inp, dec_inp, feed_previous):
cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
return seq2seq_lib.embedding_attention_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size=2,
feed_previous=feed_previous)
def EmbeddingAttentionSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
return seq2seq_lib.embedding_attention_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size=2,
feed_previous=feed_previous)
for model in (EmbeddingRNNSeq2SeqF, EmbeddingRNNSeq2SeqNoTupleF,
EmbeddingTiedRNNSeq2Seq, EmbeddingTiedRNNSeq2SeqNoTuple,
EmbeddingAttentionSeq2Seq, EmbeddingAttentionSeq2SeqNoTuple):
TestModel(model)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/legacy_seq2seq/python/kernel_tests/seq2seq_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/legacy_seq2seq/python/kernel_tests/__init__.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for creating sequence-to-sequence models in TensorFlow.
Sequence-to-sequence recurrent neural networks can learn complex functions
that map input sequences to output sequences. These models yield very good
results on a number of tasks, such as speech recognition, parsing, machine
translation, or even constructing automated replies to emails.
Before using this module, it is recommended to read the TensorFlow tutorial
on sequence-to-sequence models. It explains the basic concepts of this module
and shows an end-to-end example of how to build a translation model.
https://www.tensorflow.org/versions/master/tutorials/seq2seq/index.html
Here is an overview of functions available in this module. They all use
a very similar interface, so after reading the above tutorial and using
one of them, others should be easy to substitute.
* Full sequence-to-sequence models.
- basic_rnn_seq2seq: The most basic RNN-RNN model.
- tied_rnn_seq2seq: The basic model with tied encoder and decoder weights.
- embedding_rnn_seq2seq: The basic model with input embedding.
- embedding_tied_rnn_seq2seq: The tied model with input embedding.
- embedding_attention_seq2seq: Advanced model with input embedding and
the neural attention mechanism; recommended for complex tasks.
* Multi-task sequence-to-sequence models.
- one2many_rnn_seq2seq: The embedding model with multiple decoders.
* Decoders (when you write your own encoder, you can use these to decode;
e.g., if you want to write a model that generates captions for images).
- rnn_decoder: The basic decoder based on a pure RNN.
- attention_decoder: A decoder that uses the attention mechanism.
* Losses.
- sequence_loss: Loss for a sequence model returning average log-perplexity.
- sequence_loss_by_example: As above, but not averaging over all examples.
* model_with_buckets: A convenience function to create models with bucketing
(see the tutorial above for an explanation of why and how to use it).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
# We disable pylint because we need python3 compatibility.
from six.moves import xrange # pylint: disable=redefined-builtin
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
# TODO(ebrevdo): Remove once _linear is fully deprecated.
Linear = core_rnn_cell._Linear # pylint: disable=protected-access,invalid-name
def _extract_argmax_and_embed(embedding,
output_projection=None,
update_embedding=True):
"""Get a loop_function that extracts the previous symbol and embeds it.
Args:
embedding: embedding tensor for symbols.
output_projection: None or a pair (W, B). If provided, each fed previous
output will first be multiplied by W and added B.
update_embedding: Boolean; if False, the gradients will not propagate
through the embeddings.
Returns:
A loop function.
"""
def loop_function(prev, _):
if output_projection is not None:
prev = nn_ops.xw_plus_b(prev, output_projection[0], output_projection[1])
prev_symbol = math_ops.argmax(prev, 1)
# Note that gradients will not propagate through the second parameter of
# embedding_lookup.
emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
if not update_embedding:
emb_prev = array_ops.stop_gradient(emb_prev)
return emb_prev
return loop_function
def rnn_decoder(decoder_inputs,
initial_state,
cell,
loop_function=None,
scope=None):
"""RNN decoder for the sequence-to-sequence model.
Args:
decoder_inputs: A list of 2D Tensors [batch_size x input_size].
initial_state: 2D Tensor with shape [batch_size x cell.state_size].
cell: rnn_cell.RNNCell defining the cell function and size.
loop_function: If not None, this function will be applied to the i-th output
in order to generate the i+1-st input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol). This can be used for decoding,
but also for training to emulate http://arxiv.org/abs/1506.03099.
      Signature -- loop_function(prev, i) = next
        * prev is a 2D Tensor of shape [batch_size x output_size],
        * i is an integer, the step number (when advanced control is needed),
        * next is a 2D Tensor of shape [batch_size x input_size].
scope: VariableScope for the created subgraph; defaults to "rnn_decoder".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing generated outputs.
state: The state of each cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
(Note that in some cases, like basic RNN cell or GRU cell, outputs and
states can be the same. They are different for LSTM cells though.)
"""
with variable_scope.variable_scope(scope or "rnn_decoder"):
state = initial_state
outputs = []
prev = None
for i, inp in enumerate(decoder_inputs):
if loop_function is not None and prev is not None:
with variable_scope.variable_scope("loop_function", reuse=True):
inp = loop_function(prev, i)
if i > 0:
variable_scope.get_variable_scope().reuse_variables()
output, state = cell(inp, state)
outputs.append(output)
if loop_function is not None:
prev = output
return outputs, state
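# --- Editorial usage sketch (not part of the original library) ---------------
# A minimal, hedged illustration of how rnn_decoder is typically wired up,
# mirroring Seq2SeqTest.testRNNDecoder in seq2seq_test.py earlier in this
# dump. The helper name and the toy sizes (batch 2, 2 units, output size 4)
# are hypothetical and chosen only for illustration.
def _example_rnn_decoder_usage():
  """Encodes two steps with a GRU, then decodes three steps."""
  from tensorflow.python.framework import constant_op  # example-only import
  from tensorflow.python.ops import rnn_cell  # example-only import
  with variable_scope.variable_scope("example_rnn_decoder"):
    # Encoder: two time-steps of [batch_size=2 x input_size=2] inputs.
    enc_inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
    _, enc_state = rnn.static_rnn(
        rnn_cell.GRUCell(2), enc_inp, dtype=dtypes.float32)
    # Decoder: three time-steps; outputs are projected to size 4.
    dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
    cell = core_rnn_cell.OutputProjectionWrapper(rnn_cell.GRUCell(2), 4)
    outputs, state = rnn_decoder(dec_inp, enc_state, cell)
    # outputs is a list of three [2 x 4] Tensors; state is the final [2 x 2]
    # GRU state.
    return outputs, state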
def basic_rnn_seq2seq(encoder_inputs,
decoder_inputs,
cell,
dtype=dtypes.float32,
scope=None):
"""Basic RNN sequence-to-sequence model.
This model first runs an RNN to encode encoder_inputs into a state vector,
then runs decoder, initialized with the last encoder state, on decoder_inputs.
Encoder and decoder use the same RNN cell type, but don't share parameters.
Args:
encoder_inputs: A list of 2D Tensors [batch_size x input_size].
decoder_inputs: A list of 2D Tensors [batch_size x input_size].
cell: tf.compat.v1.nn.rnn_cell.RNNCell defining the cell function and size.
dtype: The dtype of the initial state of the RNN cell (default: tf.float32).
scope: VariableScope for the created subgraph; default: "basic_rnn_seq2seq".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing the generated outputs.
state: The state of each decoder cell in the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with variable_scope.variable_scope(scope or "basic_rnn_seq2seq"):
enc_cell = copy.deepcopy(cell)
_, enc_state = rnn.static_rnn(enc_cell, encoder_inputs, dtype=dtype)
return rnn_decoder(decoder_inputs, enc_state, cell)
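# --- Editorial usage sketch (not part of the original library) ---------------
# A minimal illustration of basic_rnn_seq2seq, mirroring
# Seq2SeqTest.testBasicRNNSeq2Seq in seq2seq_test.py; the helper name and the
# toy sizes are hypothetical.
def _example_basic_rnn_seq2seq_usage():
  """Runs the most basic encoder-decoder pair on constant toy inputs."""
  from tensorflow.python.framework import constant_op  # example-only import
  from tensorflow.python.ops import rnn_cell  # example-only import
  with variable_scope.variable_scope("example_basic_rnn_seq2seq"):
    enc_inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
    dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
    cell = core_rnn_cell.OutputProjectionWrapper(rnn_cell.GRUCell(2), 4)
    # Encoder and decoder get separate parameters (the cell is deep-copied
    # internally for the encoder).
    outputs, state = basic_rnn_seq2seq(enc_inp, dec_inp, cell)
    # outputs: three [2 x 4] Tensors; state: final decoder state of [2 x 2].
    return outputs, state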
def tied_rnn_seq2seq(encoder_inputs,
decoder_inputs,
cell,
loop_function=None,
dtype=dtypes.float32,
scope=None):
"""RNN sequence-to-sequence model with tied encoder and decoder parameters.
This model first runs an RNN to encode encoder_inputs into a state vector, and
then runs decoder, initialized with the last encoder state, on decoder_inputs.
Encoder and decoder use the same RNN cell and share parameters.
Args:
encoder_inputs: A list of 2D Tensors [batch_size x input_size].
decoder_inputs: A list of 2D Tensors [batch_size x input_size].
cell: tf.compat.v1.nn.rnn_cell.RNNCell defining the cell function and size.
loop_function: If not None, this function will be applied to i-th output in
order to generate i+1-th input, and decoder_inputs will be ignored, except
for the first element ("GO" symbol), see rnn_decoder for details.
dtype: The dtype of the initial state of the rnn cell (default: tf.float32).
scope: VariableScope for the created subgraph; default: "tied_rnn_seq2seq".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing the generated outputs.
state: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with variable_scope.variable_scope("combined_tied_rnn_seq2seq"):
scope = scope or "tied_rnn_seq2seq"
_, enc_state = rnn.static_rnn(
cell, encoder_inputs, dtype=dtype, scope=scope)
variable_scope.get_variable_scope().reuse_variables()
return rnn_decoder(
decoder_inputs,
enc_state,
cell,
loop_function=loop_function,
scope=scope)
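# --- Editorial usage sketch (not part of the original library) ---------------
# Same toy setup as the basic model above, but with tied_rnn_seq2seq so the
# encoder and decoder share one set of cell parameters. Mirrors
# Seq2SeqTest.testTiedRNNSeq2Seq; the helper name is hypothetical.
def _example_tied_rnn_seq2seq_usage():
  """Encoder and decoder reuse the same GRU parameters."""
  from tensorflow.python.framework import constant_op  # example-only import
  from tensorflow.python.ops import rnn_cell  # example-only import
  with variable_scope.variable_scope("example_tied_rnn_seq2seq"):
    enc_inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
    dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
    cell = core_rnn_cell.OutputProjectionWrapper(rnn_cell.GRUCell(2), 4)
    outputs, state = tied_rnn_seq2seq(enc_inp, dec_inp, cell)
    # outputs: three [2 x 4] Tensors; state: final shared-cell state [2 x 2].
    return outputs, state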
def embedding_rnn_decoder(decoder_inputs,
initial_state,
cell,
num_symbols,
embedding_size,
output_projection=None,
feed_previous=False,
update_embedding_for_previous=True,
scope=None):
"""RNN decoder with embedding and a pure-decoding option.
Args:
decoder_inputs: A list of 1D batch-sized int32 Tensors (decoder inputs).
initial_state: 2D Tensor [batch_size x cell.state_size].
cell: tf.compat.v1.nn.rnn_cell.RNNCell defining the cell function.
num_symbols: Integer, how many symbols come into the embedding.
embedding_size: Integer, the length of the embedding vector for each symbol.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_symbols] and B has shape
[num_symbols]; if provided and feed_previous=True, each fed previous
output will first be multiplied by W and added B.
feed_previous: Boolean; if True, only the first of decoder_inputs will be
used (the "GO" symbol), and all other decoder inputs will be generated by:
        next = embedding_lookup(embedding, argmax(previous_output)).
      In effect, this implements a greedy decoder. It can also be used
during training to emulate http://arxiv.org/abs/1506.03099. If False,
decoder_inputs are used as given (the standard decoder case).
update_embedding_for_previous: Boolean; if False and feed_previous=True,
only the embedding for the first symbol of decoder_inputs (the "GO"
symbol) will be updated by back propagation. Embeddings for the symbols
generated from the decoder itself remain unchanged. This parameter has no
effect if feed_previous=False.
scope: VariableScope for the created subgraph; defaults to
"embedding_rnn_decoder".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors. The
output is of shape [batch_size x cell.output_size] when
output_projection is not None (and represents the dense representation
of predicted tokens). It is of shape [batch_size x num_decoder_symbols]
when output_projection is None.
state: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: When output_projection has the wrong shape.
"""
with variable_scope.variable_scope(scope or "embedding_rnn_decoder") as scope:
if output_projection is not None:
dtype = scope.dtype
proj_weights = ops.convert_to_tensor(output_projection[0], dtype=dtype)
proj_weights.get_shape().assert_is_compatible_with([None, num_symbols])
proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)
proj_biases.get_shape().assert_is_compatible_with([num_symbols])
embedding = variable_scope.get_variable("embedding",
[num_symbols, embedding_size])
loop_function = _extract_argmax_and_embed(
embedding, output_projection,
update_embedding_for_previous) if feed_previous else None
emb_inp = (
embedding_ops.embedding_lookup(embedding, i) for i in decoder_inputs)
return rnn_decoder(
emb_inp, initial_state, cell, loop_function=loop_function)
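# --- Editorial usage sketch (not part of the original library) ---------------
# A minimal illustration of embedding_rnn_decoder with integer decoder inputs,
# mirroring Seq2SeqTest.testEmbeddingRNNDecoder; the helper name and the toy
# vocabulary size (4 symbols) are hypothetical. A fresh cell instance is used
# for the decoder so its variables live in the decoder's own scope.
def _example_embedding_rnn_decoder_usage():
  """Decodes three int32 symbol steps from an LSTM encoder state."""
  from tensorflow.python.framework import constant_op  # example-only import
  from tensorflow.python.ops import rnn_cell  # example-only import
  with variable_scope.variable_scope("example_embedding_rnn_decoder"):
    enc_inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
    _, enc_state = rnn.static_rnn(
        rnn_cell.BasicLSTMCell(2), enc_inp, dtype=dtypes.float32)
    dec_inp = [
        constant_op.constant(i, dtypes.int32, shape=[2]) for i in range(3)
    ]
    outputs, state = embedding_rnn_decoder(
        dec_inp, enc_state, rnn_cell.BasicLSTMCell(2),
        num_symbols=4, embedding_size=2)
    # outputs: three [2 x 2] Tensors; state: final LSTMStateTuple with c and h
    # of shape [2 x 2]. Set feed_previous=True for greedy decoding.
    return outputs, state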
def embedding_rnn_seq2seq(encoder_inputs,
decoder_inputs,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size,
output_projection=None,
feed_previous=False,
dtype=None,
scope=None):
"""Embedding RNN sequence-to-sequence model.
This model first embeds encoder_inputs by a newly created embedding (of shape
[num_encoder_symbols x input_size]). Then it runs an RNN to encode
embedded encoder_inputs into a state vector. Next, it embeds decoder_inputs
by another newly created embedding (of shape [num_decoder_symbols x
input_size]). Then it runs RNN decoder, initialized with the last
encoder state, on embedded decoder_inputs.
Args:
encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
cell: tf.compat.v1.nn.rnn_cell.RNNCell defining the cell function and size.
num_encoder_symbols: Integer; number of symbols on the encoder side.
num_decoder_symbols: Integer; number of symbols on the decoder side.
embedding_size: Integer, the length of the embedding vector for each symbol.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_decoder_symbols] and B has shape
[num_decoder_symbols]; if provided and feed_previous=True, each fed
previous output will first be multiplied by W and added B.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first of
decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
    dtype: The dtype of the initial state for both the encoder and decoder
      rnn cells (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_rnn_seq2seq"
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors. The
output is of shape [batch_size x cell.output_size] when
output_projection is not None (and represents the dense representation
of predicted tokens). It is of shape [batch_size x num_decoder_symbols]
when output_projection is None.
state: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with variable_scope.variable_scope(scope or "embedding_rnn_seq2seq") as scope:
if dtype is not None:
scope.set_dtype(dtype)
else:
dtype = scope.dtype
# Encoder.
encoder_cell = copy.deepcopy(cell)
encoder_cell = core_rnn_cell.EmbeddingWrapper(
encoder_cell,
embedding_classes=num_encoder_symbols,
embedding_size=embedding_size)
_, encoder_state = rnn.static_rnn(encoder_cell, encoder_inputs, dtype=dtype)
# Decoder.
if output_projection is None:
cell = core_rnn_cell.OutputProjectionWrapper(cell, num_decoder_symbols)
if isinstance(feed_previous, bool):
return embedding_rnn_decoder(
decoder_inputs,
encoder_state,
cell,
num_decoder_symbols,
embedding_size,
output_projection=output_projection,
feed_previous=feed_previous)
# If feed_previous is a Tensor, we construct 2 graphs and use cond.
def decoder(feed_previous_bool):
reuse = None if feed_previous_bool else True
with variable_scope.variable_scope(
variable_scope.get_variable_scope(), reuse=reuse):
outputs, state = embedding_rnn_decoder(
decoder_inputs,
encoder_state,
cell,
num_decoder_symbols,
embedding_size,
output_projection=output_projection,
feed_previous=feed_previous_bool,
update_embedding_for_previous=False)
state_list = [state]
if nest.is_sequence(state):
state_list = nest.flatten(state)
return outputs + state_list
outputs_and_state = control_flow_ops.cond(
feed_previous, lambda: decoder(True), lambda: decoder(False))
outputs_len = len(decoder_inputs) # Outputs length same as decoder inputs.
state_list = outputs_and_state[outputs_len:]
state = state_list[0]
if nest.is_sequence(encoder_state):
state = nest.pack_sequence_as(
structure=encoder_state, flat_sequence=state_list)
return outputs_and_state[:outputs_len], state
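# --- Editorial usage sketch (not part of the original library) ---------------
# A minimal illustration of embedding_rnn_seq2seq with integer inputs on both
# sides, mirroring Seq2SeqTest.testEmbeddingRNNSeq2Seq; the helper name and
# the toy vocabulary sizes (2 encoder / 5 decoder symbols) are hypothetical.
def _example_embedding_rnn_seq2seq_usage():
  """Embeds int32 inputs, encodes with an LSTM, and decodes over 5 symbols."""
  from tensorflow.python.framework import constant_op  # example-only import
  from tensorflow.python.ops import rnn_cell  # example-only import
  with variable_scope.variable_scope("example_embedding_rnn_seq2seq"):
    enc_inp = [
        constant_op.constant(1, dtypes.int32, shape=[2]) for _ in range(2)
    ]
    dec_inp = [
        constant_op.constant(i, dtypes.int32, shape=[2]) for i in range(3)
    ]
    outputs, state = embedding_rnn_seq2seq(
        enc_inp,
        dec_inp,
        rnn_cell.BasicLSTMCell(2),
        num_encoder_symbols=2,
        num_decoder_symbols=5,
        embedding_size=2)
    # outputs: three [2 x 5] Tensors (logits over the 5 decoder symbols).
    # Pass feed_previous=True to decode greedily from the "GO" symbol only.
    return outputs, state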
def embedding_tied_rnn_seq2seq(encoder_inputs,
decoder_inputs,
cell,
num_symbols,
embedding_size,
num_decoder_symbols=None,
output_projection=None,
feed_previous=False,
dtype=None,
scope=None):
"""Embedding RNN sequence-to-sequence model with tied (shared) parameters.
This model first embeds encoder_inputs by a newly created embedding (of shape
[num_symbols x input_size]). Then it runs an RNN to encode embedded
encoder_inputs into a state vector. Next, it embeds decoder_inputs using
the same embedding. Then it runs RNN decoder, initialized with the last
  encoder state, on embedded decoder_inputs. The decoder output is over symbols
  from 0 to num_decoder_symbols - 1 if num_decoder_symbols is not None;
  otherwise it is over symbols 0 to num_symbols - 1.
Args:
encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
cell: tf.compat.v1.nn.rnn_cell.RNNCell defining the cell function and size.
num_symbols: Integer; number of symbols for both encoder and decoder.
embedding_size: Integer, the length of the embedding vector for each symbol.
num_decoder_symbols: Integer; number of output symbols for decoder. If
provided, the decoder output is over symbols 0 to num_decoder_symbols - 1.
Otherwise, decoder output is over symbols 0 to num_symbols - 1. Note that
this assumes that the vocabulary is set up such that the first
num_decoder_symbols of num_symbols are part of decoding.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_symbols] and B has shape
[num_symbols]; if provided and feed_previous=True, each fed previous
output will first be multiplied by W and added B.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first of
decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
dtype: The dtype to use for the initial RNN states (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_tied_rnn_seq2seq".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_symbols] containing the generated
outputs where output_symbols = num_decoder_symbols if
num_decoder_symbols is not None otherwise output_symbols = num_symbols.
state: The state of each decoder cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: When output_projection has the wrong shape.
"""
with variable_scope.variable_scope(
scope or "embedding_tied_rnn_seq2seq", dtype=dtype) as scope:
dtype = scope.dtype
if output_projection is not None:
proj_weights = ops.convert_to_tensor(output_projection[0], dtype=dtype)
proj_weights.get_shape().assert_is_compatible_with([None, num_symbols])
proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)
proj_biases.get_shape().assert_is_compatible_with([num_symbols])
embedding = variable_scope.get_variable(
"embedding", [num_symbols, embedding_size], dtype=dtype)
emb_encoder_inputs = [
embedding_ops.embedding_lookup(embedding, x) for x in encoder_inputs
]
emb_decoder_inputs = [
embedding_ops.embedding_lookup(embedding, x) for x in decoder_inputs
]
output_symbols = num_symbols
if num_decoder_symbols is not None:
output_symbols = num_decoder_symbols
if output_projection is None:
cell = core_rnn_cell.OutputProjectionWrapper(cell, output_symbols)
if isinstance(feed_previous, bool):
loop_function = _extract_argmax_and_embed(embedding, output_projection,
True) if feed_previous else None
return tied_rnn_seq2seq(
emb_encoder_inputs,
emb_decoder_inputs,
cell,
loop_function=loop_function,
dtype=dtype)
# If feed_previous is a Tensor, we construct 2 graphs and use cond.
def decoder(feed_previous_bool):
loop_function = _extract_argmax_and_embed(
embedding, output_projection, False) if feed_previous_bool else None
reuse = None if feed_previous_bool else True
with variable_scope.variable_scope(
variable_scope.get_variable_scope(), reuse=reuse):
outputs, state = tied_rnn_seq2seq(
emb_encoder_inputs,
emb_decoder_inputs,
cell,
loop_function=loop_function,
dtype=dtype)
state_list = [state]
if nest.is_sequence(state):
state_list = nest.flatten(state)
return outputs + state_list
outputs_and_state = control_flow_ops.cond(
feed_previous, lambda: decoder(True), lambda: decoder(False))
outputs_len = len(decoder_inputs) # Outputs length same as decoder inputs.
state_list = outputs_and_state[outputs_len:]
state = state_list[0]
    # Calculate zero-state to know its structure.
static_batch_size = encoder_inputs[0].get_shape()[0]
for inp in encoder_inputs[1:]:
static_batch_size.merge_with(inp.get_shape()[0])
batch_size = static_batch_size.value
if batch_size is None:
batch_size = array_ops.shape(encoder_inputs[0])[0]
zero_state = cell.zero_state(batch_size, dtype)
if nest.is_sequence(zero_state):
state = nest.pack_sequence_as(
structure=zero_state, flat_sequence=state_list)
return outputs_and_state[:outputs_len], state
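# --- Editorial usage sketch (not part of the original library) ---------------
# A minimal illustration of embedding_tied_rnn_seq2seq, where the encoder and
# decoder share one embedding and one set of cell parameters. Mirrors
# Seq2SeqTest.testEmbeddingTiedRNNSeq2Seq; the helper name and the toy
# vocabulary size (5 shared symbols) are hypothetical.
def _example_embedding_tied_rnn_seq2seq_usage():
  """Shared-embedding encoder/decoder over a 5-symbol toy vocabulary."""
  from tensorflow.python.framework import constant_op  # example-only import
  from tensorflow.python.ops import rnn_cell  # example-only import
  with variable_scope.variable_scope("example_embedding_tied_rnn_seq2seq"):
    enc_inp = [
        constant_op.constant(1, dtypes.int32, shape=[2]) for _ in range(2)
    ]
    dec_inp = [
        constant_op.constant(i, dtypes.int32, shape=[2]) for i in range(3)
    ]
    outputs, state = embedding_tied_rnn_seq2seq(
        enc_inp,
        dec_inp,
        rnn_cell.BasicLSTMCell(2, state_is_tuple=True),
        num_symbols=5,
        embedding_size=2)
    # outputs: three [2 x 5] Tensors; passing num_decoder_symbols=3 would
    # restrict the output distribution to the first 3 symbols, as exercised
    # in seq2seq_test.py.
    return outputs, state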
def attention_decoder(decoder_inputs,
initial_state,
attention_states,
cell,
output_size=None,
num_heads=1,
loop_function=None,
dtype=None,
scope=None,
initial_state_attention=False):
"""RNN decoder with attention for the sequence-to-sequence model.
In this context "attention" means that, during decoding, the RNN can look up
information in the additional tensor attention_states, and it does this by
focusing on a few entries from the tensor. This model has proven to yield
especially good results in a number of sequence-to-sequence tasks. This
implementation is based on http://arxiv.org/abs/1412.7449 (see below for
details). It is recommended for complex sequence-to-sequence tasks.
Args:
decoder_inputs: A list of 2D Tensors [batch_size x input_size].
initial_state: 2D Tensor [batch_size x cell.state_size].
attention_states: 3D Tensor [batch_size x attn_length x attn_size].
cell: tf.compat.v1.nn.rnn_cell.RNNCell defining the cell function and size.
output_size: Size of the output vectors; if None, we use cell.output_size.
num_heads: Number of attention heads that read from attention_states.
loop_function: If not None, this function will be applied to i-th output in
order to generate i+1-th input, and decoder_inputs will be ignored, except
for the first element ("GO" symbol). This can be used for decoding,
but also for training to emulate http://arxiv.org/abs/1506.03099.
      Signature -- loop_function(prev, i) = next
        * prev is a 2D Tensor of shape [batch_size x output_size],
        * i is an integer, the step number (when advanced control is needed),
        * next is a 2D Tensor of shape [batch_size x input_size].
dtype: The dtype to use for the RNN initial state (default: tf.float32).
scope: VariableScope for the created subgraph; default: "attention_decoder".
initial_state_attention: If False (default), initial attentions are zero. If
True, initialize the attentions from the initial state and attention
states -- useful when we wish to resume decoding from a previously stored
decoder state and attention states.
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors of
shape [batch_size x output_size]. These represent the generated outputs.
Output i is computed from input i (which is either the i-th element
of decoder_inputs or loop_function(output {i-1}, i)) as follows.
First, we run the cell on a combination of the input and previous
attention masks:
cell_output, new_state = cell(linear(input, prev_attn), prev_state).
Then, we calculate new attention masks:
new_attn = softmax(V^T * tanh(W * attention_states + U * new_state))
and then we calculate the output:
output = linear(cell_output, new_attn).
    state: The state of each decoder cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: when num_heads is not positive, there are no inputs, shapes
of attention_states are not set, or input size cannot be inferred
from the input.
"""
if not decoder_inputs:
raise ValueError("Must provide at least 1 input to attention decoder.")
if num_heads < 1:
raise ValueError("With less than 1 heads, use a non-attention decoder.")
if attention_states.get_shape()[2].value is None:
raise ValueError("Shape[2] of attention_states must be known: %s" %
attention_states.get_shape())
if output_size is None:
output_size = cell.output_size
with variable_scope.variable_scope(
scope or "attention_decoder", dtype=dtype) as scope:
dtype = scope.dtype
batch_size = array_ops.shape(decoder_inputs[0])[0] # Needed for reshaping.
attn_length = attention_states.get_shape()[1].value
if attn_length is None:
attn_length = array_ops.shape(attention_states)[1]
attn_size = attention_states.get_shape()[2].value
# To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.
hidden = array_ops.reshape(attention_states,
[-1, attn_length, 1, attn_size])
hidden_features = []
v = []
attention_vec_size = attn_size # Size of query vectors for attention.
for a in xrange(num_heads):
k = variable_scope.get_variable(
"AttnW_%d" % a, [1, 1, attn_size, attention_vec_size], dtype=dtype)
hidden_features.append(nn_ops.conv2d(hidden, k, [1, 1, 1, 1], "SAME"))
v.append(
variable_scope.get_variable(
"AttnV_%d" % a, [attention_vec_size], dtype=dtype))
state = initial_state
def attention(query):
"""Put attention masks on hidden using hidden_features and query."""
ds = [] # Results of attention reads will be stored here.
if nest.is_sequence(query): # If the query is a tuple, flatten it.
query_list = nest.flatten(query)
for q in query_list: # Check that ndims == 2 if specified.
ndims = q.get_shape().ndims
if ndims:
assert ndims == 2
query = array_ops.concat(query_list, 1)
for a in xrange(num_heads):
with variable_scope.variable_scope("Attention_%d" % a):
y = Linear(query, attention_vec_size, True)(query)
y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])
y = math_ops.cast(y, dtype)
# Attention mask is a softmax of v^T * tanh(...).
s = math_ops.reduce_sum(v[a] * math_ops.tanh(hidden_features[a] + y),
[2, 3])
a = nn_ops.softmax(math_ops.cast(s, dtype=dtypes.float32))
# Now calculate the attention-weighted vector d.
a = math_ops.cast(a, dtype)
d = math_ops.reduce_sum(
array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden, [1, 2])
ds.append(array_ops.reshape(d, [-1, attn_size]))
return ds
outputs = []
prev = None
batch_attn_size = array_ops.stack([batch_size, attn_size])
attns = [
array_ops.zeros(batch_attn_size, dtype=dtype) for _ in xrange(num_heads)
]
for a in attns: # Ensure the second shape of attention vectors is set.
a.set_shape([None, attn_size])
if initial_state_attention:
attns = attention(initial_state)
for i, inp in enumerate(decoder_inputs):
if i > 0:
variable_scope.get_variable_scope().reuse_variables()
# If loop_function is set, we use it instead of decoder_inputs.
if loop_function is not None and prev is not None:
with variable_scope.variable_scope("loop_function", reuse=True):
inp = loop_function(prev, i)
# Merge input and previous attentions into one vector of the right size.
input_size = inp.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from input: %s" % inp.name)
inputs = [inp] + attns
inputs = [math_ops.cast(e, dtype) for e in inputs]
x = Linear(inputs, input_size, True)(inputs)
# Run the RNN.
cell_output, state = cell(x, state)
# Run the attention mechanism.
if i == 0 and initial_state_attention:
with variable_scope.variable_scope(
variable_scope.get_variable_scope(), reuse=True):
attns = attention(state)
else:
attns = attention(state)
with variable_scope.variable_scope("AttnOutputProjection"):
cell_output = math_ops.cast(cell_output, dtype)
inputs = [cell_output] + attns
output = Linear(inputs, output_size, True)(inputs)
if loop_function is not None:
prev = output
outputs.append(output)
return outputs, state
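# Example (a hedged usage sketch, not part of the original module): wiring
# attention_decoder up by hand. The names `enc_outputs`, `enc_state` and
# `dec_embedded` are illustrative placeholders assumed to come from an encoder
# RNN and an embedding lookup; the cell size is arbitrary.
#
#   cell = rnn_cell_impl.GRUCell(32)
#   attention_states = array_ops.concat(
#       [array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs],
#       1)
#   outputs, state = attention_decoder(
#       dec_embedded,  # list of [batch_size x input_size] Tensors
#       enc_state,     # final encoder state
#       attention_states,
#       cell,
#       num_heads=1)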
def embedding_attention_decoder(decoder_inputs,
initial_state,
attention_states,
cell,
num_symbols,
embedding_size,
num_heads=1,
output_size=None,
output_projection=None,
feed_previous=False,
update_embedding_for_previous=True,
dtype=None,
scope=None,
initial_state_attention=False):
"""RNN decoder with embedding and attention and a pure-decoding option.
Args:
decoder_inputs: A list of 1D batch-sized int32 Tensors (decoder inputs).
initial_state: 2D Tensor [batch_size x cell.state_size].
attention_states: 3D Tensor [batch_size x attn_length x attn_size].
cell: tf.compat.v1.nn.rnn_cell.RNNCell defining the cell function.
num_symbols: Integer, how many symbols come into the embedding.
embedding_size: Integer, the length of the embedding vector for each symbol.
num_heads: Number of attention heads that read from attention_states.
    output_size: Size of the output vectors; if None, use cell.output_size.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_symbols] and B has shape
[num_symbols]; if provided and feed_previous=True, each fed previous
output will first be multiplied by W and added B.
feed_previous: Boolean; if True, only the first of decoder_inputs will be
used (the "GO" symbol), and all other decoder inputs will be generated by:
      next = embedding_lookup(embedding, argmax(previous_output)). In effect,
this implements a greedy decoder. It can also be used
during training to emulate http://arxiv.org/abs/1506.03099. If False,
decoder_inputs are used as given (the standard decoder case).
update_embedding_for_previous: Boolean; if False and feed_previous=True,
only the embedding for the first symbol of decoder_inputs (the "GO"
symbol) will be updated by back propagation. Embeddings for the symbols
generated from the decoder itself remain unchanged. This parameter has no
effect if feed_previous=False.
dtype: The dtype to use for the RNN initial states (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_attention_decoder".
initial_state_attention: If False (default), initial attentions are zero. If
True, initialize the attentions from the initial state and attention
states -- useful when we wish to resume decoding from a previously stored
decoder state and attention states.
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing the generated outputs.
state: The state of each decoder cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: When output_projection has the wrong shape.
"""
if output_size is None:
output_size = cell.output_size
if output_projection is not None:
proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)
proj_biases.get_shape().assert_is_compatible_with([num_symbols])
with variable_scope.variable_scope(
scope or "embedding_attention_decoder", dtype=dtype) as scope:
embedding = variable_scope.get_variable("embedding",
[num_symbols, embedding_size])
loop_function = _extract_argmax_and_embed(
embedding, output_projection,
update_embedding_for_previous) if feed_previous else None
emb_inp = [
embedding_ops.embedding_lookup(embedding, i) for i in decoder_inputs
]
return attention_decoder(
emb_inp,
initial_state,
attention_states,
cell,
output_size=output_size,
num_heads=num_heads,
loop_function=loop_function,
initial_state_attention=initial_state_attention)
def embedding_attention_seq2seq(encoder_inputs,
decoder_inputs,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size,
num_heads=1,
output_projection=None,
feed_previous=False,
dtype=None,
scope=None,
initial_state_attention=False):
"""Embedding sequence-to-sequence model with attention.
This model first embeds encoder_inputs by a newly created embedding (of shape
[num_encoder_symbols x input_size]). Then it runs an RNN to encode
embedded encoder_inputs into a state vector. It keeps the outputs of this
RNN at every step to use for attention later. Next, it embeds decoder_inputs
by another newly created embedding (of shape [num_decoder_symbols x
input_size]). Then it runs attention decoder, initialized with the last
encoder state, on embedded decoder_inputs and attending to encoder outputs.
Warning: when output_projection is None, the size of the attention vectors
  and variables will be made proportional to num_decoder_symbols, which can
  be large.
Args:
encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
cell: tf.compat.v1.nn.rnn_cell.RNNCell defining the cell function and size.
num_encoder_symbols: Integer; number of symbols on the encoder side.
num_decoder_symbols: Integer; number of symbols on the decoder side.
embedding_size: Integer, the length of the embedding vector for each symbol.
num_heads: Number of attention heads that read from attention_states.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_decoder_symbols] and B has shape
[num_decoder_symbols]; if provided and feed_previous=True, each fed
previous output will first be multiplied by W and added B.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first of
decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
dtype: The dtype of the initial RNN state (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_attention_seq2seq".
initial_state_attention: If False (default), initial attentions are zero. If
True, initialize the attentions from the initial state and attention
states.
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x num_decoder_symbols] containing the generated
outputs.
state: The state of each decoder cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with variable_scope.variable_scope(
scope or "embedding_attention_seq2seq", dtype=dtype) as scope:
dtype = scope.dtype
# Encoder.
encoder_cell = copy.deepcopy(cell)
encoder_cell = core_rnn_cell.EmbeddingWrapper(
encoder_cell,
embedding_classes=num_encoder_symbols,
embedding_size=embedding_size)
encoder_outputs, encoder_state = rnn.static_rnn(
encoder_cell, encoder_inputs, dtype=dtype)
# First calculate a concatenation of encoder outputs to put attention on.
top_states = [
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in encoder_outputs
]
attention_states = array_ops.concat(top_states, 1)
# Decoder.
output_size = None
if output_projection is None:
cell = core_rnn_cell.OutputProjectionWrapper(cell, num_decoder_symbols)
output_size = num_decoder_symbols
if isinstance(feed_previous, bool):
return embedding_attention_decoder(
decoder_inputs,
encoder_state,
attention_states,
cell,
num_decoder_symbols,
embedding_size,
num_heads=num_heads,
output_size=output_size,
output_projection=output_projection,
feed_previous=feed_previous,
initial_state_attention=initial_state_attention)
# If feed_previous is a Tensor, we construct 2 graphs and use cond.
def decoder(feed_previous_bool):
reuse = None if feed_previous_bool else True
with variable_scope.variable_scope(
variable_scope.get_variable_scope(), reuse=reuse):
outputs, state = embedding_attention_decoder(
decoder_inputs,
encoder_state,
attention_states,
cell,
num_decoder_symbols,
embedding_size,
num_heads=num_heads,
output_size=output_size,
output_projection=output_projection,
feed_previous=feed_previous_bool,
update_embedding_for_previous=False,
initial_state_attention=initial_state_attention)
state_list = [state]
if nest.is_sequence(state):
state_list = nest.flatten(state)
return outputs + state_list
outputs_and_state = control_flow_ops.cond(
feed_previous, lambda: decoder(True), lambda: decoder(False))
outputs_len = len(decoder_inputs) # Outputs length same as decoder inputs.
state_list = outputs_and_state[outputs_len:]
state = state_list[0]
if nest.is_sequence(encoder_state):
state = nest.pack_sequence_as(
structure=encoder_state, flat_sequence=state_list)
return outputs_and_state[:outputs_len], state
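# Example (a hedged sketch, not part of the original module): a minimal
# embedding_attention_seq2seq call. `enc_inputs` and `dec_inputs` are assumed
# to be lists of 1D int32 Tensors of shape [batch_size]; vocabulary and
# embedding sizes below are illustrative only.
#
#   cell = rnn_cell_impl.GRUCell(64)
#   outputs, state = embedding_attention_seq2seq(
#       enc_inputs,
#       dec_inputs,
#       cell,
#       num_encoder_symbols=1000,
#       num_decoder_symbols=1000,
#       embedding_size=64,
#       feed_previous=False)
#   # outputs is a list of len(dec_inputs) Tensors of shape [batch_size x 1000].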
def one2many_rnn_seq2seq(encoder_inputs,
decoder_inputs_dict,
enc_cell,
dec_cells_dict,
num_encoder_symbols,
num_decoder_symbols_dict,
embedding_size,
feed_previous=False,
dtype=None,
scope=None):
"""One-to-many RNN sequence-to-sequence model (multi-task).
This is a multi-task sequence-to-sequence model with one encoder and multiple
decoders. Reference to multi-task sequence-to-sequence learning can be found
here: http://arxiv.org/abs/1511.06114
Args:
encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
decoder_inputs_dict: A dictionary mapping decoder name (string) to the
corresponding decoder_inputs; each decoder_inputs is a list of 1D Tensors
of shape [batch_size]; num_decoders is defined as
len(decoder_inputs_dict).
enc_cell: tf.compat.v1.nn.rnn_cell.RNNCell defining the encoder cell
function and size.
    dec_cells_dict: A dictionary mapping decoder name (string) to an instance of
tf.nn.rnn_cell.RNNCell.
num_encoder_symbols: Integer; number of symbols on the encoder side.
num_decoder_symbols_dict: A dictionary mapping decoder name (string) to an
integer specifying number of symbols for the corresponding decoder;
len(num_decoder_symbols_dict) must be equal to num_decoders.
embedding_size: Integer, the length of the embedding vector for each symbol.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first of
decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
    dtype: The dtype of the initial state for both the encoder and decoder
rnn cells (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"one2many_rnn_seq2seq"
Returns:
A tuple of the form (outputs_dict, state_dict), where:
outputs_dict: A mapping from decoder name (string) to a list of the same
length as decoder_inputs_dict[name]; each element in the list is a 2D
Tensors with shape [batch_size x num_decoder_symbol_list[name]]
containing the generated outputs.
state_dict: A mapping from decoder name (string) to the final state of the
corresponding decoder RNN; it is a 2D Tensor of shape
[batch_size x cell.state_size].
Raises:
TypeError: if enc_cell or any of the dec_cells are not instances of RNNCell.
ValueError: if len(dec_cells) != len(decoder_inputs_dict).
"""
outputs_dict = {}
state_dict = {}
if not isinstance(enc_cell, rnn_cell_impl.RNNCell):
raise TypeError("enc_cell is not an RNNCell: %s" % type(enc_cell))
if set(dec_cells_dict) != set(decoder_inputs_dict):
raise ValueError("keys of dec_cells_dict != keys of decodre_inputs_dict")
for dec_cell in dec_cells_dict.values():
if not isinstance(dec_cell, rnn_cell_impl.RNNCell):
raise TypeError("dec_cell is not an RNNCell: %s" % type(dec_cell))
with variable_scope.variable_scope(
scope or "one2many_rnn_seq2seq", dtype=dtype) as scope:
dtype = scope.dtype
# Encoder.
enc_cell = core_rnn_cell.EmbeddingWrapper(
enc_cell,
embedding_classes=num_encoder_symbols,
embedding_size=embedding_size)
_, encoder_state = rnn.static_rnn(enc_cell, encoder_inputs, dtype=dtype)
# Decoder.
for name, decoder_inputs in decoder_inputs_dict.items():
num_decoder_symbols = num_decoder_symbols_dict[name]
dec_cell = dec_cells_dict[name]
with variable_scope.variable_scope("one2many_decoder_" +
str(name)) as scope:
dec_cell = core_rnn_cell.OutputProjectionWrapper(
dec_cell, num_decoder_symbols)
if isinstance(feed_previous, bool):
outputs, state = embedding_rnn_decoder(
decoder_inputs,
encoder_state,
dec_cell,
num_decoder_symbols,
embedding_size,
feed_previous=feed_previous)
else:
# If feed_previous is a Tensor, we construct 2 graphs and use cond.
def filled_embedding_rnn_decoder(feed_previous):
"""The current decoder with a fixed feed_previous parameter."""
# pylint: disable=cell-var-from-loop
reuse = None if feed_previous else True
vs = variable_scope.get_variable_scope()
with variable_scope.variable_scope(vs, reuse=reuse):
outputs, state = embedding_rnn_decoder(
decoder_inputs,
encoder_state,
dec_cell,
num_decoder_symbols,
embedding_size,
feed_previous=feed_previous)
# pylint: enable=cell-var-from-loop
state_list = [state]
if nest.is_sequence(state):
state_list = nest.flatten(state)
return outputs + state_list
outputs_and_state = control_flow_ops.cond(
feed_previous, lambda: filled_embedding_rnn_decoder(True), lambda:
filled_embedding_rnn_decoder(False))
# Outputs length is the same as for decoder inputs.
outputs_len = len(decoder_inputs)
outputs = outputs_and_state[:outputs_len]
state_list = outputs_and_state[outputs_len:]
state = state_list[0]
if nest.is_sequence(encoder_state):
state = nest.pack_sequence_as(
structure=encoder_state, flat_sequence=state_list)
outputs_dict[name] = outputs
state_dict[name] = state
return outputs_dict, state_dict
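# Example (a hedged sketch, not part of the original module): one encoder
# feeding two decoders. The input lists (`enc_inputs`, `dec_inputs_fr`,
# `dec_inputs_de`) are assumed placeholders of 1D int32 Tensors of shape
# [batch_size]; sizes are illustrative.
#
#   enc_cell = rnn_cell_impl.GRUCell(64)
#   dec_cells = {"fr": rnn_cell_impl.GRUCell(64),
#                "de": rnn_cell_impl.GRUCell(64)}
#   outputs_dict, state_dict = one2many_rnn_seq2seq(
#       enc_inputs,
#       {"fr": dec_inputs_fr, "de": dec_inputs_de},
#       enc_cell,
#       dec_cells,
#       num_encoder_symbols=1000,
#       num_decoder_symbols_dict={"fr": 1200, "de": 1100},
#       embedding_size=64)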
def sequence_loss_by_example(logits,
targets,
weights,
average_across_timesteps=True,
softmax_loss_function=None,
name=None):
"""Weighted cross-entropy loss for a sequence of logits (per example).
Args:
logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
targets: List of 1D batch-sized int32 Tensors of the same length as logits.
weights: List of 1D batch-sized float-Tensors of the same length as logits.
average_across_timesteps: If set, divide the returned cost by the total
label weight.
softmax_loss_function: Function (labels, logits) -> loss-batch to be used
instead of the standard softmax (the default if this is None). **Note that
to avoid confusion, it is required for the function to accept named
arguments.**
name: Optional name for this operation, default: "sequence_loss_by_example".
Returns:
1D batch-sized float Tensor: The log-perplexity for each sequence.
Raises:
ValueError: If len(logits) is different from len(targets) or len(weights).
"""
if len(targets) != len(logits) or len(weights) != len(logits):
raise ValueError("Lengths of logits, weights, and targets must be the same "
"%d, %d, %d." % (len(logits), len(weights), len(targets)))
with ops.name_scope(name, "sequence_loss_by_example",
logits + targets + weights):
log_perp_list = []
for logit, target, weight in zip(logits, targets, weights):
if softmax_loss_function is None:
# TODO(irving,ebrevdo): This reshape is needed because
# sequence_loss_by_example is called with scalars sometimes, which
# violates our general scalar strictness policy.
target = array_ops.reshape(target, [-1])
crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=target, logits=logit)
else:
crossent = softmax_loss_function(labels=target, logits=logit)
log_perp_list.append(crossent * weight)
log_perps = math_ops.add_n(log_perp_list)
if average_across_timesteps:
total_size = math_ops.add_n(weights)
total_size += 1e-12 # Just to avoid division by 0 for all-0 weights.
log_perps /= total_size
return log_perps
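# Example (a hedged sketch, not part of the original module): with a single
# time step, sequence_loss_by_example reduces to a weighted sparse softmax
# cross-entropy per batch element. The constants below are illustrative.
#
#   logits = [constant_op.constant([[2.0, 0.5], [0.1, 1.5]])]
#   targets = [constant_op.constant([0, 1])]
#   weights = [constant_op.constant([1.0, 1.0])]
#   per_example = sequence_loss_by_example(logits, targets, weights)
#   # per_example has shape [2]: one log-perplexity value per batch element.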
def sequence_loss(logits,
targets,
weights,
average_across_timesteps=True,
average_across_batch=True,
softmax_loss_function=None,
name=None):
"""Weighted cross-entropy loss for a sequence of logits, batch-collapsed.
Args:
logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
targets: List of 1D batch-sized int32 Tensors of the same length as logits.
weights: List of 1D batch-sized float-Tensors of the same length as logits.
average_across_timesteps: If set, divide the returned cost by the total
label weight.
average_across_batch: If set, divide the returned cost by the batch size.
softmax_loss_function: Function (labels, logits) -> loss-batch to be used
instead of the standard softmax (the default if this is None). **Note that
to avoid confusion, it is required for the function to accept named
arguments.**
name: Optional name for this operation, defaults to "sequence_loss".
Returns:
A scalar float Tensor: The average log-perplexity per symbol (weighted).
Raises:
ValueError: If len(logits) is different from len(targets) or len(weights).
"""
with ops.name_scope(name, "sequence_loss", logits + targets + weights):
cost = math_ops.reduce_sum(
sequence_loss_by_example(
logits,
targets,
weights,
average_across_timesteps=average_across_timesteps,
softmax_loss_function=softmax_loss_function))
if average_across_batch:
batch_size = array_ops.shape(targets[0])[0]
return cost / math_ops.cast(batch_size, cost.dtype)
else:
return cost
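# Example (a hedged sketch, not part of the original module): sequence_loss is
# the batch-summed and, by default, batch-averaged version of
# sequence_loss_by_example, so with the inputs from the sketch above:
#
#   total = sequence_loss(logits, targets, weights)
#   # total == reduce_sum(per_example) / batch_size when
#   # average_across_batch=True (and equals the plain sum otherwise).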
def model_with_buckets(encoder_inputs,
decoder_inputs,
targets,
weights,
buckets,
seq2seq,
softmax_loss_function=None,
per_example_loss=False,
name=None):
"""Create a sequence-to-sequence model with support for bucketing.
The seq2seq argument is a function that defines a sequence-to-sequence model,
e.g., seq2seq = lambda x, y: basic_rnn_seq2seq(
x, y, rnn_cell.GRUCell(24))
Args:
encoder_inputs: A list of Tensors to feed the encoder; first seq2seq input.
decoder_inputs: A list of Tensors to feed the decoder; second seq2seq input.
targets: A list of 1D batch-sized int32 Tensors (desired output sequence).
weights: List of 1D batch-sized float-Tensors to weight the targets.
buckets: A list of pairs of (input size, output size) for each bucket.
    seq2seq: A sequence-to-sequence model function; it takes 2 inputs that
      agree with encoder_inputs and decoder_inputs, and returns a pair
      consisting of outputs and states (as, e.g., basic_rnn_seq2seq).
softmax_loss_function: Function (labels, logits) -> loss-batch to be used
instead of the standard softmax (the default if this is None). **Note that
to avoid confusion, it is required for the function to accept named
arguments.**
per_example_loss: Boolean. If set, the returned loss will be a batch-sized
tensor of losses for each sequence in the batch. If unset, it will be a
scalar with the averaged loss from all examples.
name: Optional name for this operation, defaults to "model_with_buckets".
Returns:
A tuple of the form (outputs, losses), where:
outputs: The outputs for each bucket. Its j'th element consists of a list
of 2D Tensors. The shape of output tensors can be either
[batch_size x output_size] or [batch_size x num_decoder_symbols]
depending on the seq2seq model used.
losses: List of scalar Tensors, representing losses for each bucket, or,
if per_example_loss is set, a list of 1D batch-sized float Tensors.
Raises:
ValueError: If length of encoder_inputs, targets, or weights is smaller
than the largest (last) bucket.
"""
if len(encoder_inputs) < buckets[-1][0]:
raise ValueError("Length of encoder_inputs (%d) must be at least that of la"
"st bucket (%d)." % (len(encoder_inputs), buckets[-1][0]))
if len(targets) < buckets[-1][1]:
raise ValueError("Length of targets (%d) must be at least that of last "
"bucket (%d)." % (len(targets), buckets[-1][1]))
if len(weights) < buckets[-1][1]:
raise ValueError("Length of weights (%d) must be at least that of last "
"bucket (%d)." % (len(weights), buckets[-1][1]))
all_inputs = encoder_inputs + decoder_inputs + targets + weights
losses = []
outputs = []
with ops.name_scope(name, "model_with_buckets", all_inputs):
for j, bucket in enumerate(buckets):
with variable_scope.variable_scope(
variable_scope.get_variable_scope(), reuse=True if j > 0 else None):
bucket_outputs, _ = seq2seq(encoder_inputs[:bucket[0]],
decoder_inputs[:bucket[1]])
outputs.append(bucket_outputs)
if per_example_loss:
losses.append(
sequence_loss_by_example(
outputs[-1],
targets[:bucket[1]],
weights[:bucket[1]],
softmax_loss_function=softmax_loss_function))
else:
losses.append(
sequence_loss(
outputs[-1],
targets[:bucket[1]],
weights[:bucket[1]],
softmax_loss_function=softmax_loss_function))
return outputs, losses
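# Example (a hedged sketch, not part of the original module): two buckets and
# an attention seq2seq function. The placeholder lists (`enc_inputs`,
# `dec_inputs`, `targets`, `weights`) are assumed to be at least as long as
# the largest bucket, as required by the checks above; sizes are illustrative.
#
#   buckets = [(5, 10), (10, 15)]
#   seq2seq_f = lambda x, y: embedding_attention_seq2seq(
#       x, y, rnn_cell_impl.GRUCell(32),
#       num_encoder_symbols=100, num_decoder_symbols=100, embedding_size=32)
#   outputs, losses = model_with_buckets(
#       enc_inputs, dec_inputs, targets, weights, buckets, seq2seq_f)
#   # outputs[j] and losses[j] correspond to buckets[j].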
|
tensorflow-master
|
tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for image manipulation.
### API
This module provides functions for image manipulation; currently, chrominance
transforms (including changing saturation and hue) in YIQ space and
projective transforms (including rotation) are supported.
## Image Transformation `Ops`
@@angles_to_projective_transforms
@@compose_transforms
@@adjust_hsv_in_yiq
@@flat_transforms_to_matrices
@@matrices_to_flat_transforms
@@random_hsv_in_yiq
@@rotate
@@transform
@@translate
@@translations_to_projective_transforms
@@dense_image_warp
@@interpolate_spline
@@sparse_image_warp
## Image Segmentation `Ops`
@@connected_components
## Matching `Ops`
@@bipartite_match
## Random Dot Stereogram `Ops`
@@single_image_random_dot_stereograms
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.image.python.ops.dense_image_warp import dense_image_warp
from tensorflow.contrib.image.python.ops.distort_image_ops import adjust_hsv_in_yiq
from tensorflow.contrib.image.python.ops.distort_image_ops import random_hsv_in_yiq
from tensorflow.contrib.image.python.ops.image_ops import angles_to_projective_transforms
from tensorflow.contrib.image.python.ops.image_ops import bipartite_match
from tensorflow.contrib.image.python.ops.image_ops import compose_transforms
from tensorflow.contrib.image.python.ops.image_ops import connected_components
from tensorflow.contrib.image.python.ops.image_ops import flat_transforms_to_matrices
from tensorflow.contrib.image.python.ops.image_ops import matrices_to_flat_transforms
from tensorflow.contrib.image.python.ops.image_ops import rotate
from tensorflow.contrib.image.python.ops.image_ops import transform
from tensorflow.contrib.image.python.ops.image_ops import translate
from tensorflow.contrib.image.python.ops.image_ops import translations_to_projective_transforms
from tensorflow.contrib.image.python.ops.interpolate_spline import interpolate_spline
from tensorflow.contrib.image.python.ops.single_image_random_dot_stereograms import single_image_random_dot_stereograms
from tensorflow.contrib.image.python.ops.sparse_image_warp import sparse_image_warp
from tensorflow.python.util.all_util import remove_undocumented
# pylint: enable=line-too-long
remove_undocumented(__name__)
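# Example (a hedged sketch, not part of the original module): a rotation can be
# applied directly with `rotate`, or built as a projective transform and fed to
# `transform`. `images` is an assumed 4D float32 Tensor of shape
# [batch, height, width, channels]; `height` and `width` are assumed scalars.
#
#   import math
#   rotated = rotate(images, math.pi / 8)
#   transforms = angles_to_projective_transforms(math.pi / 8, height, width)
#   also_rotated = transform(images, transforms)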
|
tensorflow-master
|
tensorflow/contrib/image/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for python single_image_random_dot_stereograms_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.image.python.ops.single_image_random_dot_stereograms \
import single_image_random_dot_stereograms
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class SingleImageRandomDotStereogramsTest(test_util.TensorFlowTestCase):
def test_shape_function_default(self):
"""
NOTE: The output_image_shape is [X, Y, C]
while the output data is [Y, X, C] (or [H, W, C]).
As a result, by default the output_image_shape has the value
of [1024, 768, 1], but the output data will be [768, 1024, 1].
"""
x_np = [[1, 2, 3, 3, 2, 1],
[1, 2, 3, 4, 5, 2],
[1, 2, 3, 4, 5, 3],
[1, 2, 3, 4, 5, 4],
[6, 5, 4, 4, 5, 5]]
x_tf = constant_op.constant(x_np)
# By default [1024, 768, 1] => [768, 1024, 1].
sirds_1 = single_image_random_dot_stereograms(
x_tf,
convergence_dots_size=8,
number_colors=256,
normalize=True)
shape_1 = sirds_1.get_shape().as_list()
self.assertEqual(shape_1, [768, 1024, 1])
with self.cached_session():
r_tf_1 = sirds_1.eval()
self.assertAllEqual(shape_1, r_tf_1.shape)
# If color > 256 then [1024, 768, 3] => [768, 1024, 3].
sirds_2 = single_image_random_dot_stereograms(
x_tf,
convergence_dots_size=8,
number_colors=512,
normalize=True)
shape_2 = sirds_2.get_shape().as_list()
self.assertEqual(shape_2, [768, 1024, 3])
with self.cached_session():
r_tf_2 = sirds_2.eval()
self.assertAllEqual(shape_2, r_tf_2.shape)
# If explicitly set output_image_shape to [1200, 800, 1],
# then the output data should be [800, 1200, 1].
sirds_3 = single_image_random_dot_stereograms(
x_tf,
convergence_dots_size=8,
number_colors=256,
normalize=True,
output_image_shape=[1200, 800, 1])
shape_3 = sirds_3.get_shape().as_list()
self.assertEqual(shape_3, [800, 1200, 1])
with self.cached_session():
r_tf_3 = sirds_3.eval()
self.assertAllEqual(shape_3, r_tf_3.shape)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/image/python/kernel_tests/single_image_random_dot_stereograms_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for interpolate_spline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import interpolate as sc_interpolate
from tensorflow.contrib.image.python.ops import interpolate_spline
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import momentum
class _InterpolationProblem(object):
"""Abstract class for interpolation problem descriptions."""
def get_problem(self, optimizable=False, extrapolate=True, dtype='float32'):
"""Make data for an interpolation problem where all x vectors are n-d.
Args:
optimizable: If True, then make train_points a tf.Variable.
extrapolate: If False, then clamp the query_points values to be within
the max and min of train_points.
dtype: The data type to use.
Returns:
query_points, query_values, train_points, train_values: training and
test tensors for interpolation problem
"""
# The values generated here depend on a seed of 0.
np.random.seed(0)
batch_size = 1
num_training_points = 10
num_query_points = 4
init_points = np.random.uniform(
size=[batch_size, num_training_points, self.DATA_DIM])
init_points = init_points.astype(dtype)
train_points = (
variables.Variable(init_points)
if optimizable else constant_op.constant(init_points))
train_values = self.tf_function(train_points)
query_points_np = np.random.uniform(
size=[batch_size, num_query_points, self.DATA_DIM])
query_points_np = query_points_np.astype(dtype)
if not extrapolate:
query_points_np = np.clip(query_points_np, np.min(init_points),
np.max(init_points))
query_points = constant_op.constant(query_points_np)
query_values = self.np_function(query_points_np)
return query_points, query_values, train_points, train_values
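# Example (a hedged sketch, not part of the original test): how a problem
# instance defined by the subclasses below is consumed, mirroring the tests in
# InterpolateSplineTest further down.
#
#   problem = _QuadraticPlusSinProblem1D()
#   query_points, query_values, train_points, train_values = (
#       problem.get_problem(dtype='float64'))
#   interp = interpolate_spline.interpolate_spline(
#       train_points, train_values, query_points, 1)  # order-1 interpolation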
class _QuadraticPlusSinProblem1D(_InterpolationProblem):
"""1D interpolation problem used for regression testing."""
DATA_DIM = 1
HARDCODED_QUERY_VALUES = {
(1.0, 0.0): [6.2647187603, -7.84362604077, -5.63690142322, 1.42928896387],
(1.0,
0.01): [6.77688289946, -8.02163669853, -5.79491157027, 1.4063285693],
(2.0,
0.0): [8.67110264937, -8.41281390883, -5.80190044693, 1.50155606059],
(2.0,
0.01): [6.70797816797, -7.49709587663, -5.28965776238, 1.52284731741],
(3.0,
0.0): [9.37691802935, -8.50390141515, -5.80786417426, 1.63467762122],
(3.0,
0.01): [4.47106304758, -5.71266128361, -3.92529303296, 1.86755293857],
(4.0,
0.0): [9.58172461111, -8.51432104771, -5.80967675388, 1.63361164256],
(4.0, 0.01): [
-3.87902711352, -0.0253462273846, 1.79857618022, -0.769339675725
]
}
def np_function(self, x):
"""Takes np array, evaluates the test function, and returns np array."""
return np.sum(
np.power((x - 0.5), 3) - 0.25 * x + 10 * np.sin(x * 10),
axis=2,
keepdims=True)
def tf_function(self, x):
"""Takes tf tensor, evaluates the test function, and returns tf tensor."""
return math_ops.reduce_mean(
math_ops.pow((x - 0.5), 3) - 0.25 * x + 10 * math_ops.sin(x * 10),
2,
keepdims=True)
class _QuadraticPlusSinProblemND(_InterpolationProblem):
"""3D interpolation problem used for regression testing."""
DATA_DIM = 3
HARDCODED_QUERY_VALUES = {
(1.0, 0.0): [1.06609663962, 1.28894849357, 1.10882405595, 1.63966936885],
(1.0, 0.01): [1.03123780748, 1.2952930985, 1.10366822954, 1.65265118569],
(2.0, 0.0): [0.627787735064, 1.43802857251, 1.00194632358, 1.91667538215],
(2.0, 0.01): [0.730159985046, 1.41702471595, 1.0065827217, 1.85758519312],
(3.0, 0.0): [0.350460417862, 1.67223539464, 1.00475331246, 2.31580322491],
(3.0,
0.01): [0.624557250556, 1.63138876667, 0.976588193162, 2.12511237866],
(4.0,
0.0): [0.898129669986, 1.24434133638, -0.938056116931, 1.59910338833],
(4.0,
0.01): [0.0930360338179, -3.38791305538, -1.00969032567, 0.745535080382],
}
def np_function(self, x):
"""Takes np array, evaluates the test function, and returns np array."""
return np.sum(
np.square(x - 0.5) + 0.25 * x + 1 * np.sin(x * 15),
axis=2,
keepdims=True)
def tf_function(self, x):
"""Takes tf tensor, evaluates the test function, and returns tf tensor."""
return math_ops.reduce_sum(
math_ops.square(x - 0.5) + 0.25 * x + 1 * math_ops.sin(x * 15),
2,
keepdims=True)
class InterpolateSplineTest(test_util.TensorFlowTestCase):
def test_1d_linear_interpolation(self):
"""For 1d linear interpolation, we can compare directly to scipy."""
tp = _QuadraticPlusSinProblem1D()
(query_points, _, train_points, train_values) = tp.get_problem(
extrapolate=False, dtype='float64')
interpolation_order = 1
with ops.name_scope('interpolator'):
interpolator = interpolate_spline.interpolate_spline(
train_points, train_values, query_points, interpolation_order)
with self.cached_session() as sess:
fetches = [query_points, train_points, train_values, interpolator]
query_points_, train_points_, train_values_, interp_ = sess.run(fetches)
# Just look at the first element of the minibatch.
# Also, trim the final singleton dimension.
interp_ = interp_[0, :, 0]
query_points_ = query_points_[0, :, 0]
train_points_ = train_points_[0, :, 0]
train_values_ = train_values_[0, :, 0]
# Compute scipy interpolation.
scipy_interp_function = sc_interpolate.interp1d(
train_points_, train_values_, kind='linear')
scipy_interpolation = scipy_interp_function(query_points_)
scipy_interpolation_on_train = scipy_interp_function(train_points_)
# Even with float64 precision, the interpolants disagree with scipy a
# bit due to the fact that we add the EPSILON to prevent sqrt(0), etc.
tol = 1e-3
self.assertAllClose(
train_values_, scipy_interpolation_on_train, atol=tol, rtol=tol)
self.assertAllClose(interp_, scipy_interpolation, atol=tol, rtol=tol)
def test_1d_interpolation(self):
"""Regression test for interpolation with 1-D points."""
tp = _QuadraticPlusSinProblem1D()
(query_points, _, train_points,
train_values) = tp.get_problem(dtype='float64')
for order in (1, 2, 3):
for reg_weight in (0, 0.01):
interpolator = interpolate_spline.interpolate_spline(
train_points, train_values, query_points, order, reg_weight)
target_interpolation = tp.HARDCODED_QUERY_VALUES[(order, reg_weight)]
target_interpolation = np.array(target_interpolation)
with self.cached_session() as sess:
interp_val = sess.run(interpolator)
self.assertAllClose(interp_val[0, :, 0], target_interpolation)
def test_nd_linear_interpolation(self):
"""Regression test for interpolation with N-D points."""
tp = _QuadraticPlusSinProblemND()
(query_points, _, train_points,
train_values) = tp.get_problem(dtype='float64')
for order in (1, 2, 3):
for reg_weight in (0, 0.01):
interpolator = interpolate_spline.interpolate_spline(
train_points, train_values, query_points, order, reg_weight)
target_interpolation = tp.HARDCODED_QUERY_VALUES[(order, reg_weight)]
target_interpolation = np.array(target_interpolation)
with self.cached_session() as sess:
interp_val = sess.run(interpolator)
self.assertAllClose(interp_val[0, :, 0], target_interpolation)
def test_nd_linear_interpolation_unspecified_shape(self):
"""Ensure that interpolation supports dynamic batch_size and num_points."""
tp = _QuadraticPlusSinProblemND()
(query_points, _, train_points,
train_values) = tp.get_problem(dtype='float64')
# Construct placeholders such that the batch size, number of train points,
# and number of query points are not known at graph construction time.
feature_dim = query_points.shape[-1]
value_dim = train_values.shape[-1]
train_points_ph = array_ops.placeholder(
dtype=train_points.dtype, shape=[None, None, feature_dim])
train_values_ph = array_ops.placeholder(
dtype=train_values.dtype, shape=[None, None, value_dim])
query_points_ph = array_ops.placeholder(
dtype=query_points.dtype, shape=[None, None, feature_dim])
order = 1
reg_weight = 0.01
interpolator = interpolate_spline.interpolate_spline(
train_points_ph, train_values_ph, query_points_ph, order, reg_weight)
target_interpolation = tp.HARDCODED_QUERY_VALUES[(order, reg_weight)]
target_interpolation = np.array(target_interpolation)
with self.cached_session() as sess:
(train_points_value, train_values_value, query_points_value) = sess.run(
[train_points, train_values, query_points])
interp_val = sess.run(
interpolator,
feed_dict={
train_points_ph: train_points_value,
train_values_ph: train_values_value,
query_points_ph: query_points_value
})
self.assertAllClose(interp_val[0, :, 0], target_interpolation)
def test_fully_unspecified_shape(self):
"""Ensure that erreor is thrown when input/output dim unspecified."""
tp = _QuadraticPlusSinProblemND()
(query_points, _, train_points,
train_values) = tp.get_problem(dtype='float64')
# Construct placeholders such that the batch size, number of train points,
# and number of query points are not known at graph construction time.
feature_dim = query_points.shape[-1]
value_dim = train_values.shape[-1]
train_points_ph = array_ops.placeholder(
dtype=train_points.dtype, shape=[None, None, feature_dim])
train_points_ph_invalid = array_ops.placeholder(
dtype=train_points.dtype, shape=[None, None, None])
train_values_ph = array_ops.placeholder(
dtype=train_values.dtype, shape=[None, None, value_dim])
train_values_ph_invalid = array_ops.placeholder(
dtype=train_values.dtype, shape=[None, None, None])
query_points_ph = array_ops.placeholder(
dtype=query_points.dtype, shape=[None, None, feature_dim])
order = 1
reg_weight = 0.01
with self.assertRaises(ValueError):
_ = interpolate_spline.interpolate_spline(
train_points_ph_invalid, train_values_ph, query_points_ph, order,
reg_weight)
with self.assertRaises(ValueError):
_ = interpolate_spline.interpolate_spline(
train_points_ph, train_values_ph_invalid, query_points_ph, order,
reg_weight)
def test_interpolation_gradient(self):
"""Make sure that backprop can run. Correctness of gradients is assumed.
    Here, we create and use a small 'training' set and a more densely-sampled
set of query points, for which we know the true value in advance. The goal
is to choose x locations for the training data such that interpolating using
this training data yields the best reconstruction for the function
values at the query points. The training data locations are optimized
iteratively using gradient descent.
"""
tp = _QuadraticPlusSinProblemND()
(query_points, query_values, train_points,
train_values) = tp.get_problem(optimizable=True)
regularization = 0.001
for interpolation_order in (1, 2, 3, 4):
interpolator = interpolate_spline.interpolate_spline(
train_points, train_values, query_points, interpolation_order,
regularization)
loss = math_ops.reduce_mean(math_ops.square(query_values - interpolator))
optimizer = momentum.MomentumOptimizer(0.001, 0.9)
grad = gradients.gradients(loss, [train_points])
grad, _ = clip_ops.clip_by_global_norm(grad, 1.0)
opt_func = optimizer.apply_gradients(zip(grad, [train_points]))
init_op = variables.global_variables_initializer()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(100):
sess.run([loss, opt_func])
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/image/python/kernel_tests/interpolate_spline_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for connected component analysis."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
from tensorflow.contrib.image.python.ops import image_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
# Image for testing connected_components, with a single, winding component.
SNAKE = np.asarray(
[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]]) # pyformat: disable
class SegmentationTest(test_util.TensorFlowTestCase):
def testDisconnected(self):
arr = math_ops.cast(
[[1, 0, 0, 1, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 1, 0, 1, 0],
[1, 0, 1, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0]],
dtypes.bool) # pyformat: disable
expected = (
[[1, 0, 0, 2, 0, 0, 0, 0, 3],
[0, 4, 0, 0, 0, 5, 0, 6, 0],
[7, 0, 8, 0, 0, 0, 9, 0, 0],
[0, 0, 0, 0, 10, 0, 0, 0, 0],
[0, 0, 11, 0, 0, 0, 0, 0, 0]]) # pyformat: disable
with self.cached_session():
self.assertAllEqual(image_ops.connected_components(arr).eval(), expected)
def testSimple(self):
arr = [[0, 1, 0], [1, 1, 1], [0, 1, 0]]
with self.cached_session():
# Single component with id 1.
self.assertAllEqual(
image_ops.connected_components(math_ops.cast(
arr, dtypes.bool)).eval(), arr)
def testSnake(self):
with self.cached_session():
# Single component with id 1.
self.assertAllEqual(
image_ops.connected_components(math_ops.cast(
SNAKE, dtypes.bool)).eval(), SNAKE)
def testSnake_disconnected(self):
for i in range(SNAKE.shape[0]):
for j in range(SNAKE.shape[1]):
with self.cached_session():
# If we disconnect any part of the snake except for the endpoints,
# there will be 2 components.
if SNAKE[i, j] and (i, j) not in [(1, 1), (6, 3)]:
disconnected_snake = SNAKE.copy()
disconnected_snake[i, j] = 0
components = image_ops.connected_components(
math_ops.cast(disconnected_snake, dtypes.bool)).eval()
self.assertEqual(components.max(), 2, 'disconnect (%d, %d)' % (i,
j))
bins = np.bincount(components.ravel())
# Nonzero number of pixels labeled 0, 1, or 2.
self.assertGreater(bins[0], 0)
self.assertGreater(bins[1], 0)
self.assertGreater(bins[2], 0)
def testMultipleImages(self):
images = [[[1, 1, 1, 1],
[1, 0, 0, 1],
[1, 0, 0, 1],
[1, 1, 1, 1]],
[[1, 0, 0, 1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 0, 1]],
[[1, 1, 0, 1],
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 0, 1, 1]]] # pyformat: disable
expected = [[[1, 1, 1, 1],
[1, 0, 0, 1],
[1, 0, 0, 1],
[1, 1, 1, 1]],
[[2, 0, 0, 3],
[0, 0, 0, 0],
[0, 0, 0, 0],
[4, 0, 0, 5]],
[[6, 6, 0, 7],
[0, 6, 6, 0],
[8, 0, 6, 0],
[0, 0, 6, 6]]] # pyformat: disable
with self.cached_session():
self.assertAllEqual(
image_ops.connected_components(math_ops.cast(
images, dtypes.bool)).eval(), expected)
def testZeros(self):
with self.cached_session():
self.assertAllEqual(
image_ops.connected_components(
array_ops.zeros((100, 20, 50), dtypes.bool)).eval(),
np.zeros((100, 20, 50)))
def testOnes(self):
with self.cached_session():
self.assertAllEqual(
image_ops.connected_components(
array_ops.ones((100, 20, 50), dtypes.bool)).eval(),
np.tile(np.arange(100)[:, None, None] + 1, [1, 20, 50]))
def testOnes_small(self):
with self.cached_session():
self.assertAllEqual(
image_ops.connected_components(array_ops.ones((3, 5),
dtypes.bool)).eval(),
np.ones((3, 5)))
def testRandom_scipy(self):
np.random.seed(42)
images = np.random.randint(0, 2, size=(10, 100, 200)).astype(np.bool)
expected = connected_components_reference_implementation(images)
if expected is None:
return
with self.cached_session():
self.assertAllEqual(
image_ops.connected_components(images).eval(), expected)
def connected_components_reference_implementation(images):
try:
# pylint: disable=g-import-not-at-top
from scipy.ndimage import measurements
except ImportError:
logging.exception('Skipping test method because scipy could not be loaded')
return
image_or_images = np.asarray(images)
if len(image_or_images.shape) == 2:
images = image_or_images[None, :, :]
elif len(image_or_images.shape) == 3:
images = image_or_images
components = np.asarray([measurements.label(image)[0] for image in images])
# Get the count of nonzero ids for each image, and offset each image's nonzero
# ids using the cumulative sum.
num_ids_per_image = components.reshape(
[-1, components.shape[1] * components.shape[2]]).max(axis=-1)
positive_id_start_per_image = np.cumsum(num_ids_per_image)
for i in range(components.shape[0]):
new_id_start = positive_id_start_per_image[i - 1] if i > 0 else 0
components[i, components[i] > 0] += new_id_start
if len(image_or_images.shape) == 2:
return components[0, :, :]
else:
return components
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/image/python/kernel_tests/segmentation_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dense_image_warp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.contrib.image.python.ops import dense_image_warp
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import adam
class DenseImageWarpTest(test_util.TensorFlowTestCase):
def setUp(self):
np.random.seed(0)
def test_interpolate_small_grid_ij(self):
grid = constant_op.constant(
[[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]], shape=[1, 3, 3, 1])
query_points = constant_op.constant(
[[0., 0.], [1., 0.], [2., 0.5], [1.5, 1.5]], shape=[1, 4, 2])
expected_results = np.reshape(np.array([0., 3., 6.5, 6.]), [1, 4, 1])
interp = dense_image_warp._interpolate_bilinear(grid, query_points)
with self.cached_session() as sess:
predicted = sess.run(interp)
self.assertAllClose(expected_results, predicted)
def test_interpolate_small_grid_xy(self):
grid = constant_op.constant(
[[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]], shape=[1, 3, 3, 1])
query_points = constant_op.constant(
[[0., 0.], [0., 1.], [0.5, 2.0], [1.5, 1.5]], shape=[1, 4, 2])
expected_results = np.reshape(np.array([0., 3., 6.5, 6.]), [1, 4, 1])
interp = dense_image_warp._interpolate_bilinear(
grid, query_points, indexing='xy')
with self.cached_session() as sess:
predicted = sess.run(interp)
self.assertAllClose(expected_results, predicted)
def test_interpolate_small_grid_batched(self):
grid = constant_op.constant(
[[[0., 1.], [3., 4.]], [[5., 6.], [7., 8.]]], shape=[2, 2, 2, 1])
query_points = constant_op.constant([[[0., 0.], [1., 0.], [0.5, 0.5]],
[[0.5, 0.], [1., 0.], [1., 1.]]])
expected_results = np.reshape(
np.array([[0., 3., 2.], [6., 7., 8.]]), [2, 3, 1])
interp = dense_image_warp._interpolate_bilinear(grid, query_points)
with self.cached_session() as sess:
predicted = sess.run(interp)
self.assertAllClose(expected_results, predicted)
def get_image_and_flow_placeholders(self, shape, image_type, flow_type):
batch_size, height, width, numchannels = shape
image_shape = [batch_size, height, width, numchannels]
flow_shape = [batch_size, height, width, 2]
tf_type = {
'float16': dtypes.half,
'float32': dtypes.float32,
'float64': dtypes.float64
}
image = array_ops.placeholder(dtype=tf_type[image_type], shape=image_shape)
flows = array_ops.placeholder(dtype=tf_type[flow_type], shape=flow_shape)
return image, flows
def get_random_image_and_flows(self, shape, image_type, flow_type):
batch_size, height, width, numchannels = shape
image_shape = [batch_size, height, width, numchannels]
image = np.random.normal(size=image_shape)
flow_shape = [batch_size, height, width, 2]
flows = np.random.normal(size=flow_shape) * 3
return image.astype(image_type), flows.astype(flow_type)
def assert_correct_interpolation_value(self,
image,
flows,
pred_interpolation,
batch_index,
y_index,
x_index,
low_precision=False):
"""Assert that the tf interpolation matches hand-computed value."""
height = image.shape[1]
width = image.shape[2]
displacement = flows[batch_index, y_index, x_index, :]
float_y = y_index - displacement[0]
float_x = x_index - displacement[1]
floor_y = max(min(height - 2, math.floor(float_y)), 0)
floor_x = max(min(width - 2, math.floor(float_x)), 0)
ceil_y = floor_y + 1
ceil_x = floor_x + 1
alpha_y = min(max(0.0, float_y - floor_y), 1.0)
alpha_x = min(max(0.0, float_x - floor_x), 1.0)
floor_y = int(floor_y)
floor_x = int(floor_x)
ceil_y = int(ceil_y)
ceil_x = int(ceil_x)
top_left = image[batch_index, floor_y, floor_x, :]
top_right = image[batch_index, floor_y, ceil_x, :]
bottom_left = image[batch_index, ceil_y, floor_x, :]
bottom_right = image[batch_index, ceil_y, ceil_x, :]
interp_top = alpha_x * (top_right - top_left) + top_left
interp_bottom = alpha_x * (bottom_right - bottom_left) + bottom_left
interp = alpha_y * (interp_bottom - interp_top) + interp_top
atol = 1e-6
rtol = 1e-6
if low_precision:
atol = 1e-2
rtol = 1e-3
self.assertAllClose(
interp,
pred_interpolation[batch_index, y_index, x_index, :],
atol=atol,
rtol=rtol)
def check_zero_flow_correctness(self, shape, image_type, flow_type):
"""Assert using zero flows doesn't change the input image."""
image, flows = self.get_image_and_flow_placeholders(shape, image_type,
flow_type)
interp = dense_image_warp.dense_image_warp(image, flows)
with self.cached_session() as sess:
rand_image, rand_flows = self.get_random_image_and_flows(
shape, image_type, flow_type)
rand_flows *= 0
predicted_interpolation = sess.run(
interp, feed_dict={
image: rand_image,
flows: rand_flows
})
self.assertAllClose(rand_image, predicted_interpolation)
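  # A hedged reading of the sampling rule that the assertions in
  # assert_correct_interpolation_value() encode: for each output location,
  # dense_image_warp samples the input at the location displaced backwards by
  # the flow,
  #
  #   output[b, y, x, c] ~= bilinear_sample(
  #       image[b, :, :, c], y - flows[b, y, x, 0], x - flows[b, y, x, 1])
  #
  # so a zero flow field reproduces the input image, which is what
  # check_zero_flow_correctness() verifies.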
def test_zero_flows(self):
"""Apply check_zero_flow_correctness() for a few sizes and types."""
shapes_to_try = [[3, 4, 5, 6], [1, 2, 2, 1]]
for shape in shapes_to_try:
self.check_zero_flow_correctness(
shape, image_type='float32', flow_type='float32')
def check_interpolation_correctness(self,
shape,
image_type,
flow_type,
num_probes=5):
"""Interpolate, and then assert correctness for a few query locations."""
image, flows = self.get_image_and_flow_placeholders(shape, image_type,
flow_type)
interp = dense_image_warp.dense_image_warp(image, flows)
low_precision = image_type == 'float16' or flow_type == 'float16'
with self.cached_session() as sess:
rand_image, rand_flows = self.get_random_image_and_flows(
shape, image_type, flow_type)
pred_interpolation = sess.run(
interp, feed_dict={
image: rand_image,
flows: rand_flows
})
for _ in range(num_probes):
batch_index = np.random.randint(0, shape[0])
y_index = np.random.randint(0, shape[1])
x_index = np.random.randint(0, shape[2])
self.assert_correct_interpolation_value(
rand_image,
rand_flows,
pred_interpolation,
batch_index,
y_index,
x_index,
low_precision=low_precision)
def test_interpolation(self):
"""Apply check_interpolation_correctness() for a few sizes and types."""
shapes_to_try = [[3, 4, 5, 6], [1, 5, 5, 3], [1, 2, 2, 1]]
for im_type in ['float32', 'float64', 'float16']:
for flow_type in ['float32', 'float64', 'float16']:
for shape in shapes_to_try:
self.check_interpolation_correctness(shape, im_type, flow_type)
def test_gradients_exist(self):
"""Check that backprop can run.
The correctness of the gradients is assumed, since the forward propagation
is tested to be correct and we only use built-in tf ops.
However, we perform a simple test to make sure that backprop can actually
run. We treat the flows as a tf.Variable and optimize them to minimize
the difference between the interpolated image and the input image.
"""
batch_size, height, width, numchannels = [4, 5, 6, 7]
image_shape = [batch_size, height, width, numchannels]
image = random_ops.random_normal(image_shape)
flow_shape = [batch_size, height, width, 2]
init_flows = np.float32(np.random.normal(size=flow_shape) * 0.25)
flows = variables.Variable(init_flows)
interp = dense_image_warp.dense_image_warp(image, flows)
loss = math_ops.reduce_mean(math_ops.square(interp - image))
optimizer = adam.AdamOptimizer(1.0)
grad = gradients.gradients(loss, [flows])
opt_func = optimizer.apply_gradients(zip(grad, [flows]))
init_op = variables.global_variables_initializer()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(10):
sess.run(opt_func)
def test_size_exception(self):
"""Make sure it throws an exception for images that are too small."""
shape = [1, 2, 1, 1]
msg = 'Should have raised an exception for invalid image size'
with self.assertRaises(errors.InvalidArgumentError, msg=msg):
self.check_interpolation_correctness(shape, 'float32', 'float32')
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/image/python/kernel_tests/dense_image_warp_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for python distort_image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.image.python.ops import distort_image_ops
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
# TODO(huangyp): also measure the differences between AdjustHsvInYiq and
# AdjustHsv in core.
class AdjustHueInYiqTest(test_util.TensorFlowTestCase):
def _adjust_hue_in_yiq_np(self, x_np, delta_h):
"""Rotate hue in YIQ space.
Mathematically we first convert rgb color to yiq space, rotate the hue
degrees, and then convert back to rgb.
Args:
x_np: input x with last dimension = 3.
delta_h: degree of hue rotation, in radians.
Returns:
Adjusted y with the same shape as x_np.
"""
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
u = np.cos(delta_h)
w = np.sin(delta_h)
# Projection matrix from RGB to YIQ. Numbers from wikipedia
# https://en.wikipedia.org/wiki/YIQ
tyiq = np.array([[0.299, 0.587, 0.114], [0.596, -0.274, -0.322],
[0.211, -0.523, 0.312]])
y_v = np.dot(x_v, tyiq.T)
# Hue rotation matrix in YIQ space.
hue_rotation = np.array([[1.0, 0.0, 0.0], [0.0, u, -w], [0.0, w, u]])
y_v = np.dot(y_v, hue_rotation.T)
# Projecting back to RGB space.
y_v = np.dot(y_v, np.linalg.inv(tyiq).T)
return y_v.reshape(x_np.shape)
def _adjust_hue_in_yiq_tf(self, x_np, delta_h):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np)
y = distort_image_ops.adjust_hsv_in_yiq(x, delta_h, 1, 1)
y_tf = y.eval()
return y_tf
def test_adjust_random_hue_in_yiq(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
'all_random',
'rg_same',
'rb_same',
'gb_same',
'rgb_same',
]
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
delta_h = (np.random.rand() * 2.0 - 1.0) * np.pi
if test_style == 'all_random':
pass
elif test_style == 'rg_same':
x_np[..., 1] = x_np[..., 0]
elif test_style == 'rb_same':
x_np[..., 2] = x_np[..., 0]
elif test_style == 'gb_same':
x_np[..., 2] = x_np[..., 1]
elif test_style == 'rgb_same':
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError('Invalid test style: %s' % (test_style))
y_np = self._adjust_hue_in_yiq_np(x_np, delta_h)
y_tf = self._adjust_hue_in_yiq_tf(x_np, delta_h)
self.assertAllClose(y_tf, y_np, rtol=2e-4, atol=1e-4)
def test_invalid_shapes(self):
x_np = np.random.rand(2, 3) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesRegexp(ValueError, 'Shape must be at least rank 3'):
self._adjust_hue_in_yiq_tf(x_np, delta_h)
x_np = np.random.rand(4, 2, 4) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesOpError('input must have 3 channels but instead has '
'4 channels'):
self._adjust_hue_in_yiq_tf(x_np, delta_h)
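# A minimal NumPy sketch of the math exercised by AdjustHueInYiqTest: the
# RGB -> YIQ -> rotate -> RGB pipeline in _adjust_hue_in_yiq_np collapses into
# a single 3x3 matrix M = inv(T) . R(delta_h) . T applied to each RGB pixel.
# The helper name below is illustrative only and not part of the library API.
def _composed_hue_rotation_matrix(delta_h):
  """Return the 3x3 RGB-space matrix equivalent to a YIQ hue rotation."""
  tyiq = np.array([[0.299, 0.587, 0.114],
                   [0.596, -0.274, -0.322],
                   [0.211, -0.523, 0.312]])
  u, w = np.cos(delta_h), np.sin(delta_h)
  hue_rotation = np.array([[1.0, 0.0, 0.0],
                           [0.0, u, -w],
                           [0.0, w, u]])
  # Applying M to an RGB column vector matches chaining the three dot
  # products used in the test above.
  return np.linalg.inv(tyiq).dot(hue_rotation).dot(tyiq)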
class AdjustValueInYiqTest(test_util.TensorFlowTestCase):
def _adjust_value_in_yiq_np(self, x_np, scale):
return x_np * scale
def _adjust_value_in_yiq_tf(self, x_np, scale):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np)
y = distort_image_ops.adjust_hsv_in_yiq(x, 0, 1, scale)
y_tf = y.eval()
return y_tf
def test_adjust_random_value_in_yiq(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
'all_random',
'rg_same',
'rb_same',
'gb_same',
'rgb_same',
]
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
scale = np.random.rand() * 2.0 - 1.0
if test_style == 'all_random':
pass
elif test_style == 'rg_same':
x_np[..., 1] = x_np[..., 0]
elif test_style == 'rb_same':
x_np[..., 2] = x_np[..., 0]
elif test_style == 'gb_same':
x_np[..., 2] = x_np[..., 1]
elif test_style == 'rgb_same':
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError('Invalid test style: %s' % (test_style))
y_np = self._adjust_value_in_yiq_np(x_np, scale)
y_tf = self._adjust_value_in_yiq_tf(x_np, scale)
self.assertAllClose(y_tf, y_np, rtol=2e-4, atol=1e-4)
def test_invalid_shapes(self):
x_np = np.random.rand(2, 3) * 255.
scale = np.random.rand() * 2.0 - 1.0
with self.assertRaisesRegexp(ValueError, 'Shape must be at least rank 3'):
self._adjust_value_in_yiq_tf(x_np, scale)
x_np = np.random.rand(4, 2, 4) * 255.
scale = np.random.rand() * 2.0 - 1.0
with self.assertRaisesOpError('input must have 3 channels but instead has '
'4 channels'):
self._adjust_value_in_yiq_tf(x_np, scale)
class AdjustSaturationInYiqTest(test_util.TensorFlowTestCase):
def _adjust_saturation_in_yiq_tf(self, x_np, scale):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np)
y = distort_image_ops.adjust_hsv_in_yiq(x, 0, scale, 1)
y_tf = y.eval()
return y_tf
def _adjust_saturation_in_yiq_np(self, x_np, scale):
"""Adjust saturation using linear interpolation."""
rgb_weights = np.array([0.299, 0.587, 0.114])
gray = np.sum(x_np * rgb_weights, axis=-1, keepdims=True)
y_v = x_np * scale + gray * (1 - scale)
return y_v
def test_adjust_random_saturation_in_yiq(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
'all_random',
'rg_same',
'rb_same',
'gb_same',
'rgb_same',
]
with self.cached_session():
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
scale = np.random.rand() * 2.0 - 1.0
if test_style == 'all_random':
pass
elif test_style == 'rg_same':
x_np[..., 1] = x_np[..., 0]
elif test_style == 'rb_same':
x_np[..., 2] = x_np[..., 0]
elif test_style == 'gb_same':
x_np[..., 2] = x_np[..., 1]
elif test_style == 'rgb_same':
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError('Invalid test style: %s' % (test_style))
y_baseline = self._adjust_saturation_in_yiq_np(x_np, scale)
y_tf = self._adjust_saturation_in_yiq_tf(x_np, scale)
self.assertAllClose(y_tf, y_baseline, rtol=2e-4, atol=1e-4)
def test_invalid_shapes(self):
x_np = np.random.rand(2, 3) * 255.
scale = np.random.rand() * 2.0 - 1.0
with self.assertRaisesRegexp(ValueError, 'Shape must be at least rank 3'):
self._adjust_saturation_in_yiq_tf(x_np, scale)
x_np = np.random.rand(4, 2, 4) * 255.
scale = np.random.rand() * 2.0 - 1.0
with self.assertRaisesOpError('input must have 3 channels but instead has '
'4 channels'):
self._adjust_saturation_in_yiq_tf(x_np, scale)
class AdjustHueInYiqBenchmark(test.Benchmark):
def _benchmark_adjust_hue_in_yiq(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session('', graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = distort_image_ops.adjust_hsv_in_yiq(inputs, delta, 1, 1)
run_op = control_flow_ops.group(outputs)
sess.run(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
sess.run(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + '_%s' % (cpu_count if cpu_count is not None else 'all')
print('benchmarkadjust_hue_in_yiq_299_299_3_%s step_time: %.2f us' %
(tag, step_time * 1e6))
self.report_benchmark(
name='benchmarkadjust_hue_in_yiq_299_299_3_%s' % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmark_adjust_hue_in_yiqCpu1(self):
self._benchmark_adjust_hue_in_yiq('/cpu:0', 1)
def benchmark_adjust_hue_in_yiqCpuAll(self):
self._benchmark_adjust_hue_in_yiq('/cpu:0', None)
def benchmark_adjust_hue_in_yiq_gpu_all(self):
self._benchmark_adjust_hue_in_yiq(test.gpu_device_name(), None)
class AdjustSaturationInYiqBenchmark(test.Benchmark):
def _benchmark_adjust_saturation_in_yiq(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session('', graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
scale = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = distort_image_ops.adjust_hsv_in_yiq(inputs, 0, scale, 1)
run_op = control_flow_ops.group(outputs)
sess.run(variables.global_variables_initializer())
for _ in xrange(warmup_rounds):
sess.run(run_op)
start = time.time()
for _ in xrange(benchmark_rounds):
sess.run(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = '%s' % (cpu_count) if cpu_count is not None else '_all'
print('benchmarkAdjustSaturationInYiq_299_299_3_cpu%s step_time: %.2f us' %
(tag, step_time * 1e6))
self.report_benchmark(
name='benchmarkAdjustSaturationInYiq_299_299_3_cpu%s' % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmark_adjust_saturation_in_yiq_cpu1(self):
self._benchmark_adjust_saturation_in_yiq('/cpu:0', 1)
def benchmark_adjust_saturation_in_yiq_cpu_all(self):
self._benchmark_adjust_saturation_in_yiq('/cpu:0', None)
def benchmark_adjust_saturation_in_yiq_gpu_all(self):
self._benchmark_adjust_saturation_in_yiq(test.gpu_device_name(), None)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/image/python/kernel_tests/distort_image_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from tensorflow.contrib.image.ops import gen_image_ops
from tensorflow.contrib.image.python.ops import image_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import googletest
_DTYPES = set(
[dtypes.uint8, dtypes.int32, dtypes.int64,
dtypes.float16, dtypes.float32, dtypes.float64])
class ImageOpsTest(test_util.TensorFlowTestCase):
def test_zeros(self):
for dtype in _DTYPES:
with self.cached_session():
for shape in [(5, 5), (24, 24), (2, 24, 24, 3)]:
for angle in [0, 1, np.pi / 2.0]:
image = array_ops.zeros(shape, dtype)
self.assertAllEqual(
image_ops.rotate(image, angle).eval(),
np.zeros(shape, dtype.as_numpy_dtype()))
# TODO(b/133773834) Re-enable these tests.
@unittest.skip("Skipping because of b/133773834.")
def test_rotate_even(self):
for dtype in _DTYPES:
with self.cached_session():
image = array_ops.reshape(
math_ops.cast(math_ops.range(36), dtype), (6, 6))
image_rep = array_ops.tile(image[None, :, :, None], [3, 1, 1, 1])
angles = constant_op.constant([0.0, np.pi / 4.0, np.pi / 2.0],
dtypes.float32)
image_rotated = image_ops.rotate(image_rep, angles)
self.assertAllEqual(image_rotated[:, :, :, 0].eval(),
[[[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17],
[18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29],
[30, 31, 32, 33, 34, 35]],
[[0, 3, 4, 11, 17, 0], [2, 3, 9, 16, 23, 23],
[1, 8, 15, 21, 22, 29], [6, 13, 20, 21, 27, 34],
[12, 18, 19, 26, 33, 33], [0, 18, 24, 31, 32, 0]],
[[5, 11, 17, 23, 29, 35], [4, 10, 16, 22, 28, 34],
[3, 9, 15, 21, 27, 33], [2, 8, 14, 20, 26, 32],
[1, 7, 13, 19, 25, 31], [0, 6, 12, 18, 24, 30]]])
def test_rotate_odd(self):
for dtype in _DTYPES:
with self.cached_session():
image = array_ops.reshape(
math_ops.cast(math_ops.range(25), dtype), (5, 5))
image_rep = array_ops.tile(image[None, :, :, None], [3, 1, 1, 1])
angles = constant_op.constant([np.pi / 4.0, 1.0, -np.pi / 2.0],
dtypes.float32)
image_rotated = image_ops.rotate(image_rep, angles)
self.assertAllEqual(image_rotated[:, :, :, 0].eval(),
[[[0, 3, 8, 9, 0], [1, 7, 8, 13, 19],
[6, 6, 12, 18, 18], [5, 11, 16, 17, 23],
[0, 15, 16, 21, 0]],
[[0, 3, 9, 14, 0], [2, 7, 8, 13, 19],
[1, 6, 12, 18, 23], [5, 11, 16, 17, 22],
[0, 10, 15, 21, 0]],
[[20, 15, 10, 5, 0], [21, 16, 11, 6, 1],
[22, 17, 12, 7, 2], [23, 18, 13, 8, 3],
[24, 19, 14, 9, 4]]])
def test_translate(self):
for dtype in _DTYPES:
with self.cached_session():
image = constant_op.constant(
[[1, 0, 1, 0],
[0, 1, 0, 1],
[1, 0, 1, 0],
[0, 1, 0, 1]], dtype=dtype)
translation = constant_op.constant([-1, -1], dtypes.float32)
image_translated = image_ops.translate(image, translation)
self.assertAllEqual(image_translated.eval(),
[[1, 0, 1, 0],
[0, 1, 0, 0],
[1, 0, 1, 0],
[0, 0, 0, 0]])
def test_compose(self):
for dtype in _DTYPES:
with self.cached_session():
image = constant_op.constant(
[[1, 1, 1, 0],
[1, 0, 0, 0],
[1, 1, 1, 0],
[0, 0, 0, 0]], dtype=dtype)
# Rotate counter-clockwise by pi / 2.
rotation = image_ops.angles_to_projective_transforms(np.pi / 2, 4, 4)
# Translate right by 1 (the transformation matrix is always inverted,
# hence the -1).
translation = constant_op.constant([1, 0, -1,
0, 1, 0,
0, 0],
dtype=dtypes.float32)
composed = image_ops.compose_transforms(rotation, translation)
image_transformed = image_ops.transform(image, composed)
self.assertAllEqual(image_transformed.eval(),
[[0, 0, 0, 0],
[0, 1, 0, 1],
[0, 1, 0, 1],
[0, 1, 1, 1]])
def test_extreme_projective_transform(self):
for dtype in _DTYPES:
with self.cached_session():
image = constant_op.constant(
[[1, 0, 1, 0],
[0, 1, 0, 1],
[1, 0, 1, 0],
[0, 1, 0, 1]], dtype=dtype)
transformation = constant_op.constant([1, 0, 0, 0, 1, 0, -1, 0],
dtypes.float32)
image_transformed = image_ops.transform(image, transformation)
self.assertAllEqual(image_transformed.eval(),
[[1, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0]])
def test_bilinear(self):
with self.cached_session():
image = constant_op.constant(
[[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]],
dtypes.float32)
# The following result matches:
# >>> scipy.ndimage.rotate(image, 45, order=1, reshape=False)
# which uses spline interpolation of order 1, equivalent to bilinear
# interpolation.
self.assertAllClose(
image_ops.rotate(image, np.pi / 4.0, interpolation="BILINEAR").eval(),
[[0.000, 0.000, 0.343, 0.000, 0.000],
[0.000, 0.586, 0.914, 0.586, 0.000],
[0.343, 0.914, 0.000, 0.914, 0.343],
[0.000, 0.586, 0.914, 0.586, 0.000],
[0.000, 0.000, 0.343, 0.000, 0.000]],
atol=0.001)
self.assertAllClose(
image_ops.rotate(image, np.pi / 4.0, interpolation="NEAREST").eval(),
[[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[1, 1, 0, 1, 1],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0]])
def test_bilinear_uint8(self):
with self.cached_session():
image = constant_op.constant(
np.asarray(
[[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 255, 255, 255, 0.0],
[0.0, 255, 0.0, 255, 0.0],
[0.0, 255, 255, 255, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0]],
np.uint8),
dtypes.uint8)
# == np.rint((expected image above) * 255)
self.assertAllEqual(
image_ops.rotate(image, np.pi / 4.0, interpolation="BILINEAR").eval(),
[[0.0, 0.0, 87., 0.0, 0.0],
[0.0, 149, 233, 149, 0.0],
[87., 233, 0.0, 233, 87.],
[0.0, 149, 233, 149, 0.0],
[0.0, 0.0, 87., 0.0, 0.0]])
def test_rotate_static_shape(self):
image = array_ops.diag([1., 2., 3.])
result = image_ops.rotate(
image, random_ops.random_uniform((), -1, 1), interpolation="BILINEAR")
self.assertEqual(image.get_shape(), result.get_shape())
def test_transform_static_output_shape(self):
image = constant_op.constant([[1., 2.], [3., 4.]])
result = image_ops.transform(
image, random_ops.random_uniform([8], -1, 1),
output_shape=constant_op.constant([3, 5]))
self.assertAllEqual([3, 5], result.get_shape())
def _test_grad(self, shape_to_test):
with self.cached_session():
test_image_shape = shape_to_test
test_image = np.random.randn(*test_image_shape)
test_image_tensor = constant_op.constant(
test_image, shape=test_image_shape)
test_transform = image_ops.angles_to_projective_transforms(
np.pi / 2, 4, 4)
output_shape = test_image_shape
output = image_ops.transform(test_image_tensor, test_transform)
left_err = gradient_checker.compute_gradient_error(
test_image_tensor,
test_image_shape,
output,
output_shape,
x_init_value=test_image)
self.assertLess(left_err, 1e-10)
def _test_grad_different_shape(self, input_shape, output_shape):
with self.cached_session():
test_image_shape = input_shape
test_image = np.random.randn(*test_image_shape)
test_image_tensor = constant_op.constant(
test_image, shape=test_image_shape)
test_transform = image_ops.angles_to_projective_transforms(
np.pi / 2, 4, 4)
if len(output_shape) == 2:
resize_shape = output_shape
elif len(output_shape) == 3:
resize_shape = output_shape[0:2]
elif len(output_shape) == 4:
resize_shape = output_shape[1:3]
output = image_ops.transform(
images=test_image_tensor,
transforms=test_transform,
output_shape=resize_shape)
left_err = gradient_checker.compute_gradient_error(
test_image_tensor,
test_image_shape,
output,
output_shape,
x_init_value=test_image)
self.assertLess(left_err, 1e-10)
def test_grad(self):
self._test_grad([16, 16])
self._test_grad([4, 12, 12])
self._test_grad([3, 4, 12, 12])
self._test_grad_different_shape([16, 16], [8, 8])
self._test_grad_different_shape([4, 12, 3], [8, 24, 3])
self._test_grad_different_shape([3, 4, 12, 3], [3, 8, 24, 3])
def test_projective_transform_v1(self):
"""The original ImageProjectiveTransform op should take 2 arguments."""
image = constant_op.constant([[[[1], [0]], [[0], [1]]]])
transform = constant_op.constant([[1., 0., 0., 0., 1., 0., 0., 0.]])
result = gen_image_ops.image_projective_transform(
image, transform, interpolation="NEAREST")
with self.cached_session():
self.assertAllEqual([[[[1], [0]], [[0], [1]]]], result.eval())
def test_transform_data_types(self):
for dtype in _DTYPES:
image = constant_op.constant([[1, 2], [3, 4]], dtype=dtype)
value = image_ops.transform(image, [1] * 8)
with self.test_session(use_gpu=True):
self.assertAllEqual(
value.eval(),
np.array([[4, 4], [4, 4]]).astype(dtype.as_numpy_dtype()))
@test_util.run_in_graph_and_eager_modes
def test_transform_eager(self):
image = constant_op.constant([[1., 2.], [3., 4.]])
value = image_ops.transform(image, [1] * 8)
with self.test_session(use_gpu=True):
self.assertAllEqual(self.evaluate(value), np.array([[4, 4], [4, 4]]))
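# A minimal NumPy sketch of the projective transform convention assumed by the
# tests above: a transform [a0, a1, a2, a3, a4, a5, a6, a7] maps each *output*
# pixel (x, y) to the *input* location ((a0*x + a1*y + a2) / k,
# (a3*x + a4*y + a5) / k) with k = a6*x + a7*y + 1, which is why translations
# appear negated. Out-of-range samples fill with zero. The helper below uses
# nearest-neighbor rounding on a single-channel image; it is illustrative only
# and not part of the library API.
def _apply_projective_transform_np(image, transform, fill_value=0):
  a0, a1, a2, a3, a4, a5, a6, a7 = transform
  height, width = image.shape
  output = np.full((height, width), fill_value, dtype=image.dtype)
  for y in range(height):
    for x in range(width):
      k = a6 * x + a7 * y + 1.
      x_in = int(round((a0 * x + a1 * y + a2) / k))
      y_in = int(round((a3 * x + a4 * y + a5) / k))
      if 0 <= x_in < width and 0 <= y_in < height:
        output[y, x] = image[y_in, x_in]
  return output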
class BipartiteMatchTest(test_util.TensorFlowTestCase):
def _BipartiteMatchTest(self, distance_mat, distance_mat_shape,
num_valid_rows,
expected_row_to_col_match,
expected_col_to_row_match):
distance_mat_np = np.array(distance_mat, dtype=np.float32).reshape(
distance_mat_shape)
expected_row_to_col_match_np = np.array(expected_row_to_col_match,
dtype=np.int32)
expected_col_to_row_match_np = np.array(expected_col_to_row_match,
dtype=np.int32)
with self.cached_session():
distance_mat_tf = constant_op.constant(distance_mat_np,
shape=distance_mat_shape)
location_to_prior, prior_to_location = image_ops.bipartite_match(
distance_mat_tf, num_valid_rows)
location_to_prior_np = location_to_prior.eval()
prior_to_location_np = prior_to_location.eval()
self.assertAllEqual(location_to_prior_np, expected_row_to_col_match_np)
self.assertAllEqual(prior_to_location_np, expected_col_to_row_match_np)
def testBipartiteMatch(self):
distance_mat = [0.5, 0.8, 0.1,
0.3, 0.2, 0.15]
num_valid_rows = 2
expected_row_to_col_match = [2, 1]
expected_col_to_row_match = [-1, 1, 0]
self._BipartiteMatchTest(distance_mat, [2, 3], num_valid_rows,
expected_row_to_col_match,
expected_col_to_row_match)
# The case of num_valid_rows less than num-of-rows-in-distance-mat.
num_valid_rows = 1
expected_row_to_col_match = [2, -1]
expected_col_to_row_match = [-1, -1, 0]
self._BipartiteMatchTest(distance_mat, [2, 3], num_valid_rows,
expected_row_to_col_match,
expected_col_to_row_match)
# The case of num_valid_rows being 0.
num_valid_rows = 0
expected_row_to_col_match = [-1, -1]
expected_col_to_row_match = [-1, -1, -1]
self._BipartiteMatchTest(distance_mat, [2, 3], num_valid_rows,
expected_row_to_col_match,
expected_col_to_row_match)
# The case of num_valid_rows less being -1.
num_valid_rows = -1
# The expected results are the same as num_valid_rows being 2.
expected_row_to_col_match = [2, 1]
expected_col_to_row_match = [-1, 1, 0]
self._BipartiteMatchTest(distance_mat, [2, 3], num_valid_rows,
expected_row_to_col_match,
expected_col_to_row_match)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/image/python/kernel_tests/image_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sparse_image_warp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.image.python.ops import sparse_image_warp
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.training import momentum
class SparseImageWarpTest(test_util.TensorFlowTestCase):
def setUp(self):
np.random.seed(0)
def testGetBoundaryLocations(self):
image_height = 11
image_width = 11
num_points_per_edge = 4
locs = sparse_image_warp._get_boundary_locations(image_height, image_width,
num_points_per_edge)
num_points = locs.shape[0]
self.assertEqual(num_points, 4 + 4 * num_points_per_edge)
locs = [(locs[i, 0], locs[i, 1]) for i in range(num_points)]
for i in (0, image_height - 1):
for j in (0, image_width - 1):
self.assertIn((i, j), locs, '{},{} not in the locations'.format(i, j))
for i in (2, 4, 6, 8):
for j in (0, image_width - 1):
self.assertIn((i, j), locs, '{},{} not in the locations'.format(i, j))
for i in (0, image_height - 1):
for j in (2, 4, 6, 8):
self.assertIn((i, j), locs, '{},{} not in the locations'.format(i, j))
def testGetGridLocations(self):
image_height = 5
image_width = 3
grid = sparse_image_warp._get_grid_locations(image_height, image_width)
for i in range(image_height):
for j in range(image_width):
self.assertEqual(grid[i, j, 0], i)
self.assertEqual(grid[i, j, 1], j)
def testZeroShift(self):
"""Run assertZeroShift for various hyperparameters."""
for order in (1, 2):
for regularization in (0, 0.01):
for num_boundary_points in (0, 1):
self.assertZeroShift(order, regularization, num_boundary_points)
def assertZeroShift(self, order, regularization, num_boundary_points):
"""Check that warping with zero displacements doesn't change the image."""
batch_size = 1
image_height = 4
image_width = 4
channels = 3
image = np.random.uniform(
size=[batch_size, image_height, image_width, channels])
input_image_op = constant_op.constant(np.float32(image))
control_point_locations = [[1., 1.], [2., 2.], [2., 1.]]
control_point_locations = constant_op.constant(
np.float32(np.expand_dims(control_point_locations, 0)))
control_point_displacements = np.zeros(
control_point_locations.shape.as_list())
control_point_displacements = constant_op.constant(
np.float32(control_point_displacements))
(warped_image_op, flow_field) = sparse_image_warp.sparse_image_warp(
input_image_op,
control_point_locations,
control_point_locations + control_point_displacements,
interpolation_order=order,
regularization_weight=regularization,
num_boundary_points=num_boundary_points)
with self.cached_session() as sess:
warped_image, input_image, _ = sess.run(
[warped_image_op, input_image_op, flow_field])
self.assertAllClose(warped_image, input_image)
def testMoveSinglePixel(self):
"""Run assertMoveSinglePixel for various hyperparameters and data types."""
for order in (1, 2):
for num_boundary_points in (1, 2):
for type_to_use in (dtypes.float32, dtypes.float64):
self.assertMoveSinglePixel(order, num_boundary_points, type_to_use)
def assertMoveSinglePixel(self, order, num_boundary_points, type_to_use):
"""Move a single block in a small grid using warping."""
batch_size = 1
image_height = 7
image_width = 7
channels = 3
image = np.zeros([batch_size, image_height, image_width, channels])
image[:, 3, 3, :] = 1.0
input_image_op = constant_op.constant(image, dtype=type_to_use)
# Place a control point at the one white pixel.
control_point_locations = [[3., 3.]]
control_point_locations = constant_op.constant(
np.float32(np.expand_dims(control_point_locations, 0)),
dtype=type_to_use)
# Shift it one pixel to the right.
control_point_displacements = [[0., 1.0]]
control_point_displacements = constant_op.constant(
np.float32(np.expand_dims(control_point_displacements, 0)),
dtype=type_to_use)
(warped_image_op, flow_field) = sparse_image_warp.sparse_image_warp(
input_image_op,
control_point_locations,
control_point_locations + control_point_displacements,
interpolation_order=order,
num_boundary_points=num_boundary_points)
with self.cached_session() as sess:
warped_image, input_image, flow = sess.run(
[warped_image_op, input_image_op, flow_field])
# Check that it moved the pixel correctly.
self.assertAllClose(
warped_image[0, 4, 5, :],
input_image[0, 4, 4, :],
atol=1e-5,
rtol=1e-5)
# Test that there is no flow at the corners.
for i in (0, image_height - 1):
for j in (0, image_width - 1):
self.assertAllClose(
flow[0, i, j, :], np.zeros([2]), atol=1e-5, rtol=1e-5)
def load_image(self, image_file, sess):
image_op = image_ops.decode_png(
io_ops.read_file(image_file), dtype=dtypes.uint8, channels=4)[:, :, 0:3]
return sess.run(image_op)
def testSmileyFace(self):
"""Check warping accuracy by comparing to hardcoded warped images."""
test_data_dir = test.test_src_dir_path('contrib/image/python/'
'kernel_tests/test_data/')
input_file = test_data_dir + 'Yellow_Smiley_Face.png'
with self.cached_session() as sess:
input_image = self.load_image(input_file, sess)
control_points = np.asarray([[64, 59], [180 - 64, 59], [39, 111],
[180 - 39, 111], [90, 143], [58, 134],
[180 - 58, 134]]) # pyformat: disable
control_point_displacements = np.asarray(
[[-10.5, 10.5], [10.5, 10.5], [0, 0], [0, 0], [0, -10], [-20, 10.25],
[10, 10.75]])
control_points_op = constant_op.constant(
np.expand_dims(np.float32(control_points[:, [1, 0]]), 0))
control_point_displacements_op = constant_op.constant(
np.expand_dims(np.float32(control_point_displacements[:, [1, 0]]), 0))
float_image = np.expand_dims(np.float32(input_image) / 255, 0)
input_image_op = constant_op.constant(float_image)
for interpolation_order in (1, 2, 3):
for num_boundary_points in (0, 1, 4):
warp_op, _ = sparse_image_warp.sparse_image_warp(
input_image_op,
control_points_op,
control_points_op + control_point_displacements_op,
interpolation_order=interpolation_order,
num_boundary_points=num_boundary_points)
with self.cached_session() as sess:
warped_image = sess.run(warp_op)
out_image = np.uint8(warped_image[0, :, :, :] * 255)
target_file = (
test_data_dir +
'Yellow_Smiley_Face_Warp-interp' + '-{}-clamp-{}.png'.format(
interpolation_order, num_boundary_points))
target_image = self.load_image(target_file, sess)
# Check that the target_image and out_image difference is no
# bigger than 2 (on a scale of 0-255). Due to differences in
# floating point computation on different devices, the float
# output in warped_image may get rounded to a different int
# than that in the saved png file loaded into target_image.
self.assertAllClose(target_image, out_image, atol=2, rtol=1e-3)
def testThatBackpropRuns(self):
"""Run optimization to ensure that gradients can be computed."""
batch_size = 1
image_height = 9
image_width = 12
image = variables.Variable(
np.float32(
np.random.uniform(size=[batch_size, image_height, image_width, 3])))
control_point_locations = [[3., 3.]]
control_point_locations = constant_op.constant(
np.float32(np.expand_dims(control_point_locations, 0)))
control_point_displacements = [[0.25, -0.5]]
control_point_displacements = constant_op.constant(
np.float32(np.expand_dims(control_point_displacements, 0)))
warped_image, _ = sparse_image_warp.sparse_image_warp(
image,
control_point_locations,
control_point_locations + control_point_displacements,
num_boundary_points=3)
loss = math_ops.reduce_mean(math_ops.abs(warped_image - image))
optimizer = momentum.MomentumOptimizer(0.001, 0.9)
grad = gradients.gradients(loss, [image])
grad, _ = clip_ops.clip_by_global_norm(grad, 1.0)
opt_func = optimizer.apply_gradients(zip(grad, [image]))
init_op = variables.global_variables_initializer()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(5):
sess.run([loss, opt_func])
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/image/python/kernel_tests/sparse_image_warp_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python layer for image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.image.ops import gen_single_image_random_dot_stereograms_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
_sirds_ops = loader.load_op_library(
resource_loader.get_path_to_datafile(
"_single_image_random_dot_stereograms.so"))
def single_image_random_dot_stereograms(depth_values,
hidden_surface_removal=None,
convergence_dots_size=None,
dots_per_inch=None,
eye_separation=None,
mu=None,
normalize=None,
normalize_max=None,
normalize_min=None,
border_level=None,
number_colors=None,
output_image_shape=None,
output_data_window=None):
"""Output a RandomDotStereogram Tensor for export via encode_PNG/JPG OP.
Given the 2-D tensor 'depth_values' with encoded Z values, this operation
will encode 3-D data into a 2-D image. The output of this Op is suitable
for the encode_PNG/JPG ops. Be careful with image compression as this may
  corrupt the encoded 3-D data within the image.
Based upon [this
paper](http://www.learningace.com/doc/4331582/b6ab058d1e206d68ab60e4e1ead2fe6e/sirds-paper).
This outputs a SIRDS image as picture_out.png:
```python
img=[[1,2,3,3,2,1],
[1,2,3,4,5,2],
[1,2,3,4,5,3],
[1,2,3,4,5,4],
[6,5,4,4,5,5]]
session = tf.compat.v1.InteractiveSession()
sirds = single_image_random_dot_stereograms(
img,
convergence_dots_size=8,
number_colors=256,normalize=True)
out = sirds.eval()
png = tf.image.encode_png(out).eval()
with open('picture_out.png', 'wb') as f:
f.write(png)
```
Args:
depth_values: A `Tensor`. Must be one of the following types:
`float64`, `float32`, `int64`, `int32`. Z values of data to encode
      into 'output_data_window' window; lower values are further away {0.0 floor(far),
1.0 ceiling(near) after norm}, must be 2-D tensor
hidden_surface_removal: An optional `bool`. Defaults to `True`.
Activate hidden surface removal
convergence_dots_size: An optional `int`. Defaults to `8`.
      Black dot size in pixels to help the viewer converge the image, drawn
      on the bottom of the image
dots_per_inch: An optional `int`. Defaults to `72`.
Output device in dots/inch
eye_separation: An optional `float`. Defaults to `2.5`.
Separation between eyes in inches
mu: An optional `float`. Defaults to `0.3333`.
Depth of field, Fraction of viewing distance (eg. 1/3 = 0.3333)
normalize: An optional `bool`. Defaults to `True`.
Normalize input data to [0.0, 1.0]
normalize_max: An optional `float`. Defaults to `-100`.
Fix MAX value for Normalization (0.0) - if < MIN, autoscale
normalize_min: An optional `float`. Defaults to `100`.
Fix MIN value for Normalization (0.0) - if > MAX, autoscale
border_level: An optional `float`. Defaults to `0`.
      Value of border depth 0.0 {far} to 1.0 {near}
number_colors: An optional `int`. Defaults to `256`. 2 (Black &
White), 256 (grayscale), and Numbers > 256 (Full Color) are
supported
output_image_shape: An optional `tf.TensorShape` or list of `ints`.
Defaults to shape `[1024, 768, 1]`. Defines output shape of returned
image in '[X,Y, Channels]' 1-grayscale, 3 color; channels will be
updated to 3 if number_colors > 256
output_data_window: An optional `tf.TensorShape` or list of `ints`.
Defaults to `[1022, 757]`. Size of "DATA" window, must be equal to or
smaller than `output_image_shape`, will be centered and use
`convergence_dots_size` for best fit to avoid overlap if possible
Returns:
A `Tensor` of type `uint8` of shape 'output_image_shape' with encoded
'depth_values'
"""
result = gen_single_image_random_dot_stereograms_ops.single_image_random_dot_stereograms( # pylint: disable=line-too-long
depth_values=depth_values,
hidden_surface_removal=hidden_surface_removal,
convergence_dots_size=convergence_dots_size,
dots_per_inch=dots_per_inch,
eye_separation=eye_separation,
mu=mu,
normalize=normalize,
normalize_max=normalize_max,
normalize_min=normalize_min,
border_level=border_level,
number_colors=number_colors,
output_image_shape=output_image_shape,
output_data_window=output_data_window)
return result
ops.NotDifferentiable("SingleImageRandomDotStereograms")
|
tensorflow-master
|
tensorflow/contrib/image/python/ops/single_image_random_dot_stereograms.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image warping using sparse flow defined at control points."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.image.python.ops import dense_image_warp
from tensorflow.contrib.image.python.ops import interpolate_spline
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
def _get_grid_locations(image_height, image_width):
"""Wrapper for np.meshgrid."""
y_range = np.linspace(0, image_height - 1, image_height)
x_range = np.linspace(0, image_width - 1, image_width)
y_grid, x_grid = np.meshgrid(y_range, x_range, indexing='ij')
return np.stack((y_grid, x_grid), -1)
def _expand_to_minibatch(np_array, batch_size):
"""Tile arbitrarily-sized np_array to include new batch dimension."""
tiles = [batch_size] + [1] * np_array.ndim
return np.tile(np.expand_dims(np_array, 0), tiles)
def _get_boundary_locations(image_height, image_width, num_points_per_edge):
"""Compute evenly-spaced indices along edge of image."""
y_range = np.linspace(0, image_height - 1, num_points_per_edge + 2)
x_range = np.linspace(0, image_width - 1, num_points_per_edge + 2)
ys, xs = np.meshgrid(y_range, x_range, indexing='ij')
is_boundary = np.logical_or(
np.logical_or(xs == 0, xs == image_width - 1),
np.logical_or(ys == 0, ys == image_height - 1))
return np.stack([ys[is_boundary], xs[is_boundary]], axis=-1)
def _add_zero_flow_controls_at_boundary(control_point_locations,
control_point_flows, image_height,
image_width, boundary_points_per_edge):
"""Add control points for zero-flow boundary conditions.
Augment the set of control points with extra points on the
boundary of the image that have zero flow.
Args:
control_point_locations: input control points
control_point_flows: their flows
image_height: image height
image_width: image width
boundary_points_per_edge: number of points to add in the middle of each
edge (not including the corners).
The total number of points added is
4 + 4*(boundary_points_per_edge).
Returns:
merged_control_point_locations: augmented set of control point locations
merged_control_point_flows: augmented set of control point flows
"""
batch_size = tensor_shape.dimension_value(control_point_locations.shape[0])
boundary_point_locations = _get_boundary_locations(image_height, image_width,
boundary_points_per_edge)
boundary_point_flows = np.zeros([boundary_point_locations.shape[0], 2])
type_to_use = control_point_locations.dtype
boundary_point_locations = constant_op.constant(
_expand_to_minibatch(boundary_point_locations, batch_size),
dtype=type_to_use)
boundary_point_flows = constant_op.constant(
_expand_to_minibatch(boundary_point_flows, batch_size), dtype=type_to_use)
merged_control_point_locations = array_ops.concat(
[control_point_locations, boundary_point_locations], 1)
merged_control_point_flows = array_ops.concat(
[control_point_flows, boundary_point_flows], 1)
return merged_control_point_locations, merged_control_point_flows
def sparse_image_warp(image,
source_control_point_locations,
dest_control_point_locations,
interpolation_order=2,
regularization_weight=0.0,
num_boundary_points=0,
name='sparse_image_warp'):
"""Image warping using correspondences between sparse control points.
Apply a non-linear warp to the image, where the warp is specified by
the source and destination locations of a (potentially small) number of
control points. First, we use a polyharmonic spline
(`tf.contrib.image.interpolate_spline`) to interpolate the displacements
between the corresponding control points to a dense flow field.
Then, we warp the image using this dense flow field
(`tf.contrib.image.dense_image_warp`).
Let t index our control points. For regularization_weight=0, we have:
warped_image[b, dest_control_point_locations[b, t, 0],
dest_control_point_locations[b, t, 1], :] =
image[b, source_control_point_locations[b, t, 0],
source_control_point_locations[b, t, 1], :].
For regularization_weight > 0, this condition is met approximately, since
regularized interpolation trades off smoothness of the interpolant vs.
reconstruction of the interpolant at the control points.
See `tf.contrib.image.interpolate_spline` for further documentation of the
interpolation_order and regularization_weight arguments.
Args:
image: `[batch, height, width, channels]` float `Tensor`
source_control_point_locations: `[batch, num_control_points, 2]` float
`Tensor`
dest_control_point_locations: `[batch, num_control_points, 2]` float
`Tensor`
interpolation_order: polynomial order used by the spline interpolation
regularization_weight: weight on smoothness regularizer in interpolation
num_boundary_points: How many zero-flow boundary points to include at
      each image edge. Usage:
num_boundary_points=0: don't add zero-flow points
num_boundary_points=1: 4 corners of the image
num_boundary_points=2: 4 corners and one in the middle of each edge
(8 points total)
num_boundary_points=n: 4 corners and n-1 along each edge
name: A name for the operation (optional).
Note that image and offsets can be of type tf.half, tf.float32, or
tf.float64, and do not necessarily have to be the same type.
Returns:
warped_image: `[batch, height, width, channels]` float `Tensor` with same
type as input image.
flow_field: `[batch, height, width, 2]` float `Tensor` containing the dense
flow field produced by the interpolation.
"""
image = ops.convert_to_tensor(image)
source_control_point_locations = ops.convert_to_tensor(
source_control_point_locations)
dest_control_point_locations = ops.convert_to_tensor(
dest_control_point_locations)
control_point_flows = (
dest_control_point_locations - source_control_point_locations)
clamp_boundaries = num_boundary_points > 0
boundary_points_per_edge = num_boundary_points - 1
with ops.name_scope(name):
batch_size, image_height, image_width, _ = image.get_shape().as_list()
# This generates the dense locations where the interpolant
# will be evaluated.
grid_locations = _get_grid_locations(image_height, image_width)
flattened_grid_locations = np.reshape(grid_locations,
[image_height * image_width, 2])
flattened_grid_locations = constant_op.constant(
_expand_to_minibatch(flattened_grid_locations, batch_size), image.dtype)
if clamp_boundaries:
(dest_control_point_locations,
control_point_flows) = _add_zero_flow_controls_at_boundary(
dest_control_point_locations, control_point_flows, image_height,
image_width, boundary_points_per_edge)
flattened_flows = interpolate_spline.interpolate_spline(
dest_control_point_locations, control_point_flows,
flattened_grid_locations, interpolation_order, regularization_weight)
dense_flows = array_ops.reshape(flattened_flows,
[batch_size, image_height, image_width, 2])
warped_image = dense_image_warp.dense_image_warp(image, dense_flows)
return warped_image, dense_flows
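# A minimal usage sketch of sparse_image_warp, following the docstring above:
# a few control points are displaced and the surrounding pixels follow via the
# interpolated dense flow. The shapes, values, and helper name are
# illustrative only; evaluate the returned tensors with a session in graph
# mode to obtain concrete pixels.
def _sparse_image_warp_example():
  image = constant_op.constant(
      np.random.uniform(size=[1, 8, 8, 3]).astype(np.float32))
  source = constant_op.constant([[[2.0, 2.0], [5.0, 5.0]]])  # [batch, n, 2]
  dest = constant_op.constant([[[3.0, 2.0], [5.0, 6.0]]])
  warped_image, dense_flows = sparse_image_warp(
      image, source, dest, interpolation_order=2, num_boundary_points=1)
  # warped_image: [1, 8, 8, 3]; dense_flows: [1, 8, 8, 2].
  return warped_image, dense_flows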
|
tensorflow-master
|
tensorflow/contrib/image/python/ops/sparse_image_warp.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image warping using per-pixel flow vectors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
def _interpolate_bilinear(grid,
query_points,
name='interpolate_bilinear',
indexing='ij'):
"""Similar to Matlab's interp2 function.
Finds values for query points on a grid using bilinear interpolation.
Args:
grid: a 4-D float `Tensor` of shape `[batch, height, width, channels]`.
query_points: a 3-D float `Tensor` of N points with shape `[batch, N, 2]`.
name: a name for the operation (optional).
indexing: whether the query points are specified as row and column (ij),
or Cartesian coordinates (xy).
Returns:
values: a 3-D `Tensor` with shape `[batch, N, channels]`
Raises:
    ValueError: if the indexing mode is invalid, or if the shape of the inputs
      is invalid.
"""
if indexing != 'ij' and indexing != 'xy':
raise ValueError('Indexing mode must be \'ij\' or \'xy\'')
with ops.name_scope(name):
grid = ops.convert_to_tensor(grid)
query_points = ops.convert_to_tensor(query_points)
shape = grid.get_shape().as_list()
if len(shape) != 4:
msg = 'Grid must be 4 dimensional. Received size: '
raise ValueError(msg + str(grid.get_shape()))
batch_size, height, width, channels = (array_ops.shape(grid)[0],
array_ops.shape(grid)[1],
array_ops.shape(grid)[2],
array_ops.shape(grid)[3])
shape = [batch_size, height, width, channels]
query_type = query_points.dtype
grid_type = grid.dtype
with ops.control_dependencies([
check_ops.assert_equal(
len(query_points.get_shape()),
3,
message='Query points must be 3 dimensional.'),
check_ops.assert_equal(
array_ops.shape(query_points)[2],
2,
message='Query points must be size 2 in dim 2.')
]):
num_queries = array_ops.shape(query_points)[1]
with ops.control_dependencies([
check_ops.assert_greater_equal(
height, 2, message='Grid height must be at least 2.'),
check_ops.assert_greater_equal(
width, 2, message='Grid width must be at least 2.')
]):
alphas = []
floors = []
ceils = []
index_order = [0, 1] if indexing == 'ij' else [1, 0]
unstacked_query_points = array_ops.unstack(query_points, axis=2)
for dim in index_order:
with ops.name_scope('dim-' + str(dim)):
queries = unstacked_query_points[dim]
size_in_indexing_dimension = shape[dim + 1]
# max_floor is size_in_indexing_dimension - 2 so that max_floor + 1
# is still a valid index into the grid.
max_floor = math_ops.cast(size_in_indexing_dimension - 2, query_type)
min_floor = constant_op.constant(0.0, dtype=query_type)
floor = math_ops.minimum(
math_ops.maximum(min_floor, math_ops.floor(queries)), max_floor)
int_floor = math_ops.cast(floor, dtypes.int32)
floors.append(int_floor)
ceil = int_floor + 1
ceils.append(ceil)
# alpha has the same type as the grid, as we will directly use alpha
# when taking linear combinations of pixel values from the image.
alpha = math_ops.cast(queries - floor, grid_type)
min_alpha = constant_op.constant(0.0, dtype=grid_type)
max_alpha = constant_op.constant(1.0, dtype=grid_type)
alpha = math_ops.minimum(math_ops.maximum(min_alpha, alpha), max_alpha)
# Expand alpha to [b, n, 1] so we can use broadcasting
# (since the alpha values don't depend on the channel).
alpha = array_ops.expand_dims(alpha, 2)
alphas.append(alpha)
with ops.control_dependencies([
check_ops.assert_less_equal(
math_ops.cast(batch_size * height * width, dtype=dtypes.float32),
np.iinfo(np.int32).max / 8,
message="""The image size or batch size is sufficiently large
that the linearized addresses used by array_ops.gather
may exceed the int32 limit.""")
]):
flattened_grid = array_ops.reshape(
grid, [batch_size * height * width, channels])
batch_offsets = array_ops.reshape(
math_ops.range(batch_size) * height * width, [batch_size, 1])
# This wraps array_ops.gather. We reshape the image data such that the
# batch, y, and x coordinates are pulled into the first dimension.
# Then we gather. Finally, we reshape the output back. It's possible this
# code would be made simpler by using array_ops.gather_nd.
def gather(y_coords, x_coords, name):
with ops.name_scope('gather-' + name):
linear_coordinates = batch_offsets + y_coords * width + x_coords
gathered_values = array_ops.gather(flattened_grid, linear_coordinates)
return array_ops.reshape(gathered_values,
[batch_size, num_queries, channels])
# grab the pixel values in the 4 corners around each query point
top_left = gather(floors[0], floors[1], 'top_left')
top_right = gather(floors[0], ceils[1], 'top_right')
bottom_left = gather(ceils[0], floors[1], 'bottom_left')
bottom_right = gather(ceils[0], ceils[1], 'bottom_right')
# now, do the actual interpolation
with ops.name_scope('interpolate'):
interp_top = alphas[1] * (top_right - top_left) + top_left
interp_bottom = alphas[1] * (bottom_right - bottom_left) + bottom_left
interp = alphas[0] * (interp_bottom - interp_top) + interp_top
return interp
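# A pure-NumPy reference for a single bilinear query, mirroring the math in
# _interpolate_bilinear above (clamped floor corners plus two lerps). The
# helper name and the 2-D single-channel restriction are illustrative only.
def _bilinear_reference_np(grid, y, x):
  y0 = int(np.clip(np.floor(y), 0, grid.shape[0] - 2))
  x0 = int(np.clip(np.floor(x), 0, grid.shape[1] - 2))
  alpha_y = np.clip(y - y0, 0.0, 1.0)
  alpha_x = np.clip(x - x0, 0.0, 1.0)
  top = grid[y0, x0] + alpha_x * (grid[y0, x0 + 1] - grid[y0, x0])
  bottom = grid[y0 + 1, x0] + alpha_x * (grid[y0 + 1, x0 + 1] - grid[y0 + 1, x0])
  return top + alpha_y * (bottom - top)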
def dense_image_warp(image, flow, name='dense_image_warp'):
"""Image warping using per-pixel flow vectors.
Apply a non-linear warp to the image, where the warp is specified by a dense
flow field of offset vectors that define the correspondences of pixel values
in the output image back to locations in the source image. Specifically, the
pixel value at output[b, j, i, c] is
images[b, j - flow[b, j, i, 0], i - flow[b, j, i, 1], c].
The locations specified by this formula do not necessarily map to an int
index. Therefore, the pixel value is obtained by bilinear
interpolation of the 4 nearest pixels around
(b, j - flow[b, j, i, 0], i - flow[b, j, i, 1]). For locations outside
of the image, we use the nearest pixel values at the image boundary.
Args:
image: 4-D float `Tensor` with shape `[batch, height, width, channels]`.
flow: A 4-D float `Tensor` with shape `[batch, height, width, 2]`.
name: A name for the operation (optional).
Note that image and flow can be of type tf.half, tf.float32, or tf.float64,
and do not necessarily have to be the same type.
Returns:
    A 4-D float `Tensor` with shape `[batch, height, width, channels]`
and same type as input image.
Raises:
ValueError: if height < 2 or width < 2 or the inputs have the wrong number
of dimensions.
"""
with ops.name_scope(name):
batch_size, height, width, channels = (array_ops.shape(image)[0],
array_ops.shape(image)[1],
array_ops.shape(image)[2],
array_ops.shape(image)[3])
# The flow is defined on the image grid. Turn the flow into a list of query
# points in the grid space.
grid_x, grid_y = array_ops.meshgrid(
math_ops.range(width), math_ops.range(height))
stacked_grid = math_ops.cast(
array_ops.stack([grid_y, grid_x], axis=2), flow.dtype)
batched_grid = array_ops.expand_dims(stacked_grid, axis=0)
query_points_on_grid = batched_grid - flow
query_points_flattened = array_ops.reshape(query_points_on_grid,
[batch_size, height * width, 2])
# Compute values at the query points, then reshape the result back to the
# image grid.
interpolated = _interpolate_bilinear(image, query_points_flattened)
interpolated = array_ops.reshape(interpolated,
[batch_size, height, width, channels])
return interpolated
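# A minimal usage sketch of dense_image_warp, following the docstring above:
# a zero flow field reproduces the input image, while a constant flow of
# (1, 0) pulls every output pixel from one row above, shifting content down.
# Shapes and the helper name are illustrative only; evaluate the returned
# tensors with a session in graph mode.
def _dense_image_warp_example():
  image = constant_op.constant(
      np.random.uniform(size=[1, 4, 4, 3]).astype(np.float32))
  zero_flow = array_ops.zeros([1, 4, 4, 2])
  identity_warp = dense_image_warp(image, zero_flow)  # equals `image`.
  shift_flow = constant_op.constant(
      np.tile(np.float32([1.0, 0.0]), [1, 4, 4, 1]))
  shifted = dense_image_warp(image, shift_flow)
  return identity_warp, shifted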
|
tensorflow-master
|
tensorflow/contrib/image/python/ops/dense_image_warp.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Polyharmonic spline interpolation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
EPSILON = 0.0000000001
def _cross_squared_distance_matrix(x, y):
"""Pairwise squared distance between two (batch) matrices' rows (2nd dim).
Computes the pairwise distances between rows of x and rows of y
Args:
x: [batch_size, n, d] float `Tensor`
y: [batch_size, m, d] float `Tensor`
Returns:
squared_dists: [batch_size, n, m] float `Tensor`, where
squared_dists[b,i,j] = ||x[b,i,:] - y[b,j,:]||^2
"""
x_norm_squared = math_ops.reduce_sum(math_ops.square(x), 2)
y_norm_squared = math_ops.reduce_sum(math_ops.square(y), 2)
# Expand so that we can broadcast.
x_norm_squared_tile = array_ops.expand_dims(x_norm_squared, 2)
y_norm_squared_tile = array_ops.expand_dims(y_norm_squared, 1)
x_y_transpose = math_ops.matmul(x, y, adjoint_b=True)
  # squared_dists[b,i,j] = ||x_bi - y_bj||^2 = x_bi'x_bi - 2x_bi'y_bj + y_bj'y_bj
squared_dists = x_norm_squared_tile - 2 * x_y_transpose + y_norm_squared_tile
return squared_dists
def _pairwise_squared_distance_matrix(x):
"""Pairwise squared distance among a (batch) matrix's rows (2nd dim).
This saves a bit of computation vs. using _cross_squared_distance_matrix(x,x)
Args:
x: `[batch_size, n, d]` float `Tensor`
Returns:
squared_dists: `[batch_size, n, n]` float `Tensor`, where
squared_dists[b,i,j] = ||x[b,i,:] - x[b,j,:]||^2
"""
x_x_transpose = math_ops.matmul(x, x, adjoint_b=True)
x_norm_squared = array_ops.matrix_diag_part(x_x_transpose)
x_norm_squared_tile = array_ops.expand_dims(x_norm_squared, 2)
  # squared_dists[b,i,j] = ||x_bi - x_bj||^2 = x_bi'x_bi - 2x_bi'x_bj + x_bj'x_bj
squared_dists = x_norm_squared_tile - 2 * x_x_transpose + array_ops.transpose(
x_norm_squared_tile, [0, 2, 1])
return squared_dists
def _solve_interpolation(train_points, train_values, order,
regularization_weight):
"""Solve for interpolation coefficients.
Computes the coefficients of the polyharmonic interpolant for the 'training'
data defined by (train_points, train_values) using the kernel phi.
Args:
train_points: `[b, n, d]` interpolation centers
train_values: `[b, n, k]` function values
order: order of the interpolation
regularization_weight: weight to place on smoothness regularization term
Returns:
w: `[b, n, k]` weights on each interpolation center
v: `[b, d, k]` weights on each input dimension
Raises:
ValueError: if d or k is not fully specified.
"""
# These dimensions are set dynamically at runtime.
b, n, _ = array_ops.unstack(array_ops.shape(train_points), num=3)
d = train_points.shape[-1]
if tensor_shape.dimension_value(d) is None:
raise ValueError('The dimensionality of the input points (d) must be '
'statically-inferrable.')
k = train_values.shape[-1]
if tensor_shape.dimension_value(k) is None:
raise ValueError('The dimensionality of the output values (k) must be '
'statically-inferrable.')
# First, rename variables so that the notation (c, f, w, v, A, B, etc.)
# follows https://en.wikipedia.org/wiki/Polyharmonic_spline.
# To account for python style guidelines we use
# matrix_a for A and matrix_b for B.
c = train_points
f = train_values
# Next, construct the linear system.
with ops.name_scope('construct_linear_system'):
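    # The block system assembled below (cf. the Wikipedia reference above) is
    #     [[ A    B ]   [[w]     [[f]
    #      [ B^T  0 ]] .  [v]]  =  [0]]
    # where matrix_a[i, j] = _phi(||c_i - c_j||^2, order) and matrix_b = [c, 1].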
matrix_a = _phi(_pairwise_squared_distance_matrix(c), order) # [b, n, n]
if regularization_weight > 0:
batch_identity_matrix = array_ops.expand_dims(
linalg_ops.eye(n, dtype=c.dtype), 0)
matrix_a += regularization_weight * batch_identity_matrix
# Append ones to the feature values for the bias term in the linear model.
ones = array_ops.ones_like(c[..., :1], dtype=c.dtype)
matrix_b = array_ops.concat([c, ones], 2) # [b, n, d + 1]
# [b, n + d + 1, n]
left_block = array_ops.concat(
[matrix_a, array_ops.transpose(matrix_b, [0, 2, 1])], 1)
num_b_cols = matrix_b.get_shape()[2] # d + 1
lhs_zeros = array_ops.zeros([b, num_b_cols, num_b_cols], train_points.dtype)
right_block = array_ops.concat([matrix_b, lhs_zeros],
1) # [b, n + d + 1, d + 1]
lhs = array_ops.concat([left_block, right_block],
2) # [b, n + d + 1, n + d + 1]
rhs_zeros = array_ops.zeros([b, d + 1, k], train_points.dtype)
rhs = array_ops.concat([f, rhs_zeros], 1) # [b, n + d + 1, k]
# Then, solve the linear system and unpack the results.
with ops.name_scope('solve_linear_system'):
w_v = linalg_ops.matrix_solve(lhs, rhs)
w = w_v[:, :n, :]
v = w_v[:, n:, :]
return w, v
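# Illustrative sketch (not part of the original module): the block system
# assembled above is
#     [[A, B], [B^T, 0]] @ [[w], [v]] = [[f], [0]],
# with A of shape [b, n, n] and B of shape [b, n, d + 1]. The hypothetical
# helper below solves a tiny 1-D problem; evaluating w and v requires a
# TF1-style session.
def _example_solve_tiny_system():
  train_points = ops.convert_to_tensor([[[0.], [1.], [2.]]])  # [1, 3, 1]
  train_values = ops.convert_to_tensor([[[0.], [1.], [4.]]])  # [1, 3, 1]
  w, v = _solve_interpolation(train_points, train_values, order=2,
                              regularization_weight=0.0)
  return w, v  # w: [1, 3, 1], v: [1, 2, 1] (d + 1 rows including the bias)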
def _apply_interpolation(query_points, train_points, w, v, order):
"""Apply polyharmonic interpolation model to data.
Given coefficients w and v for the interpolation model, we evaluate
interpolated function values at query_points.
Args:
query_points: `[b, m, d]` x values to evaluate the interpolation at
train_points: `[b, n, d]` x values that act as the interpolation centers
      (the c variables in the Wikipedia article)
    w: `[b, n, k]` weights on each interpolation center
    v: `[b, d + 1, k]` weights on each input dimension and the bias term
order: order of the interpolation
Returns:
Polyharmonic interpolation evaluated at points defined in query_points.
"""
# First, compute the contribution from the rbf term.
pairwise_dists = _cross_squared_distance_matrix(query_points, train_points)
phi_pairwise_dists = _phi(pairwise_dists, order)
rbf_term = math_ops.matmul(phi_pairwise_dists, w)
# Then, compute the contribution from the linear term.
# Pad query_points with ones, for the bias term in the linear model.
query_points_pad = array_ops.concat([
query_points,
array_ops.ones_like(query_points[..., :1], train_points.dtype)
], 2)
linear_term = math_ops.matmul(query_points_pad, v)
return rbf_term + linear_term
def _phi(r, order):
"""Coordinate-wise nonlinearity used to define the order of the interpolation.
See https://en.wikipedia.org/wiki/Polyharmonic_spline for the definition.
Args:
r: input op
order: interpolation order
Returns:
    phi_k evaluated coordinate-wise on r, for k = order
"""
  # Using EPSILON prevents log(0), sqrt(0), etc.
# sqrt(0) is well-defined, but its gradient is not
with ops.name_scope('phi'):
if order == 1:
r = math_ops.maximum(r, EPSILON)
r = math_ops.sqrt(r)
return r
elif order == 2:
return 0.5 * r * math_ops.log(math_ops.maximum(r, EPSILON))
elif order == 4:
return 0.5 * math_ops.square(r) * math_ops.log(
math_ops.maximum(r, EPSILON))
elif order % 2 == 0:
r = math_ops.maximum(r, EPSILON)
return 0.5 * math_ops.pow(r, 0.5 * order) * math_ops.log(r)
else:
r = math_ops.maximum(r, EPSILON)
return math_ops.pow(r, 0.5 * order)
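# Illustrative sketch (not part of the original module): _phi receives
# *squared* distances, so order 1 yields sqrt(r), order 2 yields
# 0.5 * r * log(r), and odd orders yield r ** (order / 2). The helper name is
# hypothetical; evaluate the returned tensors in a TF1-style session.
def _example_phi_values():
  r = ops.convert_to_tensor([0.25, 1.0, 4.0])
  # Expected (approximately) after evaluation:
  #   order 1 -> [0.5, 1.0, 2.0]
  #   order 2 -> [-0.173, 0.0, 2.773]
  #   order 3 -> [0.125, 1.0, 8.0]
  return _phi(r, 1), _phi(r, 2), _phi(r, 3)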
def interpolate_spline(train_points,
train_values,
query_points,
order,
regularization_weight=0.0,
name='interpolate_spline'):
r"""Interpolate signal using polyharmonic interpolation.
The interpolant has the form
$$f(x) = \sum_{i = 1}^n w_i \phi(||x - c_i||) + v^T x + b.$$
This is a sum of two terms: (1) a weighted sum of radial basis function (RBF)
terms, with the centers \\(c_1, ... c_n\\), and (2) a linear term with a bias.
The \\(c_i\\) vectors are 'training' points. In the code, b is absorbed into v
by appending 1 as a final dimension to x. The coefficients w and v are
estimated such that the interpolant exactly fits the value of the function at
the \\(c_i\\) points, the vector w is orthogonal to each \\(c_i\\), and the
vector w sums to 0. With these constraints, the coefficients can be obtained
by solving a linear system.
\\(\phi\\) is an RBF, parametrized by an interpolation
order. Using order=2 produces the well-known thin-plate spline.
We also provide the option to perform regularized interpolation. Here, the
interpolant is selected to trade off between the squared loss on the training
data and a certain measure of its curvature
([details](https://en.wikipedia.org/wiki/Polyharmonic_spline)).
Using a regularization weight greater than zero has the effect that the
interpolant will no longer exactly fit the training data. However, it may be
less vulnerable to overfitting, particularly for high-order interpolation.
Note the interpolation procedure is differentiable with respect to all inputs
besides the order parameter.
We support dynamically-shaped inputs, where batch_size, n, and m are None
at graph construction time. However, d and k must be known.
Args:
train_points: `[batch_size, n, d]` float `Tensor` of n d-dimensional
locations. These do not need to be regularly-spaced.
    train_values: `[batch_size, n, k]` float `Tensor` of n k-dimensional values
evaluated at train_points.
query_points: `[batch_size, m, d]` `Tensor` of m d-dimensional locations
where we will output the interpolant's values.
order: order of the interpolation. Common values are 1 for
\\(\phi(r) = r\\), 2 for \\(\phi(r) = r^2 * log(r)\\) (thin-plate spline),
or 3 for \\(\phi(r) = r^3\\).
regularization_weight: weight placed on the regularization term.
This will depend substantially on the problem, and it should always be
tuned. For many problems, it is reasonable to use no regularization.
If using a non-zero value, we recommend a small value like 0.001.
name: name prefix for ops created by this function
Returns:
`[b, m, k]` float `Tensor` of query values. We use train_points and
train_values to perform polyharmonic interpolation. The query values are
the values of the interpolant evaluated at the locations specified in
query_points.
"""
with ops.name_scope(name):
train_points = ops.convert_to_tensor(train_points)
train_values = ops.convert_to_tensor(train_values)
query_points = ops.convert_to_tensor(query_points)
# First, fit the spline to the observed data.
with ops.name_scope('solve'):
w, v = _solve_interpolation(train_points, train_values, order,
regularization_weight)
# Then, evaluate the spline at the query locations.
with ops.name_scope('predict'):
query_values = _apply_interpolation(query_points, train_points, w, v,
order)
return query_values
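# Illustrative usage sketch (not part of the original module): fit an order-2
# spline to three samples of y = x**2 in one dimension and query it at two
# intermediate locations. The helper name is hypothetical; the returned
# [1, 2, 1] tensor must be evaluated in a TF1-style session.
def _example_interpolate_spline():
  train_points = ops.convert_to_tensor([[[0.0], [1.0], [2.0]]])  # [1, 3, 1]
  train_values = ops.convert_to_tensor([[[0.0], [1.0], [4.0]]])  # [1, 3, 1]
  query_points = ops.convert_to_tensor([[[0.5], [1.5]]])         # [1, 2, 1]
  return interpolate_spline(train_points, train_values, query_points, order=2)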
|
tensorflow-master
|
tensorflow/contrib/image/python/ops/interpolate_spline.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python layer for image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.contrib.image.ops import gen_image_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import resource_loader
_image_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_image_ops.so"))
_IMAGE_DTYPES = set(
[dtypes.uint8, dtypes.int32, dtypes.int64,
dtypes.float16, dtypes.float32, dtypes.float64])
ops.RegisterShape("ImageConnectedComponents")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ImageProjectiveTransform")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ImageProjectiveTransformV2")(common_shapes.call_cpp_shape_fn)
# TODO(ringwalt): Support a "reshape" (name used by SciPy) or "expand" (name
# used by PIL, maybe more readable) mode, which determines the correct
# output_shape and translation for the transform.
def rotate(images, angles, interpolation="NEAREST", name=None):
"""Rotate image(s) counterclockwise by the passed angle(s) in radians.
Args:
images: A tensor of shape (num_images, num_rows, num_columns, num_channels)
(NHWC), (num_rows, num_columns, num_channels) (HWC), or
(num_rows, num_columns) (HW). The rank must be statically known (the
      shape is not `TensorShape(None)`).
angles: A scalar angle to rotate all images by, or (if images has rank 4)
a vector of length num_images, with an angle for each image in the batch.
interpolation: Interpolation mode. Supported values: "NEAREST", "BILINEAR".
name: The name of the op.
Returns:
Image(s) with the same type and shape as `images`, rotated by the given
angle(s). Empty space due to the rotation will be filled with zeros.
Raises:
TypeError: If `image` is an invalid type.
"""
with ops.name_scope(name, "rotate"):
image_or_images = ops.convert_to_tensor(images)
if image_or_images.dtype.base_dtype not in _IMAGE_DTYPES:
raise TypeError("Invalid dtype %s." % image_or_images.dtype)
elif image_or_images.get_shape().ndims is None:
raise TypeError("image_or_images rank must be statically known")
elif len(image_or_images.get_shape()) == 2:
images = image_or_images[None, :, :, None]
elif len(image_or_images.get_shape()) == 3:
images = image_or_images[None, :, :, :]
elif len(image_or_images.get_shape()) == 4:
images = image_or_images
else:
raise TypeError("Images should have rank between 2 and 4.")
image_height = math_ops.cast(array_ops.shape(images)[1],
dtypes.float32)[None]
image_width = math_ops.cast(array_ops.shape(images)[2],
dtypes.float32)[None]
output = transform(
images,
angles_to_projective_transforms(angles, image_height, image_width),
interpolation=interpolation)
if image_or_images.get_shape().ndims is None:
raise TypeError("image_or_images rank must be statically known")
elif len(image_or_images.get_shape()) == 2:
return output[0, :, :, 0]
elif len(image_or_images.get_shape()) == 3:
return output[0, :, :, :]
else:
return output
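# Illustrative usage sketch (not part of the original module): rotate a single
# HW image by roughly 45 degrees. The helper name is hypothetical; running it
# needs the compiled _image_ops.so kernels and a TF1-style session.
def _example_rotate():
  image = constant_op.constant(
      [[1, 0, 0],
       [0, 1, 0],
       [0, 0, 1]], dtype=dtypes.float32)
  # A rank-2 (HW) input comes back as rank 2; angles are in radians.
  return rotate(image, 0.78539816, interpolation="NEAREST")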
def translate(images, translations, interpolation="NEAREST", name=None):
"""Translate image(s) by the passed vectors(s).
Args:
images: A tensor of shape (num_images, num_rows, num_columns, num_channels)
(NHWC), (num_rows, num_columns, num_channels) (HWC), or
(num_rows, num_columns) (HW). The rank must be statically known (the
      shape is not `TensorShape(None)`).
    translations: A vector representing [dx, dy] or (if images has rank 4)
      a matrix of shape (num_images, 2), with a [dx, dy] vector for each image
      in the batch.
interpolation: Interpolation mode. Supported values: "NEAREST", "BILINEAR".
name: The name of the op.
Returns:
Image(s) with the same type and shape as `images`, translated by the given
vector(s). Empty space due to the translation will be filled with zeros.
Raises:
TypeError: If `image` is an invalid type.
"""
with ops.name_scope(name, "translate"):
return transform(
images,
translations_to_projective_transforms(translations),
interpolation=interpolation)
def angles_to_projective_transforms(angles,
image_height,
image_width,
name=None):
"""Returns projective transform(s) for the given angle(s).
Args:
angles: A scalar angle to rotate all images by, or (for batches of images)
a vector with an angle to rotate each image in the batch. The rank must
      be statically known (the shape is not `TensorShape(None)`).
    image_height: Height of the image(s) to be transformed.
    image_width: Width of the image(s) to be transformed.
    name: The name of the op.
Returns:
A tensor of shape (num_images, 8). Projective transforms which can be given
to `tf.contrib.image.transform`.
"""
with ops.name_scope(name, "angles_to_projective_transforms"):
angle_or_angles = ops.convert_to_tensor(
angles, name="angles", dtype=dtypes.float32)
if len(angle_or_angles.get_shape()) == 0: # pylint: disable=g-explicit-length-test
angles = angle_or_angles[None]
elif len(angle_or_angles.get_shape()) == 1:
angles = angle_or_angles
else:
raise TypeError("Angles should have rank 0 or 1.")
x_offset = ((image_width - 1) - (math_ops.cos(angles) *
(image_width - 1) - math_ops.sin(angles) *
(image_height - 1))) / 2.0
y_offset = ((image_height - 1) - (math_ops.sin(angles) *
(image_width - 1) + math_ops.cos(angles) *
(image_height - 1))) / 2.0
num_angles = array_ops.shape(angles)[0]
return array_ops.concat(
values=[
math_ops.cos(angles)[:, None],
-math_ops.sin(angles)[:, None],
x_offset[:, None],
math_ops.sin(angles)[:, None],
math_ops.cos(angles)[:, None],
y_offset[:, None],
array_ops.zeros((num_angles, 2), dtypes.float32),
],
axis=1)
def translations_to_projective_transforms(translations, name=None):
"""Returns projective transform(s) for the given translation(s).
Args:
translations: A 2-element list representing [dx, dy] or a matrix of
2-element lists representing [dx, dy] to translate for each image
(for a batch of images). The rank must be statically known (the shape
      is not `TensorShape(None)`).
name: The name of the op.
Returns:
    A tensor of shape (num_images, 8) of projective transforms which can be given
to `tf.contrib.image.transform`.
"""
with ops.name_scope(name, "translations_to_projective_transforms"):
translation_or_translations = ops.convert_to_tensor(
translations, name="translations", dtype=dtypes.float32)
if translation_or_translations.get_shape().ndims is None:
raise TypeError(
"translation_or_translations rank must be statically known")
elif len(translation_or_translations.get_shape()) == 1:
translations = translation_or_translations[None]
elif len(translation_or_translations.get_shape()) == 2:
translations = translation_or_translations
else:
raise TypeError("Translations should have rank 1 or 2.")
num_translations = array_ops.shape(translations)[0]
# The translation matrix looks like:
# [[1 0 -dx]
# [0 1 -dy]
# [0 0 1]]
# where the last entry is implicit.
# Translation matrices are always float32.
return array_ops.concat(
values=[
array_ops.ones((num_translations, 1), dtypes.float32),
array_ops.zeros((num_translations, 1), dtypes.float32),
-translations[:, 0, None],
array_ops.zeros((num_translations, 1), dtypes.float32),
array_ops.ones((num_translations, 1), dtypes.float32),
-translations[:, 1, None],
array_ops.zeros((num_translations, 2), dtypes.float32),
],
axis=1)
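# Illustrative sketch (not part of the original module): for a single
# translation [dx, dy] = [2, 3] the flat transform produced above is
# [1, 0, -2, 0, 1, -3, 0, 0], i.e. the inverse map from output coordinates to
# input coordinates. The helper name is hypothetical.
def _example_translation_transform():
  return translations_to_projective_transforms([2.0, 3.0])  # shape [1, 8]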
def transform(images,
transforms,
interpolation="NEAREST",
output_shape=None,
name=None):
"""Applies the given transform(s) to the image(s).
Args:
images: A tensor of shape (num_images, num_rows, num_columns, num_channels)
(NHWC), (num_rows, num_columns, num_channels) (HWC), or
(num_rows, num_columns) (HW). The rank must be statically known (the
      shape is not `TensorShape(None)`).
transforms: Projective transform matrix/matrices. A vector of length 8 or
tensor of size N x 8. If one row of transforms is
[a0, a1, a2, b0, b1, b2, c0, c1], then it maps the *output* point
`(x, y)` to a transformed *input* point
`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
where `k = c0 x + c1 y + 1`. The transforms are *inverted* compared to
the transform mapping input points to output points. Note that gradients
are not backpropagated into transformation parameters.
interpolation: Interpolation mode. Supported values: "NEAREST", "BILINEAR".
    output_shape: Output dimension after the transform, [height, width].
If None, output is the same size as input image.
name: The name of the op.
Returns:
Image(s) with the same type and shape as `images`, with the given
transform(s) applied. Transformed coordinates outside of the input image
will be filled with zeros.
Raises:
TypeError: If `image` is an invalid type.
ValueError: If output shape is not 1-D int32 Tensor.
"""
with ops.name_scope(name, "transform"):
image_or_images = ops.convert_to_tensor(images, name="images")
transform_or_transforms = ops.convert_to_tensor(
transforms, name="transforms", dtype=dtypes.float32)
if image_or_images.dtype.base_dtype not in _IMAGE_DTYPES:
raise TypeError("Invalid dtype %s." % image_or_images.dtype)
elif image_or_images.get_shape().ndims is None:
raise TypeError("image_or_images rank must be statically known")
elif len(image_or_images.get_shape()) == 2:
images = image_or_images[None, :, :, None]
elif len(image_or_images.get_shape()) == 3:
images = image_or_images[None, :, :, :]
elif len(image_or_images.get_shape()) == 4:
images = image_or_images
else:
raise TypeError("Images should have rank between 2 and 4.")
if output_shape is None:
output_shape = array_ops.shape(images)[1:3]
if not context.executing_eagerly():
output_shape_value = tensor_util.constant_value(output_shape)
if output_shape_value is not None:
output_shape = output_shape_value
output_shape = ops.convert_to_tensor(
output_shape, dtypes.int32, name="output_shape")
if not output_shape.get_shape().is_compatible_with([2]):
raise ValueError("output_shape must be a 1-D Tensor of 2 elements: "
"new_height, new_width")
if len(transform_or_transforms.get_shape()) == 1:
transforms = transform_or_transforms[None]
elif transform_or_transforms.get_shape().ndims is None:
raise TypeError(
"transform_or_transforms rank must be statically known")
elif len(transform_or_transforms.get_shape()) == 2:
transforms = transform_or_transforms
else:
raise TypeError("Transforms should have rank 1 or 2.")
output = gen_image_ops.image_projective_transform_v2(
images,
output_shape=output_shape,
transforms=transforms,
interpolation=interpolation.upper())
if len(image_or_images.get_shape()) == 2:
return output[0, :, :, 0]
elif len(image_or_images.get_shape()) == 3:
return output[0, :, :, :]
else:
return output
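# Illustrative usage sketch (not part of the original module): apply an
# explicit flat transform to a small HW image. With a2 = -1 the output pixel
# (x, y) samples the input at (x - 1, y), so the content shifts right by one
# pixel and the vacated column is filled with zeros. The helper name is
# hypothetical; running it needs the compiled kernels and a TF1-style session.
def _example_transform_shift_right():
  image = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float32)
  shift_right = [1.0, 0.0, -1.0, 0.0, 1.0, 0.0, 0.0, 0.0]
  return transform(image, shift_right, interpolation="NEAREST")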
def compose_transforms(*transforms):
"""Composes the transforms tensors.
Args:
*transforms: List of image projective transforms to be composed. Each
transform is length 8 (single transform) or shape (N, 8) (batched
transforms). The shapes of all inputs must be equal, and at least one
input must be given.
Returns:
A composed transform tensor. When passed to `tf.contrib.image.transform`,
equivalent to applying each of the given transforms to the image in
order.
"""
assert transforms, "transforms cannot be empty"
with ops.name_scope("compose_transforms"):
composed = flat_transforms_to_matrices(transforms[0])
for tr in transforms[1:]:
# Multiply batches of matrices.
composed = math_ops.matmul(composed, flat_transforms_to_matrices(tr))
return matrices_to_flat_transforms(composed)
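# Illustrative sketch (not part of the original module): composing two
# translations behaves like one combined translation, because the underlying
# 3x3 matrices are simply multiplied. The helper name is hypothetical; after
# evaluation the result should equal
# translations_to_projective_transforms([1.0, 2.0]).
def _example_compose_translations():
  t1 = translations_to_projective_transforms([1.0, 0.0])
  t2 = translations_to_projective_transforms([0.0, 2.0])
  return compose_transforms(t1, t2)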
def flat_transforms_to_matrices(transforms):
"""Converts `tf.contrib.image` projective transforms to affine matrices.
Note that the output matrices map output coordinates to input coordinates. For
the forward transformation matrix, call `tf.linalg.inv` on the result.
Args:
transforms: Vector of length 8, or batches of transforms with shape
`(N, 8)`.
Returns:
3D tensor of matrices with shape `(N, 3, 3)`. The output matrices map the
*output coordinates* (in homogeneous coordinates) of each transform to the
corresponding *input coordinates*.
Raises:
ValueError: If `transforms` have an invalid shape.
"""
with ops.name_scope("flat_transforms_to_matrices"):
transforms = ops.convert_to_tensor(transforms, name="transforms")
if transforms.shape.ndims not in (1, 2):
raise ValueError("Transforms should be 1D or 2D, got: %s" % transforms)
# Make the transform(s) 2D in case the input is a single transform.
transforms = array_ops.reshape(transforms, constant_op.constant([-1, 8]))
num_transforms = array_ops.shape(transforms)[0]
# Add a column of ones for the implicit last entry in the matrix.
return array_ops.reshape(
array_ops.concat(
[transforms, array_ops.ones([num_transforms, 1])], axis=1),
constant_op.constant([-1, 3, 3]))
def matrices_to_flat_transforms(transform_matrices):
"""Converts affine matrices to `tf.contrib.image` projective transforms.
Note that we expect matrices that map output coordinates to input coordinates.
To convert forward transformation matrices, call `tf.linalg.inv` on the
matrices and use the result here.
Args:
transform_matrices: One or more affine transformation matrices, for the
reverse transformation in homogeneous coordinates. Shape `(3, 3)` or
`(N, 3, 3)`.
Returns:
2D tensor of flat transforms with shape `(N, 8)`, which may be passed into
`tf.contrib.image.transform`.
Raises:
ValueError: If `transform_matrices` have an invalid shape.
"""
with ops.name_scope("matrices_to_flat_transforms"):
transform_matrices = ops.convert_to_tensor(
transform_matrices, name="transform_matrices")
if transform_matrices.shape.ndims not in (2, 3):
raise ValueError(
"Matrices should be 2D or 3D, got: %s" % transform_matrices)
# Flatten each matrix.
transforms = array_ops.reshape(transform_matrices,
constant_op.constant([-1, 9]))
# Divide each matrix by the last entry (normally 1).
transforms /= transforms[:, 8:9]
return transforms[:, :8]
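# Illustrative sketch (not part of the original module): a flat transform
# round-trips through the two converters above, since the implicit last matrix
# entry is 1. The helper name is hypothetical.
def _example_flat_matrix_round_trip():
  flat = constant_op.constant([1.0, 0.0, -2.0, 0.0, 1.0, -3.0, 0.0, 0.0])
  matrices = flat_transforms_to_matrices(flat)       # [1, 3, 3]
  recovered = matrices_to_flat_transforms(matrices)  # [1, 8], equals `flat`
  return flat, recovered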
@ops.RegisterGradient("ImageProjectiveTransformV2")
def _image_projective_transform_grad(op, grad):
"""Computes the gradient for ImageProjectiveTransform."""
images = op.inputs[0]
transforms = op.inputs[1]
interpolation = op.get_attr("interpolation")
image_or_images = ops.convert_to_tensor(images, name="images")
transform_or_transforms = ops.convert_to_tensor(
transforms, name="transforms", dtype=dtypes.float32)
if image_or_images.dtype.base_dtype not in _IMAGE_DTYPES:
raise TypeError("Invalid dtype %s." % image_or_images.dtype)
if len(transform_or_transforms.get_shape()) == 1:
transforms = transform_or_transforms[None]
elif len(transform_or_transforms.get_shape()) == 2:
transforms = transform_or_transforms
else:
raise TypeError("Transforms should have rank 1 or 2.")
# Invert transformations
transforms = flat_transforms_to_matrices(transforms=transforms)
inverse = linalg_ops.matrix_inverse(transforms)
transforms = matrices_to_flat_transforms(inverse)
output = gen_image_ops.image_projective_transform_v2(
images=grad,
transforms=transforms,
output_shape=array_ops.shape(image_or_images)[1:3],
interpolation=interpolation)
return [output, None, None]
def bipartite_match(distance_mat,
num_valid_rows,
top_k=-1,
name="bipartite_match"):
"""Find bipartite matching based on a given distance matrix.
A greedy bi-partite matching algorithm is used to obtain the matching with
the (greedy) minimum distance.
Args:
distance_mat: A 2-D float tensor of shape `[num_rows, num_columns]`. It is a
pair-wise distance matrix between the entities represented by each row and
      each column; it need not be symmetric. The smaller the distance is, the
more similar the pairs are. The bipartite matching is to minimize the
distances.
num_valid_rows: A scalar or a 1-D tensor with one element describing the
number of valid rows of distance_mat to consider for the bipartite
matching. If set to be negative, then all rows from `distance_mat` are
used.
top_k: A scalar that specifies the number of top-k matches to retrieve.
      If set to be negative, it is set to the maximum possible number of
      matches from `distance_mat`.
name: The name of the op.
Returns:
row_to_col_match_indices: A vector of length num_rows, which is the number
      of rows of the input `distance_mat`. If `row_to_col_match_indices[i]`
is not -1, row i is matched to column `row_to_col_match_indices[i]`.
col_to_row_match_indices: A vector of length num_columns, which is the
number of columns of the input distance matrix.
If `col_to_row_match_indices[j]` is not -1, column j is matched to row
`col_to_row_match_indices[j]`.
"""
result = gen_image_ops.bipartite_match(
distance_mat, num_valid_rows, top_k, name=name)
return result
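# Illustrative usage sketch (not part of the original module): greedily match
# two rows to three columns by smallest distance. The expected greedy result
# is row 0 -> column 1 (distance 0.1) and row 1 -> column 0 (distance 0.2).
# The helper name is hypothetical; running it needs the compiled kernels.
def _example_bipartite_match():
  distance_mat = constant_op.constant(
      [[0.5, 0.1, 0.9],
       [0.2, 0.8, 0.3]], dtype=dtypes.float32)
  # num_valid_rows < 0 means all rows participate in the matching.
  return bipartite_match(distance_mat, num_valid_rows=-1)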
def connected_components(images):
"""Labels the connected components in a batch of images.
A component is a set of pixels in a single input image, which are all adjacent
  and all have the same non-zero value. Components are computed using a squared
connectivity of one (all True entries are joined with their neighbors above,
below, left, and right). Components across all images have consecutive ids 1
through n. Components are labeled according to the first pixel of the
component appearing in row-major order (lexicographic order by
image_index_in_batch, row, col). Zero entries all have an output id of 0.
  This op is equivalent to `scipy.ndimage.measurements.label` on a 2D array
with the default structuring element (which is the connectivity used here).
Args:
images: A 2D (H, W) or 3D (N, H, W) Tensor of boolean image(s).
Returns:
Components with the same shape as `images`. False entries in `images` have
value 0, and all True entries map to a component id > 0.
Raises:
TypeError: if `images` is not 2D or 3D.
"""
with ops.name_scope("connected_components"):
image_or_images = ops.convert_to_tensor(images, name="images")
if len(image_or_images.get_shape()) == 2:
images = image_or_images[None, :, :]
elif len(image_or_images.get_shape()) == 3:
images = image_or_images
else:
raise TypeError(
"images should have rank 2 (HW) or 3 (NHW). Static shape is %s" %
image_or_images.get_shape())
components = gen_image_ops.image_connected_components(images)
# TODO(ringwalt): Component id renaming should be done in the op, to avoid
# constructing multiple additional large tensors.
components_flat = array_ops.reshape(components, [-1])
unique_ids, id_index = array_ops.unique(components_flat)
id_is_zero = array_ops.where(math_ops.equal(unique_ids, 0))[:, 0]
# Map each nonzero id to consecutive values.
nonzero_consecutive_ids = math_ops.range(
array_ops.shape(unique_ids)[0] - array_ops.shape(id_is_zero)[0]) + 1
def no_zero():
# No need to insert a zero into the ids.
return nonzero_consecutive_ids
def has_zero():
# Insert a zero in the consecutive ids where zero appears in unique_ids.
# id_is_zero has length 1.
zero_id_ind = math_ops.cast(id_is_zero[0], dtypes.int32)
ids_before = nonzero_consecutive_ids[:zero_id_ind]
ids_after = nonzero_consecutive_ids[zero_id_ind:]
return array_ops.concat([ids_before, [0], ids_after], axis=0)
new_ids = control_flow_ops.cond(
math_ops.equal(array_ops.shape(id_is_zero)[0], 0), no_zero, has_zero)
components = array_ops.reshape(
array_ops.gather(new_ids, id_index), array_ops.shape(components))
if len(image_or_images.get_shape()) == 2:
return components[0, :, :]
else:
return components
ops.NotDifferentiable("BipartiteMatch")
ops.NotDifferentiable("ImageConnectedComponents")
|
tensorflow-master
|
tensorflow/contrib/image/python/ops/image_ops.py
|