# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.seq2seq.python.ops.attention_wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import numpy as np
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.contrib.seq2seq.python.ops import attention_wrapper as wrapper
from tensorflow.contrib.seq2seq.python.ops import helper as helper_py
from tensorflow.contrib.seq2seq.python.ops import basic_decoder
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core as layers_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variables
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import test
from tensorflow.python.util import nest
# for testing
AttentionWrapperState = wrapper.AttentionWrapperState # pylint: disable=invalid-name
LSTMStateTuple = rnn_cell.LSTMStateTuple # pylint: disable=invalid-name
BasicDecoderOutput = basic_decoder.BasicDecoderOutput # pylint: disable=invalid-name
float32 = np.float32
int32 = np.int32
array = np.array
dtype = np.dtype
class ResultSummary(
collections.namedtuple('ResultSummary', ('shape', 'dtype', 'mean'))):
pass
def get_result_summary(x):
if isinstance(x, np.ndarray):
return ResultSummary(x.shape, x.dtype, x.mean())
return x
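# Example (illustrative): get_result_summary(np.zeros((2, 3), np.float32))
# returns ResultSummary(shape=(2, 3), dtype=dtype('float32'), mean=0.0);
# non-array values (e.g. the integer `time` field) pass through unchanged.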
@test_util.run_v1_only('contrib code not supported in TF2.0')
class AttentionWrapperTest(test.TestCase):
def assertAllCloseOrEqual(self, x, y, **kwargs):
if isinstance(x, np.ndarray) or isinstance(x, float):
return super(AttentionWrapperTest, self).assertAllClose(
x, y, atol=1e-3, **kwargs)
else:
self.assertAllEqual(x, y, **kwargs)
def testAttentionWrapperState(self):
num_fields = len(wrapper.AttentionWrapperState._fields) # pylint: disable=protected-access
state = wrapper.AttentionWrapperState(*([None] * num_fields))
new_state = state.clone(time=1)
self.assertEqual(state.time, None)
self.assertEqual(new_state.time, 1)
  def testAttentionWrapperStateShapePropagation(self):
batch_size = 5
max_time = 5
num_units = 5
memory = random_ops.random_uniform(
[batch_size, max_time, num_units], seed=1)
mechanism = wrapper.LuongAttention(num_units, memory)
cell = wrapper.AttentionWrapper(rnn_cell.LSTMCell(num_units), mechanism)
# Create zero state with static batch size.
static_state = cell.zero_state(batch_size, dtypes.float32)
# Create zero state without static batch size.
state = cell.zero_state(array_ops.shape(memory)[0], dtypes.float32)
state = static_state.clone(
cell_state=state.cell_state, attention=state.attention)
self.assertEqual(state.cell_state.c.shape, static_state.cell_state.c.shape)
self.assertEqual(state.cell_state.h.shape, static_state.cell_state.h.shape)
self.assertEqual(state.attention.shape, static_state.attention.shape)
def _testWithAttention(self,
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=3,
alignment_history=False,
expected_final_alignment_history=None,
attention_layer_size=6,
attention_layer=None,
name=''):
attention_layer_sizes = (
[attention_layer_size] if attention_layer_size is not None else None)
attention_layers = (
[attention_layer] if attention_layer is not None else None)
self._testWithMaybeMultiAttention(
is_multi=False,
create_attention_mechanisms=[create_attention_mechanism],
expected_final_output=expected_final_output,
expected_final_state=expected_final_state,
attention_mechanism_depths=[attention_mechanism_depth],
alignment_history=alignment_history,
expected_final_alignment_history=expected_final_alignment_history,
attention_layer_sizes=attention_layer_sizes,
attention_layers=attention_layers,
name=name)
def _testWithMaybeMultiAttention(self,
is_multi,
create_attention_mechanisms,
expected_final_output,
expected_final_state,
attention_mechanism_depths,
alignment_history=False,
expected_final_alignment_history=None,
attention_layer_sizes=None,
attention_layers=None,
name=''):
# Allow is_multi to be True with a single mechanism to enable test for
# passing in a single mechanism in a list.
assert len(create_attention_mechanisms) == 1 or is_multi
encoder_sequence_length = [3, 2, 3, 1, 1]
decoder_sequence_length = [2, 0, 1, 2, 3]
batch_size = 5
encoder_max_time = 8
decoder_max_time = 4
input_depth = 7
encoder_output_depth = 10
cell_depth = 9
if attention_layer_sizes is not None:
# Compute sum of attention_layer_sizes. Use encoder_output_depth if None.
attention_depth = sum(attention_layer_size or encoder_output_depth
for attention_layer_size in attention_layer_sizes)
elif attention_layers is not None:
# Compute sum of attention_layers output depth.
attention_depth = sum(
attention_layer.compute_output_shape(
[batch_size, cell_depth + encoder_output_depth]).dims[-1].value
for attention_layer in attention_layers)
else:
attention_depth = encoder_output_depth * len(create_attention_mechanisms)
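    # Worked example: with two mechanisms and attention_layer_sizes=[3, 4],
    # the first branch gives attention_depth = 3 + 4 = 7, matching the
    # (5, 3, 7) rnn_output asserted in testMultiAttention below; with no
    # attention layers at all, the else branch gives 10 * 2 = 20, matching
    # testMultiAttentionNoAttentionLayer.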
decoder_inputs = array_ops.placeholder_with_default(
np.random.randn(batch_size, decoder_max_time,
input_depth).astype(np.float32),
shape=(None, None, input_depth))
encoder_outputs = array_ops.placeholder_with_default(
np.random.randn(batch_size, encoder_max_time,
encoder_output_depth).astype(np.float32),
shape=(None, None, encoder_output_depth))
attention_mechanisms = [
creator(num_units=depth,
memory=encoder_outputs,
memory_sequence_length=encoder_sequence_length)
for creator, depth in zip(create_attention_mechanisms,
attention_mechanism_depths)]
with self.session(use_gpu=True) as sess:
with vs.variable_scope(
'root',
initializer=init_ops.random_normal_initializer(stddev=0.01, seed=3)):
attention_layer_size = attention_layer_sizes
attention_layer = attention_layers
if not is_multi:
if attention_layer_size is not None:
attention_layer_size = attention_layer_size[0]
if attention_layer is not None:
attention_layer = attention_layer[0]
cell = rnn_cell.LSTMCell(cell_depth)
cell = wrapper.AttentionWrapper(
cell,
attention_mechanisms if is_multi else attention_mechanisms[0],
attention_layer_size=attention_layer_size,
alignment_history=alignment_history,
attention_layer=attention_layer)
helper = helper_py.TrainingHelper(decoder_inputs,
decoder_sequence_length)
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
initial_state=cell.zero_state(
dtype=dtypes.float32, batch_size=batch_size))
final_outputs, final_state, _ = decoder.dynamic_decode(my_decoder)
self.assertTrue(
isinstance(final_outputs, basic_decoder.BasicDecoderOutput))
self.assertTrue(
isinstance(final_state, wrapper.AttentionWrapperState))
self.assertTrue(
isinstance(final_state.cell_state, rnn_cell.LSTMStateTuple))
self.assertEqual((batch_size, None, attention_depth),
tuple(final_outputs.rnn_output.get_shape().as_list()))
self.assertEqual((batch_size, None),
tuple(final_outputs.sample_id.get_shape().as_list()))
self.assertEqual((batch_size, attention_depth),
tuple(final_state.attention.get_shape().as_list()))
self.assertEqual((batch_size, cell_depth),
tuple(final_state.cell_state.c.get_shape().as_list()))
self.assertEqual((batch_size, cell_depth),
tuple(final_state.cell_state.h.get_shape().as_list()))
if alignment_history:
if is_multi:
state_alignment_history = []
for history_array in final_state.alignment_history:
history = history_array.stack()
self.assertEqual(
(None, batch_size, None),
tuple(history.get_shape().as_list()))
state_alignment_history.append(history)
state_alignment_history = tuple(state_alignment_history)
else:
state_alignment_history = final_state.alignment_history.stack()
self.assertEqual(
(None, batch_size, None),
tuple(state_alignment_history.get_shape().as_list()))
nest.assert_same_structure(
cell.state_size,
cell.zero_state(batch_size, dtypes.float32))
# Remove the history from final_state for purposes of the
# remainder of the tests.
final_state = final_state._replace(alignment_history=()) # pylint: disable=protected-access
else:
state_alignment_history = ()
sess.run(variables.global_variables_initializer())
sess_results = sess.run({
'final_outputs': final_outputs,
'final_state': final_state,
'state_alignment_history': state_alignment_history,
})
final_output_info = nest.map_structure(get_result_summary,
sess_results['final_outputs'])
final_state_info = nest.map_structure(get_result_summary,
sess_results['final_state'])
print(name)
print('Copy/paste:\nexpected_final_output = %s' % str(final_output_info))
print('expected_final_state = %s' % str(final_state_info))
nest.map_structure(self.assertAllCloseOrEqual, expected_final_output,
final_output_info)
nest.map_structure(self.assertAllCloseOrEqual, expected_final_state,
final_state_info)
if alignment_history: # by default, the wrapper emits attention as output
final_alignment_history_info = nest.map_structure(
get_result_summary, sess_results['state_alignment_history'])
print('expected_final_alignment_history = %s' %
str(final_alignment_history_info))
nest.map_structure(
self.assertAllCloseOrEqual,
# outputs are batch major but the stacked TensorArray is time major
expected_final_alignment_history,
final_alignment_history_info)
def testBahdanauNormalizedDType(self):
for dtype in [np.float16, np.float32, np.float64]:
num_units = 128
encoder_outputs = array_ops.placeholder(dtype, shape=[64, None, 256])
encoder_sequence_length = array_ops.placeholder(dtypes.int32, shape=[64])
decoder_inputs = array_ops.placeholder(dtype, shape=[64, None, 128])
decoder_sequence_length = array_ops.placeholder(dtypes.int32, shape=[64])
batch_size = 64
attention_mechanism = wrapper.BahdanauAttention(
num_units=num_units,
memory=encoder_outputs,
memory_sequence_length=encoder_sequence_length,
normalize=True,
dtype=dtype,
)
cell = rnn_cell.LSTMCell(num_units)
cell = wrapper.AttentionWrapper(cell, attention_mechanism)
helper = helper_py.TrainingHelper(decoder_inputs,
decoder_sequence_length)
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
initial_state=cell.zero_state(
dtype=dtype, batch_size=batch_size))
final_outputs, final_state, _ = decoder.dynamic_decode(my_decoder)
self.assertTrue(
isinstance(final_outputs, basic_decoder.BasicDecoderOutput))
self.assertEqual(final_outputs.rnn_output.dtype, dtype)
self.assertTrue(
isinstance(final_state, wrapper.AttentionWrapperState))
self.assertTrue(
isinstance(final_state.cell_state, rnn_cell.LSTMStateTuple))
def testBahdanauNotNormalized(self):
create_attention_mechanism = wrapper.BahdanauAttention
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.0052250605),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.4))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040092287),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0020015112)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-0.0052052638),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=dtype('float32'), mean=0.12500001)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testBahdanauNotNormalized')
def testBahdanauNormalized(self):
create_attention_mechanism = functools.partial(
wrapper.BahdanauAttention, normalize=True)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.00597103),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.4))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040052128),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0019996136)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-0.00595117),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
alignment_history=())
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
name='testBahdanauNormalized')
def testLuongNotNormalized(self):
create_attention_mechanism = wrapper.LuongAttention
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.0052615386),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.4))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.004009536),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0020016613)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-0.0051812846),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
alignment_history=())
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=9,
name='testLuongNotNormalized')
def testLuongScaledDType(self):
# Test case for GitHub issue 18099
for dt in [np.float16, np.float32, np.float64]:
num_units = 128
encoder_outputs = array_ops.placeholder(dt, shape=[64, None, 256])
encoder_sequence_length = array_ops.placeholder(dtypes.int32, shape=[64])
decoder_inputs = array_ops.placeholder(dt, shape=[64, None, 128])
decoder_sequence_length = array_ops.placeholder(dtypes.int32, shape=[64])
batch_size = 64
attention_mechanism = wrapper.LuongAttention(
num_units=num_units,
memory=encoder_outputs,
memory_sequence_length=encoder_sequence_length,
scale=True,
dtype=dt,
)
cell = rnn_cell.LSTMCell(num_units)
cell = wrapper.AttentionWrapper(cell, attention_mechanism)
helper = helper_py.TrainingHelper(decoder_inputs,
decoder_sequence_length)
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
initial_state=cell.zero_state(
dtype=dt, batch_size=batch_size))
final_outputs, final_state, _ = decoder.dynamic_decode(my_decoder)
self.assertTrue(
isinstance(final_outputs, basic_decoder.BasicDecoderOutput))
self.assertEqual(final_outputs.rnn_output.dtype, dt)
self.assertTrue(
isinstance(final_state, wrapper.AttentionWrapperState))
self.assertTrue(
isinstance(final_state.cell_state, rnn_cell.LSTMStateTuple))
def testLuongScaled(self):
create_attention_mechanism = functools.partial(
wrapper.LuongAttention, scale=True)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.0052615386),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.4))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.004009536),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0020016613)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-0.0051812846),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
alignment_history=())
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=9,
name='testLuongScaled')
def testNotUseAttentionLayer(self):
create_attention_mechanism = wrapper.BahdanauAttention
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 10), dtype=dtype('float32'), mean=0.117389656),
sample_id=ResultSummary(
            shape=(5, 3), dtype=dtype('int32'), mean=4.6))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0063607907),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.00323448)),
attention=ResultSummary(
            shape=(5, 10), dtype=dtype('float32'), mean=0.117389656),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
alignment_history=())
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_layer_size=None,
name='testNotUseAttentionLayer')
def test_safe_cumprod(self):
# Create some random test input
test_input = np.random.uniform(size=(10, 20))
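    # safe_cumprod should agree with tf.math.cumprod on well-behaved input;
    # the point of safe_cumprod is that it (presumably) works in log space,
    # roughly exp(cumsum(log(clip(x, tiny, 1)))), so long products of small
    # factors do not underflow. That implementation detail is an assumption
    # here, not something this test asserts directly.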
for axis in [0, 1]:
for exclusive in [True, False]:
with self.cached_session():
# Compute cumprod with regular tf.math.cumprod
cumprod_output = math_ops.cumprod(
test_input, axis=axis, exclusive=exclusive).eval()
# Compute cumprod with safe_cumprod
safe_cumprod_output = wrapper.safe_cumprod(
test_input, axis=axis, exclusive=exclusive).eval()
for x, y in zip(cumprod_output.shape, safe_cumprod_output.shape):
self.assertEqual(x, y)
for x, y in zip(cumprod_output.flatten(),
safe_cumprod_output.flatten()):
# Use assertAlmostEqual for the actual values due to floating point
self.assertAlmostEqual(x, y, places=5)
def test_monotonic_attention(self):
def monotonic_attention_explicit(p_choose_i, previous_attention):
"""Explicitly compute monotonic attention distribution using numpy."""
# Base case for recurrence relation
out = [previous_attention[0]]
# Explicitly follow the recurrence relation
for j in range(1, p_choose_i.shape[0]):
out.append((1 - p_choose_i[j - 1])*out[j - 1] + previous_attention[j])
return p_choose_i*np.array(out)
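    # The loop above implements the monotonic-attention recurrence of
    # Raffel et al. (2017), "Online and Linear-Time Attention by Enforcing
    # Monotonic Alignments": with q[j] = alpha[j] / p_choose_i[j],
    #   q[j] = (1 - p_choose_i[j - 1]) * q[j - 1] + previous_attention[j]
    # and the returned attention is alpha = p_choose_i * q, where
    # previous_attention plays the role of alpha from the previous output step.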
# Generate a random batch of choosing probabilities for seq. len. 20
p_choose_i = np.random.uniform(size=(10, 20)).astype(np.float32)
# Generate random previous attention distributions
previous_attention = np.random.uniform(size=(10, 20)).astype(np.float32)
previous_attention /= previous_attention.sum(axis=1).reshape((-1, 1))
# Create the output to test against
explicit_output = np.array([
monotonic_attention_explicit(p, a)
for p, a in zip(p_choose_i, previous_attention)])
# Compute output with TensorFlow function, for both calculation types
with self.cached_session():
recursive_output = wrapper.monotonic_attention(
p_choose_i, previous_attention, 'recursive').eval()
self.assertEqual(recursive_output.ndim, explicit_output.ndim)
for x, y in zip(recursive_output.shape, explicit_output.shape):
self.assertEqual(x, y)
for x, y in zip(recursive_output.flatten(), explicit_output.flatten()):
# Use assertAlmostEqual for the actual values due to floating point
self.assertAlmostEqual(x, y, places=5)
# Generate new p_choose_i for parallel, which is unstable when p_choose_i[n]
# is close to 1
p_choose_i = np.random.uniform(0, 0.9, size=(10, 20)).astype(np.float32)
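    # The parallel mode (presumably) solves the recurrence in closed form,
    # roughly p * cumprod(1 - p, exclusive) *
    # cumsum(previous_attention / cumprod(1 - p, exclusive)); the division is
    # what becomes unstable as p_choose_i[n] approaches 1, hence the narrower
    # uniform(0, 0.9) range above.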
# Create new output to test against
explicit_output = np.array([
monotonic_attention_explicit(p, a)
for p, a in zip(p_choose_i, previous_attention)])
# Compute output with TensorFlow function, for both calculation types
with self.cached_session():
parallel_output = wrapper.monotonic_attention(
p_choose_i, previous_attention, 'parallel').eval()
self.assertEqual(parallel_output.ndim, explicit_output.ndim)
for x, y in zip(parallel_output.shape, explicit_output.shape):
self.assertEqual(x, y)
for x, y in zip(parallel_output.flatten(), explicit_output.flatten()):
# Use assertAlmostEqual for the actual values due to floating point
self.assertAlmostEqual(x, y, places=5)
# Now, test hard mode, where probabilities must be 0 or 1
p_choose_i = np.random.choice(np.array([0, 1], np.float32), (10, 20))
previous_attention = np.zeros((10, 20), np.float32)
# Randomly choose input sequence indices at each timestep
random_idx = np.random.randint(0, previous_attention.shape[1],
previous_attention.shape[0])
previous_attention[np.arange(previous_attention.shape[0]), random_idx] = 1
# Create the output to test against
explicit_output = np.array([
monotonic_attention_explicit(p, a)
for p, a in zip(p_choose_i, previous_attention)])
# Compute output with TensorFlow function, for both calculation types
with self.cached_session():
hard_output = wrapper.monotonic_attention(
# TensorFlow is unhappy when these are not wrapped as tf.constant
constant_op.constant(p_choose_i),
constant_op.constant(previous_attention),
'hard').eval()
self.assertEqual(hard_output.ndim, explicit_output.ndim)
for x, y in zip(hard_output.shape, explicit_output.shape):
self.assertEqual(x, y)
for x, y in zip(hard_output.flatten(), explicit_output.flatten()):
# Use assertAlmostEqual for the actual values due to floating point
self.assertAlmostEqual(x, y, places=5)
# Now, test recursively computing attention distributions vs. sampling
def sample(p_choose_i):
"""Generate a sequence of emit-ingest decisions from p_choose_i."""
output = np.zeros(p_choose_i.shape)
t_im1 = 0
for i in range(p_choose_i.shape[0]):
for j in range(t_im1, p_choose_i.shape[1]):
if np.random.uniform() <= p_choose_i[i, j]:
output[i, j] = 1
t_im1 = j
break
else:
t_im1 = p_choose_i.shape[1]
return output
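    # The one-hot emit decisions sampled above follow the monotonic attention
    # distribution in expectation, so averaging many samples should converge
    # to the recursively computed distribution; the comparison at the end of
    # this test therefore uses a loose tolerance (2 decimal places),
    # appropriate for a Monte Carlo estimate.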
# Now, the first axis is output timestep and second is input timestep
p_choose_i = np.random.uniform(size=(4, 5)).astype(np.float32)
# Generate the average of a bunch of samples
n_samples = 100000
sampled_output = np.mean(
[sample(p_choose_i) for _ in range(n_samples)], axis=0)
# Create initial previous_attention base case
recursive_output = [np.array([1] + [0]*(p_choose_i.shape[1] - 1),
np.float32)]
# Compute output with TensorFlow function, for both calculation types
with self.cached_session():
for j in range(p_choose_i.shape[0]):
# Compute attention distribution for this output time step
recursive_output.append(wrapper.monotonic_attention(
# newaxis is for adding the expected batch dimension
p_choose_i[j][np.newaxis],
recursive_output[-1][np.newaxis], 'recursive').eval()[0])
    # Stack the per-step distributions together and drop the base case.
recursive_output = np.array(recursive_output[1:])
self.assertEqual(recursive_output.ndim, sampled_output.ndim)
for x, y in zip(recursive_output.shape, sampled_output.shape):
self.assertEqual(x, y)
for x, y in zip(recursive_output.flatten(), sampled_output.flatten()):
# Use a very forgiving threshold since we are sampling
self.assertAlmostEqual(x, y, places=2)
def testBahdanauMonotonicNotNormalized(self):
create_attention_mechanism = functools.partial(
wrapper.BahdanauMonotonicAttention, sigmoid_noise=1.0,
sigmoid_noise_seed=3)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.002122893),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.7333333333333334))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040002423),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0019968653)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-5.9313523e-05),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.032228071),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.032228071),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=dtype('float32'), mean=0.050430927)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testBahdanauMonotonicNotNormalized')
def testBahdanauMonotonicNormalized(self):
create_attention_mechanism = functools.partial(
wrapper.BahdanauMonotonicAttention, normalize=True,
sigmoid_noise=1.0, sigmoid_noise_seed=3)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.0025896581),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.73333333))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040013152),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0019973689)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-0.00069823361),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.029914695),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.029914695),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=dtype('float32'), mean=0.0465225502849)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testBahdanauMonotonicNormalized')
def testBahdanauMonotonicHard(self):
# Run attention mechanism with mode='hard', make sure probabilities are hard
b, t, u, d = 10, 20, 30, 40
with self.session(use_gpu=True) as sess:
a = wrapper.BahdanauMonotonicAttention(
d,
random_ops.random_normal((b, t, u)),
mode='hard')
# Just feed previous attention as [1, 0, 0, ...]
attn, unused_state = a(
random_ops.random_normal((b, d)), array_ops.one_hot([0]*b, t))
sess.run(variables.global_variables_initializer())
attn_out = attn.eval()
# All values should be 0 or 1
self.assertTrue(np.all(np.logical_or(attn_out == 0, attn_out == 1)))
# Sum of distributions should be 0 or 1 (0 when all p_choose_i are 0)
self.assertTrue(np.all(np.logical_or(attn_out.sum(axis=1) == 1,
attn_out.sum(axis=1) == 0)))
def testLuongMonotonicNotNormalized(self):
create_attention_mechanism = functools.partial(
wrapper.LuongMonotonicAttention, sigmoid_noise=1.0,
sigmoid_noise_seed=3)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.0021257224),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.7333333333333334))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040003359),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.001996913)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-5.2024145e-05),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.032198936),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.032198936),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=dtype('float32'), mean=0.050387777)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=9,
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testLuongMonotonicNotNormalized')
def testLuongMonotonicScaled(self):
create_attention_mechanism = functools.partial(
wrapper.LuongMonotonicAttention, scale=True, sigmoid_noise=1.0,
sigmoid_noise_seed=3)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.0021257224),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.7333333333333334))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040003359),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.001996913)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-5.2024145e-05),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.032198936),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.032198936),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=dtype('float32'), mean=0.050387777)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=9,
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testLuongMonotonicScaled')
def testMultiAttention(self):
create_attention_mechanisms = (
wrapper.BahdanauAttention, wrapper.LuongAttention)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 7), dtype=dtype('float32'), mean=0.0011709079),
sample_id=ResultSummary(
            shape=(5, 3), dtype=dtype('int32'), mean=3.2))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0038725811),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0019329828)),
attention=ResultSummary(
shape=(5, 7), dtype=dtype('float32'), mean=0.001174294),
time=3,
alignments=(
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125)),
attention_state=(
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125)),
alignment_history=())
expected_final_alignment_history = (
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125))
self._testWithMaybeMultiAttention(
True,
create_attention_mechanisms,
expected_final_output,
expected_final_state,
attention_mechanism_depths=[9, 9],
attention_layer_sizes=[3, 4],
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testMultiAttention')
def testMultiAttentionWithLayerInstances(self):
create_attention_mechanisms = (
wrapper.BahdanauAttention, wrapper.LuongAttention)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 7), dtype=dtype('float32'), mean=0.0011709079),
sample_id=ResultSummary(
            shape=(5, 3), dtype=dtype('int32'), mean=3.2))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0038725811),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0019329828)),
attention=ResultSummary(
shape=(5, 7), dtype=dtype('float32'), mean=0.001174294),
time=3,
alignments=(
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125)),
attention_state=(
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125)),
alignment_history=())
expected_final_alignment_history = (
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125))
self._testWithMaybeMultiAttention(
True,
create_attention_mechanisms,
expected_final_output,
expected_final_state,
attention_mechanism_depths=[9, 9],
attention_layers=[layers_core.Dense(3, use_bias=False),
layers_core.Dense(4, use_bias=False)],
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
        name='testMultiAttentionWithLayerInstances')
def testLuongMonotonicHard(self):
# Run attention mechanism with mode='hard', make sure probabilities are hard
b, t, u, d = 10, 20, 30, 40
with self.session(use_gpu=True) as sess:
a = wrapper.LuongMonotonicAttention(
d,
random_ops.random_normal((b, t, u)),
mode='hard')
# Just feed previous attention as [1, 0, 0, ...]
attn, unused_state = a(
random_ops.random_normal((b, d)), array_ops.one_hot([0]*b, t))
sess.run(variables.global_variables_initializer())
attn_out = attn.eval()
# All values should be 0 or 1
self.assertTrue(np.all(np.logical_or(attn_out == 0, attn_out == 1)))
# Sum of distributions should be 0 or 1 (0 when all p_choose_i are 0)
self.assertTrue(np.all(np.logical_or(attn_out.sum(axis=1) == 1,
attn_out.sum(axis=1) == 0)))
def testMultiAttentionNoAttentionLayer(self):
create_attention_mechanisms = (
wrapper.BahdanauAttention, wrapper.LuongAttention)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 20), dtype=dtype('float32'), mean=0.115853324533),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=8.6))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.003545674),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0018327223)),
attention=ResultSummary(
shape=(5, 20), dtype=dtype('float32'), mean=0.11462739855),
time=3,
alignments=(ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125)),
alignment_history=(),
attention_state=(ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125)))
expected_final_alignment_history = (
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125))
self._testWithMaybeMultiAttention(
is_multi=True,
create_attention_mechanisms=create_attention_mechanisms,
expected_final_output=expected_final_output,
expected_final_state=expected_final_state,
attention_mechanism_depths=[9, 9],
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
        name='testMultiAttentionNoAttentionLayer')
def testSingleAttentionAsList(self):
create_attention_mechanisms = [wrapper.BahdanauAttention]
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 3), dtype=dtype('float32'), mean=-0.0098485695),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.8))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040023471),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0019979973)),
attention=ResultSummary(
shape=(5, 3), dtype=dtype('float32'), mean=-0.0098808752),
time=3,
alignments=(
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125),),
attention_state=(
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125),),
alignment_history=())
expected_final_alignment_history = (
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125),)
self._testWithMaybeMultiAttention(
is_multi=True, # pass the AttentionMechanism wrapped in a list
create_attention_mechanisms=create_attention_mechanisms,
expected_final_output=expected_final_output,
expected_final_state=expected_final_state,
attention_mechanism_depths=[9],
attention_layer_sizes=[3],
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
        name='testSingleAttentionAsList')
def testCustomizedAttention(self):
batch_size = 2
max_time = 3
num_units = 2
memory = constant_op.constant([[[1., 1.], [2., 2.], [3., 3.]],
[[4., 4.], [5., 5.], [6., 6.]]])
memory_sequence_length = constant_op.constant([3, 2])
attention_mechanism = wrapper.BahdanauAttention(num_units, memory,
memory_sequence_length)
# Sets all returned values to be all ones.
def _customized_attention(unused_attention_mechanism, unused_cell_output,
unused_attention_state, unused_attention_layer):
"""Customized attention.
Returns:
attention: `Tensor` of shape [batch_size, num_units], attention output.
alignments: `Tensor` of shape [batch_size, max_time], sigma value for
each input memory (prob. function of input keys).
next_attention_state: A `Tensor` representing the next state for the
attention.
"""
attention = array_ops.ones([batch_size, num_units])
alignments = array_ops.ones([batch_size, max_time])
next_attention_state = alignments
return attention, alignments, next_attention_state
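    # Judging from the parameter names above, the wrapper calls
    # attention_fn(attention_mechanism, cell_output, attention_state,
    # attention_layer) once per decoder step, so returning constants makes
    # every step's attention, alignments, and attention_state all ones, which
    # the assertions at the end of this test rely on.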
attention_cell = wrapper.AttentionWrapper(
rnn_cell.LSTMCell(2),
attention_mechanism,
attention_layer_size=None, # don't use attention layer.
output_attention=False,
alignment_history=(),
attention_fn=_customized_attention,
name='attention')
self.assertEqual(num_units, attention_cell.output_size)
initial_state = attention_cell.zero_state(
batch_size=2, dtype=dtypes.float32)
source_input_emb = array_ops.ones([2, 3, 2])
source_input_length = constant_op.constant([3, 2])
# 'state' is a tuple of
# (cell_state, h, attention, alignments, alignment_history, attention_state)
output, state = rnn.dynamic_rnn(
attention_cell,
inputs=source_input_emb,
sequence_length=source_input_length,
initial_state=initial_state,
dtype=dtypes.float32)
with self.session() as sess:
sess.run(variables.global_variables_initializer())
output_value, state_value = sess.run([output, state], feed_dict={})
self.assertAllEqual(np.array([2, 3, 2]), output_value.shape)
self.assertAllClose(np.array([[1., 1.], [1., 1.]]), state_value.attention)
self.assertAllClose(
np.array([[1., 1., 1.], [1., 1., 1.]]), state_value.alignments)
self.assertAllClose(
np.array([[1., 1., 1.], [1., 1., 1.]]), state_value.attention_state)
if __name__ == '__main__':
test.main()
# ==============================================================================
# End of tensorflow/contrib/seq2seq/python/kernel_tests/attention_wrapper_test.py
# (repo: tensorflow-r1.15.5-nv23.03). The next file begins below.
# ==============================================================================
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.seq2seq.python.seq2seq.basic_decoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.seq2seq.python.ops import basic_decoder
from tensorflow.contrib.seq2seq.python.ops import helper as helper_py
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core as layers_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@test_util.run_v1_only("contrib code not supported in TF2.0")
class BasicDecoderTest(test.TestCase):
def _testStepWithTrainingHelper(self, use_output_layer):
sequence_length = [3, 4, 3, 1, 0]
batch_size = 5
max_time = 8
input_depth = 7
cell_depth = 10
output_layer_depth = 3
with self.session(use_gpu=True) as sess:
inputs = np.random.randn(batch_size, max_time,
input_depth).astype(np.float32)
cell = rnn_cell.LSTMCell(cell_depth)
helper = helper_py.TrainingHelper(
inputs, sequence_length, time_major=False)
if use_output_layer:
output_layer = layers_core.Dense(output_layer_depth, use_bias=False)
expected_output_depth = output_layer_depth
else:
output_layer = None
expected_output_depth = cell_depth
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
initial_state=cell.zero_state(
dtype=dtypes.float32, batch_size=batch_size),
output_layer=output_layer)
output_size = my_decoder.output_size
output_dtype = my_decoder.output_dtype
self.assertEqual(
basic_decoder.BasicDecoderOutput(expected_output_depth,
tensor_shape.TensorShape([])),
output_size)
self.assertEqual(
basic_decoder.BasicDecoderOutput(dtypes.float32, dtypes.int32),
output_dtype)
(first_finished, first_inputs, first_state) = my_decoder.initialize()
(step_outputs, step_state, step_next_inputs,
step_finished) = my_decoder.step(
constant_op.constant(0), first_inputs, first_state)
batch_size_t = my_decoder.batch_size
self.assertTrue(isinstance(first_state, rnn_cell.LSTMStateTuple))
self.assertTrue(isinstance(step_state, rnn_cell.LSTMStateTuple))
self.assertTrue(
isinstance(step_outputs, basic_decoder.BasicDecoderOutput))
self.assertEqual((batch_size, expected_output_depth),
step_outputs[0].get_shape())
self.assertEqual((batch_size,), step_outputs[1].get_shape())
self.assertEqual((batch_size, cell_depth), first_state[0].get_shape())
self.assertEqual((batch_size, cell_depth), first_state[1].get_shape())
self.assertEqual((batch_size, cell_depth), step_state[0].get_shape())
self.assertEqual((batch_size, cell_depth), step_state[1].get_shape())
if use_output_layer:
        # The output layer was built during step(): a Dense layer with
        # use_bias=False creates exactly one variable (its kernel).
self.assertEqual(len(output_layer.variables), 1)
sess.run(variables.global_variables_initializer())
sess_results = sess.run({
"batch_size": batch_size_t,
"first_finished": first_finished,
"first_inputs": first_inputs,
"first_state": first_state,
"step_outputs": step_outputs,
"step_state": step_state,
"step_next_inputs": step_next_inputs,
"step_finished": step_finished
})
self.assertAllEqual([False, False, False, False, True],
sess_results["first_finished"])
self.assertAllEqual([False, False, False, True, True],
sess_results["step_finished"])
self.assertEqual(output_dtype.sample_id,
sess_results["step_outputs"].sample_id.dtype)
self.assertAllEqual(
np.argmax(sess_results["step_outputs"].rnn_output, -1),
sess_results["step_outputs"].sample_id)
def testStepWithTrainingHelperNoOutputLayer(self):
self._testStepWithTrainingHelper(use_output_layer=False)
def testStepWithTrainingHelperWithOutputLayer(self):
self._testStepWithTrainingHelper(use_output_layer=True)
def testStepWithGreedyEmbeddingHelper(self):
batch_size = 5
vocabulary_size = 7
cell_depth = vocabulary_size # cell's logits must match vocabulary size
input_depth = 10
start_tokens = np.random.randint(0, vocabulary_size, size=batch_size)
end_token = 1
with self.session(use_gpu=True) as sess:
embeddings = np.random.randn(vocabulary_size,
input_depth).astype(np.float32)
cell = rnn_cell.LSTMCell(vocabulary_size)
helper = helper_py.GreedyEmbeddingHelper(embeddings, start_tokens,
end_token)
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
initial_state=cell.zero_state(
dtype=dtypes.float32, batch_size=batch_size))
output_size = my_decoder.output_size
output_dtype = my_decoder.output_dtype
self.assertEqual(
basic_decoder.BasicDecoderOutput(cell_depth,
tensor_shape.TensorShape([])),
output_size)
self.assertEqual(
basic_decoder.BasicDecoderOutput(dtypes.float32, dtypes.int32),
output_dtype)
(first_finished, first_inputs, first_state) = my_decoder.initialize()
(step_outputs, step_state, step_next_inputs,
step_finished) = my_decoder.step(
constant_op.constant(0), first_inputs, first_state)
batch_size_t = my_decoder.batch_size
self.assertTrue(isinstance(first_state, rnn_cell.LSTMStateTuple))
self.assertTrue(isinstance(step_state, rnn_cell.LSTMStateTuple))
self.assertTrue(
isinstance(step_outputs, basic_decoder.BasicDecoderOutput))
self.assertEqual((batch_size, cell_depth), step_outputs[0].get_shape())
self.assertEqual((batch_size,), step_outputs[1].get_shape())
self.assertEqual((batch_size, cell_depth), first_state[0].get_shape())
self.assertEqual((batch_size, cell_depth), first_state[1].get_shape())
self.assertEqual((batch_size, cell_depth), step_state[0].get_shape())
self.assertEqual((batch_size, cell_depth), step_state[1].get_shape())
sess.run(variables.global_variables_initializer())
sess_results = sess.run({
"batch_size": batch_size_t,
"first_finished": first_finished,
"first_inputs": first_inputs,
"first_state": first_state,
"step_outputs": step_outputs,
"step_state": step_state,
"step_next_inputs": step_next_inputs,
"step_finished": step_finished
})
expected_sample_ids = np.argmax(
sess_results["step_outputs"].rnn_output, -1)
expected_step_finished = (expected_sample_ids == end_token)
expected_step_next_inputs = embeddings[expected_sample_ids]
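      # Greedy embedding decoding: the next input is the embedding row of the
      # argmax token, and a sequence is finished once it emits end_token.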
self.assertAllEqual([False, False, False, False, False],
sess_results["first_finished"])
self.assertAllEqual(expected_step_finished, sess_results["step_finished"])
self.assertEqual(output_dtype.sample_id,
sess_results["step_outputs"].sample_id.dtype)
self.assertAllEqual(expected_sample_ids,
sess_results["step_outputs"].sample_id)
self.assertAllEqual(expected_step_next_inputs,
sess_results["step_next_inputs"])
def testStepWithSampleEmbeddingHelper(self):
batch_size = 5
vocabulary_size = 7
cell_depth = vocabulary_size # cell's logits must match vocabulary size
input_depth = 10
np.random.seed(0)
start_tokens = np.random.randint(0, vocabulary_size, size=batch_size)
end_token = 1
with self.session(use_gpu=True) as sess:
with variable_scope.variable_scope(
"testStepWithSampleEmbeddingHelper",
initializer=init_ops.constant_initializer(0.01)):
embeddings = np.random.randn(vocabulary_size,
input_depth).astype(np.float32)
cell = rnn_cell.LSTMCell(vocabulary_size)
helper = helper_py.SampleEmbeddingHelper(embeddings, start_tokens,
end_token, seed=0)
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
initial_state=cell.zero_state(
dtype=dtypes.float32, batch_size=batch_size))
output_size = my_decoder.output_size
output_dtype = my_decoder.output_dtype
self.assertEqual(
basic_decoder.BasicDecoderOutput(cell_depth,
tensor_shape.TensorShape([])),
output_size)
self.assertEqual(
basic_decoder.BasicDecoderOutput(dtypes.float32, dtypes.int32),
output_dtype)
(first_finished, first_inputs, first_state) = my_decoder.initialize()
(step_outputs, step_state, step_next_inputs,
step_finished) = my_decoder.step(
constant_op.constant(0), first_inputs, first_state)
batch_size_t = my_decoder.batch_size
self.assertTrue(isinstance(first_state, rnn_cell.LSTMStateTuple))
self.assertTrue(isinstance(step_state, rnn_cell.LSTMStateTuple))
self.assertTrue(
isinstance(step_outputs, basic_decoder.BasicDecoderOutput))
self.assertEqual((batch_size, cell_depth), step_outputs[0].get_shape())
self.assertEqual((batch_size,), step_outputs[1].get_shape())
self.assertEqual((batch_size, cell_depth), first_state[0].get_shape())
self.assertEqual((batch_size, cell_depth), first_state[1].get_shape())
self.assertEqual((batch_size, cell_depth), step_state[0].get_shape())
self.assertEqual((batch_size, cell_depth), step_state[1].get_shape())
sess.run(variables.global_variables_initializer())
sess_results = sess.run({
"batch_size": batch_size_t,
"first_finished": first_finished,
"first_inputs": first_inputs,
"first_state": first_state,
"step_outputs": step_outputs,
"step_state": step_state,
"step_next_inputs": step_next_inputs,
"step_finished": step_finished
})
sample_ids = sess_results["step_outputs"].sample_id
self.assertEqual(output_dtype.sample_id, sample_ids.dtype)
expected_step_finished = (sample_ids == end_token)
expected_step_next_inputs = embeddings[sample_ids]
self.assertAllEqual(expected_step_finished,
sess_results["step_finished"])
self.assertAllEqual(expected_step_next_inputs,
sess_results["step_next_inputs"])
def testStepWithScheduledEmbeddingTrainingHelper(self):
sequence_length = [3, 4, 3, 1, 0]
batch_size = 5
max_time = 8
input_depth = 7
vocabulary_size = 10
with self.session(use_gpu=True) as sess:
inputs = np.random.randn(
batch_size, max_time, input_depth).astype(np.float32)
embeddings = np.random.randn(
vocabulary_size, input_depth).astype(np.float32)
half = constant_op.constant(0.5)
cell = rnn_cell.LSTMCell(vocabulary_size)
helper = helper_py.ScheduledEmbeddingTrainingHelper(
inputs=inputs,
sequence_length=sequence_length,
embedding=embeddings,
sampling_probability=half,
time_major=False)
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
initial_state=cell.zero_state(
dtype=dtypes.float32, batch_size=batch_size))
output_size = my_decoder.output_size
output_dtype = my_decoder.output_dtype
self.assertEqual(
basic_decoder.BasicDecoderOutput(vocabulary_size,
tensor_shape.TensorShape([])),
output_size)
self.assertEqual(
basic_decoder.BasicDecoderOutput(dtypes.float32, dtypes.int32),
output_dtype)
(first_finished, first_inputs, first_state) = my_decoder.initialize()
(step_outputs, step_state, step_next_inputs,
step_finished) = my_decoder.step(
constant_op.constant(0), first_inputs, first_state)
batch_size_t = my_decoder.batch_size
self.assertTrue(isinstance(first_state, rnn_cell.LSTMStateTuple))
self.assertTrue(isinstance(step_state, rnn_cell.LSTMStateTuple))
self.assertTrue(
isinstance(step_outputs, basic_decoder.BasicDecoderOutput))
self.assertEqual((batch_size, vocabulary_size),
step_outputs[0].get_shape())
self.assertEqual((batch_size,), step_outputs[1].get_shape())
self.assertEqual((batch_size, vocabulary_size),
first_state[0].get_shape())
self.assertEqual((batch_size, vocabulary_size),
first_state[1].get_shape())
self.assertEqual((batch_size, vocabulary_size),
step_state[0].get_shape())
self.assertEqual((batch_size, vocabulary_size),
step_state[1].get_shape())
self.assertEqual((batch_size, input_depth),
step_next_inputs.get_shape())
sess.run(variables.global_variables_initializer())
sess_results = sess.run({
"batch_size": batch_size_t,
"first_finished": first_finished,
"first_inputs": first_inputs,
"first_state": first_state,
"step_outputs": step_outputs,
"step_state": step_state,
"step_next_inputs": step_next_inputs,
"step_finished": step_finished
})
self.assertAllEqual([False, False, False, False, True],
sess_results["first_finished"])
self.assertAllEqual([False, False, False, True, True],
sess_results["step_finished"])
sample_ids = sess_results["step_outputs"].sample_id
self.assertEqual(output_dtype.sample_id, sample_ids.dtype)
batch_where_not_sampling = np.where(sample_ids == -1)
batch_where_sampling = np.where(sample_ids > -1)
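      # A sample_id of -1 marks a batch entry where the helper did not sample
      # and instead fed the ground-truth input for time step 1; entries with
      # sample_id > -1 were replaced with the corresponding embedding row.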
self.assertAllClose(
sess_results["step_next_inputs"][batch_where_sampling],
embeddings[sample_ids[batch_where_sampling]])
self.assertAllClose(
sess_results["step_next_inputs"][batch_where_not_sampling],
np.squeeze(inputs[batch_where_not_sampling, 1]))
def _testStepWithScheduledOutputTrainingHelper(
self, sampling_probability, use_next_inputs_fn, use_auxiliary_inputs):
sequence_length = [3, 4, 3, 1, 0]
batch_size = 5
max_time = 8
input_depth = 7
cell_depth = input_depth
if use_auxiliary_inputs:
auxiliary_input_depth = 4
auxiliary_inputs = np.random.randn(
batch_size, max_time, auxiliary_input_depth).astype(np.float32)
else:
auxiliary_inputs = None
with self.session(use_gpu=True) as sess:
inputs = np.random.randn(batch_size, max_time,
input_depth).astype(np.float32)
cell = rnn_cell.LSTMCell(cell_depth)
sampling_probability = constant_op.constant(sampling_probability)
if use_next_inputs_fn:
def next_inputs_fn(outputs):
# Use deterministic function for test.
samples = math_ops.argmax(outputs, axis=1)
return array_ops.one_hot(samples, cell_depth, dtype=dtypes.float32)
else:
next_inputs_fn = None
helper = helper_py.ScheduledOutputTrainingHelper(
inputs=inputs,
sequence_length=sequence_length,
sampling_probability=sampling_probability,
time_major=False,
next_inputs_fn=next_inputs_fn,
auxiliary_inputs=auxiliary_inputs)
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
initial_state=cell.zero_state(
dtype=dtypes.float32, batch_size=batch_size))
output_size = my_decoder.output_size
output_dtype = my_decoder.output_dtype
self.assertEqual(
basic_decoder.BasicDecoderOutput(cell_depth,
tensor_shape.TensorShape([])),
output_size)
self.assertEqual(
basic_decoder.BasicDecoderOutput(dtypes.float32, dtypes.int32),
output_dtype)
(first_finished, first_inputs, first_state) = my_decoder.initialize()
(step_outputs, step_state, step_next_inputs,
step_finished) = my_decoder.step(
constant_op.constant(0), first_inputs, first_state)
if use_next_inputs_fn:
output_after_next_inputs_fn = next_inputs_fn(step_outputs.rnn_output)
batch_size_t = my_decoder.batch_size
self.assertTrue(isinstance(first_state, rnn_cell.LSTMStateTuple))
self.assertTrue(isinstance(step_state, rnn_cell.LSTMStateTuple))
self.assertTrue(
isinstance(step_outputs, basic_decoder.BasicDecoderOutput))
self.assertEqual((batch_size, cell_depth), step_outputs[0].get_shape())
self.assertEqual((batch_size,), step_outputs[1].get_shape())
self.assertEqual((batch_size, cell_depth), first_state[0].get_shape())
self.assertEqual((batch_size, cell_depth), first_state[1].get_shape())
self.assertEqual((batch_size, cell_depth), step_state[0].get_shape())
self.assertEqual((batch_size, cell_depth), step_state[1].get_shape())
sess.run(variables.global_variables_initializer())
fetches = {
"batch_size": batch_size_t,
"first_finished": first_finished,
"first_inputs": first_inputs,
"first_state": first_state,
"step_outputs": step_outputs,
"step_state": step_state,
"step_next_inputs": step_next_inputs,
"step_finished": step_finished
}
if use_next_inputs_fn:
fetches["output_after_next_inputs_fn"] = output_after_next_inputs_fn
sess_results = sess.run(fetches)
self.assertAllEqual([False, False, False, False, True],
sess_results["first_finished"])
self.assertAllEqual([False, False, False, True, True],
sess_results["step_finished"])
sample_ids = sess_results["step_outputs"].sample_id
self.assertEqual(output_dtype.sample_id, sample_ids.dtype)
batch_where_not_sampling = np.where(np.logical_not(sample_ids))
batch_where_sampling = np.where(sample_ids)
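      # Here sample_id acts as a boolean: a truthy value means the cell output
      # (optionally passed through next_inputs_fn) is fed back, and a zero
      # means the ground-truth input is used; auxiliary inputs, when present,
      # are concatenated onto the next input in both cases.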
auxiliary_inputs_to_concat = (
auxiliary_inputs[:, 1] if use_auxiliary_inputs else
np.array([]).reshape(batch_size, 0).astype(np.float32))
expected_next_sampling_inputs = np.concatenate(
(sess_results["output_after_next_inputs_fn"][batch_where_sampling]
if use_next_inputs_fn else
sess_results["step_outputs"].rnn_output[batch_where_sampling],
auxiliary_inputs_to_concat[batch_where_sampling]),
axis=-1)
self.assertAllClose(
sess_results["step_next_inputs"][batch_where_sampling],
expected_next_sampling_inputs)
self.assertAllClose(
sess_results["step_next_inputs"][batch_where_not_sampling],
np.concatenate(
(np.squeeze(inputs[batch_where_not_sampling, 1], axis=0),
auxiliary_inputs_to_concat[batch_where_not_sampling]),
axis=-1))
def testStepWithScheduledOutputTrainingHelperWithoutNextInputsFnOrAuxInputs(
self):
self._testStepWithScheduledOutputTrainingHelper(
sampling_probability=0.5, use_next_inputs_fn=False,
use_auxiliary_inputs=False)
def testStepWithScheduledOutputTrainingHelperWithNextInputsFn(self):
self._testStepWithScheduledOutputTrainingHelper(
sampling_probability=0.5, use_next_inputs_fn=True,
use_auxiliary_inputs=False)
def testStepWithScheduledOutputTrainingHelperWithAuxiliaryInputs(self):
self._testStepWithScheduledOutputTrainingHelper(
sampling_probability=0.5, use_next_inputs_fn=False,
use_auxiliary_inputs=True)
def testStepWithScheduledOutputTrainingHelperWithNextInputsFnAndAuxInputs(
self):
self._testStepWithScheduledOutputTrainingHelper(
sampling_probability=0.5, use_next_inputs_fn=True,
use_auxiliary_inputs=True)
def testStepWithScheduledOutputTrainingHelperWithNoSampling(self):
self._testStepWithScheduledOutputTrainingHelper(
sampling_probability=0.0, use_next_inputs_fn=True,
use_auxiliary_inputs=True)
def testStepWithInferenceHelperCategorical(self):
batch_size = 5
vocabulary_size = 7
cell_depth = vocabulary_size
start_token = 0
end_token = 6
start_inputs = array_ops.one_hot(
np.ones(batch_size) * start_token,
vocabulary_size)
# The sample function samples categorically from the logits.
sample_fn = lambda x: helper_py.categorical_sample(logits=x)
# The next inputs are a one-hot encoding of the sampled labels.
next_inputs_fn = (
lambda x: array_ops.one_hot(x, vocabulary_size, dtype=dtypes.float32))
end_fn = lambda sample_ids: math_ops.equal(sample_ids, end_token)
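    # Together these callables define the inference loop: sample a token id
    # categorically from the logits, stop a sequence once it samples
    # end_token, and feed the one-hot encoding of the sample back as the next
    # input.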
with self.session(use_gpu=True) as sess:
with variable_scope.variable_scope(
"testStepWithInferenceHelper",
initializer=init_ops.constant_initializer(0.01)):
cell = rnn_cell.LSTMCell(vocabulary_size)
helper = helper_py.InferenceHelper(
sample_fn, sample_shape=(), sample_dtype=dtypes.int32,
start_inputs=start_inputs, end_fn=end_fn,
next_inputs_fn=next_inputs_fn)
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
initial_state=cell.zero_state(
dtype=dtypes.float32, batch_size=batch_size))
output_size = my_decoder.output_size
output_dtype = my_decoder.output_dtype
self.assertEqual(
basic_decoder.BasicDecoderOutput(cell_depth,
tensor_shape.TensorShape([])),
output_size)
self.assertEqual(
basic_decoder.BasicDecoderOutput(dtypes.float32, dtypes.int32),
output_dtype)
(first_finished, first_inputs, first_state) = my_decoder.initialize()
(step_outputs, step_state, step_next_inputs,
step_finished) = my_decoder.step(
constant_op.constant(0), first_inputs, first_state)
batch_size_t = my_decoder.batch_size
self.assertTrue(isinstance(first_state, rnn_cell.LSTMStateTuple))
self.assertTrue(isinstance(step_state, rnn_cell.LSTMStateTuple))
self.assertTrue(
isinstance(step_outputs, basic_decoder.BasicDecoderOutput))
self.assertEqual((batch_size, cell_depth), step_outputs[0].get_shape())
self.assertEqual((batch_size,), step_outputs[1].get_shape())
self.assertEqual((batch_size, cell_depth), first_state[0].get_shape())
self.assertEqual((batch_size, cell_depth), first_state[1].get_shape())
self.assertEqual((batch_size, cell_depth), step_state[0].get_shape())
self.assertEqual((batch_size, cell_depth), step_state[1].get_shape())
sess.run(variables.global_variables_initializer())
sess_results = sess.run({
"batch_size": batch_size_t,
"first_finished": first_finished,
"first_inputs": first_inputs,
"first_state": first_state,
"step_outputs": step_outputs,
"step_state": step_state,
"step_next_inputs": step_next_inputs,
"step_finished": step_finished
})
sample_ids = sess_results["step_outputs"].sample_id
self.assertEqual(output_dtype.sample_id, sample_ids.dtype)
expected_step_finished = (sample_ids == end_token)
expected_step_next_inputs = np.zeros((batch_size, vocabulary_size))
expected_step_next_inputs[np.arange(batch_size), sample_ids] = 1.0
self.assertAllEqual(expected_step_finished,
sess_results["step_finished"])
self.assertAllEqual(expected_step_next_inputs,
sess_results["step_next_inputs"])
def testStepWithInferenceHelperMultilabel(self):
batch_size = 5
vocabulary_size = 7
cell_depth = vocabulary_size
start_token = 0
end_token = 6
start_inputs = array_ops.one_hot(
np.ones(batch_size) * start_token,
vocabulary_size)
    # The sample function draws independent Bernoulli samples from the logits.
sample_fn = (
lambda x: helper_py.bernoulli_sample(logits=x, dtype=dtypes.bool))
# The next inputs are a one-hot encoding of the sampled labels.
next_inputs_fn = math_ops.to_float
end_fn = lambda sample_ids: sample_ids[:, end_token]
with self.session(use_gpu=True) as sess:
with variable_scope.variable_scope(
"testStepWithInferenceHelper",
initializer=init_ops.constant_initializer(0.01)):
cell = rnn_cell.LSTMCell(vocabulary_size)
helper = helper_py.InferenceHelper(
sample_fn, sample_shape=[cell_depth], sample_dtype=dtypes.bool,
start_inputs=start_inputs, end_fn=end_fn,
next_inputs_fn=next_inputs_fn)
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
initial_state=cell.zero_state(
dtype=dtypes.float32, batch_size=batch_size))
output_size = my_decoder.output_size
output_dtype = my_decoder.output_dtype
self.assertEqual(
basic_decoder.BasicDecoderOutput(cell_depth, cell_depth),
output_size)
self.assertEqual(
basic_decoder.BasicDecoderOutput(dtypes.float32, dtypes.bool),
output_dtype)
(first_finished, first_inputs, first_state) = my_decoder.initialize()
(step_outputs, step_state, step_next_inputs,
step_finished) = my_decoder.step(
constant_op.constant(0), first_inputs, first_state)
batch_size_t = my_decoder.batch_size
self.assertTrue(isinstance(first_state, rnn_cell.LSTMStateTuple))
self.assertTrue(isinstance(step_state, rnn_cell.LSTMStateTuple))
self.assertTrue(
isinstance(step_outputs, basic_decoder.BasicDecoderOutput))
self.assertEqual((batch_size, cell_depth), step_outputs[0].get_shape())
self.assertEqual((batch_size, cell_depth), step_outputs[1].get_shape())
self.assertEqual((batch_size, cell_depth), first_state[0].get_shape())
self.assertEqual((batch_size, cell_depth), first_state[1].get_shape())
self.assertEqual((batch_size, cell_depth), step_state[0].get_shape())
self.assertEqual((batch_size, cell_depth), step_state[1].get_shape())
sess.run(variables.global_variables_initializer())
sess_results = sess.run({
"batch_size": batch_size_t,
"first_finished": first_finished,
"first_inputs": first_inputs,
"first_state": first_state,
"step_outputs": step_outputs,
"step_state": step_state,
"step_next_inputs": step_next_inputs,
"step_finished": step_finished
})
sample_ids = sess_results["step_outputs"].sample_id
self.assertEqual(output_dtype.sample_id, sample_ids.dtype)
expected_step_finished = sample_ids[:, end_token]
expected_step_next_inputs = sample_ids.astype(np.float32)
self.assertAllEqual(expected_step_finished,
sess_results["step_finished"])
self.assertAllEqual(expected_step_next_inputs,
sess_results["step_next_inputs"])
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/seq2seq/python/kernel_tests/basic_decoder_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.seq2seq.python.seq2seq.decoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.seq2seq.python.ops import basic_decoder
from tensorflow.contrib.seq2seq.python.ops import sampler as sampler_py
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class DecodeV2RNNTest(keras_parameterized.TestCase, test.TestCase):
"""Tests for DecoderV2."""
def _testDecodeRNN(self, time_major, maximum_iterations=None):
sequence_length = [3, 4, 3, 1, 0]
batch_size = 5
max_time = 8
input_depth = 7
cell_depth = 10
max_out = max(sequence_length)
with self.cached_session(use_gpu=True):
if time_major:
inputs = np.random.randn(max_time, batch_size,
input_depth).astype(np.float32)
else:
inputs = np.random.randn(batch_size, max_time,
input_depth).astype(np.float32)
input_t = constant_op.constant(inputs)
cell = rnn_cell.LSTMCell(cell_depth)
sampler = sampler_py.TrainingSampler(time_major=time_major)
my_decoder = basic_decoder.BasicDecoderV2(
cell=cell,
sampler=sampler,
output_time_major=time_major,
maximum_iterations=maximum_iterations)
initial_state = cell.zero_state(
dtype=dtypes.float32, batch_size=batch_size)
(final_outputs, unused_final_state, final_sequence_length) = my_decoder(
input_t, initial_state=initial_state, sequence_length=sequence_length)
def _t(shape):
if time_major:
return (shape[1], shape[0]) + shape[2:]
return shape
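    # E.g. with time_major=True, _t((batch, time, depth)) yields
    # (time, batch, depth); with time_major=False it is the identity.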
if not context.executing_eagerly():
self.assertEqual((batch_size,),
tuple(final_sequence_length.get_shape().as_list()))
self.assertEqual(
_t((batch_size, None, cell_depth)),
tuple(final_outputs.rnn_output.get_shape().as_list()))
self.assertEqual(
_t((batch_size, None)),
tuple(final_outputs.sample_id.get_shape().as_list()))
self.evaluate(variables.global_variables_initializer())
final_outputs = self.evaluate(final_outputs)
final_sequence_length = self.evaluate(final_sequence_length)
# Mostly a smoke test
time_steps = max_out
expected_length = sequence_length
if maximum_iterations is not None:
time_steps = min(max_out, maximum_iterations)
expected_length = [min(x, maximum_iterations) for x in expected_length]
if context.executing_eagerly() and maximum_iterations != 0:
# Only check the shape of output when maximum_iterations > 0, see
# b/123431432 for more details.
self.assertEqual(
_t((batch_size, time_steps, cell_depth)),
final_outputs.rnn_output.shape)
self.assertEqual(
_t((batch_size, time_steps)), final_outputs.sample_id.shape)
self.assertItemsEqual(expected_length, final_sequence_length)
def testDynamicDecodeRNNBatchMajor(self):
self._testDecodeRNN(time_major=False)
def testDynamicDecodeRNNTimeMajor(self):
self._testDecodeRNN(time_major=True)
def testDynamicDecodeRNNZeroMaxIters(self):
self._testDecodeRNN(time_major=True, maximum_iterations=0)
def testDynamicDecodeRNNOneMaxIter(self):
self._testDecodeRNN(time_major=True, maximum_iterations=1)
def _testDynamicDecodeRNNWithTrainingHelperMatchesDynamicRNN(
self, use_sequence_length):
sequence_length = [3, 4, 3, 1, 0]
batch_size = 5
max_time = 8
input_depth = 7
cell_depth = 10
max_out = max(sequence_length)
with self.cached_session(use_gpu=True):
inputs = np.random.randn(batch_size, max_time,
input_depth).astype(np.float32)
inputs = constant_op.constant(inputs)
cell = rnn_cell.LSTMCell(cell_depth)
zero_state = cell.zero_state(dtype=dtypes.float32, batch_size=batch_size)
sampler = sampler_py.TrainingSampler()
my_decoder = basic_decoder.BasicDecoderV2(
cell=cell, sampler=sampler, impute_finished=use_sequence_length)
final_decoder_outputs, final_decoder_state, _ = my_decoder(
inputs, initial_state=zero_state, sequence_length=sequence_length)
final_rnn_outputs, final_rnn_state = rnn.dynamic_rnn(
cell,
inputs,
sequence_length=sequence_length if use_sequence_length else None,
initial_state=zero_state)
self.evaluate(variables.global_variables_initializer())
eval_result = self.evaluate({
"final_decoder_outputs": final_decoder_outputs,
"final_decoder_state": final_decoder_state,
"final_rnn_outputs": final_rnn_outputs,
"final_rnn_state": final_rnn_state
})
# Decoder only runs out to max_out; ensure values are identical
# to dynamic_rnn, which also zeros out outputs and passes along state.
self.assertAllClose(eval_result["final_decoder_outputs"].rnn_output,
eval_result["final_rnn_outputs"][:, 0:max_out, :])
if use_sequence_length:
self.assertAllClose(eval_result["final_decoder_state"],
eval_result["final_rnn_state"])
def testDynamicDecodeRNNWithTrainingHelperMatchesDynamicRNNWithSeqLen(self):
self._testDynamicDecodeRNNWithTrainingHelperMatchesDynamicRNN(
use_sequence_length=True)
def testDynamicDecodeRNNWithTrainingHelperMatchesDynamicRNNNoSeqLen(self):
self._testDynamicDecodeRNNWithTrainingHelperMatchesDynamicRNN(
use_sequence_length=False)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/seq2seq/python/kernel_tests/decoder_v2_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.seq2seq.python.ops.attention_wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.seq2seq.python.ops import attention_wrapper as wrapper
from tensorflow.contrib.seq2seq.python.ops import basic_decoder
from tensorflow.contrib.seq2seq.python.ops import sampler as sampler_py
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.keras import initializers
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import nest
@test_util.run_all_in_graph_and_eager_modes
class AttentionMechanismTest(test.TestCase, parameterized.TestCase):
def setUp(self):
super(AttentionMechanismTest, self).setUp()
self.batch = 10
self.timestep = 5
self.memory_size = 6
self.units = 8
self.memory = np.random.randn(self.batch, self.timestep,
self.memory_size).astype(np.float32)
self.query = np.random.randn(self.batch, self.units).astype(np.float32)
self.state = np.random.randn(self.batch, self.timestep).astype(np.float32)
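  # Each V2 attention mechanism is a Keras layer: called on a [query, state]
  # pair, it returns an (alignments, next_attention_state) pair, each of
  # shape [batch, timestep], as the tests below verify.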
@parameterized.named_parameters(
("luong", wrapper.LuongAttentionV2),
("luong_monotonic", wrapper.LuongMonotonicAttentionV2),
("bahdanau", wrapper.BahdanauAttentionV2),
("bahdanau_monotonic", wrapper.BahdanauMonotonicAttentionV2),
)
def test_attention_shape_inference(self, attention_cls):
attention = attention_cls(self.units, self.memory)
attention_score = attention([self.query, self.state])
self.assertLen(attention_score, 2)
self.assertEqual(attention_score[0].shape, (self.batch, self.timestep))
self.assertEqual(attention_score[1].shape, (self.batch, self.timestep))
@parameterized.named_parameters(
("luong", wrapper.LuongAttentionV2),
("luong_monotonic", wrapper.LuongMonotonicAttentionV2),
("bahdanau", wrapper.BahdanauAttentionV2),
("bahdanau_monotonic", wrapper.BahdanauMonotonicAttentionV2),
)
def test_get_config(self, attention_cls):
attention = attention_cls(self.units, self.memory)
config = attention.get_config()
attention_from_config = attention_cls.from_config(config)
config_from_clone = attention_from_config.get_config()
self.assertDictEqual(config, config_from_clone)
@parameterized.named_parameters(
("luong", wrapper.LuongAttentionV2),
("luong_monotonic", wrapper.LuongMonotonicAttentionV2),
("bahdanau", wrapper.BahdanauAttentionV2),
("bahdanau_monotonic", wrapper.BahdanauMonotonicAttentionV2),
)
def test_layer_output(self, attention_cls):
attention = attention_cls(self.units, self.memory)
score = attention([self.query, self.state])
self.evaluate(variables.variables_initializer(attention.variables))
score_val = self.evaluate(score)
self.assertLen(score_val, 2)
self.assertEqual(score_val[0].shape, (self.batch, self.timestep))
self.assertEqual(score_val[1].shape, (self.batch, self.timestep))
@parameterized.named_parameters(
("luong", wrapper.LuongAttentionV2),
("luong_monotonic", wrapper.LuongMonotonicAttentionV2),
("bahdanau", wrapper.BahdanauAttentionV2),
("bahdanau_monotonic", wrapper.BahdanauMonotonicAttentionV2),
)
def test_passing_memory_from_call(self, attention_cls):
attention = attention_cls(self.units, self.memory)
weights_before_query = attention.get_weights()
ref_score = attention([self.query, self.state])
self.evaluate(variables.global_variables_initializer())
ref_score_val = self.evaluate(ref_score)
all_weights = attention.get_weights()
config = attention.get_config()
    # Simulate invoking the layer twice: first to set up the memory, then to
    # score the query against it.
attention_from_config = attention_cls.from_config(config)
attention_from_config.build(self.memory.shape)
attention_from_config.set_weights(weights_before_query)
attention_from_config(self.memory, setup_memory=True)
attention_from_config.build([self.query.shape, self.state.shape])
attention_from_config.set_weights(all_weights)
score = attention_from_config([self.query, self.state])
score_val = self.evaluate(score)
self.assertAllClose(ref_score_val, score_val)
@parameterized.named_parameters(
("luong", wrapper.LuongAttentionV2),
("luong_monotonic", wrapper.LuongMonotonicAttentionV2),
("bahdanau", wrapper.BahdanauAttentionV2),
("bahdanau_monotonic", wrapper.BahdanauMonotonicAttentionV2),
)
def test_save_load_layer(self, attention_cls):
vocab = 20
embedding_dim = 6
inputs = keras.layers.Input(shape=[self.timestep])
encoder_input = keras.layers.Embedding(
vocab, embedding_dim, mask_zero=True)(
inputs)
encoder_output = keras.layers.LSTM(
self.memory_size, return_sequences=True)(
encoder_input)
attention = attention_cls(self.units, encoder_output)
query = keras.layers.Input(shape=[self.units])
state = keras.layers.Input(shape=[self.timestep])
score = attention([query, state])
x = np.random.randint(vocab, size=(self.batch, self.timestep))
x_test = np.random.randint(vocab, size=(self.batch, self.timestep))
y = np.random.randn(self.batch, self.timestep)
model = keras.models.Model([inputs, query, state], score)
# TODO(b/138592586): Run with single-execution-path
model.compile("rmsprop", "mse", experimental_run_tf_function=False)
model.fit([x, self.query, self.state], (y, y))
y_ref = model.predict_on_batch([x_test, self.query, self.state])
config = model.get_config()
weights = model.get_weights()
loaded_model = keras.models.Model.from_config(
config, custom_objects={attention_cls.__name__: attention_cls})
loaded_model.set_weights(weights)
# TODO(b/138592586): Run with single-execution-path
loaded_model.compile("rmsprop", "mse", experimental_run_tf_function=False)
y = loaded_model.predict_on_batch([x_test, self.query, self.state])
self.assertAllClose(y_ref, y)
# TODO(scottzhu): Add tests for model.compile(run_eagerly=True)
class ResultSummary(
collections.namedtuple("ResultSummary", ("shape", "dtype", "mean"))):
pass
def get_result_summary(x):
if isinstance(x, np.ndarray):
return ResultSummary(x.shape, x.dtype, x.mean())
return x
@test_util.run_all_in_graph_and_eager_modes
class AttentionWrapperV2Test(test.TestCase, parameterized.TestCase):
def assertAllCloseOrEqual(self, x, y, **kwargs):
if isinstance(x, np.ndarray) or isinstance(x, float):
return super(AttentionWrapperV2Test, self).assertAllClose(
x, y, atol=1e-3, **kwargs)
else:
self.assertAllEqual(x, y, **kwargs)
def setUp(self):
super(AttentionWrapperV2Test, self).setUp()
self.batch = 64
self.units = 128
self.encoder_timestep = 10
self.encoder_dim = 256
self.decoder_timestep = 12
self.encoder_outputs = np.random.randn(self.batch, self.encoder_timestep,
self.encoder_dim)
self.encoder_sequence_length = np.random.randint(
self.encoder_timestep, size=(self.batch,)).astype(np.int32)
self.decoder_inputs = np.random.randn(self.batch, self.decoder_timestep,
self.units)
self.decoder_sequence_length = np.random.randint(
self.decoder_timestep, size=(self.batch,)).astype(np.int32)
def _testWithAttention(self,
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=3,
alignment_history=False,
expected_final_alignment_history=None,
attention_layer_size=6,
attention_layer=None,
create_query_layer=False,
create_memory_layer=True,
create_attention_kwargs=None):
attention_layer_sizes = ([attention_layer_size]
if attention_layer_size is not None else None)
attention_layers = ([attention_layer]
if attention_layer is not None else None)
self._testWithMaybeMultiAttention(
is_multi=False,
create_attention_mechanisms=[create_attention_mechanism],
expected_final_output=expected_final_output,
expected_final_state=expected_final_state,
attention_mechanism_depths=[attention_mechanism_depth],
alignment_history=alignment_history,
expected_final_alignment_history=expected_final_alignment_history,
attention_layer_sizes=attention_layer_sizes,
attention_layers=attention_layers,
create_query_layer=create_query_layer,
create_memory_layer=create_memory_layer,
create_attention_kwargs=create_attention_kwargs)
def _testWithMaybeMultiAttention(self,
is_multi,
create_attention_mechanisms,
expected_final_output,
expected_final_state,
attention_mechanism_depths,
alignment_history=False,
expected_final_alignment_history=None,
attention_layer_sizes=None,
attention_layers=None,
create_query_layer=False,
create_memory_layer=True,
create_attention_kwargs=None):
    # Allow is_multi to be True with a single mechanism, so that passing a
    # single mechanism wrapped in a list is also covered.
assert len(create_attention_mechanisms) == 1 or is_multi
encoder_sequence_length = [3, 2, 3, 1, 1]
decoder_sequence_length = [2, 0, 1, 2, 3]
batch_size = 5
encoder_max_time = 8
decoder_max_time = 4
input_depth = 7
encoder_output_depth = 10
cell_depth = 9
create_attention_kwargs = create_attention_kwargs or {}
if attention_layer_sizes is not None:
# Compute sum of attention_layer_sizes. Use encoder_output_depth if None.
attention_depth = sum(attention_layer_size or encoder_output_depth
for attention_layer_size in attention_layer_sizes)
elif attention_layers is not None:
# Compute sum of attention_layers output depth.
attention_depth = sum(
attention_layer.compute_output_shape(
[batch_size, cell_depth + encoder_output_depth]).dims[-1].value
for attention_layer in attention_layers)
else:
attention_depth = encoder_output_depth * len(create_attention_mechanisms)
decoder_inputs = np.random.randn(batch_size, decoder_max_time,
input_depth).astype(np.float32)
encoder_outputs = np.random.randn(batch_size, encoder_max_time,
encoder_output_depth).astype(np.float32)
attention_mechanisms = []
for creator, depth in zip(create_attention_mechanisms,
attention_mechanism_depths):
      # Create query/memory layers with deterministic initializers to avoid
      # randomness between the graph and eager runs of the test.
if create_query_layer:
create_attention_kwargs["query_layer"] = keras.layers.Dense(
depth, kernel_initializer="ones", use_bias=False)
if create_memory_layer:
create_attention_kwargs["memory_layer"] = keras.layers.Dense(
depth, kernel_initializer="ones", use_bias=False)
attention_mechanisms.append(
creator(
units=depth,
memory=encoder_outputs,
memory_sequence_length=encoder_sequence_length,
**create_attention_kwargs))
with self.cached_session(use_gpu=True):
attention_layer_size = attention_layer_sizes
attention_layer = attention_layers
if not is_multi:
if attention_layer_size is not None:
attention_layer_size = attention_layer_size[0]
if attention_layer is not None:
attention_layer = attention_layer[0]
cell = keras.layers.LSTMCell(cell_depth,
recurrent_activation="sigmoid",
kernel_initializer="ones",
recurrent_initializer="ones")
cell = wrapper.AttentionWrapper(
cell,
attention_mechanisms if is_multi else attention_mechanisms[0],
attention_layer_size=attention_layer_size,
alignment_history=alignment_history,
attention_layer=attention_layer)
if cell._attention_layers is not None:
for layer in cell._attention_layers:
if getattr(layer, "kernel_initializer") is None:
layer.kernel_initializer = initializers.glorot_uniform(seed=1337)
sampler = sampler_py.TrainingSampler()
my_decoder = basic_decoder.BasicDecoderV2(cell=cell, sampler=sampler)
initial_state = cell.get_initial_state(
dtype=dtypes.float32, batch_size=batch_size)
final_outputs, final_state, _ = my_decoder(
decoder_inputs,
initial_state=initial_state,
sequence_length=decoder_sequence_length)
self.assertIsInstance(final_outputs, basic_decoder.BasicDecoderOutput)
self.assertIsInstance(final_state, wrapper.AttentionWrapperState)
expected_time = (
expected_final_state.time if context.executing_eagerly() else None)
self.assertEqual((batch_size, expected_time, attention_depth),
tuple(final_outputs.rnn_output.get_shape().as_list()))
self.assertEqual((batch_size, expected_time),
tuple(final_outputs.sample_id.get_shape().as_list()))
self.assertEqual((batch_size, attention_depth),
tuple(final_state.attention.get_shape().as_list()))
self.assertEqual((batch_size, cell_depth),
tuple(final_state.cell_state[0].get_shape().as_list()))
self.assertEqual((batch_size, cell_depth),
tuple(final_state.cell_state[1].get_shape().as_list()))
if alignment_history:
if is_multi:
state_alignment_history = []
for history_array in final_state.alignment_history:
history = history_array.stack()
self.assertEqual((expected_time, batch_size, encoder_max_time),
tuple(history.get_shape().as_list()))
state_alignment_history.append(history)
state_alignment_history = tuple(state_alignment_history)
else:
state_alignment_history = final_state.alignment_history.stack()
self.assertEqual((expected_time, batch_size, encoder_max_time),
tuple(state_alignment_history.get_shape().as_list()))
nest.assert_same_structure(cell.state_size,
cell.zero_state(batch_size, dtypes.float32))
# Remove the history from final_state for purposes of the
# remainder of the tests.
final_state = final_state._replace(alignment_history=()) # pylint: disable=protected-access
else:
state_alignment_history = ()
self.evaluate(variables.global_variables_initializer())
eval_result = self.evaluate({
"final_outputs": final_outputs,
"final_state": final_state,
"state_alignment_history": state_alignment_history,
})
final_output_info = nest.map_structure(get_result_summary,
eval_result["final_outputs"])
final_state_info = nest.map_structure(get_result_summary,
eval_result["final_state"])
print("final_output_info: ", final_output_info)
print("final_state_info: ", final_state_info)
nest.map_structure(self.assertAllCloseOrEqual, expected_final_output,
final_output_info)
nest.map_structure(self.assertAllCloseOrEqual, expected_final_state,
final_state_info)
if alignment_history: # by default, the wrapper emits attention as output
final_alignment_history_info = nest.map_structure(
get_result_summary, eval_result["state_alignment_history"])
print("final_alignment_history_info: ", final_alignment_history_info)
nest.map_structure(
self.assertAllCloseOrEqual,
# outputs are batch major but the stacked TensorArray is time major
expected_final_alignment_history,
final_alignment_history_info)
# TODO(b/126893309): reenable np.float16 once the bug is fixed.
@parameterized.parameters([np.float32, np.float64])
def testBahdanauNormalizedDType(self, dtype):
encoder_outputs = self.encoder_outputs.astype(dtype)
decoder_inputs = self.decoder_inputs.astype(dtype)
attention_mechanism = wrapper.BahdanauAttentionV2(
units=self.units,
memory=encoder_outputs,
memory_sequence_length=self.encoder_sequence_length,
normalize=True,
dtype=dtype)
cell = keras.layers.LSTMCell(self.units, recurrent_activation="sigmoid",
dtype=dtype)
cell = wrapper.AttentionWrapper(cell, attention_mechanism, dtype=dtype)
sampler = sampler_py.TrainingSampler()
my_decoder = basic_decoder.BasicDecoderV2(cell=cell, sampler=sampler,
dtype=dtype)
final_outputs, final_state, _ = my_decoder(
decoder_inputs,
initial_state=cell.zero_state(dtype=dtype, batch_size=self.batch),
sequence_length=self.decoder_sequence_length)
self.assertIsInstance(final_outputs, basic_decoder.BasicDecoderOutput)
self.assertEqual(final_outputs.rnn_output.dtype, dtype)
self.assertIsInstance(final_state, wrapper.AttentionWrapperState)
# TODO(b/126893309): reenable np.float16 once the bug is fixed.
@parameterized.parameters([np.float32, np.float64])
def testLuongScaledDType(self, dtype):
# Test case for GitHub issue 18099
encoder_outputs = self.encoder_outputs.astype(dtype)
decoder_inputs = self.decoder_inputs.astype(dtype)
attention_mechanism = wrapper.LuongAttentionV2(
units=self.units,
memory=encoder_outputs,
memory_sequence_length=self.encoder_sequence_length,
scale=True,
dtype=dtype,
)
cell = keras.layers.LSTMCell(self.units, recurrent_activation="sigmoid",
dtype=dtype)
cell = wrapper.AttentionWrapper(cell, attention_mechanism, dtype=dtype)
sampler = sampler_py.TrainingSampler()
my_decoder = basic_decoder.BasicDecoderV2(cell=cell, sampler=sampler,
dtype=dtype)
final_outputs, final_state, _ = my_decoder(
decoder_inputs,
initial_state=cell.zero_state(dtype=dtype, batch_size=self.batch),
sequence_length=self.decoder_sequence_length)
self.assertIsInstance(final_outputs, basic_decoder.BasicDecoderOutput)
self.assertEqual(final_outputs.rnn_output.dtype, dtype)
self.assertIsInstance(final_state, wrapper.AttentionWrapperState)
def testBahdanauNotNormalized(self):
create_attention_mechanism = wrapper.BahdanauAttentionV2
create_attention_kwargs = {"kernel_initializer": "ones"}
expected_final_output = basic_decoder.BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=np.dtype(np.float32), mean=0.051747426),
sample_id=ResultSummary(
shape=(5, 3), dtype=np.dtype(np.int32), mean=3.33333333))
expected_final_state = wrapper.AttentionWrapperState(
cell_state=[
ResultSummary(
shape=(5, 9), dtype=np.dtype(np.float32), mean=0.44189346),
ResultSummary(
shape=(5, 9), dtype=np.dtype(np.float32), mean=0.65429491)],
attention=ResultSummary(
shape=(5, 6), dtype=np.dtype(np.float32), mean=0.073610783),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=np.dtype(np.float32), mean=0.125),
attention_state=ResultSummary(
shape=(5, 8), dtype=np.dtype(np.float32), mean=0.125),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=np.dtype(np.float32), mean=0.125)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
alignment_history=True,
create_query_layer=True,
expected_final_alignment_history=expected_final_alignment_history,
create_attention_kwargs=create_attention_kwargs)
def testBahdanauNormalized(self):
create_attention_mechanism = wrapper.BahdanauAttentionV2
create_attention_kwargs = {"kernel_initializer": "ones", "normalize": True}
expected_final_output = basic_decoder.BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=np.dtype("float32"), mean=0.047594748),
sample_id=ResultSummary(
shape=(5, 3), dtype=np.dtype("int32"), mean=3.6))
expected_final_state = wrapper.AttentionWrapperState(
cell_state=[
ResultSummary(
shape=(5, 9), dtype=np.dtype("float32"), mean=0.41311637),
ResultSummary(
shape=(5, 9), dtype=np.dtype("float32"), mean=0.61683208)],
attention=ResultSummary(
shape=(5, 6), dtype=np.dtype("float32"), mean=0.090581432),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=np.dtype("float32"), mean=0.125),
attention_state=ResultSummary(
shape=(5, 8), dtype=np.dtype("float32"), mean=0.125),
alignment_history=())
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
create_query_layer=True,
create_attention_kwargs=create_attention_kwargs)
def testLuongNotNormalized(self):
create_attention_mechanism = wrapper.LuongAttentionV2
expected_final_output = basic_decoder.BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=np.dtype("float32"), mean=0.05481226),
sample_id=ResultSummary(
shape=(5, 3), dtype=np.dtype("int32"), mean=3.13333333))
expected_final_state = wrapper.AttentionWrapperState(
cell_state=[
ResultSummary(
shape=(5, 9), dtype=np.dtype("float32"), mean=0.38453412),
ResultSummary(
shape=(5, 9), dtype=np.dtype("float32"), mean=0.5785929)],
attention=ResultSummary(
shape=(5, 6), dtype=np.dtype("float32"), mean=0.16311775),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=np.dtype("float32"), mean=0.125),
attention_state=ResultSummary(
shape=(5, 8), dtype=np.dtype("float32"), mean=0.125),
alignment_history=())
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=9)
def testLuongScaled(self):
create_attention_mechanism = wrapper.LuongAttentionV2
create_attention_kwargs = {"scale": True}
expected_final_output = basic_decoder.BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=np.dtype("float32"), mean=0.05481226),
sample_id=ResultSummary(
shape=(5, 3), dtype=np.dtype("int32"), mean=3.13333333))
expected_final_state = wrapper.AttentionWrapperState(
cell_state=[
ResultSummary(
shape=(5, 9), dtype=np.dtype("float32"), mean=0.38453412),
ResultSummary(
shape=(5, 9), dtype=np.dtype("float32"), mean=0.5785929)],
attention=ResultSummary(
shape=(5, 6), dtype=np.dtype("float32"), mean=0.16311775),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=np.dtype("float32"), mean=0.125),
attention_state=ResultSummary(
shape=(5, 8), dtype=np.dtype("float32"), mean=0.125),
alignment_history=())
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=9,
create_attention_kwargs=create_attention_kwargs)
def testNotUseAttentionLayer(self):
create_attention_mechanism = wrapper.BahdanauAttentionV2
create_attention_kwargs = {"kernel_initializer": "ones"}
expected_final_output = basic_decoder.BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 10), dtype=np.dtype("float32"), mean=0.072406612),
sample_id=ResultSummary(
shape=(5, 3), dtype=np.dtype("int32"), mean=3.86666666))
expected_final_state = wrapper.AttentionWrapperState(
cell_state=[
ResultSummary(
shape=(5, 9), dtype=np.dtype("float32"), mean=0.61177742),
ResultSummary(
shape=(5, 9), dtype=np.dtype("float32"), mean=1.032002)],
attention=ResultSummary(
shape=(5, 10), dtype=np.dtype("float32"), mean=0.011346335),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=np.dtype("float32"), mean=0.125),
attention_state=ResultSummary(
shape=(5, 8), dtype=np.dtype("float32"), mean=0.125),
alignment_history=())
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_layer_size=None,
create_query_layer=True,
create_attention_kwargs=create_attention_kwargs)
def testBahdanauMonotonicNotNormalized(self):
create_attention_mechanism = wrapper.BahdanauMonotonicAttentionV2
create_attention_kwargs = {"kernel_initializer": "ones"}
expected_final_output = basic_decoder.BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=np.dtype("float32"), mean=0.041342419),
sample_id=ResultSummary(
shape=(5, 3), dtype=np.dtype("int32"), mean=3.53333333))
expected_final_state = wrapper.AttentionWrapperState(
cell_state=[
ResultSummary(
shape=(5, 9), dtype=np.dtype("float32"), mean=0.33866978),
ResultSummary(
shape=(5, 9), dtype=np.dtype("float32"), mean=0.46913195)],
attention=ResultSummary(
shape=(5, 6), dtype=np.dtype("float32"), mean=0.092498459),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=np.dtype("float32"), mean=0.12079944),
attention_state=ResultSummary(
shape=(5, 8), dtype=np.dtype("float32"), mean=0.12079944),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=np.dtype("float32"), mean=0.121448785067)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
create_query_layer=True,
create_attention_kwargs=create_attention_kwargs)
def testBahdanauMonotonicNormalized(self):
create_attention_mechanism = wrapper.BahdanauMonotonicAttentionV2
create_attention_kwargs = {"kernel_initializer": "ones",
"normalize": True}
expected_final_output = basic_decoder.BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=np.dtype("float32"), mean=0.043294173),
sample_id=ResultSummary(
shape=(5, 3), dtype=np.dtype("int32"), mean=3.53333333))
expected_final_state = wrapper.AttentionWrapperState(
cell_state=[
ResultSummary(
shape=(5, 9), dtype=np.dtype("float32"), mean=0.40034312),
ResultSummary(
shape=(5, 9), dtype=np.dtype("float32"), mean=0.5925445)],
attention=ResultSummary(
shape=(5, 6), dtype=np.dtype("float32"), mean=0.096119694),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=np.dtype("float32"), mean=0.1211452),
attention_state=ResultSummary(
shape=(5, 8), dtype=np.dtype("float32"), mean=0.1211452),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=np.dtype("float32"), mean=0.12258384)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
create_query_layer=True,
create_attention_kwargs=create_attention_kwargs)
def testLuongMonotonicNotNormalized(self):
create_attention_mechanism = wrapper.LuongMonotonicAttentionV2
expected_final_output = basic_decoder.BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=np.dtype("float32"), mean=0.027387079),
sample_id=ResultSummary(
shape=(5, 3), dtype=np.dtype("int32"), mean=3.133333333))
expected_final_state = wrapper.AttentionWrapperState(
cell_state=[
ResultSummary(
shape=(5, 9), dtype=np.dtype("float32"), mean=0.32660431),
ResultSummary(
shape=(5, 9), dtype=np.dtype("float32"), mean=0.52464348)],
attention=ResultSummary(
shape=(5, 6), dtype=np.dtype("float32"), mean=0.089345723),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=np.dtype("float32"), mean=0.11831035),
attention_state=ResultSummary(
shape=(5, 8), dtype=np.dtype("float32"), mean=0.11831035),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=np.dtype("float32"), mean=0.12194442004)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=9,
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history)
def testLuongMonotonicScaled(self):
create_attention_mechanism = wrapper.LuongMonotonicAttentionV2
create_attention_kwargs = {"scale": True}
expected_final_output = basic_decoder.BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=np.dtype("float32"), mean=0.027387079),
sample_id=ResultSummary(
shape=(5, 3), dtype=np.dtype("int32"), mean=3.13333333))
expected_final_state = wrapper.AttentionWrapperState(
cell_state=[
ResultSummary(
shape=(5, 9), dtype=np.dtype("float32"), mean=0.32660431),
ResultSummary(
shape=(5, 9), dtype=np.dtype("float32"), mean=0.52464348)],
attention=ResultSummary(
shape=(5, 6), dtype=np.dtype("float32"), mean=0.089345723),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=np.dtype("float32"), mean=0.11831035),
attention_state=ResultSummary(
shape=(5, 8), dtype=np.dtype("float32"), mean=0.11831035),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=np.dtype("float32"), mean=0.12194442004)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=9,
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
create_attention_kwargs=create_attention_kwargs)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/seq2seq/python/kernel_tests/attention_wrapper_v2_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.seq2seq.python.seq2seq.beam_search_ops."""
# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: enable=unused-import
import itertools
import numpy as np
from tensorflow.contrib.seq2seq.python.ops import beam_search_ops
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
def _transpose_batch_time(x):
return np.transpose(x, [1, 0, 2]).astype(np.int32)
class GatherTreeTest(test.TestCase):
def testGatherTreeOne(self):
# (max_time = 4, batch_size = 1, beams = 3)
end_token = 10
step_ids = _transpose_batch_time(
[[[1, 2, 3], [4, 5, 6], [7, 8, 9], [-1, -1, -1]]])
parent_ids = _transpose_batch_time(
[[[0, 0, 0], [0, 1, 1], [2, 1, 2], [-1, -1, -1]]])
max_sequence_lengths = [3]
expected_result = _transpose_batch_time([[[2, 2, 2], [6, 5, 6], [7, 8, 9],
[10, 10, 10]]])
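    # Worked trace for beam 0: the last valid step is time 2
    # (max_sequence_lengths[0] == 3), which emits step id 7; parent_ids[2][0]
    # == 2 selects beam 2 at time 1 (step id 6), and parent_ids[1][2] == 1
    # selects beam 1 at time 0 (step id 2), giving [2, 6, 7]. Steps past the
    # sequence length are filled with end_token (10).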
beams = beam_search_ops.gather_tree(
step_ids=step_ids,
parent_ids=parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=end_token)
with self.cached_session(use_gpu=True):
self.assertAllEqual(expected_result, self.evaluate(beams))
def testBadParentValuesOnCPU(self):
# (batch_size = 1, max_time = 4, beams = 3)
# bad parent in beam 1 time 1
end_token = 10
step_ids = _transpose_batch_time(
[[[1, 2, 3], [4, 5, 6], [7, 8, 9], [-1, -1, -1]]])
parent_ids = _transpose_batch_time(
[[[0, 0, 0], [0, -1, 1], [2, 1, 2], [-1, -1, -1]]])
max_sequence_lengths = [3]
with ops.device("/cpu:0"):
with self.assertRaisesOpError(
r"parent id -1 at \(batch, time, beam\) == \(0, 0, 1\)"):
beams = beam_search_ops.gather_tree(
step_ids=step_ids,
parent_ids=parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=end_token)
self.evaluate(beams)
def testBadParentValuesOnGPU(self):
# Only want to run this test on CUDA devices, as gather_tree is not
# registered for SYCL devices.
if not test.is_gpu_available(cuda_only=True):
return
# (max_time = 4, batch_size = 1, beams = 3)
# bad parent in beam 1 time 1; appears as a negative index at time 0
end_token = 10
step_ids = _transpose_batch_time(
[[[1, 2, 3], [4, 5, 6], [7, 8, 9], [-1, -1, -1]]])
parent_ids = _transpose_batch_time(
[[[0, 0, 0], [0, -1, 1], [2, 1, 2], [-1, -1, -1]]])
max_sequence_lengths = [3]
expected_result = _transpose_batch_time([[[2, -1, 2], [6, 5, 6], [7, 8, 9],
[10, 10, 10]]])
with ops.device("/device:GPU:0"):
beams = beam_search_ops.gather_tree(
step_ids=step_ids,
parent_ids=parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=end_token)
self.assertAllEqual(expected_result, self.evaluate(beams))
def testGatherTreeBatch(self):
batch_size = 10
beam_width = 15
max_time = 8
max_sequence_lengths = [0, 1, 2, 4, 7, 8, 9, 10, 11, 0]
end_token = 5
with self.cached_session(use_gpu=True):
step_ids = np.random.randint(
0, high=end_token + 1, size=(max_time, batch_size, beam_width))
parent_ids = np.random.randint(
0, high=beam_width - 1, size=(max_time, batch_size, beam_width))
beams = beam_search_ops.gather_tree(
step_ids=step_ids.astype(np.int32),
parent_ids=parent_ids.astype(np.int32),
max_sequence_lengths=max_sequence_lengths,
end_token=end_token)
self.assertEqual((max_time, batch_size, beam_width), beams.shape)
beams_value = self.evaluate(beams)
for b in range(batch_size):
# Past max_sequence_lengths[b], we emit all end tokens.
b_value = beams_value[max_sequence_lengths[b]:, b, :]
self.assertAllClose(b_value, end_token * np.ones_like(b_value))
for batch, beam in itertools.product(
range(batch_size), range(beam_width)):
v = np.squeeze(beams_value[:, batch, beam])
if end_token in v:
found_bad = np.where(v == -1)[0]
self.assertEqual(0, len(found_bad))
found = np.where(v == end_token)[0]
found = found[0] # First occurrence of end_token.
          # If an end_token is found, everything before it should be a
          # valid id and everything after it should be padded with end_token.
if found > 0:
self.assertAllEqual(
v[:found - 1] >= 0, np.ones_like(v[:found - 1], dtype=bool))
self.assertAllClose(v[found + 1:],
end_token * np.ones_like(v[found + 1:]))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Seq2seq layer operations for use in neural networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
__all__ = ["Decoder", "dynamic_decode"]
_transpose_batch_time = rnn._transpose_batch_time # pylint: disable=protected-access
_zero_state_tensors = rnn_cell_impl._zero_state_tensors # pylint: disable=protected-access
@six.add_metaclass(abc.ABCMeta)
class Decoder(object):
"""An RNN Decoder abstract interface object.
Concepts used by this interface:
- `inputs`: (structure of) tensors and TensorArrays that is passed as input to
the RNNCell composing the decoder, at each time step.
- `state`: (structure of) tensors and TensorArrays that is passed to the
RNNCell instance as the state.
- `finished`: boolean tensor telling whether each sequence in the batch is
finished.
- `outputs`: Instance of BasicDecoderOutput. Result of the decoding, at each
time step.
"""
@property
def batch_size(self):
"""The batch size of input values."""
raise NotImplementedError
@property
def output_size(self):
"""A (possibly nested tuple of...) integer[s] or `TensorShape` object[s]."""
raise NotImplementedError
@property
def output_dtype(self):
"""A (possibly nested tuple of...) dtype[s]."""
raise NotImplementedError
@abc.abstractmethod
def initialize(self, name=None):
"""Called before any decoding iterations.
    This method must compute initial input values and the initial state.
Args:
name: Name scope for any created operations.
Returns:
`(finished, initial_inputs, initial_state)`: initial values of
'finished' flags, inputs and state.
"""
raise NotImplementedError
@abc.abstractmethod
def step(self, time, inputs, state, name=None):
"""Called per step of decoding (but only once for dynamic decoding).
Args:
time: Scalar `int32` tensor. Current step number.
inputs: RNNCell input (possibly nested tuple of) tensor[s] for this time
step.
state: RNNCell state (possibly nested tuple of) tensor[s] from previous
time step.
name: Name scope for any created operations.
Returns:
`(outputs, next_state, next_inputs, finished)`: `outputs` is an object
containing the decoder output, `next_state` is a (structure of) state
tensors and TensorArrays, `next_inputs` is the tensor that should be used
as input for the next step, `finished` is a boolean tensor telling whether
the sequence is complete, for each sequence in the batch.
"""
raise NotImplementedError
def finalize(self, outputs, final_state, sequence_lengths):
"""Called after decoding iterations complete.
Args:
outputs: RNNCell outputs (possibly nested tuple of) tensor[s] for all time
steps.
final_state: RNNCell final state (possibly nested tuple of) tensor[s] for
last time step.
sequence_lengths: 1-D `int32` tensor containing lengths of each sequence.
Returns:
`(final_outputs, final_state)`: `final_outputs` is an object containing
the final decoder output, `final_state` is a (structure of) state tensors
and TensorArrays.
"""
raise NotImplementedError
@property
def tracks_own_finished(self):
"""Describes whether the Decoder keeps track of finished states.
Most decoders will emit a true/false `finished` value independently
at each time step. In this case, the `dynamic_decode` function keeps track
of which batch entries are already finished, and performs a logical OR to
    add newly finished entries to the finished set.
Some decoders, however, shuffle batches / beams between time steps and
`dynamic_decode` will mix up the finished state across these entries because
it does not track the reshuffle across time steps. In this case, it is
up to the decoder to declare that it will keep track of its own finished
state by setting this property to `True`.
Returns:
Python bool.
"""
return False
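# A minimal sketch of the `Decoder` contract documented above: `initialize`
# returns the first `(finished, inputs, state)` triple, and `step` runs the
# cell once, feeds its output back as the next input, and finishes every
# sequence after a fixed number of steps. The name `_EchoDecoder` and all
# parameter choices are illustrative assumptions, not part of the TensorFlow
# API; a real decoder would sample or embed its next inputs.
class _EchoDecoder(Decoder):
  """Toy decoder: feeds the cell output back as the next input."""
  def __init__(self, cell, initial_inputs, initial_state, max_steps):
    self._cell = cell
    self._initial_inputs = initial_inputs
    self._initial_state = initial_state
    self._max_steps = max_steps
  @property
  def batch_size(self):
    return array_ops.shape(self._initial_inputs)[0]
  @property
  def output_size(self):
    return self._cell.output_size
  @property
  def output_dtype(self):
    return dtypes.float32
  def initialize(self, name=None):
    finished = array_ops.fill([self.batch_size], False)
    return finished, self._initial_inputs, self._initial_state
  def step(self, time, inputs, state, name=None):
    outputs, next_state = self._cell(inputs, state)
    # Every sequence is marked finished once `max_steps` cell calls have run.
    finished = array_ops.fill(
        [self.batch_size], math_ops.greater_equal(time + 1, self._max_steps))
    return outputs, next_state, outputs, finished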
class BaseDecoder(layers.Layer):
"""An RNN Decoder that is based on a Keras layer.
Concepts used by this interface:
- `inputs`: (structure of) tensors and TensorArrays that is passed as input to
the RNNCell composing the decoder, at each time step.
- `state`: (structure of) tensors and TensorArrays that is passed to the
RNNCell instance as the state.
  - `memory`: (structure of) tensors that is usually the full output of the
    encoder, which will be used by the attention wrapper for the RNNCell.
- `finished`: boolean tensor telling whether each sequence in the batch is
finished.
- `outputs`: Instance of BasicDecoderOutput. Result of the decoding, at each
time step.
"""
def __init__(self,
output_time_major=False,
impute_finished=False,
maximum_iterations=None,
parallel_iterations=32,
swap_memory=False,
**kwargs):
self.output_time_major = output_time_major
self.impute_finished = impute_finished
self.maximum_iterations = maximum_iterations
self.parallel_iterations = parallel_iterations
self.swap_memory = swap_memory
super(BaseDecoder, self).__init__(**kwargs)
def call(self, inputs, initial_state=None, **kwargs):
init_kwargs = kwargs
init_kwargs["initial_state"] = initial_state
return dynamic_decode(self,
output_time_major=self.output_time_major,
impute_finished=self.impute_finished,
maximum_iterations=self.maximum_iterations,
parallel_iterations=self.parallel_iterations,
swap_memory=self.swap_memory,
decoder_init_input=inputs,
decoder_init_kwargs=init_kwargs)
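  # Since `call` delegates to `dynamic_decode`, concrete subclasses such as
  # `BasicDecoderV2` are invoked like any Keras layer, e.g.
  # `decoder(inputs, initial_state=state, sequence_length=lengths)`, and
  # return `(final_outputs, final_state, final_sequence_lengths)`.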
@property
def batch_size(self):
"""The batch size of input values."""
raise NotImplementedError
@property
def output_size(self):
"""A (possibly nested tuple of...) integer[s] or `TensorShape` object[s]."""
raise NotImplementedError
@property
def output_dtype(self):
"""A (possibly nested tuple of...) dtype[s]."""
raise NotImplementedError
def initialize(self, inputs, initial_state=None, **kwargs):
"""Called before any decoding iterations.
    This method must compute initial input values and the initial state.
Args:
      inputs: (structure of) tensors that contains the input for the decoder. In
        the normal case, it is a tensor with shape [batch, timestep, embedding].
initial_state: (structure of) tensors that contains the initial state for
the RNNCell.
      **kwargs: Other arguments that are passed in from the layer.call()
        method. It could contain items such as the input sequence_length, or
        masking for the input.
Returns:
`(finished, initial_inputs, initial_state)`: initial values of
'finished' flags, inputs and state.
"""
raise NotImplementedError
def step(self, time, inputs, state):
"""Called per step of decoding (but only once for dynamic decoding).
Args:
time: Scalar `int32` tensor. Current step number.
inputs: RNNCell input (possibly nested tuple of) tensor[s] for this time
step.
state: RNNCell state (possibly nested tuple of) tensor[s] from previous
time step.
Returns:
`(outputs, next_state, next_inputs, finished)`: `outputs` is an object
containing the decoder output, `next_state` is a (structure of) state
tensors and TensorArrays, `next_inputs` is the tensor that should be used
as input for the next step, `finished` is a boolean tensor telling whether
the sequence is complete, for each sequence in the batch.
"""
raise NotImplementedError
def finalize(self, outputs, final_state, sequence_lengths):
raise NotImplementedError
@property
def tracks_own_finished(self):
"""Describes whether the Decoder keeps track of finished states.
Most decoders will emit a true/false `finished` value independently
at each time step. In this case, the `dynamic_decode` function keeps track
of which batch entries are already finished, and performs a logical OR to
    add newly finished entries to the finished set.
Some decoders, however, shuffle batches / beams between time steps and
`dynamic_decode` will mix up the finished state across these entries because
it does not track the reshuffle across time steps. In this case, it is
up to the decoder to declare that it will keep track of its own finished
state by setting this property to `True`.
Returns:
Python bool.
"""
return False
# TODO(scottzhu): Add build/get_config/from_config and other layer methods.
def _create_zero_outputs(size, dtype, batch_size):
"""Create a zero outputs Tensor structure."""
def _create(s, d):
return _zero_state_tensors(s, batch_size, d)
return nest.map_structure(_create, size, dtype)
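# For example, a decoder whose `output_size` is `(cell_depth,
# TensorShape([]))` with `(float32, int32)` dtypes yields a pair of zero
# tensors shaped `[batch_size, cell_depth]` and `[batch_size]`.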
def dynamic_decode(decoder,
output_time_major=False,
impute_finished=False,
maximum_iterations=None,
parallel_iterations=32,
swap_memory=False,
scope=None,
**kwargs):
"""Perform dynamic decoding with `decoder`.
Calls initialize() once and step() repeatedly on the Decoder object.
Args:
decoder: A `Decoder` instance.
output_time_major: Python boolean. Default: `False` (batch major). If
`True`, outputs are returned as time major tensors (this mode is faster).
Otherwise, outputs are returned as batch major tensors (this adds extra
time to the computation).
impute_finished: Python boolean. If `True`, then states for batch
entries which are marked as finished get copied through and the
corresponding outputs get zeroed out. This causes some slowdown at
each time step, but ensures that the final state and outputs have
the correct values and that backprop ignores time steps that were
marked as finished.
maximum_iterations: `int32` scalar, maximum allowed number of decoding
steps. Default is `None` (decode until the decoder is fully done).
parallel_iterations: Argument passed to `tf.while_loop`.
swap_memory: Argument passed to `tf.while_loop`.
scope: Optional variable scope to use.
    **kwargs: dict, other keyword arguments for dynamic_decode. It might
      contain the arguments used to initialize a `BaseDecoder`, which takes
      all tensor inputs during call().
Returns:
`(final_outputs, final_state, final_sequence_lengths)`.
Raises:
TypeError: if `decoder` is not an instance of `Decoder`.
ValueError: if `maximum_iterations` is provided but is not a scalar.
"""
if not isinstance(decoder, (Decoder, BaseDecoder)):
raise TypeError("Expected decoder to be type Decoder, but saw: %s" %
type(decoder))
with variable_scope.variable_scope(scope, "decoder") as varscope:
# Determine context types.
ctxt = ops.get_default_graph()._get_control_flow_context() # pylint: disable=protected-access
is_xla = control_flow_util.GetContainingXLAContext(ctxt) is not None
in_while_loop = (
control_flow_util.GetContainingWhileContext(ctxt) is not None)
# Properly cache variable values inside the while_loop.
# Don't set a caching device when running in a loop, since it is possible
# that train steps could be wrapped in a tf.while_loop. In that scenario
# caching prevents forward computations in loop iterations from re-reading
# the updated weights.
if not context.executing_eagerly() and not in_while_loop:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
if maximum_iterations is not None:
maximum_iterations = ops.convert_to_tensor(
maximum_iterations, dtype=dtypes.int32, name="maximum_iterations")
if maximum_iterations.get_shape().ndims != 0:
raise ValueError("maximum_iterations must be a scalar")
if isinstance(decoder, Decoder):
initial_finished, initial_inputs, initial_state = decoder.initialize()
else:
# For BaseDecoder that takes tensor inputs during call.
decoder_init_input = kwargs.pop("decoder_init_input", None)
decoder_init_kwargs = kwargs.pop("decoder_init_kwargs", {})
initial_finished, initial_inputs, initial_state = decoder.initialize(
decoder_init_input, **decoder_init_kwargs)
zero_outputs = _create_zero_outputs(decoder.output_size,
decoder.output_dtype,
decoder.batch_size)
if is_xla and maximum_iterations is None:
raise ValueError("maximum_iterations is required for XLA compilation.")
if maximum_iterations is not None:
initial_finished = math_ops.logical_or(
initial_finished, 0 >= maximum_iterations)
initial_sequence_lengths = array_ops.zeros_like(
initial_finished, dtype=dtypes.int32)
initial_time = constant_op.constant(0, dtype=dtypes.int32)
def _shape(batch_size, from_shape):
if (not isinstance(from_shape, tensor_shape.TensorShape) or
from_shape.ndims == 0):
return None
else:
batch_size = tensor_util.constant_value(
ops.convert_to_tensor(
batch_size, name="batch_size"))
return tensor_shape.TensorShape([batch_size]).concatenate(from_shape)
dynamic_size = maximum_iterations is None or not is_xla
def _create_ta(s, d):
return tensor_array_ops.TensorArray(
dtype=d,
size=0 if dynamic_size else maximum_iterations,
dynamic_size=dynamic_size,
element_shape=_shape(decoder.batch_size, s))
initial_outputs_ta = nest.map_structure(_create_ta, decoder.output_size,
decoder.output_dtype)
def condition(unused_time, unused_outputs_ta, unused_state, unused_inputs,
finished, unused_sequence_lengths):
return math_ops.logical_not(math_ops.reduce_all(finished))
def body(time, outputs_ta, state, inputs, finished, sequence_lengths):
"""Internal while_loop body.
Args:
time: scalar int32 tensor.
outputs_ta: structure of TensorArray.
state: (structure of) state tensors and TensorArrays.
inputs: (structure of) input tensors.
finished: bool tensor (keeping track of what's finished).
sequence_lengths: int32 tensor (keeping track of time of finish).
Returns:
`(time + 1, outputs_ta, next_state, next_inputs, next_finished,
next_sequence_lengths)`.
"""
(next_outputs, decoder_state, next_inputs,
decoder_finished) = decoder.step(time, inputs, state)
if decoder.tracks_own_finished:
next_finished = decoder_finished
else:
next_finished = math_ops.logical_or(decoder_finished, finished)
next_sequence_lengths = array_ops.where(
math_ops.logical_not(finished),
array_ops.fill(array_ops.shape(sequence_lengths), time + 1),
sequence_lengths)
nest.assert_same_structure(state, decoder_state)
nest.assert_same_structure(outputs_ta, next_outputs)
nest.assert_same_structure(inputs, next_inputs)
# Zero out output values past finish
if impute_finished:
emit = nest.map_structure(
lambda out, zero: array_ops.where(finished, zero, out),
next_outputs,
zero_outputs)
else:
emit = next_outputs
# Copy through states past finish
def _maybe_copy_state(new, cur):
# TensorArrays and scalar states get passed through.
if isinstance(cur, tensor_array_ops.TensorArray):
pass_through = True
else:
new.set_shape(cur.shape)
pass_through = (new.shape.ndims == 0)
return new if pass_through else array_ops.where(finished, cur, new)
if impute_finished:
next_state = nest.map_structure(
_maybe_copy_state, decoder_state, state)
else:
next_state = decoder_state
outputs_ta = nest.map_structure(lambda ta, out: ta.write(time, out),
outputs_ta, emit)
return (time + 1, outputs_ta, next_state, next_inputs, next_finished,
next_sequence_lengths)
res = control_flow_ops.while_loop(
condition,
body,
loop_vars=(
initial_time,
initial_outputs_ta,
initial_state,
initial_inputs,
initial_finished,
initial_sequence_lengths,
),
parallel_iterations=parallel_iterations,
maximum_iterations=maximum_iterations,
swap_memory=swap_memory)
final_outputs_ta = res[1]
final_state = res[2]
final_sequence_lengths = res[5]
final_outputs = nest.map_structure(lambda ta: ta.stack(), final_outputs_ta)
try:
final_outputs, final_state = decoder.finalize(
final_outputs, final_state, final_sequence_lengths)
except NotImplementedError:
pass
if not output_time_major:
final_outputs = nest.map_structure(_transpose_batch_time, final_outputs)
return final_outputs, final_state, final_sequence_lengths
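# --- Editor's note (not part of the original module) --------------------
# A minimal usage sketch of `dynamic_decode`, assuming the companion
# `BasicDecoder` and `TrainingHelper` classes from this package; all sizes
# and names below are hypothetical, and the imports are local to keep the
# sketch self-contained.
def _dynamic_decode_usage_sketch():
  """Editor's sketch: decode a random batch under teacher forcing."""
  from tensorflow.contrib.seq2seq.python.ops import basic_decoder
  from tensorflow.contrib.seq2seq.python.ops import helper as helper_py
  from tensorflow.python.ops import random_ops
  from tensorflow.python.ops import rnn_cell
  batch_size, max_time, depth, num_units = 4, 7, 8, 16
  inputs = random_ops.random_uniform([batch_size, max_time, depth])
  cell = rnn_cell.LSTMCell(num_units)
  helper = helper_py.TrainingHelper(inputs, sequence_length=[7, 5, 3, 7])
  basic = basic_decoder.BasicDecoder(
      cell, helper, cell.zero_state(batch_size, inputs.dtype))
  # impute_finished=True zeroes outputs and copies states through once a
  # batch entry finishes, as documented above.
  outputs, state, lengths = dynamic_decode(basic, impute_finished=True)
  return outputs.rnn_output, state, lengths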
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/seq2seq/python/ops/decoder.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class of Decoders that may sample to generate the next input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.contrib.seq2seq.python.ops import helper as helper_py
from tensorflow.contrib.seq2seq.python.ops import sampler as sampler_py
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import layers
from tensorflow.python.layers import base as layers_base
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.util import nest
__all__ = [
"BasicDecoderOutput",
"BasicDecoder",
]
class BasicDecoderOutput(
collections.namedtuple("BasicDecoderOutput", ("rnn_output", "sample_id"))):
pass
class BasicDecoder(decoder.Decoder):
"""Basic sampling decoder."""
def __init__(self, cell, helper, initial_state, output_layer=None):
"""Initialize BasicDecoder.
Args:
cell: An `RNNCell` instance.
helper: A `Helper` instance.
initial_state: A (possibly nested tuple of...) tensors and TensorArrays.
The initial state of the RNNCell.
output_layer: (Optional) An instance of `tf.compat.v1.layers.Layer`, i.e.,
`tf.compat.v1.layers.Dense`. Optional layer to apply to the RNN output
prior to storing the result or sampling.
Raises:
TypeError: if `cell`, `helper` or `output_layer` have an incorrect type.
"""
rnn_cell_impl.assert_like_rnncell("cell", cell)
if not isinstance(helper, helper_py.Helper):
raise TypeError("helper must be a Helper, received: %s" % type(helper))
if (output_layer is not None and
not isinstance(output_layer, layers_base.Layer)):
raise TypeError("output_layer must be a Layer, received: %s" %
type(output_layer))
self._cell = cell
self._helper = helper
self._initial_state = initial_state
self._output_layer = output_layer
@property
def batch_size(self):
return self._helper.batch_size
def _rnn_output_size(self):
size = self._cell.output_size
if self._output_layer is None:
return size
else:
# To use layer's compute_output_shape, we need to convert the
# RNNCell's output_size entries into shapes with an unknown
# batch size. We then pass this through the layer's
# compute_output_shape and read off all but the first (batch)
# dimensions to get the output size of the rnn with the layer
# applied to the top.
output_shape_with_unknown_batch = nest.map_structure(
lambda s: tensor_shape.TensorShape([None]).concatenate(s), size)
layer_output_shape = self._output_layer.compute_output_shape(
output_shape_with_unknown_batch)
return nest.map_structure(lambda s: s[1:], layer_output_shape)
@property
def output_size(self):
# Return the cell output and the id
return BasicDecoderOutput(
rnn_output=self._rnn_output_size(),
sample_id=self._helper.sample_ids_shape)
@property
def output_dtype(self):
    # Assume the dtype of the cell output matches the dtype of the initial
    # state's first component. Return that dtype (in the output_size
    # structure) together with the sample_ids_dtype from the helper.
dtype = nest.flatten(self._initial_state)[0].dtype
return BasicDecoderOutput(
nest.map_structure(lambda _: dtype, self._rnn_output_size()),
self._helper.sample_ids_dtype)
def initialize(self, name=None):
"""Initialize the decoder.
Args:
name: Name scope for any created operations.
Returns:
`(finished, first_inputs, initial_state)`.
"""
return self._helper.initialize() + (self._initial_state,)
def step(self, time, inputs, state, name=None):
"""Perform a decoding step.
Args:
time: scalar `int32` tensor.
inputs: A (structure of) input tensors.
state: A (structure of) state tensors and TensorArrays.
name: Name scope for any created operations.
Returns:
`(outputs, next_state, next_inputs, finished)`.
"""
with ops.name_scope(name, "BasicDecoderStep", (time, inputs, state)):
cell_outputs, cell_state = self._cell(inputs, state)
if self._output_layer is not None:
cell_outputs = self._output_layer(cell_outputs)
sample_ids = self._helper.sample(
time=time, outputs=cell_outputs, state=cell_state)
(finished, next_inputs, next_state) = self._helper.next_inputs(
time=time,
outputs=cell_outputs,
state=cell_state,
sample_ids=sample_ids)
outputs = BasicDecoderOutput(cell_outputs, sample_ids)
return (outputs, next_state, next_inputs, finished)
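# --- Editor's note (not part of the original module) --------------------
# A hedged inference sketch for `BasicDecoder` with a `GreedyEmbeddingHelper`;
# the vocabulary size, start/end token ids, and all other sizes are
# hypothetical, and the imports are local to keep the sketch self-contained.
def _basic_decoder_greedy_inference_sketch():
  """Editor's sketch: greedy decoding from a start token until <eos>."""
  from tensorflow.python.layers import core as layers_core
  from tensorflow.python.ops import array_ops
  from tensorflow.python.ops import rnn_cell
  from tensorflow.python.ops import variable_scope
  batch_size, vocab_size, num_units = 4, 30, 16
  embedding = variable_scope.get_variable("embedding",
                                          [vocab_size, num_units])
  cell = rnn_cell.GRUCell(num_units)
  helper = helper_py.GreedyEmbeddingHelper(
      embedding,
      start_tokens=array_ops.fill([batch_size], 1),  # hypothetical <sos>=1
      end_token=2)                                   # hypothetical <eos>=2
  # The Dense output layer projects cell outputs to vocabulary logits.
  basic = BasicDecoder(
      cell, helper, cell.zero_state(batch_size, embedding.dtype.base_dtype),
      output_layer=layers_core.Dense(vocab_size, use_bias=False))
  outputs, _, lengths = decoder.dynamic_decode(basic, maximum_iterations=10)
  return outputs.sample_id, lengths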
class BasicDecoderV2(decoder.BaseDecoder):
"""Basic sampling decoder."""
def __init__(self, cell, sampler, output_layer=None, **kwargs):
"""Initialize BasicDecoder.
Args:
cell: An `RNNCell` instance.
sampler: A `Sampler` instance.
output_layer: (Optional) An instance of `tf.compat.v1.layers.Layer`, i.e.,
`tf.compat.v1.layers.Dense`. Optional layer to apply to the RNN output
prior to storing the result or sampling.
      **kwargs: Other keyword arguments for layer creation.
Raises:
      TypeError: if `cell`, `sampler` or `output_layer` have an incorrect type.
"""
rnn_cell_impl.assert_like_rnncell("cell", cell)
if not isinstance(sampler, sampler_py.Sampler):
raise TypeError("sampler must be a Sampler, received: %s" % (sampler,))
if (output_layer is not None and
not isinstance(output_layer, layers.Layer)):
raise TypeError("output_layer must be a Layer, received: %s" %
(output_layer,))
self.cell = cell
self.sampler = sampler
self.output_layer = output_layer
super(BasicDecoderV2, self).__init__(**kwargs)
def initialize(self, inputs, initial_state=None, **kwargs):
"""Initialize the decoder."""
    # Assume the dtype of the cell output matches the dtype of the initial
    # state's first component.
self._cell_dtype = nest.flatten(initial_state)[0].dtype
return self.sampler.initialize(inputs, **kwargs) + (initial_state,)
@property
def batch_size(self):
return self.sampler.batch_size
def _rnn_output_size(self):
size = tensor_shape.TensorShape(self.cell.output_size)
if self.output_layer is None:
return size
else:
# To use layer's compute_output_shape, we need to convert the
# RNNCell's output_size entries into shapes with an unknown
# batch size. We then pass this through the layer's
# compute_output_shape and read off all but the first (batch)
# dimensions to get the output size of the rnn with the layer
# applied to the top.
output_shape_with_unknown_batch = nest.map_structure(
lambda s: tensor_shape.TensorShape([None]).concatenate(s), size)
layer_output_shape = self.output_layer.compute_output_shape(
output_shape_with_unknown_batch)
return nest.map_structure(lambda s: s[1:], layer_output_shape)
@property
def output_size(self):
# Return the cell output and the id
return BasicDecoderOutput(
rnn_output=self._rnn_output_size(),
sample_id=self.sampler.sample_ids_shape)
@property
def output_dtype(self):
    # Assume the dtype of the cell output matches the dtype of the initial
    # state's first component. Return that dtype (in the output_size
    # structure) together with the sample_ids_dtype from the sampler.
dtype = self._cell_dtype
return BasicDecoderOutput(
nest.map_structure(lambda _: dtype, self._rnn_output_size()),
self.sampler.sample_ids_dtype)
def step(self, time, inputs, state):
"""Perform a decoding step.
Args:
time: scalar `int32` tensor.
inputs: A (structure of) input tensors.
state: A (structure of) state tensors and TensorArrays.
Returns:
`(outputs, next_state, next_inputs, finished)`.
"""
cell_outputs, cell_state = self.cell(inputs, state)
if self.output_layer is not None:
cell_outputs = self.output_layer(cell_outputs)
sample_ids = self.sampler.sample(
time=time, outputs=cell_outputs, state=cell_state)
(finished, next_inputs, next_state) = self.sampler.next_inputs(
time=time,
outputs=cell_outputs,
state=cell_state,
sample_ids=sample_ids)
outputs = BasicDecoderOutput(cell_outputs, sample_ids)
return (outputs, next_state, next_inputs, finished)
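# --- Editor's note (not part of the original module) --------------------
# A hedged sketch of driving `BasicDecoderV2` through `decoder.dynamic_decode`,
# which forwards `decoder_init_input` / `decoder_init_kwargs` to
# `initialize()` (see decoder.py). It assumes the sampler module provides a
# `TrainingSampler` whose `initialize()` accepts a `sequence_length` keyword;
# all sizes are hypothetical.
def _basic_decoder_v2_sketch():
  """Editor's sketch: teacher-forced decoding with a sampler-based decoder."""
  from tensorflow.python.ops import random_ops
  from tensorflow.python.ops import rnn_cell
  batch_size, max_time, depth, num_units = 4, 7, 8, 16
  inputs = random_ops.random_uniform([batch_size, max_time, depth])
  cell = rnn_cell.LSTMCell(num_units)
  basic = BasicDecoderV2(cell, sampler_py.TrainingSampler())
  outputs, state, lengths = decoder.dynamic_decode(
      basic,
      decoder_init_input=inputs,
      decoder_init_kwargs={
          "initial_state": cell.zero_state(batch_size, inputs.dtype),
          "sequence_length": [7, 5, 3, 7]})
  return outputs.rnn_output, state, lengths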
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/seq2seq/python/ops/basic_decoder.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/seq2seq/python/ops/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Seq2seq loss operations for use in sequence models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.keras.losses import Loss
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
__all__ = ["sequence_loss", "SequenceLoss"]
def sequence_loss(logits,
targets,
weights,
average_across_timesteps=True,
average_across_batch=True,
sum_over_timesteps=False,
sum_over_batch=False,
softmax_loss_function=None,
name=None):
"""Weighted cross-entropy loss for a sequence of logits.
Depending on the values of `average_across_timesteps` / `sum_over_timesteps`
and `average_across_batch` / `sum_over_batch`, the return Tensor will have
rank 0, 1, or 2 as these arguments reduce the cross-entropy at each target,
which has shape `[batch_size, sequence_length]`, over their respective
dimensions. For example, if `average_across_timesteps` is `True` and
`average_across_batch` is `False`, then the return Tensor will have shape
`[batch_size]`.
  Note that `average_across_timesteps` and `sum_over_timesteps` cannot be True
  at the same time. The same applies to `average_across_batch` and
  `sum_over_batch`. The recommended loss reduction in TF 2.0 has been changed
  to sum_over, instead of the weighted average. Users are recommended to use
  `sum_over_timesteps` and `sum_over_batch` for reduction.
Args:
logits: A Tensor of shape
`[batch_size, sequence_length, num_decoder_symbols]` and dtype float.
The logits correspond to the prediction across all classes at each
timestep.
targets: A Tensor of shape `[batch_size, sequence_length]` and dtype
int. The target represents the true class at each timestep.
weights: A Tensor of shape `[batch_size, sequence_length]` and dtype
float. `weights` constitutes the weighting of each prediction in the
sequence. When using `weights` as masking, set all valid timesteps to 1
and all padded timesteps to 0, e.g. a mask returned by `tf.sequence_mask`.
average_across_timesteps: If set, sum the cost across the sequence
dimension and divide the cost by the total label weight across timesteps.
average_across_batch: If set, sum the cost across the batch dimension and
divide the returned cost by the batch size.
    sum_over_timesteps: If set, sum the cost across the sequence dimension and
      divide by the size of the sequence. Note that any element with 0 weights
      will be excluded from the size calculation.
    sum_over_batch: If set, sum the cost across the batch dimension and divide
      the total cost by the batch size. Note that any element with 0 weights
      will be excluded from the size calculation.
softmax_loss_function: Function (labels, logits) -> loss-batch
to be used instead of the standard softmax (the default if this is None).
**Note that to avoid confusion, it is required for the function to accept
named arguments.**
name: Optional name for this operation, defaults to "sequence_loss".
Returns:
    A float Tensor of rank 0, 1, or 2 depending on the reduction
    (`average_across_*` / `sum_over_*`) arguments. By default, it has rank 0
    (scalar) and is the weighted average cross-entropy (log-perplexity) per
    symbol.
Raises:
ValueError: logits does not have 3 dimensions or targets does not have 2
dimensions or weights does not have 2 dimensions.
"""
if len(logits.get_shape()) != 3:
raise ValueError("Logits must be a "
"[batch_size x sequence_length x logits] tensor")
if len(targets.get_shape()) != 2:
raise ValueError("Targets must be a [batch_size x sequence_length] tensor")
if len(weights.get_shape()) != 2:
raise ValueError("Weights must be a [batch_size x sequence_length] tensor")
if average_across_timesteps and sum_over_timesteps:
raise ValueError("average_across_timesteps and sum_over_timesteps cannot "
"be set to True at same time.")
if average_across_batch and sum_over_batch:
raise ValueError("average_across_batch and sum_over_batch cannot be set "
"to True at same time.")
with ops.name_scope(name, "sequence_loss", [logits, targets, weights]):
num_classes = array_ops.shape(logits)[2]
logits_flat = array_ops.reshape(logits, [-1, num_classes])
targets = array_ops.reshape(targets, [-1])
if softmax_loss_function is None:
crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=targets, logits=logits_flat)
else:
crossent = softmax_loss_function(labels=targets, logits=logits_flat)
crossent *= array_ops.reshape(weights, [-1])
if average_across_timesteps and average_across_batch:
crossent = math_ops.reduce_sum(crossent)
total_size = math_ops.reduce_sum(weights)
crossent = math_ops.div_no_nan(crossent, total_size)
elif sum_over_timesteps and sum_over_batch:
crossent = math_ops.reduce_sum(crossent)
total_count = math_ops.cast(math_ops.count_nonzero(weights),
crossent.dtype)
crossent = math_ops.div_no_nan(crossent, total_count)
else:
crossent = array_ops.reshape(crossent, array_ops.shape(logits)[0:2])
if average_across_timesteps or average_across_batch:
reduce_axis = [0] if average_across_batch else [1]
crossent = math_ops.reduce_sum(crossent, axis=reduce_axis)
total_size = math_ops.reduce_sum(weights, axis=reduce_axis)
crossent = math_ops.div_no_nan(crossent, total_size)
elif sum_over_timesteps or sum_over_batch:
reduce_axis = [0] if sum_over_batch else [1]
crossent = math_ops.reduce_sum(crossent, axis=reduce_axis)
total_count = math_ops.cast(
math_ops.count_nonzero(weights, axis=reduce_axis),
dtype=crossent.dtype)
crossent = math_ops.div_no_nan(crossent, total_count)
return crossent
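# --- Editor's note (not part of the original module) --------------------
# A small sketch of the reduction semantics documented above: with the default
# average_across_* flags, the result is a scalar log-perplexity, and padding
# is masked out by a `sequence_mask`-style weight tensor. All sizes are
# hypothetical, and the imports are local to keep the sketch self-contained.
def _sequence_loss_sketch():
  """Editor's sketch: scalar masked loss for a padded batch."""
  from tensorflow.python.framework import dtypes
  from tensorflow.python.ops import random_ops
  batch_size, max_time, vocab = 4, 7, 30
  logits = random_ops.random_uniform([batch_size, max_time, vocab])
  targets = random_ops.random_uniform(
      [batch_size, max_time], maxval=vocab, dtype=dtypes.int32)
  # Valid timesteps weigh 1.0 and padded timesteps weigh 0.0.
  weights = array_ops.sequence_mask(
      [7, 5, 3, 7], maxlen=max_time, dtype=logits.dtype)
  return sequence_loss(logits, targets, weights)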
class SequenceLoss(Loss):
"""Weighted cross-entropy loss for a sequence of logits."""
def __init__(self,
average_across_timesteps=False,
average_across_batch=False,
sum_over_timesteps=True,
sum_over_batch=True,
softmax_loss_function=None,
name=None):
super(SequenceLoss, self).__init__(name=name)
self.average_across_timesteps = average_across_timesteps
self.average_across_batch = average_across_batch
self.sum_over_timesteps = sum_over_timesteps
self.sum_over_batch = sum_over_batch
self.softmax_loss_function = softmax_loss_function
def __call__(self, y_true, y_pred, sample_weight=None):
"""Override the parent __call__ to have a customized reduce behavior."""
return sequence_loss(y_pred, y_true, sample_weight,
average_across_timesteps=self.average_across_timesteps,
average_across_batch=self.average_across_batch,
sum_over_timesteps=self.sum_over_timesteps,
sum_over_batch=self.sum_over_batch,
softmax_loss_function=self.softmax_loss_function,
name=self.name)
def call(self, y_true, y_pred):
# Skip this method since the __call__ contains real implementation.
pass
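# --- Editor's note (not part of the original module) --------------------
# `SequenceLoss` defaults to the sum_over_* reduction recommended above for
# TF 2.0-style training. A hedged usage sketch (tensor names hypothetical):
#   loss_obj = SequenceLoss()
#   loss = loss_obj(y_true=targets, y_pred=logits, sample_weight=weights)
# Here `targets` is `[batch, time]` int, `logits` is `[batch, time, vocab]`
# float, and `weights` masks padding exactly as in `sequence_loss` above.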
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/seq2seq/python/ops/loss.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library of helpers for use with SamplingDecoders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.util import nest
__all__ = [
"Helper",
"TrainingHelper",
"GreedyEmbeddingHelper",
"SampleEmbeddingHelper",
"CustomHelper",
"ScheduledEmbeddingTrainingHelper",
"ScheduledOutputTrainingHelper",
"InferenceHelper",
]
_transpose_batch_time = decoder._transpose_batch_time # pylint: disable=protected-access
# The following sample functions (_call_sampler, bernoulli_sample,
# categorical_sample) mimic TensorFlow Probability distribution semantics.
def _call_sampler(sample_n_fn, sample_shape, name=None):
"""Reshapes vector of samples."""
with ops.name_scope(name, "call_sampler", values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
# Ensure sample_shape is a vector (vs just a scalar).
pad = math_ops.cast(math_ops.equal(array_ops.rank(sample_shape), 0),
dtypes.int32)
sample_shape = array_ops.reshape(
sample_shape,
array_ops.pad(array_ops.shape(sample_shape),
paddings=[[pad, 0]],
constant_values=1))
samples = sample_n_fn(math_ops.reduce_prod(sample_shape))
batch_event_shape = array_ops.shape(samples)[1:]
final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
return array_ops.reshape(samples, final_shape)
def bernoulli_sample(probs=None, logits=None, dtype=dtypes.int32,
sample_shape=(), seed=None):
"""Samples from Bernoulli distribution."""
if probs is None:
probs = math_ops.sigmoid(logits, name="probs")
else:
probs = ops.convert_to_tensor(probs, name="probs")
batch_shape_tensor = array_ops.shape(probs)
def _sample_n(n):
"""Sample vector of Bernoullis."""
new_shape = array_ops.concat([[n], batch_shape_tensor], 0)
uniform = random_ops.random_uniform(
new_shape, seed=seed, dtype=probs.dtype)
return math_ops.cast(math_ops.less(uniform, probs), dtype)
return _call_sampler(_sample_n, sample_shape)
def categorical_sample(logits, dtype=dtypes.int32,
sample_shape=(), seed=None):
"""Samples from categorical distribution."""
logits = ops.convert_to_tensor(logits, name="logits")
event_size = array_ops.shape(logits)[-1]
batch_shape_tensor = array_ops.shape(logits)[:-1]
def _sample_n(n):
"""Sample vector of categoricals."""
if logits.shape.ndims == 2:
logits_2d = logits
else:
logits_2d = array_ops.reshape(logits, [-1, event_size])
sample_dtype = dtypes.int64 if logits.dtype.size > 4 else dtypes.int32
draws = random_ops.multinomial(
logits_2d, n, seed=seed, output_dtype=sample_dtype)
draws = array_ops.reshape(
array_ops.transpose(draws),
array_ops.concat([[n], batch_shape_tensor], 0))
return math_ops.cast(draws, dtype)
return _call_sampler(_sample_n, sample_shape)
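# --- Editor's note (not part of the original module) --------------------
# A hedged sketch of the two samplers above: `bernoulli_sample` draws 0/1
# decisions and `categorical_sample` draws class ids from logits, with
# `sample_shape` prepending draw dimensions as in TensorFlow Probability.
# All sizes are hypothetical.
def _sampling_utilities_sketch():
  """Editor's sketch: shapes produced by the sampling utilities."""
  logits = random_ops.random_uniform([4, 30])  # [batch, num_classes]
  class_ids = categorical_sample(logits)       # -> [4] int32 class ids
  coin_flips = bernoulli_sample(probs=0.25,      # scalar prob, broadcast
                                sample_shape=4)  # over 4 draws -> [4] 0/1
  return class_ids, coin_flips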
def _unstack_ta(inp):
return tensor_array_ops.TensorArray(
dtype=inp.dtype, size=array_ops.shape(inp)[0],
element_shape=inp.get_shape()[1:]).unstack(inp)
@six.add_metaclass(abc.ABCMeta)
class Helper(object):
"""Interface for implementing sampling in seq2seq decoders.
Helper instances are used by `BasicDecoder`.
"""
@abc.abstractproperty
def batch_size(self):
"""Batch size of tensor returned by `sample`.
Returns a scalar int32 tensor.
"""
raise NotImplementedError("batch_size has not been implemented")
@abc.abstractproperty
def sample_ids_shape(self):
"""Shape of tensor returned by `sample`, excluding the batch dimension.
Returns a `TensorShape`.
"""
raise NotImplementedError("sample_ids_shape has not been implemented")
@abc.abstractproperty
def sample_ids_dtype(self):
"""DType of tensor returned by `sample`.
Returns a DType.
"""
raise NotImplementedError("sample_ids_dtype has not been implemented")
@abc.abstractmethod
def initialize(self, name=None):
"""Returns `(initial_finished, initial_inputs)`."""
pass
@abc.abstractmethod
def sample(self, time, outputs, state, name=None):
"""Returns `sample_ids`."""
pass
@abc.abstractmethod
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""Returns `(finished, next_inputs, next_state)`."""
pass
class CustomHelper(Helper):
"""Base abstract class that allows the user to customize sampling."""
def __init__(self, initialize_fn, sample_fn, next_inputs_fn,
sample_ids_shape=None, sample_ids_dtype=None):
"""Initializer.
Args:
initialize_fn: callable that returns `(finished, next_inputs)`
for the first iteration.
sample_fn: callable that takes `(time, outputs, state)`
and emits tensor `sample_ids`.
next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`
and emits `(finished, next_inputs, next_state)`.
sample_ids_shape: Either a list of integers, or a 1-D Tensor of type
`int32`, the shape of each value in the `sample_ids` batch. Defaults to
a scalar.
sample_ids_dtype: The dtype of the `sample_ids` tensor. Defaults to int32.
"""
self._initialize_fn = initialize_fn
self._sample_fn = sample_fn
self._next_inputs_fn = next_inputs_fn
self._batch_size = None
self._sample_ids_shape = tensor_shape.TensorShape(sample_ids_shape or [])
self._sample_ids_dtype = sample_ids_dtype or dtypes.int32
@property
def batch_size(self):
if self._batch_size is None:
raise ValueError("batch_size accessed before initialize was called")
return self._batch_size
@property
def sample_ids_shape(self):
return self._sample_ids_shape
@property
def sample_ids_dtype(self):
return self._sample_ids_dtype
def initialize(self, name=None):
with ops.name_scope(name, "%sInitialize" % type(self).__name__):
(finished, next_inputs) = self._initialize_fn()
if self._batch_size is None:
self._batch_size = array_ops.size(finished)
return (finished, next_inputs)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(
name, "%sSample" % type(self).__name__, (time, outputs, state)):
return self._sample_fn(time=time, outputs=outputs, state=state)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(
name, "%sNextInputs" % type(self).__name__, (time, outputs, state)):
return self._next_inputs_fn(
time=time, outputs=outputs, state=state, sample_ids=sample_ids)
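# --- Editor's note (not part of the original module) --------------------
# A hedged sketch wiring up the three callables a `CustomHelper` expects. This
# toy helper feeds the previous RNN output straight back as the next input
# (assuming the cell output depth matches the input depth) and never finishes
# on its own, so decoding must be bounded by `maximum_iterations`; all names
# are hypothetical.
def _custom_helper_sketch(start_inputs):
  """Editor's sketch: build a CustomHelper from plain callables."""
  batch_size = array_ops.shape(start_inputs)[0]
  def _initialize_fn():
    return array_ops.tile([False], [batch_size]), start_inputs
  def _sample_fn(time, outputs, state):
    del time, state  # unused in this sketch
    return math_ops.argmax(outputs, axis=-1, output_type=dtypes.int32)
  def _next_inputs_fn(time, outputs, state, sample_ids):
    del time, sample_ids  # unused in this sketch
    never_finished = array_ops.tile([False], [batch_size])
    return never_finished, outputs, state
  return CustomHelper(_initialize_fn, _sample_fn, _next_inputs_fn)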
class TrainingHelper(Helper):
"""A helper for use during training. Only reads inputs.
Returned sample_ids are the argmax of the RNN output logits.
"""
def __init__(self, inputs, sequence_length, time_major=False, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
name: Name scope for any created operations.
Raises:
ValueError: if `sequence_length` is not a 1D tensor.
"""
with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
inputs = ops.convert_to_tensor(inputs, name="inputs")
self._inputs = inputs
if not time_major:
inputs = nest.map_structure(_transpose_batch_time, inputs)
self._input_tas = nest.map_structure(_unstack_ta, inputs)
self._sequence_length = ops.convert_to_tensor(
sequence_length, name="sequence_length")
if self._sequence_length.get_shape().ndims != 1:
raise ValueError(
"Expected sequence_length to be a vector, but received shape: %s" %
self._sequence_length.get_shape())
self._zero_inputs = nest.map_structure(
lambda inp: array_ops.zeros_like(inp[0, :]), inputs)
self._batch_size = array_ops.size(sequence_length)
@property
def inputs(self):
return self._inputs
@property
def sequence_length(self):
return self._sequence_length
@property
def batch_size(self):
return self._batch_size
@property
def sample_ids_shape(self):
return tensor_shape.TensorShape([])
@property
def sample_ids_dtype(self):
return dtypes.int32
def initialize(self, name=None):
with ops.name_scope(name, "TrainingHelperInitialize"):
finished = math_ops.equal(0, self._sequence_length)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: self._zero_inputs,
lambda: nest.map_structure(lambda inp: inp.read(0), self._input_tas))
return (finished, next_inputs)
def sample(self, time, outputs, name=None, **unused_kwargs):
with ops.name_scope(name, "TrainingHelperSample", [time, outputs]):
sample_ids = math_ops.cast(
math_ops.argmax(outputs, axis=-1), dtypes.int32)
return sample_ids
def next_inputs(self, time, outputs, state, name=None, **unused_kwargs):
"""next_inputs_fn for TrainingHelper."""
with ops.name_scope(name, "TrainingHelperNextInputs",
[time, outputs, state]):
next_time = time + 1
finished = (next_time >= self._sequence_length)
all_finished = math_ops.reduce_all(finished)
def read_from_ta(inp):
return inp.read(next_time)
next_inputs = control_flow_ops.cond(
all_finished, lambda: self._zero_inputs,
lambda: nest.map_structure(read_from_ta, self._input_tas))
return (finished, next_inputs, state)
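# --- Editor's note (not part of the original module) --------------------
# Hedged usage sketch for `TrainingHelper` (teacher forcing); tensor names
# are hypothetical:
#   helper = TrainingHelper(decoder_inputs, sequence_length=lengths)
# `decoder_inputs` is `[batch, time, depth]` when batch-major. `sample()`
# returns argmax ids for bookkeeping only; `next_inputs()` always reads the
# next ground-truth timestep until each entry's length is exhausted.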
class ScheduledEmbeddingTrainingHelper(TrainingHelper):
"""A training helper that adds scheduled sampling.
Returns -1s for sample_ids where no sampling took place; valid sample id
values elsewhere.
"""
def __init__(self, inputs, sequence_length, embedding, sampling_probability,
time_major=False, seed=None, scheduling_seed=None, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
sampling_probability: A 0D `float32` tensor: the probability of sampling
categorically from the output ids instead of reading directly from the
inputs.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
scheduling_seed: The schedule decision rule sampling seed.
name: Name scope for any created operations.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector.
"""
with ops.name_scope(name, "ScheduledEmbeddingSamplingWrapper",
[embedding, sampling_probability]):
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._sampling_probability = ops.convert_to_tensor(
sampling_probability, name="sampling_probability")
if self._sampling_probability.get_shape().ndims not in (0, 1):
raise ValueError(
"sampling_probability must be either a scalar or a vector. "
"saw shape: %s" % (self._sampling_probability.get_shape()))
self._seed = seed
self._scheduling_seed = scheduling_seed
super(ScheduledEmbeddingTrainingHelper, self).__init__(
inputs=inputs,
sequence_length=sequence_length,
time_major=time_major,
name=name)
def initialize(self, name=None):
return super(ScheduledEmbeddingTrainingHelper, self).initialize(name=name)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
[time, outputs, state]):
# Return -1s where we did not sample, and sample_ids elsewhere
select_sample = bernoulli_sample(
probs=self._sampling_probability,
dtype=dtypes.bool,
sample_shape=self.batch_size,
seed=self._scheduling_seed)
return array_ops.where(
select_sample,
categorical_sample(logits=outputs, seed=self._seed),
gen_array_ops.fill([self.batch_size], -1))
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperNextInputs",
[time, outputs, state, sample_ids]):
(finished, base_next_inputs, state) = (
super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
time=time,
outputs=outputs,
state=state,
sample_ids=sample_ids,
name=name))
def maybe_sample():
"""Perform scheduled sampling."""
where_sampling = math_ops.cast(
array_ops.where(sample_ids > -1), dtypes.int32)
where_not_sampling = math_ops.cast(
array_ops.where(sample_ids <= -1), dtypes.int32)
sample_ids_sampling = array_ops.gather_nd(sample_ids, where_sampling)
inputs_not_sampling = array_ops.gather_nd(
base_next_inputs, where_not_sampling)
sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
base_shape = array_ops.shape(base_next_inputs)
return (array_ops.scatter_nd(indices=where_sampling,
updates=sampled_next_inputs,
shape=base_shape)
+ array_ops.scatter_nd(indices=where_not_sampling,
updates=inputs_not_sampling,
shape=base_shape))
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: base_next_inputs, maybe_sample)
return (finished, next_inputs, state)
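# --- Editor's note (not part of the original module) --------------------
# Hedged sketch of scheduled sampling (Bengio et al., 2015) with the helper
# above; `embedding_matrix` and the 0.25 mixing probability are hypothetical:
#   helper = ScheduledEmbeddingTrainingHelper(
#       inputs=decoder_inputs, sequence_length=lengths,
#       embedding=embedding_matrix, sampling_probability=0.25)
# At each step, each batch entry independently either reads the next
# ground-truth input or embeds an id sampled from the current logits.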
class ScheduledOutputTrainingHelper(TrainingHelper):
"""A training helper that adds scheduled sampling directly to outputs.
Returns False for sample_ids where no sampling took place; True elsewhere.
"""
def __init__(self, inputs, sequence_length, sampling_probability,
time_major=False, seed=None, next_inputs_fn=None,
auxiliary_inputs=None, name=None):
"""Initializer.
Args:
inputs: A (structure) of input tensors.
sequence_length: An int32 vector tensor.
sampling_probability: A 0D `float32` tensor: the probability of sampling
from the outputs instead of reading directly from the inputs.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
next_inputs_fn: (Optional) callable to apply to the RNN outputs to create
the next input when sampling. If `None` (default), the RNN outputs will
be used as the next inputs.
auxiliary_inputs: An optional (structure of) auxiliary input tensors with
a shape that matches `inputs` in all but (potentially) the final
dimension. These tensors will be concatenated to the sampled output or
the `inputs` when not sampling for use as the next input.
name: Name scope for any created operations.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector.
"""
with ops.name_scope(name, "ScheduledOutputTrainingHelper",
[inputs, auxiliary_inputs, sampling_probability]):
self._sampling_probability = ops.convert_to_tensor(
sampling_probability, name="sampling_probability")
if self._sampling_probability.get_shape().ndims not in (0, 1):
raise ValueError(
"sampling_probability must be either a scalar or a vector. "
"saw shape: %s" % (self._sampling_probability.get_shape()))
if auxiliary_inputs is None:
maybe_concatenated_inputs = inputs
else:
inputs = ops.convert_to_tensor(inputs, name="inputs")
auxiliary_inputs = ops.convert_to_tensor(
auxiliary_inputs, name="auxiliary_inputs")
maybe_concatenated_inputs = nest.map_structure(
lambda x, y: array_ops.concat((x, y), -1),
inputs, auxiliary_inputs)
if not time_major:
auxiliary_inputs = nest.map_structure(
_transpose_batch_time, auxiliary_inputs)
self._auxiliary_input_tas = (
nest.map_structure(_unstack_ta, auxiliary_inputs)
if auxiliary_inputs is not None else None)
self._seed = seed
self._next_inputs_fn = next_inputs_fn
super(ScheduledOutputTrainingHelper, self).__init__(
inputs=maybe_concatenated_inputs,
sequence_length=sequence_length,
time_major=time_major,
name=name)
def initialize(self, name=None):
return super(ScheduledOutputTrainingHelper, self).initialize(name=name)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(name, "ScheduledOutputTrainingHelperSample",
[time, outputs, state]):
return bernoulli_sample(
probs=self._sampling_probability,
sample_shape=self.batch_size,
seed=self._seed)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(name, "ScheduledOutputTrainingHelperNextInputs",
[time, outputs, state, sample_ids]):
(finished, base_next_inputs, state) = (
super(ScheduledOutputTrainingHelper, self).next_inputs(
time=time,
outputs=outputs,
state=state,
sample_ids=sample_ids,
name=name))
sample_ids = math_ops.cast(sample_ids, dtypes.bool)
def maybe_sample():
"""Perform scheduled sampling."""
def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
"""Concatenate outputs with auxiliary inputs, if they exist."""
if self._auxiliary_input_tas is None:
return outputs_
next_time = time + 1
auxiliary_inputs = nest.map_structure(
lambda ta: ta.read(next_time), self._auxiliary_input_tas)
if indices is not None:
auxiliary_inputs = array_ops.gather_nd(auxiliary_inputs, indices)
return nest.map_structure(
lambda x, y: array_ops.concat((x, y), -1),
outputs_, auxiliary_inputs)
if self._next_inputs_fn is None:
return array_ops.where(
sample_ids, maybe_concatenate_auxiliary_inputs(outputs),
base_next_inputs)
where_sampling = math_ops.cast(
array_ops.where(sample_ids), dtypes.int32)
where_not_sampling = math_ops.cast(
array_ops.where(math_ops.logical_not(sample_ids)), dtypes.int32)
outputs_sampling = array_ops.gather_nd(outputs, where_sampling)
inputs_not_sampling = array_ops.gather_nd(base_next_inputs,
where_not_sampling)
sampled_next_inputs = maybe_concatenate_auxiliary_inputs(
self._next_inputs_fn(outputs_sampling), where_sampling)
base_shape = array_ops.shape(base_next_inputs)
return (array_ops.scatter_nd(indices=where_sampling,
updates=sampled_next_inputs,
shape=base_shape)
+ array_ops.scatter_nd(indices=where_not_sampling,
updates=inputs_not_sampling,
shape=base_shape))
all_finished = math_ops.reduce_all(finished)
no_samples = math_ops.logical_not(math_ops.reduce_any(sample_ids))
next_inputs = control_flow_ops.cond(
math_ops.logical_or(all_finished, no_samples),
lambda: base_next_inputs, maybe_sample)
return (finished, next_inputs, state)
class GreedyEmbeddingHelper(Helper):
"""A helper for use during inference.
Uses the argmax of the output (treated as logits) and passes the
result through an embedding layer to get the next input.
"""
def __init__(self, embedding, start_tokens, end_token):
"""Initializer.
Args:
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`. The returned tensor
will be passed to the decoder input.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
Raises:
ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a
scalar.
"""
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._start_tokens = ops.convert_to_tensor(
start_tokens, dtype=dtypes.int32, name="start_tokens")
self._end_token = ops.convert_to_tensor(
end_token, dtype=dtypes.int32, name="end_token")
if self._start_tokens.get_shape().ndims != 1:
raise ValueError("start_tokens must be a vector")
self._batch_size = array_ops.size(start_tokens)
if self._end_token.get_shape().ndims != 0:
raise ValueError("end_token must be a scalar")
self._start_inputs = self._embedding_fn(self._start_tokens)
@property
def batch_size(self):
return self._batch_size
@property
def sample_ids_shape(self):
return tensor_shape.TensorShape([])
@property
def sample_ids_dtype(self):
return dtypes.int32
def initialize(self, name=None):
finished = array_ops.tile([False], [self._batch_size])
return (finished, self._start_inputs)
def sample(self, time, outputs, state, name=None):
"""sample for GreedyEmbeddingHelper."""
del time, state # unused by sample_fn
# Outputs are logits, use argmax to get the most probable id
if not isinstance(outputs, ops.Tensor):
raise TypeError("Expected outputs to be a single Tensor, got: %s" %
type(outputs))
sample_ids = math_ops.argmax(outputs, axis=-1, output_type=dtypes.int32)
return sample_ids
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""next_inputs_fn for GreedyEmbeddingHelper."""
del time, outputs # unused by next_inputs_fn
finished = math_ops.equal(sample_ids, self._end_token)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished,
# If we're finished, the next_inputs value doesn't matter
lambda: self._start_inputs,
lambda: self._embedding_fn(sample_ids))
return (finished, next_inputs, state)
class SampleEmbeddingHelper(GreedyEmbeddingHelper):
"""A helper for use during inference.
Uses sampling (from a distribution) instead of argmax and passes the
result through an embedding layer to get the next input.
"""
def __init__(self, embedding, start_tokens, end_token,
softmax_temperature=None, seed=None):
"""Initializer.
Args:
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`. The returned tensor
will be passed to the decoder input.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
softmax_temperature: (Optional) `float32` scalar, value to divide the
logits by before computing the softmax. Larger values (above 1.0) result
in more random samples, while smaller values push the sampling
distribution towards the argmax. Must be strictly greater than 0.
Defaults to 1.0.
seed: (Optional) The sampling seed.
Raises:
ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a
scalar.
"""
super(SampleEmbeddingHelper, self).__init__(
embedding, start_tokens, end_token)
self._softmax_temperature = softmax_temperature
self._seed = seed
def sample(self, time, outputs, state, name=None):
"""sample for SampleEmbeddingHelper."""
del time, state # unused by sample_fn
# Outputs are logits, we sample instead of argmax (greedy).
if not isinstance(outputs, ops.Tensor):
raise TypeError("Expected outputs to be a single Tensor, got: %s" %
type(outputs))
if self._softmax_temperature is None:
logits = outputs
else:
logits = outputs / self._softmax_temperature
sample_ids = categorical_sample(logits=logits, seed=self._seed)
return sample_ids
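# --- Editor's note (not part of the original module) --------------------
# Hedged sketch contrasting the two inference helpers above; token ids and
# the temperature value are hypothetical:
#   greedy = GreedyEmbeddingHelper(embedding, start_tokens, end_token=2)
#   sampled = SampleEmbeddingHelper(embedding, start_tokens, end_token=2,
#                                   softmax_temperature=0.8)
# A temperature below 1.0 sharpens the distribution toward the argmax, while
# a value above 1.0 flattens it and increases sample diversity.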
class InferenceHelper(Helper):
"""A helper to use during inference with a custom sampling function."""
def __init__(self, sample_fn, sample_shape, sample_dtype,
start_inputs, end_fn, next_inputs_fn=None):
"""Initializer.
Args:
sample_fn: A callable that takes `outputs` and emits tensor `sample_ids`.
sample_shape: Either a list of integers, or a 1-D Tensor of type `int32`,
        the shape of each sample in the batch returned by `sample_fn`.
sample_dtype: the dtype of the sample returned by `sample_fn`.
start_inputs: The initial batch of inputs.
end_fn: A callable that takes `sample_ids` and emits a `bool` vector
shaped `[batch_size]` indicating whether each sample is an end token.
next_inputs_fn: (Optional) A callable that takes `sample_ids` and returns
the next batch of inputs. If not provided, `sample_ids` is used as the
next batch of inputs.
"""
self._sample_fn = sample_fn
self._end_fn = end_fn
self._sample_shape = tensor_shape.TensorShape(sample_shape)
self._sample_dtype = sample_dtype
self._next_inputs_fn = next_inputs_fn
self._batch_size = array_ops.shape(start_inputs)[0]
self._start_inputs = ops.convert_to_tensor(
start_inputs, name="start_inputs")
@property
def batch_size(self):
return self._batch_size
@property
def sample_ids_shape(self):
return self._sample_shape
@property
def sample_ids_dtype(self):
return self._sample_dtype
def initialize(self, name=None):
finished = array_ops.tile([False], [self._batch_size])
return (finished, self._start_inputs)
def sample(self, time, outputs, state, name=None):
del time, state # unused by sample
return self._sample_fn(outputs)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
del time, outputs # unused by next_inputs
if self._next_inputs_fn is None:
next_inputs = sample_ids
else:
next_inputs = self._next_inputs_fn(sample_ids)
finished = self._end_fn(sample_ids)
return (finished, next_inputs, state)
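# --- Editor's note (not part of the original module) --------------------
# A hedged sketch of `InferenceHelper` for a continuous-output decoder that
# feeds its own outputs back in; the identity `sample_fn` and the near-zero
# stopping rule are hypothetical choices for illustration.
def _inference_helper_sketch(start_inputs, depth):
  """Editor's sketch: feed outputs back until they collapse toward zero."""
  return InferenceHelper(
      sample_fn=lambda outputs: outputs,  # identity "sampling"
      sample_shape=[depth],
      sample_dtype=dtypes.float32,
      start_inputs=start_inputs,
      end_fn=lambda sample_ids: math_ops.reduce_all(
          math_ops.abs(sample_ids) < 1e-3, axis=-1))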
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/seq2seq/python/ops/helper.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A powerful dynamic attention wrapper object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import math
import numpy as np
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import initializers
from tensorflow.python.keras import layers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.layers import base as layers_base
from tensorflow.python.layers import core as layers_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
__all__ = [
"AttentionMechanism",
"AttentionWrapper",
"AttentionWrapperState",
"LuongAttention",
"BahdanauAttention",
"hardmax",
"safe_cumprod",
"monotonic_attention",
"BahdanauMonotonicAttention",
"LuongMonotonicAttention",
]
_zero_state_tensors = rnn_cell_impl._zero_state_tensors # pylint: disable=protected-access
class AttentionMechanism(object):
@property
def alignments_size(self):
raise NotImplementedError
@property
def state_size(self):
raise NotImplementedError
class _BaseAttentionMechanism(AttentionMechanism):
"""A base AttentionMechanism class providing common functionality.
Common functionality includes:
1. Storing the query and memory layers.
2. Preprocessing and storing the memory.
"""
def __init__(self,
query_layer,
memory,
probability_fn,
memory_sequence_length=None,
memory_layer=None,
check_inner_dims_defined=True,
score_mask_value=None,
custom_key_value_fn=None,
name=None):
"""Construct base AttentionMechanism class.
Args:
query_layer: Callable. Instance of `tf.compat.v1.layers.Layer`. The
layer's depth must match the depth of `memory_layer`. If `query_layer`
is not provided, the shape of `query` must match that of `memory_layer`.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
probability_fn: A `callable`. Converts the score and previous alignments
to probabilities. Its signature should be: `probabilities =
probability_fn(score, state)`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
memory_layer: Instance of `tf.compat.v1.layers.Layer` (may be None). The
layer's depth must match the depth of `query_layer`. If `memory_layer`
is not provided, the shape of `memory` must match that of `query_layer`.
check_inner_dims_defined: Python boolean. If `True`, the `memory`
argument's shape is checked to ensure all but the two outermost
dimensions are fully defined.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
custom_key_value_fn: (optional): The custom function for
computing keys and values.
name: Name to use when creating ops.
"""
if (query_layer is not None and
not isinstance(query_layer, layers_base.Layer)):
raise TypeError("query_layer is not a Layer: %s" %
type(query_layer).__name__)
if (memory_layer is not None and
not isinstance(memory_layer, layers_base.Layer)):
raise TypeError("memory_layer is not a Layer: %s" %
type(memory_layer).__name__)
self._query_layer = query_layer
self._memory_layer = memory_layer
self.dtype = memory_layer.dtype
if not callable(probability_fn):
raise TypeError("probability_fn must be callable, saw type: %s" %
type(probability_fn).__name__)
if score_mask_value is None:
score_mask_value = dtypes.as_dtype(
self._memory_layer.dtype).as_numpy_dtype(-np.inf)
self._probability_fn = lambda score, prev: ( # pylint:disable=g-long-lambda
probability_fn(
_maybe_mask_score(
score,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value), prev))
with ops.name_scope(name, "BaseAttentionMechanismInit",
nest.flatten(memory)):
self._values = _prepare_memory(
memory,
memory_sequence_length=memory_sequence_length,
check_inner_dims_defined=check_inner_dims_defined)
self._keys = (
self.memory_layer(self._values) if self.memory_layer # pylint: disable=not-callable
else self._values)
if custom_key_value_fn is not None:
self._keys, self._values = custom_key_value_fn(self._keys, self._values)
self._batch_size = (
tensor_shape.dimension_value(self._keys.shape[0]) or
array_ops.shape(self._keys)[0])
self._alignments_size = (
tensor_shape.dimension_value(self._keys.shape[1]) or
array_ops.shape(self._keys)[1])
@property
def memory_layer(self):
return self._memory_layer
@property
def query_layer(self):
return self._query_layer
@property
def values(self):
return self._values
@property
def keys(self):
return self._keys
@property
def batch_size(self):
return self._batch_size
@property
def alignments_size(self):
return self._alignments_size
@property
def state_size(self):
return self._alignments_size
def initial_alignments(self, batch_size, dtype):
"""Creates the initial alignment values for the `AttentionWrapper` class.
This is important for AttentionMechanisms that use the previous alignment
to calculate the alignment at the next time step (e.g. monotonic attention).
The default behavior is to return a tensor of all zeros.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A `dtype` tensor shaped `[batch_size, alignments_size]`
(`alignments_size` is the values' `max_time`).
"""
max_time = self._alignments_size
return _zero_state_tensors(max_time, batch_size, dtype)
def initial_state(self, batch_size, dtype):
"""Creates the initial state values for the `AttentionWrapper` class.
This is important for AttentionMechanisms that use the previous alignment
to calculate the alignment at the next time step (e.g. monotonic attention).
The default behavior is to return the same output as initial_alignments.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A structure of all-zero tensors with shapes as described by `state_size`.
"""
return self.initial_alignments(batch_size, dtype)
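# --- Editor's note (not part of the original module) --------------------
# A hedged sketch of why `initial_alignments` is overridable: a mechanism that
# feeds alignments back recursively (e.g. monotonic attention) needs a
# well-defined alignment at the first step. One common choice is a one-hot on
# the first memory position; the helper name below is hypothetical.
def _one_hot_initial_alignments(batch_size, alignments_size, dtype):
  """Editor's sketch: put all initial attention mass on memory position 0."""
  return array_ops.one_hot(
      array_ops.zeros((batch_size,), dtype=dtypes.int32),
      alignments_size, dtype=dtype)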
class _BaseAttentionMechanismV2(AttentionMechanism, layers.Layer):
"""A base AttentionMechanism class providing common functionality.
Common functionality includes:
1. Storing the query and memory layers.
2. Preprocessing and storing the memory.
  Note that this layer takes `memory` as its init parameter, which is an
  anti-pattern for the Keras API; we have to keep the memory as an init
  parameter for performance and dependency reasons. Under the hood, during
  `__init__()`, it will invoke `base_layer.__call__(memory, setup_memory=True)`.
  This lets Keras keep track of the memory tensor as an input to this layer.
  Once `__init__()` is done, the user can query the attention via
  `score = att_obj([query, state])` and use it as a normal Keras layer.
  Special attention is needed when using this class as the base layer for a
  new attention mechanism:
  1. build() could be invoked more than once, so please make sure weights are
     not duplicated.
  2. Layer.get_weights() might return a different set of weights if the
     instance has a `query_layer`. The `query_layer` weights are not
     initialized until the memory is configured.
  Also note that this layer does not work with a Keras model when
  `model.compile(run_eagerly=True)`, because this layer is stateful. Support
  for that will be added in a future version.
"""
def __init__(self,
memory,
probability_fn,
query_layer=None,
memory_layer=None,
memory_sequence_length=None,
**kwargs):
"""Construct base AttentionMechanism class.
Args:
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
probability_fn: A `callable`. Converts the score and previous alignments
to probabilities. Its signature should be: `probabilities =
probability_fn(score, state)`.
query_layer: (optional): Instance of `tf.keras.Layer`. The layer's depth
must match the depth of `memory_layer`. If `query_layer` is not
provided, the shape of `query` must match that of `memory_layer`.
memory_layer: (optional): Instance of `tf.keras.Layer`. The layer's depth
must match the depth of `query_layer`. If `memory_layer` is not
provided, the shape of `memory` must match that of `query_layer`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros for
values past the respective sequence lengths.
**kwargs: Dictionary that contains other common arguments for layer
creation.
"""
if (query_layer is not None and not isinstance(query_layer, layers.Layer)):
raise TypeError("query_layer is not a Layer: %s" %
type(query_layer).__name__)
if (memory_layer is not None and
not isinstance(memory_layer, layers.Layer)):
raise TypeError("memory_layer is not a Layer: %s" %
type(memory_layer).__name__)
self.query_layer = query_layer
self.memory_layer = memory_layer
if self.memory_layer is not None and "dtype" not in kwargs:
kwargs["dtype"] = self.memory_layer.dtype
super(_BaseAttentionMechanismV2, self).__init__(**kwargs)
if not callable(probability_fn):
raise TypeError("probability_fn must be callable, saw type: %s" %
type(probability_fn).__name__)
self.probability_fn = probability_fn
self.keys = None
self.values = None
self.batch_size = None
self._memory_initialized = False
self._check_inner_dims_defined = True
self.supports_masking = True
self.score_mask_value = dtypes.as_dtype(self.dtype).as_numpy_dtype(-np.inf)
if memory is not None:
# Setup the memory by self.__call__() with memory and memory_seq_length.
# This will make the attention follow the keras convention which takes
# all the tensor inputs via __call__().
if memory_sequence_length is None:
inputs = memory
else:
inputs = [memory, memory_sequence_length]
self.values = super(_BaseAttentionMechanismV2, self).__call__(
inputs, setup_memory=True)
def build(self, input_shape):
if not self._memory_initialized:
# This is for setting up the memory, which contains memory and optional
# memory_sequence_length. Build the memory_layer with memory shape.
if self.memory_layer is not None and not self.memory_layer.built:
if isinstance(input_shape, list):
self.memory_layer.build(input_shape[0])
else:
self.memory_layer.build(input_shape)
else:
# The input_shape should be query.shape and state.shape. Use the query
# to init the query layer.
if self.query_layer is not None and not self.query_layer.built:
self.query_layer.build(input_shape[0])
def __call__(self, inputs, **kwargs):
"""Preprocess the inputs before calling `base_layer.__call__()`.
    Note that there are two situations here: one for setting up the memory, and
    one with an actual query and state.
    1. When the memory has not been configured, we just pass all the params to
    base_layer.__call__(), which will then invoke self.call() with the proper
    inputs, allowing this class to set up the memory.
    2. When the memory has already been set up, the input should contain the
    query and state, and optionally the processed memory. If the processed
    memory is not included in the input, we have to append it to the inputs and
    give it to base_layer.__call__(). The processed memory is the output of the
    first invocation of self.__call__(). If we don't add it here, then from the
    Keras perspective the graph is disconnected, since the output from the
    previous call is never used.
    Args:
      inputs: the input tensors.
      **kwargs: dict, other keyword arguments for `__call__()`.
"""
if self._memory_initialized:
if len(inputs) not in (2, 3):
raise ValueError("Expect the inputs to have 2 or 3 tensors, got %d" %
len(inputs))
if len(inputs) == 2:
# We append the calculated memory here so that the graph will be
# connected.
inputs.append(self.values)
return super(_BaseAttentionMechanismV2, self).__call__(inputs, **kwargs)
def call(self, inputs, mask=None, setup_memory=False, **kwargs):
"""Setup the memory or query the attention.
There are two case here, one for setup memory, and the second is query the
attention score. `setup_memory` is the flag to indicate which mode it is.
The input list will be treated differently based on that flag.
Args:
      inputs: a list of tensors that could either be `query` and `state`, or
`memory` and `memory_sequence_length`. `query` is the tensor of dtype
matching `memory` and shape `[batch_size, query_depth]`. `state` is the
tensor of dtype matching `memory` and shape `[batch_size,
alignments_size]`. (`alignments_size` is memory's `max_time`). `memory`
is the memory to query; usually the output of an RNN encoder. The tensor
should be shaped `[batch_size, max_time, ...]`. `memory_sequence_length`
(optional) is the sequence lengths for the batch entries in memory. If
provided, the memory tensor rows are masked with zeros for values past
the respective sequence lengths.
      mask: optional bool tensor with shape `[batch, max_time]` masking the
        memory. If it is not None, memory items whose mask value is False are
        filtered out of the calculation.
setup_memory: boolean, whether the input is for setting up memory, or
query attention.
**kwargs: Dict, other keyword arguments for the call method.
Returns:
Either processed memory or attention score, based on `setup_memory`.
"""
if setup_memory:
if isinstance(inputs, list):
if len(inputs) not in (1, 2):
raise ValueError("Expect inputs to have 1 or 2 tensors, got %d" %
len(inputs))
memory = inputs[0]
memory_sequence_length = inputs[1] if len(inputs) == 2 else None
memory_mask = mask
else:
memory, memory_sequence_length = inputs, None
memory_mask = mask
self._setup_memory(memory, memory_sequence_length, memory_mask)
      # We force self.built to False here since only the memory has been
      # initialized; the real query/state has not gone through call() yet. The
      # layer should be built and called again.
self.built = False
# Return the processed memory in order to create the Keras connectivity
# data for it.
return self.values
else:
if not self._memory_initialized:
raise ValueError("Cannot query the attention before the setup of "
"memory")
if len(inputs) not in (2, 3):
raise ValueError("Expect the inputs to have query, state, and optional "
"processed memory, got %d items" % len(inputs))
# Ignore the rest of the inputs and only care about the query and state
query, state = inputs[0], inputs[1]
return self._calculate_attention(query, state)
def _setup_memory(self, memory, memory_sequence_length=None,
memory_mask=None):
"""Pre-process the memory before actually query the memory.
This should only be called once at the first invocation of call().
Args:
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros for
values past the respective sequence lengths.
memory_mask: (Optional) The boolean tensor with shape `[batch_size,
max_time]`. For any value equal to False, the corresponding value in
memory should be ignored.
"""
if self._memory_initialized:
raise ValueError("The memory for the attention has already been setup.")
if memory_sequence_length is not None and memory_mask is not None:
raise ValueError("memory_sequence_length and memory_mask cannot be "
"used at same time for attention.")
with ops.name_scope(self.name, "BaseAttentionMechanismInit",
nest.flatten(memory)):
self.values = _prepare_memory(
memory,
memory_sequence_length=memory_sequence_length,
memory_mask=memory_mask,
check_inner_dims_defined=self._check_inner_dims_defined)
      # Mark the value as checked, since the memory and memory mask might not
      # be passed from __call__() and therefore lack proper keras metadata.
      # TODO(omalleyt): Remove this hack once the mask has proper keras
      # history.
base_layer_utils.mark_checked(self.values)
if self.memory_layer is not None:
self.keys = self.memory_layer(self.values)
else:
self.keys = self.values
self.batch_size = (
tensor_shape.dimension_value(self.keys.shape[0]) or
array_ops.shape(self.keys)[0])
self._alignments_size = (
tensor_shape.dimension_value(self.keys.shape[1]) or
array_ops.shape(self.keys)[1])
if memory_mask is not None:
unwrapped_probability_fn = self.probability_fn
def _mask_probability_fn(score, prev):
return unwrapped_probability_fn(
_maybe_mask_score(
score,
memory_mask=memory_mask,
memory_sequence_length=memory_sequence_length,
score_mask_value=self.score_mask_value), prev)
self.probability_fn = _mask_probability_fn
self._memory_initialized = True
def _calculate_attention(self, query, state):
raise NotImplementedError(
"_calculate_attention need to be implemented by subclasses.")
def compute_mask(self, inputs, mask=None):
    # The real inputs of the attention are the query and state, and the memory
    # layer's mask shouldn't be passed down. Return None for all output masks
    # here.
return None, None
def get_config(self):
config = {}
    # Since the probability_fn is likely to be a wrapped function, the child
    # class should preserve the original function and how it's wrapped.
if self.query_layer is not None:
config["query_layer"] = {
"class_name": self.query_layer.__class__.__name__,
"config": self.query_layer.get_config(),
}
if self.memory_layer is not None:
config["memory_layer"] = {
"class_name": self.memory_layer.__class__.__name__,
"config": self.memory_layer.get_config(),
}
    # memory is a required init parameter and it's a tensor. It cannot be
    # serialized to config, so we put a placeholder for it.
config["memory"] = None
base_config = super(_BaseAttentionMechanismV2, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _process_probability_fn(self, func_name):
"""Helper method to retrieve the probably function by string input."""
valid_probability_fns = {
"softmax": nn_ops.softmax,
"hardmax": hardmax,
}
if func_name not in valid_probability_fns.keys():
raise ValueError("Invalid probability function: %s, options are %s" %
(func_name, valid_probability_fns.keys()))
return valid_probability_fns[func_name]
@classmethod
def deserialize_inner_layer_from_config(cls, config, custom_objects):
"""Helper method that reconstruct the query and memory from the config.
In the get_config() method, the query and memory layer configs are
serialized into dict for persistence, this method perform the reverse action
to reconstruct the layer from the config.
Args:
config: dict, the configs that will be used to reconstruct the object.
custom_objects: dict mapping class names (or function names) of custom
(non-Keras) objects to class/functions.
Returns:
      config: dict, the config with layer instances created, which is ready to
        be used as init parameters.
"""
# Reconstruct the query and memory layer for parent class.
from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
# Instead of updating the input, create a copy and use that.
config = config.copy()
query_layer_config = config.pop("query_layer", None)
if query_layer_config:
query_layer = deserialize_layer(
query_layer_config, custom_objects=custom_objects)
config["query_layer"] = query_layer
memory_layer_config = config.pop("memory_layer", None)
if memory_layer_config:
memory_layer = deserialize_layer(
memory_layer_config, custom_objects=custom_objects)
config["memory_layer"] = memory_layer
return config
@property
def alignments_size(self):
return self._alignments_size
@property
def state_size(self):
return self._alignments_size
def initial_alignments(self, batch_size, dtype):
"""Creates the initial alignment values for the `AttentionWrapper` class.
This is important for AttentionMechanisms that use the previous alignment
to calculate the alignment at the next time step (e.g. monotonic attention).
The default behavior is to return a tensor of all zeros.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A `dtype` tensor shaped `[batch_size, alignments_size]`
(`alignments_size` is the values' `max_time`).
"""
max_time = self._alignments_size
return _zero_state_tensors(max_time, batch_size, dtype)
def initial_state(self, batch_size, dtype):
"""Creates the initial state values for the `AttentionWrapper` class.
This is important for AttentionMechanisms that use the previous alignment
to calculate the alignment at the next time step (e.g. monotonic attention).
The default behavior is to return the same output as initial_alignments.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A structure of all-zero tensors with shapes as described by `state_size`.
"""
return self.initial_alignments(batch_size, dtype)
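# Illustrative sketch (not part of the original API surface): the two-phase
# __call__ protocol of `_BaseAttentionMechanismV2`. The first invocation,
# driven by the constructor when `memory` is passed, sets up the memory; later
# invocations take `[query, state]` and return the alignments and the next
# attention state. The helper name and shapes below are hypothetical.
def _example_attention_v2_protocol():
  """Sketch: set up memory, then query a V2 attention mechanism."""
  batch_size, max_time, depth = 2, 7, 8
  memory = array_ops.ones([batch_size, max_time, depth])
  # Constructing the mechanism with `memory` triggers the setup phase.
  mechanism = LuongAttentionV2(units=depth, memory=memory)
  query = array_ops.ones([batch_size, depth])
  state = mechanism.initial_state(batch_size, dtypes.float32)
  # Query phase: inputs are [query, state]; the processed memory is appended
  # internally so the Keras graph stays connected.
  alignments, next_state = mechanism([query, state])
  return alignments, next_state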
def _luong_score(query, keys, scale):
"""Implements Luong-style (multiplicative) scoring function.
This attention has two forms. The first is standard Luong attention,
as described in:
Minh-Thang Luong, Hieu Pham, Christopher D. Manning.
"Effective Approaches to Attention-based Neural Machine Translation."
EMNLP 2015. https://arxiv.org/abs/1508.04025
The second is the scaled form inspired partly by the normalized form of
Bahdanau attention.
To enable the second form, call this function with `scale=True`.
Args:
query: Tensor, shape `[batch_size, num_units]` to compare to keys.
keys: Processed memory, shape `[batch_size, max_time, num_units]`.
scale: the optional tensor to scale the attention score.
Returns:
A `[batch_size, max_time]` tensor of unnormalized score values.
Raises:
ValueError: If `key` and `query` depths do not match.
"""
depth = query.get_shape()[-1]
key_units = keys.get_shape()[-1]
if depth != key_units:
raise ValueError(
"Incompatible or unknown inner dimensions between query and keys. "
"Query (%s) has units: %s. Keys (%s) have units: %s. "
"Perhaps you need to set num_units to the keys' dimension (%s)?" %
(query, depth, keys, key_units, key_units))
# Reshape from [batch_size, depth] to [batch_size, 1, depth]
# for matmul.
query = array_ops.expand_dims(query, 1)
# Inner product along the query units dimension.
# matmul shapes: query is [batch_size, 1, depth] and
# keys is [batch_size, max_time, depth].
# the inner product is asked to **transpose keys' inner shape** to get a
# batched matmul on:
# [batch_size, 1, depth] . [batch_size, depth, max_time]
# resulting in an output shape of:
# [batch_size, 1, max_time].
# we then squeeze out the center singleton dimension.
score = math_ops.matmul(query, keys, transpose_b=True)
score = array_ops.squeeze(score, [1])
if scale is not None:
score = scale * score
return score
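# A minimal NumPy sketch of the Luong score above: the batched matmul of the
# query against the transposed keys reduces, per batch entry, to a dot product
# of the query with each memory key. Values are hypothetical.
def _example_luong_score_numpy():
  """Sketch: unnormalized Luong scores computed directly with NumPy."""
  query = np.array([[1.0, 2.0]])  # [batch_size=1, depth=2]
  keys = np.array([[[1.0, 0.0],   # [batch_size=1, max_time=2, depth=2]
                    [0.0, 1.0]]])
  # score[b, t] = sum_d query[b, d] * keys[b, t, d]
  score = np.einsum("bd,btd->bt", query, keys)
  return score  # [[1.0, 2.0]]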
class LuongAttention(_BaseAttentionMechanism):
"""Implements Luong-style (multiplicative) attention scoring.
This attention has two forms. The first is standard Luong attention,
as described in:
Minh-Thang Luong, Hieu Pham, Christopher D. Manning.
[Effective Approaches to Attention-based Neural Machine Translation.
EMNLP 2015.](https://arxiv.org/abs/1508.04025)
The second is the scaled form inspired partly by the normalized form of
Bahdanau attention.
To enable the second form, construct the object with parameter
`scale=True`.
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
scale=False,
probability_fn=None,
score_mask_value=None,
dtype=None,
custom_key_value_fn=None,
name="LuongAttention"):
"""Construct the AttentionMechanism mechanism.
Args:
num_units: The depth of the attention mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length: (optional) Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
scale: Python boolean. Whether to scale the energy term.
probability_fn: (optional) A `callable`. Converts the score to
probabilities. The default is `tf.nn.softmax`. Other options include
`tf.contrib.seq2seq.hardmax` and `tf.contrib.sparsemax.sparsemax`.
Its signature should be: `probabilities = probability_fn(score)`.
score_mask_value: (optional) The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
dtype: The data type for the memory layer of the attention mechanism.
      custom_key_value_fn: (optional) The custom function for
computing keys and values.
name: Name to use when creating ops.
"""
    # For LuongAttention, we only transform the memory layer; thus
    # num_units **must** match the expected query depth.
if probability_fn is None:
probability_fn = nn_ops.softmax
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = lambda score, _: probability_fn(score)
super(LuongAttention, self).__init__(
query_layer=None,
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
custom_key_value_fn=custom_key_value_fn,
name=name)
self._num_units = num_units
self._scale = scale
self._name = name
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape `[batch_size,
query_depth]`.
state: Tensor of dtype matching `self.values` and shape `[batch_size,
alignments_size]` (`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(None, "luong_attention", [query]):
attention_g = None
if self._scale:
attention_g = variable_scope.get_variable(
"attention_g",
dtype=query.dtype,
initializer=init_ops.ones_initializer,
shape=())
score = _luong_score(query, self._keys, attention_g)
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
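# Hedged usage sketch for `LuongAttention` (TF1-style graph mode). The encoder
# output, shapes, and the helper name below are placeholders for illustration
# only.
def _example_luong_attention_usage():
  """Sketch: scoring a single decoder step against encoder memory."""
  batch_size, max_time, num_units = 2, 5, 4
  encoder_outputs = array_ops.ones([batch_size, max_time, num_units])
  mechanism = LuongAttention(num_units=num_units, memory=encoder_outputs)
  query = array_ops.ones([batch_size, num_units])
  # The previous alignments act as the attention state for Luong attention.
  state = mechanism.initial_state(batch_size, dtypes.float32)
  alignments, next_state = mechanism(query, state)
  return alignments, next_state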
class LuongAttentionV2(_BaseAttentionMechanismV2):
"""Implements Luong-style (multiplicative) attention scoring.
This attention has two forms. The first is standard Luong attention,
as described in:
Minh-Thang Luong, Hieu Pham, Christopher D. Manning.
[Effective Approaches to Attention-based Neural Machine Translation.
EMNLP 2015.](https://arxiv.org/abs/1508.04025)
The second is the scaled form inspired partly by the normalized form of
Bahdanau attention.
To enable the second form, construct the object with parameter
`scale=True`.
"""
def __init__(self,
units,
memory,
memory_sequence_length=None,
scale=False,
probability_fn="softmax",
dtype=None,
name="LuongAttention",
**kwargs):
"""Construct the AttentionMechanism mechanism.
Args:
units: The depth of the attention mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
      memory_sequence_length: (optional) Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
scale: Python boolean. Whether to scale the energy term.
      probability_fn: (optional) string, the name of the function to convert
        the attention score to probabilities. The default is `softmax`, which
        is `tf.nn.softmax`. The other option is `hardmax`, which is hardmax()
        within this module. Any other value will result in a validation error.
dtype: The data type for the memory layer of the attention mechanism.
name: Name to use when creating ops.
**kwargs: Dictionary that contains other common arguments for layer
creation.
"""
    # For LuongAttention, we only transform the memory layer; thus
    # num_units **must** match the expected query depth.
self.probability_fn_name = probability_fn
probability_fn = self._process_probability_fn(self.probability_fn_name)
wrapped_probability_fn = lambda score, _: probability_fn(score)
if dtype is None:
dtype = dtypes.float32
memory_layer = kwargs.pop("memory_layer", None)
if not memory_layer:
memory_layer = layers.Dense(
units, name="memory_layer", use_bias=False, dtype=dtype)
self.units = units
self.scale = scale
self.scale_weight = None
super(LuongAttentionV2, self).__init__(
memory=memory,
memory_sequence_length=memory_sequence_length,
query_layer=None,
memory_layer=memory_layer,
probability_fn=wrapped_probability_fn,
name=name,
dtype=dtype,
**kwargs)
def build(self, input_shape):
super(LuongAttentionV2, self).build(input_shape)
if self.scale and self.scale_weight is None:
self.scale_weight = self.add_weight(
"attention_g", initializer=init_ops.ones_initializer, shape=())
self.built = True
def _calculate_attention(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape `[batch_size,
query_depth]`.
state: Tensor of dtype matching `self.values` and shape `[batch_size,
alignments_size]` (`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
next_state: Same as the alignments.
"""
score = _luong_score(query, self.keys, self.scale_weight)
alignments = self.probability_fn(score, state)
next_state = alignments
return alignments, next_state
def get_config(self):
config = {
"units": self.units,
"scale": self.scale,
"probability_fn": self.probability_fn_name,
}
base_config = super(LuongAttentionV2, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
config = _BaseAttentionMechanismV2.deserialize_inner_layer_from_config(
config, custom_objects=custom_objects)
return cls(**config)
def _bahdanau_score(processed_query,
keys,
attention_v,
attention_g=None,
attention_b=None):
"""Implements Bahdanau-style (additive) scoring function.
  This attention has two forms. The first is Bahdanau attention,
as described in:
Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
"Neural Machine Translation by Jointly Learning to Align and Translate."
ICLR 2015. https://arxiv.org/abs/1409.0473
The second is the normalized form. This form is inspired by the
weight normalization article:
Tim Salimans, Diederik P. Kingma.
"Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks."
https://arxiv.org/abs/1602.07868
  To enable the second form, please pass in attention_g and attention_b.
Args:
processed_query: Tensor, shape `[batch_size, num_units]` to compare to keys.
keys: Processed memory, shape `[batch_size, max_time, num_units]`.
attention_v: Tensor, shape `[num_units]`.
attention_g: Optional scalar tensor for normalization.
attention_b: Optional tensor with shape `[num_units]` for normalization.
Returns:
A `[batch_size, max_time]` tensor of unnormalized score values.
"""
# Reshape from [batch_size, ...] to [batch_size, 1, ...] for broadcasting.
processed_query = array_ops.expand_dims(processed_query, 1)
if attention_g is not None and attention_b is not None:
normed_v = attention_g * attention_v * math_ops.rsqrt(
math_ops.reduce_sum(math_ops.square(attention_v)))
return math_ops.reduce_sum(
normed_v * math_ops.tanh(keys + processed_query + attention_b), [2])
else:
return math_ops.reduce_sum(
attention_v * math_ops.tanh(keys + processed_query), [2])
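# A minimal NumPy sketch of the additive (Bahdanau) score above in its
# unnormalized form: score[b, t] = sum_d v[d] * tanh(keys[b, t, d] + q[b, d]).
# Values are hypothetical.
def _example_bahdanau_score_numpy():
  """Sketch: unnormalized Bahdanau scores computed directly with NumPy."""
  processed_query = np.array([[0.5, -0.5]])  # [batch_size=1, num_units=2]
  keys = np.array([[[0.5, 0.5],              # [batch_size=1, max_time=2, 2]
                    [-0.5, -0.5]]])
  attention_v = np.array([1.0, 1.0])         # [num_units]
  score = np.sum(
      attention_v * np.tanh(keys + processed_query[:, np.newaxis, :]), axis=2)
  return score  # shape [1, 2]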
class BahdanauAttention(_BaseAttentionMechanism):
"""Implements Bahdanau-style (additive) attention.
This attention has two forms. The first is Bahdanau attention,
as described in:
Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
"Neural Machine Translation by Jointly Learning to Align and Translate."
ICLR 2015. https://arxiv.org/abs/1409.0473
The second is the normalized form. This form is inspired by the
weight normalization article:
Tim Salimans, Diederik P. Kingma.
"Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks."
https://arxiv.org/abs/1602.07868
To enable the second form, construct the object with parameter
`normalize=True`.
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
normalize=False,
probability_fn=None,
score_mask_value=None,
dtype=None,
custom_key_value_fn=None,
name="BahdanauAttention"):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length: (optional) Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
probability_fn: (optional) A `callable`. Converts the score to
probabilities. The default is `tf.nn.softmax`. Other options include
`tf.contrib.seq2seq.hardmax` and `tf.contrib.sparsemax.sparsemax`.
Its signature should be: `probabilities = probability_fn(score)`.
      score_mask_value: (optional) The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
dtype: The data type for the query and memory layers of the attention
mechanism.
      custom_key_value_fn: (optional) The custom function for
computing keys and values.
name: Name to use when creating ops.
"""
if probability_fn is None:
probability_fn = nn_ops.softmax
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = lambda score, _: probability_fn(score)
super(BahdanauAttention, self).__init__(
query_layer=layers_core.Dense(
num_units, name="query_layer", use_bias=False, dtype=dtype),
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
custom_key_value_fn=custom_key_value_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._normalize = normalize
self._name = name
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape `[batch_size,
query_depth]`.
state: Tensor of dtype matching `self.values` and shape `[batch_size,
alignments_size]` (`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(None, "bahdanau_attention", [query]):
processed_query = self.query_layer(query) if self.query_layer else query
attention_v = variable_scope.get_variable(
"attention_v", [self._num_units], dtype=query.dtype)
if not self._normalize:
attention_g = None
attention_b = None
else:
attention_g = variable_scope.get_variable(
"attention_g",
dtype=query.dtype,
initializer=init_ops.constant_initializer(
math.sqrt((1. / self._num_units))),
shape=())
attention_b = variable_scope.get_variable(
"attention_b", [self._num_units],
dtype=query.dtype,
initializer=init_ops.zeros_initializer())
score = _bahdanau_score(
processed_query,
self._keys,
attention_v,
attention_g=attention_g,
attention_b=attention_b)
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
class BahdanauAttentionV2(_BaseAttentionMechanismV2):
"""Implements Bahdanau-style (additive) attention.
This attention has two forms. The first is Bahdanau attention,
as described in:
Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
"Neural Machine Translation by Jointly Learning to Align and Translate."
ICLR 2015. https://arxiv.org/abs/1409.0473
The second is the normalized form. This form is inspired by the
weight normalization article:
Tim Salimans, Diederik P. Kingma.
"Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks."
https://arxiv.org/abs/1602.07868
To enable the second form, construct the object with parameter
`normalize=True`.
"""
def __init__(self,
units,
memory,
memory_sequence_length=None,
normalize=False,
probability_fn="softmax",
kernel_initializer="glorot_uniform",
dtype=None,
name="BahdanauAttention",
**kwargs):
"""Construct the Attention mechanism.
Args:
units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
      memory_sequence_length: (optional) Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
      probability_fn: (optional) string, the name of the function to convert
        the attention score to probabilities. The default is `softmax`, which
        is `tf.nn.softmax`. The other option is `hardmax`, which is hardmax()
        within this module. Any other value will result in a validation error.
      kernel_initializer: (optional) The name of the initializer for the
        attention kernel.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
**kwargs: Dictionary that contains other common arguments for layer
creation.
"""
self.probability_fn_name = probability_fn
probability_fn = self._process_probability_fn(self.probability_fn_name)
wrapped_probability_fn = lambda score, _: probability_fn(score)
if dtype is None:
dtype = dtypes.float32
query_layer = kwargs.pop("query_layer", None)
if not query_layer:
query_layer = layers.Dense(
units, name="query_layer", use_bias=False, dtype=dtype)
memory_layer = kwargs.pop("memory_layer", None)
if not memory_layer:
memory_layer = layers.Dense(
units, name="memory_layer", use_bias=False, dtype=dtype)
self.units = units
self.normalize = normalize
self.kernel_initializer = initializers.get(kernel_initializer)
self.attention_v = None
self.attention_g = None
self.attention_b = None
super(BahdanauAttentionV2, self).__init__(
memory=memory,
memory_sequence_length=memory_sequence_length,
query_layer=query_layer,
memory_layer=memory_layer,
probability_fn=wrapped_probability_fn,
name=name,
dtype=dtype,
**kwargs)
def build(self, input_shape):
super(BahdanauAttentionV2, self).build(input_shape)
if self.attention_v is None:
self.attention_v = self.add_weight(
"attention_v", [self.units],
dtype=self.dtype,
initializer=self.kernel_initializer)
if self.normalize and self.attention_g is None and self.attention_b is None:
self.attention_g = self.add_weight(
"attention_g",
initializer=init_ops.constant_initializer(
math.sqrt((1. / self.units))),
shape=())
self.attention_b = self.add_weight(
"attention_b",
shape=[self.units],
initializer=init_ops.zeros_initializer())
self.built = True
def _calculate_attention(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape `[batch_size,
query_depth]`.
state: Tensor of dtype matching `self.values` and shape `[batch_size,
alignments_size]` (`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
next_state: same as alignments.
"""
processed_query = self.query_layer(query) if self.query_layer else query
score = _bahdanau_score(
processed_query,
self.keys,
self.attention_v,
attention_g=self.attention_g,
attention_b=self.attention_b)
alignments = self.probability_fn(score, state)
next_state = alignments
return alignments, next_state
def get_config(self):
config = {
"units": self.units,
"normalize": self.normalize,
"probability_fn": self.probability_fn_name,
"kernel_initializer": initializers.serialize(self.kernel_initializer)
}
base_config = super(BahdanauAttentionV2, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
config = _BaseAttentionMechanismV2.deserialize_inner_layer_from_config(
config, custom_objects=custom_objects)
return cls(**config)
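# Hedged sketch: the V2 mechanisms are Keras layers, so get_config() and
# from_config() can round-trip the constructor arguments. `memory` itself is a
# tensor and is intentionally not serialized (get_config stores a None
# placeholder), so it must be re-supplied through a setup-memory call. The
# helper name below is hypothetical.
def _example_bahdanau_v2_config_roundtrip(memory):
  """Sketch: serialize a BahdanauAttentionV2 and rebuild it from config."""
  mechanism = BahdanauAttentionV2(units=8, memory=memory)
  config = mechanism.get_config()
  rebuilt = BahdanauAttentionV2.from_config(config)
  # The rebuilt mechanism has no memory yet; set it up before querying.
  rebuilt([memory], setup_memory=True)
  return rebuilt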
def safe_cumprod(x, *args, **kwargs):
"""Computes cumprod of x in logspace using cumsum to avoid underflow.
The cumprod function and its gradient can result in numerical instabilities
when its argument has very small and/or zero values. As long as the argument
is all positive, we can instead compute the cumulative product as
exp(cumsum(log(x))). This function can be called identically to tf.cumprod.
Args:
x: Tensor to take the cumulative product of.
*args: Passed on to cumsum; these are identical to those in cumprod.
**kwargs: Passed on to cumsum; these are identical to those in cumprod.
Returns:
Cumulative product of x.
"""
with ops.name_scope(None, "SafeCumprod", [x]):
x = ops.convert_to_tensor(x, name="x")
tiny = np.finfo(x.dtype.as_numpy_dtype).tiny
return math_ops.exp(
math_ops.cumsum(
math_ops.log(clip_ops.clip_by_value(x, tiny, 1)), *args, **kwargs))
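# A quick NumPy sketch of the identity behind `safe_cumprod`: for positive x,
# cumprod(x) == exp(cumsum(log(x))); clipping to `tiny` keeps the log finite
# when x contains exact zeros. Values are hypothetical.
def _example_safe_cumprod_numpy():
  """Sketch: cumulative product computed in log space with NumPy."""
  x = np.array([0.5, 0.25, 0.0, 0.5])
  tiny = np.finfo(x.dtype).tiny
  logspace = np.exp(np.cumsum(np.log(np.clip(x, tiny, 1.0))))
  direct = np.cumprod(x)
  # Both are approximately [0.5, 0.125, 0.0, 0.0]; the log-space form avoids
  # the numerically unstable gradients of cumprod near zero.
  return logspace, direct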
def monotonic_attention(p_choose_i, previous_attention, mode):
"""Compute monotonic attention distribution from choosing probabilities.
Monotonic attention implies that the input sequence is processed in an
explicitly left-to-right manner when generating the output sequence. In
addition, once an input sequence element is attended to at a given output
timestep, elements occurring before it cannot be attended to at subsequent
output timesteps. This function generates attention distributions according
to these assumptions. For more information, see `Online and Linear-Time
Attention by Enforcing Monotonic Alignments`.
Args:
p_choose_i: Probability of choosing input sequence/memory element i. Should
be of shape (batch_size, input_sequence_length), and should all be in the
range [0, 1].
previous_attention: The attention distribution from the previous output
timestep. Should be of shape (batch_size, input_sequence_length). For
      the first output timestep, previous_attention[n] should be [1, 0, 0, ...,
0] for all n in [0, ... batch_size - 1].
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. * 'recursive' uses tf.scan to
recursively compute the distribution. This is slowest but is exact,
general, and does not suffer from numerical instabilities. * 'parallel'
uses parallelized cumulative-sum and cumulative-product operations to
compute a closed-form solution to the recurrence relation defining the
attention distribution. This makes it more efficient than 'recursive',
but it requires numerical checks which make the distribution non-exact.
This can be a problem in particular when input_sequence_length is long
and/or p_choose_i has entries very close to 0 or 1. * 'hard' requires that
the probabilities in p_choose_i are all either 0 or 1, and subsequently
uses a more efficient and exact solution.
Returns:
A tensor of shape (batch_size, input_sequence_length) representing the
attention distributions for each sequence in the batch.
Raises:
ValueError: mode is not one of 'recursive', 'parallel', 'hard'.
"""
# Force things to be tensors
p_choose_i = ops.convert_to_tensor(p_choose_i, name="p_choose_i")
previous_attention = ops.convert_to_tensor(
previous_attention, name="previous_attention")
if mode == "recursive":
# Use .shape[0] when it's not None, or fall back on symbolic shape
batch_size = tensor_shape.dimension_value(
p_choose_i.shape[0]) or array_ops.shape(p_choose_i)[0]
# Compute [1, 1 - p_choose_i[0], 1 - p_choose_i[1], ..., 1 - p_choose_i[-2]]
shifted_1mp_choose_i = array_ops.concat(
[array_ops.ones((batch_size, 1)), 1 - p_choose_i[:, :-1]], 1)
# Compute attention distribution recursively as
# q[i] = (1 - p_choose_i[i - 1])*q[i - 1] + previous_attention[i]
# attention[i] = p_choose_i[i]*q[i]
attention = p_choose_i * array_ops.transpose(
functional_ops.scan(
# Need to use reshape to remind TF of the shape between loop iterations
lambda x, yz: array_ops.reshape(yz[0] * x + yz[1], (batch_size,)),
# Loop variables yz[0] and yz[1]
[
array_ops.transpose(shifted_1mp_choose_i),
array_ops.transpose(previous_attention)
],
# Initial value of x is just zeros
array_ops.zeros((batch_size,))))
elif mode == "parallel":
# safe_cumprod computes cumprod in logspace with numeric checks
cumprod_1mp_choose_i = safe_cumprod(1 - p_choose_i, axis=1, exclusive=True)
# Compute recurrence relation solution
attention = p_choose_i * cumprod_1mp_choose_i * math_ops.cumsum(
previous_attention /
# Clip cumprod_1mp to avoid divide-by-zero
clip_ops.clip_by_value(cumprod_1mp_choose_i, 1e-10, 1.),
axis=1)
elif mode == "hard":
# Remove any probabilities before the index chosen last time step
p_choose_i *= math_ops.cumsum(previous_attention, axis=1)
# Now, use exclusive cumprod to remove probabilities after the first
# chosen index, like so:
# p_choose_i = [0, 0, 0, 1, 1, 0, 1, 1]
# cumprod(1 - p_choose_i, exclusive=True) = [1, 1, 1, 1, 0, 0, 0, 0]
# Product of above: [0, 0, 0, 1, 0, 0, 0, 0]
attention = p_choose_i * math_ops.cumprod(
1 - p_choose_i, axis=1, exclusive=True)
else:
raise ValueError("mode must be 'recursive', 'parallel', or 'hard'.")
return attention
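# Worked sketch of the 'hard' mode above, reusing the vectors from the inline
# comments: probabilities before the previously attended index are zeroed, and
# only the first choice at or after it survives. Inputs are hypothetical.
def _example_hard_monotonic_attention():
  """Sketch: hard monotonic attention for a single batch entry."""
  p_choose_i = constant_op.constant([[0., 0., 0., 1., 1., 0., 1., 1.]])
  previous_attention = constant_op.constant([[0., 1., 0., 0., 0., 0., 0., 0.]])
  # previous_attention attended to index 1; the first 1 in p_choose_i at or
  # after index 1 is index 3, so the result is [0, 0, 0, 1, 0, 0, 0, 0].
  return monotonic_attention(p_choose_i, previous_attention, "hard")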
def _monotonic_probability_fn(score,
previous_alignments,
sigmoid_noise,
mode,
seed=None):
"""Attention probability function for monotonic attention.
Takes in unnormalized attention scores, adds pre-sigmoid noise to encourage
the model to make discrete attention decisions, passes them through a sigmoid
to obtain "choosing" probabilities, and then calls monotonic_attention to
obtain the attention distribution. For more information, see
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
Args:
score: Unnormalized attention scores, shape `[batch_size, alignments_size]`
previous_alignments: Previous attention distribution, shape `[batch_size,
alignments_size]`
sigmoid_noise: Standard deviation of pre-sigmoid noise. Setting this larger
than 0 will encourage the model to produce large attention scores,
effectively making the choosing probabilities discrete and the resulting
attention distribution one-hot. It should be set to 0 at test-time, and
when hard attention is not desired.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
seed: (optional) Random seed for pre-sigmoid noise.
Returns:
A `[batch_size, alignments_size]`-shape tensor corresponding to the
resulting attention distribution.
"""
# Optionally add pre-sigmoid noise to the scores
if sigmoid_noise > 0:
noise = random_ops.random_normal(
array_ops.shape(score), dtype=score.dtype, seed=seed)
score += sigmoid_noise * noise
# Compute "choosing" probabilities from the attention scores
if mode == "hard":
# When mode is hard, use a hard sigmoid
p_choose_i = math_ops.cast(score > 0, score.dtype)
else:
p_choose_i = math_ops.sigmoid(score)
# Convert from choosing probabilities to attention distribution
return monotonic_attention(p_choose_i, previous_alignments, mode)
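# Hedged sketch of the pipeline above: raw scores are (optionally) perturbed,
# squashed into "choosing" probabilities, and fed to monotonic_attention. With
# sigmoid_noise=0 and mode="hard", the sigmoid degenerates to a step function
# on the sign of the score. Inputs are hypothetical.
def _example_monotonic_probability():
  """Sketch: turning raw scores into a monotonic attention distribution."""
  score = constant_op.constant([[-1.0, 2.0, 0.5]])
  previous_alignments = constant_op.constant([[1.0, 0.0, 0.0]])
  return _monotonic_probability_fn(
      score, previous_alignments, sigmoid_noise=0., mode="recursive")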
class _BaseMonotonicAttentionMechanism(_BaseAttentionMechanism):
"""Base attention mechanism for monotonic attention.
Simply overrides the initial_alignments function to provide a dirac
distribution, which is needed in order for the monotonic attention
distributions to have the correct behavior.
"""
def initial_alignments(self, batch_size, dtype):
"""Creates the initial alignment values for the monotonic attentions.
Initializes to dirac distributions, i.e. [1, 0, 0, ...memory length..., 0]
for all entries in the batch.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A `dtype` tensor shaped `[batch_size, alignments_size]`
(`alignments_size` is the values' `max_time`).
"""
max_time = self._alignments_size
return array_ops.one_hot(
array_ops.zeros((batch_size,), dtype=dtypes.int32),
max_time,
dtype=dtype)
class _BaseMonotonicAttentionMechanismV2(_BaseAttentionMechanismV2):
"""Base attention mechanism for monotonic attention.
Simply overrides the initial_alignments function to provide a dirac
distribution, which is needed in order for the monotonic attention
distributions to have the correct behavior.
"""
def initial_alignments(self, batch_size, dtype):
"""Creates the initial alignment values for the monotonic attentions.
Initializes to dirac distributions, i.e. [1, 0, 0, ...memory length..., 0]
for all entries in the batch.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A `dtype` tensor shaped `[batch_size, alignments_size]`
(`alignments_size` is the values' `max_time`).
"""
max_time = self._alignments_size
return array_ops.one_hot(
array_ops.zeros((batch_size,), dtype=dtypes.int32),
max_time,
dtype=dtype)
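# Sketch of the dirac initialization shared by both monotonic base classes:
# every batch entry starts with all attention mass on position 0, which the
# monotonic recurrences then shift strictly to the right over time.
def _example_dirac_initial_alignments(batch_size=2, max_time=4):
  """Sketch: the one-hot initial alignments used by monotonic attention."""
  return array_ops.one_hot(
      array_ops.zeros((batch_size,), dtype=dtypes.int32),
      max_time,
      dtype=dtypes.float32)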
class BahdanauMonotonicAttention(_BaseMonotonicAttentionMechanism):
"""Monotonic attention mechanism with Bahadanau-style energy function.
This type of attention enforces a monotonic constraint on the attention
  distributions; that is, once the model attends to a given point in the
  memory, it can't attend to any prior points at subsequent output timesteps.
  It
achieves this by using the _monotonic_probability_fn instead of softmax to
construct its attention distributions. Since the attention scores are passed
through a sigmoid, a learnable scalar bias parameter is applied after the
score function and before the sigmoid. Otherwise, it is equivalent to
BahdanauAttention. This approach is proposed in
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
normalize=False,
score_mask_value=None,
sigmoid_noise=0.,
sigmoid_noise_seed=None,
score_bias_init=0.,
mode="parallel",
dtype=None,
name="BahdanauMonotonicAttention"):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
      score_mask_value: (optional) The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
sigmoid_noise: Standard deviation of pre-sigmoid noise. See the docstring
for `_monotonic_probability_fn` for more information.
sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.
score_bias_init: Initial value for score bias scalar. It's recommended to
initialize this to a negative value when the length of the memory is
large.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
"""
# Set up the monotonic probability fn with supplied parameters
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = functools.partial(
_monotonic_probability_fn,
sigmoid_noise=sigmoid_noise,
mode=mode,
seed=sigmoid_noise_seed)
super(BahdanauMonotonicAttention, self).__init__(
query_layer=layers_core.Dense(
num_units, name="query_layer", use_bias=False, dtype=dtype),
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._normalize = normalize
self._name = name
self._score_bias_init = score_bias_init
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape `[batch_size,
query_depth]`.
state: Tensor of dtype matching `self.values` and shape `[batch_size,
alignments_size]` (`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(None, "bahdanau_monotonic_attention",
[query]):
processed_query = self.query_layer(query) if self.query_layer else query
attention_v = variable_scope.get_variable(
"attention_v", [self._num_units], dtype=query.dtype)
if not self._normalize:
attention_g = None
attention_b = None
else:
attention_g = variable_scope.get_variable(
"attention_g",
dtype=query.dtype,
initializer=init_ops.constant_initializer(
math.sqrt((1. / self._num_units))),
shape=())
attention_b = variable_scope.get_variable(
"attention_b", [self._num_units],
dtype=query.dtype,
initializer=init_ops.zeros_initializer())
score = _bahdanau_score(
processed_query,
self._keys,
attention_v,
attention_g=attention_g,
attention_b=attention_b)
score_bias = variable_scope.get_variable(
"attention_score_bias",
dtype=processed_query.dtype,
initializer=self._score_bias_init)
score += score_bias
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
class BahdanauMonotonicAttentionV2(_BaseMonotonicAttentionMechanismV2):
"""Monotonic attention mechanism with Bahadanau-style energy function.
This type of attention enforces a monotonic constraint on the attention
  distributions; that is, once the model attends to a given point in the
  memory, it can't attend to any prior points at subsequent output timesteps.
  It
achieves this by using the _monotonic_probability_fn instead of softmax to
construct its attention distributions. Since the attention scores are passed
through a sigmoid, a learnable scalar bias parameter is applied after the
score function and before the sigmoid. Otherwise, it is equivalent to
BahdanauAttention. This approach is proposed in
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
"""
def __init__(self,
units,
memory,
memory_sequence_length=None,
normalize=False,
sigmoid_noise=0.,
sigmoid_noise_seed=None,
score_bias_init=0.,
mode="parallel",
kernel_initializer="glorot_uniform",
dtype=None,
name="BahdanauMonotonicAttention",
**kwargs):
"""Construct the Attention mechanism.
Args:
units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
      memory_sequence_length: (optional) Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
sigmoid_noise: Standard deviation of pre-sigmoid noise. See the docstring
for `_monotonic_probability_fn` for more information.
sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.
score_bias_init: Initial value for score bias scalar. It's recommended to
initialize this to a negative value when the length of the memory is
large.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
      kernel_initializer: (optional) The name of the initializer for the
        attention kernel.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
**kwargs: Dictionary that contains other common arguments for layer
creation.
"""
# Set up the monotonic probability fn with supplied parameters
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = functools.partial(
_monotonic_probability_fn,
sigmoid_noise=sigmoid_noise,
mode=mode,
seed=sigmoid_noise_seed)
query_layer = kwargs.pop("query_layer", None)
if not query_layer:
query_layer = layers.Dense(
units, name="query_layer", use_bias=False, dtype=dtype)
memory_layer = kwargs.pop("memory_layer", None)
if not memory_layer:
memory_layer = layers.Dense(
units, name="memory_layer", use_bias=False, dtype=dtype)
self.units = units
self.normalize = normalize
self.sigmoid_noise = sigmoid_noise
self.sigmoid_noise_seed = sigmoid_noise_seed
self.score_bias_init = score_bias_init
self.mode = mode
self.kernel_initializer = initializers.get(kernel_initializer)
self.attention_v = None
self.attention_score_bias = None
self.attention_g = None
self.attention_b = None
super(BahdanauMonotonicAttentionV2, self).__init__(
memory=memory,
memory_sequence_length=memory_sequence_length,
query_layer=query_layer,
memory_layer=memory_layer,
probability_fn=wrapped_probability_fn,
name=name,
dtype=dtype,
**kwargs)
def build(self, input_shape):
super(BahdanauMonotonicAttentionV2, self).build(input_shape)
if self.attention_v is None:
self.attention_v = self.add_weight(
"attention_v", [self.units],
dtype=self.dtype,
initializer=self.kernel_initializer)
if self.attention_score_bias is None:
self.attention_score_bias = self.add_weight(
"attention_score_bias",
shape=(),
dtype=self.dtype,
initializer=init_ops.constant_initializer(
self.score_bias_init, dtype=self.dtype))
if self.normalize and self.attention_g is None and self.attention_b is None:
self.attention_g = self.add_weight(
"attention_g",
dtype=self.dtype,
initializer=init_ops.constant_initializer(
math.sqrt((1. / self.units))),
shape=())
self.attention_b = self.add_weight(
"attention_b", [self.units],
dtype=self.dtype,
initializer=init_ops.zeros_initializer())
self.built = True
def _calculate_attention(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape `[batch_size,
query_depth]`.
state: Tensor of dtype matching `self.values` and shape `[batch_size,
alignments_size]` (`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
processed_query = self.query_layer(query) if self.query_layer else query
score = _bahdanau_score(
processed_query,
self.keys,
self.attention_v,
attention_g=self.attention_g,
attention_b=self.attention_b)
score += self.attention_score_bias
alignments = self.probability_fn(score, state)
next_state = alignments
return alignments, next_state
def get_config(self):
config = {
"units": self.units,
"normalize": self.normalize,
"sigmoid_noise": self.sigmoid_noise,
"sigmoid_noise_seed": self.sigmoid_noise_seed,
"score_bias_init": self.score_bias_init,
"mode": self.mode,
"kernel_initializer": initializers.serialize(self.kernel_initializer),
}
base_config = super(BahdanauMonotonicAttentionV2, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
config = _BaseAttentionMechanismV2.deserialize_inner_layer_from_config(
config, custom_objects=custom_objects)
return cls(**config)
class LuongMonotonicAttention(_BaseMonotonicAttentionMechanism):
"""Monotonic attention mechanism with Luong-style energy function.
This type of attention enforces a monotonic constraint on the attention
  distributions; that is, once the model attends to a given point in the
  memory, it can't attend to any prior points at subsequent output timesteps.
  It
achieves this by using the _monotonic_probability_fn instead of softmax to
construct its attention distributions. Otherwise, it is equivalent to
LuongAttention. This approach is proposed in
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
scale=False,
score_mask_value=None,
sigmoid_noise=0.,
sigmoid_noise_seed=None,
score_bias_init=0.,
mode="parallel",
dtype=None,
name="LuongMonotonicAttention"):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
scale: Python boolean. Whether to scale the energy term.
      score_mask_value: (optional) The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
sigmoid_noise: Standard deviation of pre-sigmoid noise. See the docstring
for `_monotonic_probability_fn` for more information.
sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.
score_bias_init: Initial value for score bias scalar. It's recommended to
initialize this to a negative value when the length of the memory is
large.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
"""
# Set up the monotonic probability fn with supplied parameters
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = functools.partial(
_monotonic_probability_fn,
sigmoid_noise=sigmoid_noise,
mode=mode,
seed=sigmoid_noise_seed)
super(LuongMonotonicAttention, self).__init__(
query_layer=None,
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._scale = scale
self._score_bias_init = score_bias_init
self._name = name
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape `[batch_size,
query_depth]`.
state: Tensor of dtype matching `self.values` and shape `[batch_size,
alignments_size]` (`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(None, "luong_monotonic_attention",
[query]):
attention_g = None
if self._scale:
attention_g = variable_scope.get_variable(
"attention_g",
dtype=query.dtype,
initializer=init_ops.ones_initializer,
shape=())
score = _luong_score(query, self._keys, attention_g)
score_bias = variable_scope.get_variable(
"attention_score_bias",
dtype=query.dtype,
initializer=self._score_bias_init)
score += score_bias
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
class LuongMonotonicAttentionV2(_BaseMonotonicAttentionMechanismV2):
"""Monotonic attention mechanism with Luong-style energy function.
This type of attention enforces a monotonic constraint on the attention
  distributions; that is, once the model attends to a given point in the
  memory, it can't attend to any prior points at subsequent output timesteps.
  It
achieves this by using the _monotonic_probability_fn instead of softmax to
construct its attention distributions. Otherwise, it is equivalent to
LuongAttention. This approach is proposed in
[Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017.](https://arxiv.org/abs/1704.00784)
"""
def __init__(self,
units,
memory,
memory_sequence_length=None,
scale=False,
sigmoid_noise=0.,
sigmoid_noise_seed=None,
score_bias_init=0.,
mode="parallel",
dtype=None,
name="LuongMonotonicAttention",
**kwargs):
"""Construct the Attention mechanism.
Args:
units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
      memory_sequence_length: (optional) Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
scale: Python boolean. Whether to scale the energy term.
sigmoid_noise: Standard deviation of pre-sigmoid noise. See the docstring
for `_monotonic_probability_fn` for more information.
sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.
score_bias_init: Initial value for score bias scalar. It's recommended to
initialize this to a negative value when the length of the memory is
large.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
**kwargs: Dictionary that contains other common arguments for layer
creation.
"""
# Set up the monotonic probability fn with supplied parameters
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = functools.partial(
_monotonic_probability_fn,
sigmoid_noise=sigmoid_noise,
mode=mode,
seed=sigmoid_noise_seed)
memory_layer = kwargs.pop("memory_layer", None)
if not memory_layer:
memory_layer = layers.Dense(
units, name="memory_layer", use_bias=False, dtype=dtype)
self.units = units
self.scale = scale
self.sigmoid_noise = sigmoid_noise
self.sigmoid_noise_seed = sigmoid_noise_seed
self.score_bias_init = score_bias_init
self.mode = mode
self.attention_g = None
self.attention_score_bias = None
super(LuongMonotonicAttentionV2, self).__init__(
memory=memory,
memory_sequence_length=memory_sequence_length,
query_layer=None,
memory_layer=memory_layer,
probability_fn=wrapped_probability_fn,
name=name,
dtype=dtype,
**kwargs)
def build(self, input_shape):
super(LuongMonotonicAttentionV2, self).build(input_shape)
if self.scale and self.attention_g is None:
self.attention_g = self.add_weight(
"attention_g", initializer=init_ops.ones_initializer, shape=())
if self.attention_score_bias is None:
self.attention_score_bias = self.add_weight(
"attention_score_bias",
shape=(),
initializer=init_ops.constant_initializer(
self.score_bias_init, dtype=self.dtype))
self.built = True
def _calculate_attention(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape `[batch_size,
query_depth]`.
state: Tensor of dtype matching `self.values` and shape `[batch_size,
alignments_size]` (`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
      next_state: Same as alignments.
"""
score = _luong_score(query, self.keys, self.attention_g)
score += self.attention_score_bias
alignments = self.probability_fn(score, state)
next_state = alignments
return alignments, next_state
def get_config(self):
config = {
"units": self.units,
"scale": self.scale,
"sigmoid_noise": self.sigmoid_noise,
"sigmoid_noise_seed": self.sigmoid_noise_seed,
"score_bias_init": self.score_bias_init,
"mode": self.mode,
}
base_config = super(LuongMonotonicAttentionV2, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
config = _BaseAttentionMechanismV2.deserialize_inner_layer_from_config(
config, custom_objects=custom_objects)
return cls(**config)
class AttentionWrapperState(
collections.namedtuple("AttentionWrapperState",
("cell_state", "attention", "time", "alignments",
"alignment_history", "attention_state"))):
"""`namedtuple` storing the state of a `AttentionWrapper`.
Contains:
- `cell_state`: The state of the wrapped `RNNCell` at the previous time
step.
- `attention`: The attention emitted at the previous time step.
- `time`: int32 scalar containing the current time step.
- `alignments`: A single or tuple of `Tensor`(s) containing the alignments
emitted at the previous time step for each attention mechanism.
- `alignment_history`: (if enabled) a single or tuple of `TensorArray`(s)
containing alignment matrices from all time steps for each attention
mechanism. Call `stack()` on each to convert to a `Tensor`.
- `attention_state`: A single or tuple of nested objects
containing attention mechanism state for each attention mechanism.
The objects may contain Tensors or TensorArrays.
"""
def clone(self, **kwargs):
"""Clone this object, overriding components provided by kwargs.
    The shapes of the new state fields must match the shapes of the original
    state fields. This is validated, and the original fields' static shapes
    are propagated to the new fields.
Example:
```python
initial_state = attention_wrapper.zero_state(dtype=..., batch_size=...)
initial_state = initial_state.clone(cell_state=encoder_state)
```
Args:
**kwargs: Any properties of the state object to replace in the returned
`AttentionWrapperState`.
Returns:
A new `AttentionWrapperState` whose properties are the same as
this one, except any overridden properties as provided in `kwargs`.
"""
def with_same_shape(old, new):
"""Check and set new tensor's shape."""
if isinstance(old, ops.Tensor) and isinstance(new, ops.Tensor):
if not context.executing_eagerly():
return tensor_util.with_same_shape(old, new)
else:
if old.shape.as_list() != new.shape.as_list():
raise ValueError("The shape of the AttentionWrapperState is "
"expected to be same as the one to clone. "
"self.shape: %s, input.shape: %s" %
(old.shape, new.shape))
return new
return new
return nest.map_structure(
with_same_shape, self,
super(AttentionWrapperState, self)._replace(**kwargs))
def _prepare_memory(memory,
memory_sequence_length=None,
memory_mask=None,
check_inner_dims_defined=True):
"""Convert to tensor and possibly mask `memory`.
Args:
memory: `Tensor`, shaped `[batch_size, max_time, ...]`.
memory_sequence_length: `int32` `Tensor`, shaped `[batch_size]`.
    memory_mask: `boolean` tensor with shape `[batch_size, max_time]`. Memory
      entries for which the mask is False are skipped (masked to zero).
check_inner_dims_defined: Python boolean. If `True`, the `memory`
argument's shape is checked to ensure all but the two outermost dimensions
are fully defined.
Returns:
A (possibly masked), checked, new `memory`.
Raises:
ValueError: If `check_inner_dims_defined` is `True` and not
`memory.shape[2:].is_fully_defined()`.
"""
memory = nest.map_structure(lambda m: ops.convert_to_tensor(m, name="memory"),
memory)
if memory_sequence_length is not None and memory_mask is not None:
raise ValueError("memory_sequence_length and memory_mask can't be provided "
"at same time.")
if memory_sequence_length is not None:
memory_sequence_length = ops.convert_to_tensor(
memory_sequence_length, name="memory_sequence_length")
if check_inner_dims_defined:
def _check_dims(m):
if not m.get_shape()[2:].is_fully_defined():
raise ValueError("Expected memory %s to have fully defined inner dims, "
"but saw shape: %s" % (m.name, m.get_shape()))
nest.map_structure(_check_dims, memory)
if memory_sequence_length is None and memory_mask is None:
return memory
elif memory_sequence_length is not None:
seq_len_mask = array_ops.sequence_mask(
memory_sequence_length,
maxlen=array_ops.shape(nest.flatten(memory)[0])[1],
dtype=nest.flatten(memory)[0].dtype)
else:
    # Case: memory_mask is not None.
seq_len_mask = math_ops.cast(
memory_mask, dtype=nest.flatten(memory)[0].dtype)
def _maybe_mask(m, seq_len_mask):
"""Mask the memory based on the memory mask."""
rank = m.get_shape().ndims
rank = rank if rank is not None else array_ops.rank(m)
extra_ones = array_ops.ones(rank - 2, dtype=dtypes.int32)
seq_len_mask = array_ops.reshape(
seq_len_mask,
array_ops.concat((array_ops.shape(seq_len_mask), extra_ones), 0))
return m * seq_len_mask
return nest.map_structure(lambda m: _maybe_mask(m, seq_len_mask), memory)
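# Illustrative sketch (not part of the library; names are hypothetical): the
# masking performed by `_prepare_memory` can be mirrored in plain NumPy --
# padded time steps beyond each sequence length are zeroed out before
# attention is computed over the memory.
def _prepare_memory_numpy_sketch(memory_np, lengths_np):
  """memory_np: float [batch, max_time, depth]; lengths_np: int [batch]."""
  import numpy as np
  max_time = memory_np.shape[1]
  mask = np.arange(max_time)[None, :] < lengths_np[:, None]  # [batch, max_time]
  return memory_np * mask[:, :, None].astype(memory_np.dtype)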
def _maybe_mask_score(score,
memory_sequence_length=None,
memory_mask=None,
score_mask_value=None):
"""Mask the attention score based on the masks."""
if memory_sequence_length is None and memory_mask is None:
return score
if memory_sequence_length is not None and memory_mask is not None:
raise ValueError("memory_sequence_length and memory_mask can't be provided "
"at same time.")
if memory_sequence_length is not None:
message = "All values in memory_sequence_length must be greater than zero."
with ops.control_dependencies(
[check_ops.assert_positive(memory_sequence_length, message=message)]):
memory_mask = array_ops.sequence_mask(
memory_sequence_length, maxlen=array_ops.shape(score)[1])
score_mask_values = score_mask_value * array_ops.ones_like(score)
return array_ops.where(memory_mask, score, score_mask_values)
def hardmax(logits, name=None):
"""Returns batched one-hot vectors.
The depth index containing the `1` is that of the maximum logit value.
Args:
logits: A batch tensor of logit values.
name: Name to use when creating ops.
Returns:
A batched one-hot tensor.
"""
with ops.name_scope(name, "Hardmax", [logits]):
logits = ops.convert_to_tensor(logits, name="logits")
if tensor_shape.dimension_value(logits.get_shape()[-1]) is not None:
depth = tensor_shape.dimension_value(logits.get_shape()[-1])
else:
depth = array_ops.shape(logits)[-1]
return array_ops.one_hot(
math_ops.argmax(logits, -1), depth, dtype=logits.dtype)
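# Illustrative sketch (hypothetical helper, not part of the library): the
# semantics of `hardmax` in plain NumPy -- a one-hot vector placed at the
# argmax of the last axis.
def _hardmax_numpy_sketch(logits_np):
  """logits_np: float array of shape [..., depth]."""
  import numpy as np
  depth = logits_np.shape[-1]
  return np.eye(depth, dtype=logits_np.dtype)[np.argmax(logits_np, axis=-1)]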
def _compute_attention(attention_mechanism, cell_output, attention_state,
attention_layer):
"""Computes the attention and alignments for a given attention_mechanism."""
if isinstance(attention_mechanism, _BaseAttentionMechanismV2):
alignments, next_attention_state = attention_mechanism(
[cell_output, attention_state])
else:
# For other class, assume they are following _BaseAttentionMechanism, which
# takes query and state as separate parameter.
alignments, next_attention_state = attention_mechanism(
cell_output, state=attention_state)
# Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]
expanded_alignments = array_ops.expand_dims(alignments, 1)
# Context is the inner product of alignments and values along the
# memory time dimension.
# alignments shape is
# [batch_size, 1, memory_time]
# attention_mechanism.values shape is
# [batch_size, memory_time, memory_size]
# the batched matmul is over memory_time, so the output shape is
# [batch_size, 1, memory_size].
# we then squeeze out the singleton dim.
context_ = math_ops.matmul(expanded_alignments, attention_mechanism.values)
context_ = array_ops.squeeze(context_, [1])
if attention_layer is not None:
attention = attention_layer(array_ops.concat([cell_output, context_], 1))
else:
attention = context_
return attention, alignments, next_attention_state
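# Shape walk-through (illustrative sketch with hypothetical names): the batched
# matmul in `_compute_attention` contracts the memory-time axis, producing one
# context vector per batch entry.
def _context_numpy_sketch(alignments_np, values_np):
  """alignments_np: [batch, memory_time]; values_np: [batch, memory_time, memory_size]."""
  import numpy as np
  expanded = alignments_np[:, None, :]        # [batch, 1, memory_time]
  context = np.matmul(expanded, values_np)    # [batch, 1, memory_size]
  return np.squeeze(context, axis=1)          # [batch, memory_size]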
class AttentionWrapper(rnn_cell_impl.RNNCell):
"""Wraps another `RNNCell` with attention."""
def __init__(self,
cell,
attention_mechanism,
attention_layer_size=None,
alignment_history=False,
cell_input_fn=None,
output_attention=True,
initial_cell_state=None,
name=None,
attention_layer=None,
attention_fn=None,
dtype=None):
"""Construct the `AttentionWrapper`.
**NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped in
`AttentionWrapper`, then you must ensure that:
- The encoder output has been tiled to `beam_width` via
`tf.contrib.seq2seq.tile_batch` (NOT `tf.tile`).
- The `batch_size` argument passed to the `zero_state` method of this
wrapper is equal to `true_batch_size * beam_width`.
- The initial state created with `zero_state` above contains a
`cell_state` value containing properly tiled final state from the
encoder.
An example:
```
tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(
encoder_outputs, multiplier=beam_width)
    tiled_encoder_final_state = tf.contrib.seq2seq.tile_batch(
encoder_final_state, multiplier=beam_width)
tiled_sequence_length = tf.contrib.seq2seq.tile_batch(
sequence_length, multiplier=beam_width)
attention_mechanism = MyFavoriteAttentionMechanism(
num_units=attention_depth,
        memory=tiled_encoder_outputs,
memory_sequence_length=tiled_sequence_length)
attention_cell = AttentionWrapper(cell, attention_mechanism, ...)
decoder_initial_state = attention_cell.zero_state(
dtype, batch_size=true_batch_size * beam_width)
decoder_initial_state = decoder_initial_state.clone(
cell_state=tiled_encoder_final_state)
```
Args:
cell: An instance of `RNNCell`.
attention_mechanism: A list of `AttentionMechanism` instances or a single
instance.
attention_layer_size: A list of Python integers or a single Python
integer, the depth of the attention (output) layer(s). If None
(default), use the context as attention at each time step. Otherwise,
feed the context and cell output into the attention layer to generate
attention at each time step. If attention_mechanism is a list,
attention_layer_size must be a list of the same length. If
        attention_layer is set, this must be None. If attention_fn is set, it
        must be guaranteed that the outputs of attention_fn also meet the
        above requirements.
alignment_history: Python boolean, whether to store alignment history from
all time steps in the final output state (currently stored as a time
major `TensorArray` on which you must call `stack()`).
cell_input_fn: (optional) A `callable`. The default is:
`lambda inputs, attention: array_ops.concat([inputs, attention], -1)`.
output_attention: Python bool. If `True` (default), the output at each
time step is the attention value. This is the behavior of Luong-style
attention mechanisms. If `False`, the output at each time step is the
        output of `cell`. This is the behavior of Bahdanau-style attention
mechanisms. In both cases, the `attention` tensor is propagated to the
next time step via the state and is used there. This flag only controls
whether the attention mechanism is propagated up to the next cell in an
RNN stack or to the top RNN output.
initial_cell_state: The initial state value to use for the cell when the
user calls `zero_state()`. Note that if this value is provided now, and
the user uses a `batch_size` argument of `zero_state` which does not
match the batch size of `initial_cell_state`, proper behavior is not
guaranteed.
name: Name to use when creating ops.
attention_layer: A list of `tf.compat.v1.layers.Layer` instances or a
single `tf.compat.v1.layers.Layer` instance taking the context and cell
output as inputs to generate attention at each time step. If None
(default), use the context as attention at each time step. If
attention_mechanism is a list, attention_layer must be a list of the
        same length. If attention_layer_size is set, this must be None.
attention_fn: An optional callable function that allows users to provide
their own customized attention function, which takes input
(attention_mechanism, cell_output, attention_state, attention_layer) and
outputs (attention, alignments, next_attention_state). If provided, the
attention_layer_size should be the size of the outputs of attention_fn.
      dtype: The cell dtype.
Raises:
TypeError: `attention_layer_size` is not None and (`attention_mechanism`
is a list but `attention_layer_size` is not; or vice versa).
ValueError: if `attention_layer_size` is not None, `attention_mechanism`
is a list, and its length does not match that of `attention_layer_size`;
if `attention_layer_size` and `attention_layer` are set simultaneously.
"""
super(AttentionWrapper, self).__init__(name=name, dtype=dtype)
rnn_cell_impl.assert_like_rnncell("cell", cell)
if isinstance(attention_mechanism, (list, tuple)):
self._is_multi = True
attention_mechanisms = attention_mechanism
for attention_mechanism in attention_mechanisms:
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError("attention_mechanism must contain only instances of "
"AttentionMechanism, saw type: %s" %
type(attention_mechanism).__name__)
else:
self._is_multi = False
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError(
"attention_mechanism must be an AttentionMechanism or list of "
"multiple AttentionMechanism instances, saw type: %s" %
type(attention_mechanism).__name__)
attention_mechanisms = (attention_mechanism,)
if cell_input_fn is None:
cell_input_fn = (
lambda inputs, attention: array_ops.concat([inputs, attention], -1))
else:
if not callable(cell_input_fn):
raise TypeError("cell_input_fn must be callable, saw type: %s" %
type(cell_input_fn).__name__)
if attention_layer_size is not None and attention_layer is not None:
raise ValueError("Only one of attention_layer_size and attention_layer "
"should be set")
if attention_layer_size is not None:
attention_layer_sizes = tuple(
attention_layer_size if isinstance(attention_layer_size, (
list, tuple)) else (attention_layer_size,))
if len(attention_layer_sizes) != len(attention_mechanisms):
raise ValueError(
"If provided, attention_layer_size must contain exactly one "
"integer per attention_mechanism, saw: %d vs %d" %
(len(attention_layer_sizes), len(attention_mechanisms)))
self._attention_layers = tuple(
layers_core.Dense(
attention_layer_size,
name="attention_layer",
use_bias=False,
dtype=attention_mechanisms[i].dtype)
for i, attention_layer_size in enumerate(attention_layer_sizes))
self._attention_layer_size = sum(attention_layer_sizes)
elif attention_layer is not None:
self._attention_layers = tuple(
attention_layer if isinstance(attention_layer, (list, tuple)) else (
attention_layer,))
if len(self._attention_layers) != len(attention_mechanisms):
raise ValueError(
"If provided, attention_layer must contain exactly one "
"layer per attention_mechanism, saw: %d vs %d" %
(len(self._attention_layers), len(attention_mechanisms)))
self._attention_layer_size = sum(
tensor_shape.dimension_value(
layer.compute_output_shape([
None, cell.output_size +
tensor_shape.dimension_value(mechanism.values.shape[-1])
])[-1]) for layer, mechanism in zip(self._attention_layers,
attention_mechanisms))
else:
self._attention_layers = None
self._attention_layer_size = sum(
tensor_shape.dimension_value(attention_mechanism.values.shape[-1])
for attention_mechanism in attention_mechanisms)
if attention_fn is None:
attention_fn = _compute_attention
self._attention_fn = attention_fn
self._cell = cell
self._attention_mechanisms = attention_mechanisms
self._cell_input_fn = cell_input_fn
self._output_attention = output_attention
self._alignment_history = alignment_history
with ops.name_scope(name, "AttentionWrapperInit"):
if initial_cell_state is None:
self._initial_cell_state = None
else:
final_state_tensor = nest.flatten(initial_cell_state)[-1]
state_batch_size = (
tensor_shape.dimension_value(final_state_tensor.shape[0]) or
array_ops.shape(final_state_tensor)[0])
error_message = (
"When constructing AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and initial_cell_state. Are you using "
"the BeamSearchDecoder? You may need to tile your initial state "
"via the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width.")
with ops.control_dependencies(
self._batch_size_checks(state_batch_size, error_message)):
self._initial_cell_state = nest.map_structure(
lambda s: array_ops.identity(s, name="check_initial_cell_state"),
initial_cell_state)
def _batch_size_checks(self, batch_size, error_message):
return [
check_ops.assert_equal(
batch_size, attention_mechanism.batch_size, message=error_message)
for attention_mechanism in self._attention_mechanisms
]
def _item_or_tuple(self, seq):
"""Returns `seq` as tuple or the singular element.
Which is returned is determined by how the AttentionMechanism(s) were passed
to the constructor.
Args:
seq: A non-empty sequence of items or generator.
Returns:
Either the values in the sequence as a tuple if AttentionMechanism(s)
were passed to the constructor as a sequence or the singular element.
"""
t = tuple(seq)
if self._is_multi:
return t
else:
return t[0]
@property
def output_size(self):
if self._output_attention:
return self._attention_layer_size
else:
return self._cell.output_size
@property
def state_size(self):
"""The `state_size` property of `AttentionWrapper`.
Returns:
An `AttentionWrapperState` tuple containing shapes used by this object.
"""
return AttentionWrapperState(
cell_state=self._cell.state_size,
time=tensor_shape.TensorShape([]),
attention=self._attention_layer_size,
alignments=self._item_or_tuple(
a.alignments_size for a in self._attention_mechanisms),
attention_state=self._item_or_tuple(
a.state_size for a in self._attention_mechanisms),
alignment_history=self._item_or_tuple(
a.alignments_size if self._alignment_history else ()
for a in self._attention_mechanisms)) # sometimes a TensorArray
def zero_state(self, batch_size, dtype):
"""Return an initial (zero) state tuple for this `AttentionWrapper`.
**NOTE** Please see the initializer documentation for details of how
to call `zero_state` if using an `AttentionWrapper` with a
`BeamSearchDecoder`.
Args:
batch_size: `0D` integer tensor: the batch size.
dtype: The internal state data type.
Returns:
An `AttentionWrapperState` tuple containing zeroed out tensors and,
possibly, empty `TensorArray` objects.
Raises:
ValueError: (or, possibly at runtime, InvalidArgument), if
`batch_size` does not match the output size of the encoder passed
to the wrapper object at initialization time.
"""
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
if self._initial_cell_state is not None:
cell_state = self._initial_cell_state
else:
cell_state = self._cell.get_initial_state(
batch_size=batch_size, dtype=dtype)
error_message = (
"When calling zero_state of AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and the requested batch size. Are you using "
"the BeamSearchDecoder? If so, make sure your encoder output has "
"been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and "
"the batch_size= argument passed to zero_state is "
"batch_size * beam_width.")
with ops.control_dependencies(
self._batch_size_checks(batch_size, error_message)):
cell_state = nest.map_structure(
lambda s: array_ops.identity(s, name="checked_cell_state"),
cell_state)
initial_alignments = [
attention_mechanism.initial_alignments(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms
]
return AttentionWrapperState(
cell_state=cell_state,
time=array_ops.zeros([], dtype=dtypes.int32),
attention=_zero_state_tensors(self._attention_layer_size, batch_size,
dtype),
alignments=self._item_or_tuple(initial_alignments),
attention_state=self._item_or_tuple(
attention_mechanism.initial_state(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms),
alignment_history=self._item_or_tuple(
tensor_array_ops.TensorArray(
dtype,
size=0,
dynamic_size=True,
element_shape=alignment.shape) if self._alignment_history else
() for alignment in initial_alignments))
def call(self, inputs, state):
"""Perform a step of attention-wrapped RNN.
- Step 1: Mix the `inputs` and previous step's `attention` output via
`cell_input_fn`.
- Step 2: Call the wrapped `cell` with this input and its previous state.
- Step 3: Score the cell's output with `attention_mechanism`.
- Step 4: Calculate the alignments by passing the score through the
`normalizer`.
- Step 5: Calculate the context vector as the inner product between the
alignments and the attention_mechanism's values (memory).
- Step 6: Calculate the attention output by concatenating the cell output
and context through the attention layer (a linear layer with
`attention_layer_size` outputs).
Args:
inputs: (Possibly nested tuple of) Tensor, the input at this time step.
state: An instance of `AttentionWrapperState` containing tensors from the
previous time step.
Returns:
A tuple `(attention_or_cell_output, next_state)`, where:
      - `attention_or_cell_output` is the attention value if `output_attention`
        is `True`, otherwise the output of the wrapped `cell`.
- `next_state` is an instance of `AttentionWrapperState`
containing the state calculated at this time step.
Raises:
TypeError: If `state` is not an instance of `AttentionWrapperState`.
"""
if not isinstance(state, AttentionWrapperState):
raise TypeError("Expected state to be instance of AttentionWrapperState. "
"Received type %s instead." % type(state))
# Step 1: Calculate the true inputs to the cell based on the
# previous attention value.
cell_inputs = self._cell_input_fn(inputs, state.attention)
cell_state = state.cell_state
cell_output, next_cell_state = self._cell(cell_inputs, cell_state)
cell_batch_size = (
tensor_shape.dimension_value(cell_output.shape[0]) or
array_ops.shape(cell_output)[0])
error_message = (
"When applying AttentionWrapper %s: " % self.name +
"Non-matching batch sizes between the memory "
"(encoder output) and the query (decoder output). Are you using "
"the BeamSearchDecoder? You may need to tile your memory input via "
"the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width.")
with ops.control_dependencies(
self._batch_size_checks(cell_batch_size, error_message)):
cell_output = array_ops.identity(cell_output, name="checked_cell_output")
if self._is_multi:
previous_attention_state = state.attention_state
previous_alignment_history = state.alignment_history
else:
previous_attention_state = [state.attention_state]
previous_alignment_history = [state.alignment_history]
all_alignments = []
all_attentions = []
all_attention_states = []
maybe_all_histories = []
for i, attention_mechanism in enumerate(self._attention_mechanisms):
attention, alignments, next_attention_state = self._attention_fn(
attention_mechanism, cell_output, previous_attention_state[i],
self._attention_layers[i] if self._attention_layers else None)
alignment_history = previous_alignment_history[i].write(
state.time, alignments) if self._alignment_history else ()
all_attention_states.append(next_attention_state)
all_alignments.append(alignments)
all_attentions.append(attention)
maybe_all_histories.append(alignment_history)
attention = array_ops.concat(all_attentions, 1)
next_state = AttentionWrapperState(
time=state.time + 1,
cell_state=next_cell_state,
attention=attention,
attention_state=self._item_or_tuple(all_attention_states),
alignments=self._item_or_tuple(all_alignments),
alignment_history=self._item_or_tuple(maybe_all_histories))
if self._output_attention:
return attention, next_state
else:
return cell_output, next_state
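# Minimal usage sketch (illustrative only; assumes graph-mode TF 1.x, and all
# shapes and sizes below are hypothetical):
def _attention_wrapper_usage_sketch():
  import tensorflow as tf
  memory = tf.placeholder(tf.float32, [None, 10, 32])   # encoder outputs
  mechanism = tf.contrib.seq2seq.LuongAttention(num_units=32, memory=memory)
  cell = tf.contrib.seq2seq.AttentionWrapper(
      tf.nn.rnn_cell.LSTMCell(32), mechanism, attention_layer_size=16)
  inputs = tf.placeholder(tf.float32, [None, 10, 8])    # decoder inputs
  initial_state = cell.zero_state(tf.shape(inputs)[0], tf.float32)
  outputs, _ = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state)
  return outputs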
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Beam Search helper ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.seq2seq.ops import gen_beam_search_ops
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
_beam_search_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_beam_search_ops.so"))
gather_tree = gen_beam_search_ops.gather_tree
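# Rough semantic sketch (hypothetical, simplified): `gather_tree` recovers full
# beams by backtracking parent pointers from the last step. This NumPy mirror
# ignores the `max_sequence_lengths` / `end_token` handling of the real op.
def _gather_tree_numpy_sketch(step_ids, parent_ids):
  """step_ids, parent_ids: int arrays of shape [max_time, batch, beam]."""
  import numpy as np
  max_time, batch, beam = step_ids.shape
  out = np.empty_like(step_ids)
  parent = np.tile(np.arange(beam), (batch, 1))  # start from each final beam
  b = np.arange(batch)[:, None]
  for t in range(max_time - 1, -1, -1):
    out[t] = step_ids[t][b, parent]
    parent = parent_ids[t][b, parent]
  return out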
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/seq2seq/python/ops/beam_search_ops.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library of sampler for use with SamplingDecoders."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.util import nest
__all__ = [
"Sampler",
"TrainingSampler",
"GreedyEmbeddingSampler",
"SampleEmbeddingSampler",
"CustomSampler",
"ScheduledEmbeddingTrainingSampler",
"ScheduledOutputTrainingSampler",
"InferenceSampler",
]
_transpose_batch_time = decoder._transpose_batch_time # pylint: disable=protected-access
@six.add_metaclass(abc.ABCMeta)
class Sampler(object):
"""Interface for implementing sampling in seq2seq decoders.
  Sampler instances are used by `BasicDecoder`. Typical usage of a sampler
  looks like:
  sampler = Sampler(init_args)
  (initial_finished, initial_inputs) = sampler.initialize(input_tensors)
  for time_step in range(time):
    cell_output, cell_state = cell.call(cell_input, previous_state)
    sample_ids = sampler.sample(time_step, cell_output, cell_state)
    (finished, next_inputs, next_state) = sampler.next_inputs(
        time_step, cell_output, cell_state)
  Note that tensor inputs should not be fed to a Sampler as `__init__()`
  parameters; instead, they should be fed by the decoder via `initialize()`.
"""
@abc.abstractmethod
def initialize(self, inputs, **kwargs):
"""initialize the sampler with the input tensors.
This method suppose to be only invoke once before the calling other methods
of the Sampler.
Args:
inputs: A (structure of) input tensors, it could be a nested tuple or a
single tensor.
      **kwargs: Other kwargs for initialization. It may contain tensors such
        as a mask for the inputs, or non-tensor parameters.
Returns:
`(initial_finished, initial_inputs)`.
"""
pass
@abc.abstractmethod
def sample(self, time, outputs, state):
"""Returns `sample_ids`."""
pass
@abc.abstractmethod
def next_inputs(self, time, outputs, state, sample_ids):
"""Returns `(finished, next_inputs, next_state)`."""
pass
@abc.abstractproperty
def batch_size(self):
"""Batch size of tensor returned by `sample`.
    Returns a scalar int32 tensor. The return value might not be available
    before `initialize()` has been invoked; in that case, a ValueError is
    raised.
"""
raise NotImplementedError("batch_size has not been implemented")
@abc.abstractproperty
def sample_ids_shape(self):
"""Shape of tensor returned by `sample`, excluding the batch dimension.
    Returns a `TensorShape`. The return value might not be available before
    `initialize()` has been invoked.
"""
raise NotImplementedError("sample_ids_shape has not been implemented")
@abc.abstractproperty
def sample_ids_dtype(self):
"""DType of tensor returned by `sample`.
    Returns a DType. The return value might not be available before
    `initialize()` has been invoked.
"""
raise NotImplementedError("sample_ids_dtype has not been implemented")
class CustomSampler(Sampler):
"""Base abstract class that allows the user to customize sampling."""
def __init__(self,
initialize_fn,
sample_fn,
next_inputs_fn,
sample_ids_shape=None,
sample_ids_dtype=None):
"""Initializer.
Args:
initialize_fn: callable that returns `(finished, next_inputs)` for the
first iteration.
sample_fn: callable that takes `(time, outputs, state)` and emits tensor
`sample_ids`.
next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`
and emits `(finished, next_inputs, next_state)`.
sample_ids_shape: Either a list of integers, or a 1-D Tensor of type
`int32`, the shape of each value in the `sample_ids` batch. Defaults to
a scalar.
sample_ids_dtype: The dtype of the `sample_ids` tensor. Defaults to int32.
"""
self._initialize_fn = initialize_fn
self._sample_fn = sample_fn
self._next_inputs_fn = next_inputs_fn
self._batch_size = None
self._sample_ids_shape = tensor_shape.TensorShape(sample_ids_shape or [])
self._sample_ids_dtype = sample_ids_dtype or dtypes.int32
@property
def batch_size(self):
if self._batch_size is None:
raise ValueError("batch_size accessed before initialize was called")
return self._batch_size
@property
def sample_ids_shape(self):
return self._sample_ids_shape
@property
def sample_ids_dtype(self):
return self._sample_ids_dtype
def initialize(self, inputs, **kwargs):
(finished, next_inputs) = self._initialize_fn(inputs, **kwargs)
if self._batch_size is None:
self._batch_size = array_ops.size(finished)
return (finished, next_inputs)
def sample(self, time, outputs, state):
return self._sample_fn(time=time, outputs=outputs, state=state)
def next_inputs(self, time, outputs, state, sample_ids):
return self._next_inputs_fn(
time=time, outputs=outputs, state=state, sample_ids=sample_ids)
class TrainingSampler(Sampler):
"""A Sampler for use during training.
Only reads inputs.
Returned sample_ids are the argmax of the RNN output logits.
"""
def __init__(self, time_major=False):
"""Initializer.
Args:
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
    Raises:
      ValueError: if `sequence_length` (passed to `initialize`) is not a 1D
        tensor.
"""
self.time_major = time_major
self._batch_size = None
@property
def batch_size(self):
if self._batch_size is None:
raise ValueError("batch_size accessed before initialize was called")
return self._batch_size
@property
def sample_ids_shape(self):
return tensor_shape.TensorShape([])
@property
def sample_ids_dtype(self):
return dtypes.int32
def initialize(self, inputs, sequence_length=None):
"""Initialize the TrainSampler.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
    Returns:
      (finished, next_inputs), a tuple of two items. The first item is a
        boolean vector indicating whether each entry in the batch has finished.
        The second item is the first slice of the input data along the
        time-step dimension (usually the second dimension of the input).
    """
"""
self.inputs = ops.convert_to_tensor(inputs, name="inputs")
if not self.time_major:
inputs = nest.map_structure(_transpose_batch_time, inputs)
self.input_tas = nest.map_structure(_unstack_ta, inputs)
if sequence_length is None:
raise ValueError("sequence_length is required for TrainingSampler")
self.sequence_length = ops.convert_to_tensor(
sequence_length, name="sequence_length")
if self.sequence_length.get_shape().ndims != 1:
raise ValueError(
"Expected sequence_length to be a vector, but received shape: %s" %
          self.sequence_length.get_shape())
self.zero_inputs = nest.map_structure(
lambda inp: array_ops.zeros_like(inp[0, :]), inputs)
self._batch_size = array_ops.size(self.sequence_length)
finished = math_ops.equal(0, self.sequence_length)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished,
lambda: self.zero_inputs,
lambda: nest.map_structure(lambda inp: inp.read(0), self.input_tas))
return (finished, next_inputs)
def sample(self, time, outputs, state):
del state
sample_ids = math_ops.cast(math_ops.argmax(outputs, axis=-1), dtypes.int32)
return sample_ids
def next_inputs(self, time, outputs, state, sample_ids):
del sample_ids
next_time = time + 1
finished = (next_time >= self.sequence_length)
all_finished = math_ops.reduce_all(finished)
def read_from_ta(inp):
return inp.read(next_time)
next_inputs = control_flow_ops.cond(
all_finished,
lambda: self.zero_inputs,
lambda: nest.map_structure(read_from_ta, self.input_tas))
return (finished, next_inputs, state)
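# Minimal usage sketch (illustrative only; shapes are hypothetical). A decoder
# normally drives these calls, but the sampler can be exercised directly:
def _training_sampler_usage_sketch():
  import tensorflow as tf
  inputs = tf.placeholder(tf.float32, [None, 20, 16])  # [batch, time, depth]
  lengths = tf.placeholder(tf.int32, [None])
  sampler = TrainingSampler(time_major=False)
  finished, first_inputs = sampler.initialize(inputs, sequence_length=lengths)
  return finished, first_inputs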
class ScheduledEmbeddingTrainingSampler(TrainingSampler):
"""A training sampler that adds scheduled sampling.
Returns -1s for sample_ids where no sampling took place; valid sample id
values elsewhere.
"""
def __init__(self,
sampling_probability,
embedding_fn=None,
time_major=False,
seed=None,
scheduling_seed=None):
"""Initializer.
Args:
sampling_probability: A `float32` 0-D or 1-D tensor: the probability of
sampling categorically from the output ids instead of reading directly
from the inputs.
embedding_fn: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
scheduling_seed: The schedule decision rule sampling seed.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector.
"""
if callable(embedding_fn) or embedding_fn is None:
self.embedding_fn = embedding_fn
else:
raise ValueError("embedding_fn is expected to be callable, got %s"
% type(embedding_fn))
self.sampling_probability = ops.convert_to_tensor(
sampling_probability, name="sampling_probability")
if self.sampling_probability.get_shape().ndims not in (0, 1):
raise ValueError(
"sampling_probability must be either a scalar or a vector. "
"saw shape: %s" % (self.sampling_probability.get_shape()))
self.seed = seed
self.scheduling_seed = scheduling_seed
super(ScheduledEmbeddingTrainingSampler,
self).__init__(time_major=time_major)
def initialize(self, inputs, sequence_length=None, embedding=None):
if self.embedding_fn is None:
if embedding is None:
raise ValueError("embedding is required as a keyword argument for "
"ScheduledEmbeddingTrainingSampler")
self.embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
return super(ScheduledEmbeddingTrainingSampler, self).initialize(
inputs, sequence_length=sequence_length)
def sample(self, time, outputs, state):
del state
# Return -1s where we did not sample, and sample_ids elsewhere
select_sample = bernoulli_sample(
probs=self.sampling_probability,
dtype=dtypes.bool,
sample_shape=self.batch_size,
seed=self.scheduling_seed)
return array_ops.where(select_sample,
categorical_sample(logits=outputs, seed=self.seed),
gen_array_ops.fill([self.batch_size], -1))
def next_inputs(self, time, outputs, state, sample_ids):
(finished, base_next_inputs, state) = (
super(ScheduledEmbeddingTrainingSampler, self).next_inputs(
time=time, outputs=outputs, state=state, sample_ids=sample_ids))
def maybe_sample():
"""Perform scheduled sampling."""
where_sampling = math_ops.cast(
array_ops.where(sample_ids > -1), dtypes.int32)
where_not_sampling = math_ops.cast(
array_ops.where(sample_ids <= -1), dtypes.int32)
sample_ids_sampling = array_ops.gather_nd(sample_ids, where_sampling)
inputs_not_sampling = array_ops.gather_nd(base_next_inputs,
where_not_sampling)
sampled_next_inputs = self.embedding_fn(sample_ids_sampling)
base_shape = array_ops.shape(base_next_inputs)
return (array_ops.scatter_nd(
indices=where_sampling, updates=sampled_next_inputs, shape=base_shape)
+ array_ops.scatter_nd(
indices=where_not_sampling,
updates=inputs_not_sampling,
shape=base_shape))
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(all_finished, lambda: base_next_inputs,
maybe_sample)
return (finished, next_inputs, state)
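# Illustrative NumPy mirror (hypothetical names) of the scatter_nd merge inside
# `maybe_sample` above: rows that sampled (sample_ids > -1) take the newly
# embedded sample; the remaining rows keep the teacher-forced input.
def _scheduled_merge_numpy_sketch(base_inputs, sampled_inputs, sample_ids):
  """base_inputs: [batch, depth]; sampled_inputs: [num_sampled, depth];
  sample_ids: [batch] int, -1 where no sampling took place."""
  merged = base_inputs.copy()
  merged[sample_ids > -1] = sampled_inputs
  return merged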
class ScheduledOutputTrainingSampler(TrainingSampler):
"""A training sampler that adds scheduled sampling directly to outputs.
Returns False for sample_ids where no sampling took place; True elsewhere.
"""
def __init__(self,
sampling_probability,
time_major=False,
seed=None,
next_inputs_fn=None):
"""Initializer.
Args:
sampling_probability: A `float32` scalar tensor: the probability of
sampling from the outputs instead of reading directly from the inputs.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
next_inputs_fn: (Optional) callable to apply to the RNN outputs to create
the next input when sampling. If `None` (default), the RNN outputs will
be used as the next inputs.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector.
"""
self.sampling_probability = ops.convert_to_tensor(
sampling_probability, name="sampling_probability")
if self.sampling_probability.get_shape().ndims not in (0, 1):
raise ValueError(
"sampling_probability must be either a scalar or a vector. "
"saw shape: %s" % (self._sampling_probability.get_shape()))
self.seed = seed
self.next_inputs_fn = next_inputs_fn
super(ScheduledOutputTrainingSampler, self).__init__(time_major=time_major)
def initialize(self, inputs, sequence_length=None, auxiliary_inputs=None):
if auxiliary_inputs is None:
maybe_concatenated_inputs = inputs
else:
inputs = ops.convert_to_tensor(inputs)
auxiliary_inputs = ops.convert_to_tensor(auxiliary_inputs)
maybe_concatenated_inputs = nest.map_structure(
lambda x, y: array_ops.concat((x, y), -1), inputs, auxiliary_inputs)
if not self.time_major:
auxiliary_inputs = nest.map_structure(_transpose_batch_time,
auxiliary_inputs)
if auxiliary_inputs is not None:
self._auxiliary_input_tas = nest.map_structure(_unstack_ta,
auxiliary_inputs)
else:
self._auxiliary_input_tas = None
return super(ScheduledOutputTrainingSampler, self).initialize(
maybe_concatenated_inputs, sequence_length=sequence_length)
def sample(self, time, outputs, state):
del state
return bernoulli_sample(
probs=self.sampling_probability,
sample_shape=self.batch_size,
seed=self.seed)
def next_inputs(self, time, outputs, state, sample_ids):
(finished, base_next_inputs, state) = (
super(ScheduledOutputTrainingSampler, self).next_inputs(
time=time, outputs=outputs, state=state, sample_ids=sample_ids))
sample_ids = math_ops.cast(sample_ids, dtypes.bool)
def maybe_sample():
"""Perform scheduled sampling."""
def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
"""Concatenate outputs with auxiliary inputs, if they exist."""
if self._auxiliary_input_tas is None:
return outputs_
next_time = time + 1
auxiliary_inputs = nest.map_structure(lambda ta: ta.read(next_time),
self._auxiliary_input_tas)
if indices is not None:
auxiliary_inputs = array_ops.gather_nd(auxiliary_inputs, indices)
return nest.map_structure(lambda x, y: array_ops.concat((x, y), -1),
outputs_, auxiliary_inputs)
if self.next_inputs_fn is None:
return array_ops.where(sample_ids,
maybe_concatenate_auxiliary_inputs(outputs),
base_next_inputs)
where_sampling = math_ops.cast(array_ops.where(sample_ids), dtypes.int32)
where_not_sampling = math_ops.cast(
array_ops.where(math_ops.logical_not(sample_ids)), dtypes.int32)
outputs_sampling = array_ops.gather_nd(outputs, where_sampling)
inputs_not_sampling = array_ops.gather_nd(base_next_inputs,
where_not_sampling)
sampled_next_inputs = maybe_concatenate_auxiliary_inputs(
self.next_inputs_fn(outputs_sampling), where_sampling)
base_shape = array_ops.shape(base_next_inputs)
return (array_ops.scatter_nd(
indices=where_sampling, updates=sampled_next_inputs, shape=base_shape)
+ array_ops.scatter_nd(
indices=where_not_sampling,
updates=inputs_not_sampling,
shape=base_shape))
all_finished = math_ops.reduce_all(finished)
no_samples = math_ops.logical_not(math_ops.reduce_any(sample_ids))
next_inputs = control_flow_ops.cond(
math_ops.logical_or(all_finished, no_samples), lambda: base_next_inputs,
maybe_sample)
return (finished, next_inputs, state)
class GreedyEmbeddingSampler(Sampler):
"""A sampler for use during inference.
Uses the argmax of the output (treated as logits) and passes the
result through an embedding layer to get the next input.
"""
def __init__(self, embedding_fn=None):
"""Initializer.
Args:
      embedding_fn: An optional callable that takes a vector tensor of `ids`
        (argmax ids), or the `params` argument for `embedding_lookup`. The
        returned tensor will be passed to the decoder as input. Defaults to
        `embedding_ops.embedding_lookup`.
"""
if embedding_fn is None or callable(embedding_fn):
self.embedding_fn = embedding_fn
else:
raise ValueError("embedding_fn is expected to be a callable, got %s" %
type(embedding_fn))
self._batch_size = None
@property
def batch_size(self):
if self._batch_size is None:
raise ValueError("batch_size accessed before initialize was called")
return self._batch_size
@property
def sample_ids_shape(self):
return tensor_shape.TensorShape([])
@property
def sample_ids_dtype(self):
return dtypes.int32
def initialize(self, embedding, start_tokens=None, end_token=None):
"""Initialize the GreedyEmbeddingSampler.
Args:
      embedding: a tensor containing the embedding matrix. It is used to
        generate outputs from `start_tokens` and `end_token`. This argument is
        ignored if `embedding_fn` was provided at `__init__()`.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
Returns:
Tuple of two items: `(finished, self.start_inputs)`.
Raises:
ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a
scalar.
"""
if self.embedding_fn is None:
self.embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self.start_tokens = ops.convert_to_tensor(
start_tokens, dtype=dtypes.int32, name="start_tokens")
self.end_token = ops.convert_to_tensor(
end_token, dtype=dtypes.int32, name="end_token")
if self.start_tokens.get_shape().ndims != 1:
raise ValueError("start_tokens must be a vector")
self._batch_size = array_ops.size(start_tokens)
if self.end_token.get_shape().ndims != 0:
raise ValueError("end_token must be a scalar")
self.start_inputs = self.embedding_fn(self.start_tokens)
finished = array_ops.tile([False], [self._batch_size])
return (finished, self.start_inputs)
def sample(self, time, outputs, state):
"""sample for GreedyEmbeddingHelper."""
del time, state # unused by sample_fn
# Outputs are logits, use argmax to get the most probable id
if not isinstance(outputs, ops.Tensor):
raise TypeError(
"Expected outputs to be a single Tensor, got: %s" % type(outputs))
sample_ids = math_ops.argmax(outputs, axis=-1, output_type=dtypes.int32)
return sample_ids
def next_inputs(self, time, outputs, state, sample_ids):
"""next_inputs_fn for GreedyEmbeddingHelper."""
del time, outputs # unused by next_inputs_fn
finished = math_ops.equal(sample_ids, self.end_token)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished,
# If we're finished, the next_inputs value doesn't matter
lambda: self.start_inputs,
lambda: self.embedding_fn(sample_ids))
return (finished, next_inputs, state)
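# Minimal usage sketch (illustrative only; the vocabulary size, start id and
# end id below are hypothetical):
def _greedy_sampler_usage_sketch():
  import tensorflow as tf
  embedding = tf.get_variable("embedding_demo", [1000, 64])  # vocab x depth
  sampler = GreedyEmbeddingSampler()
  start_tokens = tf.fill([32], 1)  # assumed <s> id 1 for a batch of 32
  finished, first_inputs = sampler.initialize(
      embedding, start_tokens=start_tokens, end_token=2)  # assumed </s> id 2
  return finished, first_inputs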
class SampleEmbeddingSampler(GreedyEmbeddingSampler):
"""A sampler for use during inference.
Uses sampling (from a distribution) instead of argmax and passes the
result through an embedding layer to get the next input.
"""
def __init__(self, embedding_fn=None, softmax_temperature=None, seed=None):
"""Initializer.
Args:
embedding_fn: (Optional) A callable that takes a vector tensor of `ids`
(argmax ids), or the `params` argument for `embedding_lookup`. The
returned tensor will be passed to the decoder input.
softmax_temperature: (Optional) `float32` scalar, value to divide the
logits by before computing the softmax. Larger values (above 1.0) result
in more random samples, while smaller values push the sampling
distribution towards the argmax. Must be strictly greater than 0.
Defaults to 1.0.
seed: (Optional) The sampling seed.
    Raises:
      ValueError: if `start_tokens` (passed to `initialize`) is not a 1D tensor
        or `end_token` is not a scalar.
"""
super(SampleEmbeddingSampler, self).__init__(embedding_fn)
self.softmax_temperature = softmax_temperature
self.seed = seed
def sample(self, time, outputs, state):
"""sample for SampleEmbeddingHelper."""
del time, state # unused by sample_fn
# Outputs are logits, we sample instead of argmax (greedy).
if not isinstance(outputs, ops.Tensor):
raise TypeError(
"Expected outputs to be a single Tensor, got: %s" % type(outputs))
if self.softmax_temperature is None:
logits = outputs
else:
logits = outputs / self.softmax_temperature
return categorical_sample(logits=logits, seed=self.seed)
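# Temperature intuition (illustrative sketch, hypothetical helper): dividing
# logits by a temperature T > 1 flattens the softmax and increases sample
# diversity; T < 1 sharpens it toward the argmax.
def _softmax_temperature_numpy_sketch(logits_np, temperature):
  import numpy as np
  scaled = logits_np / temperature
  scaled = scaled - scaled.max(axis=-1, keepdims=True)  # numerical stability
  probs = np.exp(scaled)
  return probs / probs.sum(axis=-1, keepdims=True)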
class InferenceSampler(Sampler):
"""A helper to use during inference with a custom sampling function."""
def __init__(self,
sample_fn,
sample_shape,
sample_dtype,
end_fn,
next_inputs_fn=None):
"""Initializer.
Args:
sample_fn: A callable that takes `outputs` and emits tensor `sample_ids`.
      sample_shape: Either a list of integers, or a 1-D Tensor of type `int32`,
        the shape of each sample in the batch returned by `sample_fn`.
sample_dtype: the dtype of the sample returned by `sample_fn`.
end_fn: A callable that takes `sample_ids` and emits a `bool` vector
shaped `[batch_size]` indicating whether each sample is an end token.
next_inputs_fn: (Optional) A callable that takes `sample_ids` and returns
the next batch of inputs. If not provided, `sample_ids` is used as the
next batch of inputs.
"""
self.sample_fn = sample_fn
self.sample_shape = tensor_shape.TensorShape(sample_shape)
self.sample_dtype = sample_dtype
self.end_fn = end_fn
self.next_inputs_fn = next_inputs_fn
self._batch_size = None
@property
def batch_size(self):
if self._batch_size is None:
raise ValueError("batch_size accessed before initialize was called")
return self._batch_size
@property
def sample_ids_shape(self):
return self.sample_shape
@property
def sample_ids_dtype(self):
return self.sample_dtype
def initialize(self, start_inputs):
self.start_inputs = ops.convert_to_tensor(start_inputs, name="start_inputs")
self._batch_size = array_ops.shape(start_inputs)[0]
finished = array_ops.tile([False], [self._batch_size])
return (finished, self.start_inputs)
def sample(self, time, outputs, state):
del time, state # unused by sample
return self.sample_fn(outputs)
def next_inputs(self, time, outputs, state, sample_ids):
del time, outputs # unused by next_inputs
if self.next_inputs_fn is None:
next_inputs = sample_ids
else:
next_inputs = self.next_inputs_fn(sample_ids)
finished = self.end_fn(sample_ids)
return (finished, next_inputs, state)
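# Minimal usage sketch (illustrative only; the thresholding scheme and shapes
# are hypothetical): a custom sampler that emits the thresholded RNN outputs
# directly as the next inputs.
def _inference_sampler_usage_sketch():
  import tensorflow as tf
  output_depth = 16
  sampler = InferenceSampler(
      sample_fn=lambda outputs: tf.cast(outputs > 0.5, tf.float32),
      sample_shape=[output_depth],
      sample_dtype=tf.float32,
      end_fn=lambda sample_ids: tf.reduce_all(sample_ids < 0.5, axis=-1))
  start_inputs = tf.zeros([8, output_depth])
  finished, first_inputs = sampler.initialize(start_inputs)
  return finished, first_inputs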
# The following sample functions (_call_sampler, bernoulli_sample,
# categorical_sample) mimic TensorFlow Probability distribution semantics.
def _call_sampler(sample_n_fn, sample_shape, name=None):
"""Reshapes vector of samples."""
with ops.name_scope(name, "call_sampler", values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
# Ensure sample_shape is a vector (vs just a scalar).
pad = math_ops.cast(
math_ops.equal(array_ops.rank(sample_shape), 0), dtypes.int32)
sample_shape = array_ops.reshape(
sample_shape,
array_ops.pad(
array_ops.shape(sample_shape),
paddings=[[pad, 0]],
constant_values=1))
samples = sample_n_fn(math_ops.reduce_prod(sample_shape))
batch_event_shape = array_ops.shape(samples)[1:]
final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
return array_ops.reshape(samples, final_shape)
def bernoulli_sample(probs=None,
logits=None,
dtype=dtypes.int32,
sample_shape=(),
seed=None):
"""Samples from Bernoulli distribution."""
if probs is None:
probs = math_ops.sigmoid(logits, name="probs")
else:
probs = ops.convert_to_tensor(probs, name="probs")
batch_shape_tensor = array_ops.shape(probs)
def _sample_n(n):
"""Sample vector of Bernoullis."""
new_shape = array_ops.concat([[n], batch_shape_tensor], 0)
uniform = random_ops.random_uniform(new_shape, seed=seed, dtype=probs.dtype)
return math_ops.cast(math_ops.less(uniform, probs), dtype)
return _call_sampler(_sample_n, sample_shape)
def categorical_sample(logits, dtype=dtypes.int32, sample_shape=(), seed=None):
"""Samples from categorical distribution."""
logits = ops.convert_to_tensor(logits, name="logits")
event_size = array_ops.shape(logits)[-1]
batch_shape_tensor = array_ops.shape(logits)[:-1]
def _sample_n(n):
"""Sample vector of categoricals."""
if logits.shape.ndims == 2:
logits_2d = logits
else:
logits_2d = array_ops.reshape(logits, [-1, event_size])
sample_dtype = dtypes.int64 if logits.dtype.size > 4 else dtypes.int32
draws = random_ops.multinomial(
logits_2d, n, seed=seed, output_dtype=sample_dtype)
draws = array_ops.reshape(
array_ops.transpose(draws),
array_ops.concat([[n], batch_shape_tensor], 0))
return math_ops.cast(draws, dtype)
return _call_sampler(_sample_n, sample_shape)
def _unstack_ta(inp):
return tensor_array_ops.TensorArray(
dtype=inp.dtype,
size=array_ops.shape(inp)[0],
element_shape=inp.get_shape()[1:]).unstack(inp)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/seq2seq/python/ops/sampler.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A decoder that performs beam search."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.contrib.seq2seq.python.ops import attention_wrapper
from tensorflow.contrib.seq2seq.python.ops import beam_search_ops
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import nest
__all__ = [
"BeamSearchDecoderOutput",
"BeamSearchDecoderState",
"BeamSearchDecoder",
"FinalBeamSearchDecoderOutput",
"tile_batch",
]
class BeamSearchDecoderState(
collections.namedtuple("BeamSearchDecoderState",
("cell_state", "log_probs", "finished", "lengths",
"accumulated_attention_probs"))):
pass
class BeamSearchDecoderOutput(
collections.namedtuple("BeamSearchDecoderOutput",
("scores", "predicted_ids", "parent_ids"))):
pass
class FinalBeamSearchDecoderOutput(
collections.namedtuple("FinalBeamDecoderOutput",
["predicted_ids", "beam_search_decoder_output"])):
"""Final outputs returned by the beam search after all decoding is finished.
Args:
predicted_ids: The final prediction. A tensor of shape
`[batch_size, T, beam_width]` (or `[T, batch_size, beam_width]` if
`output_time_major` is True). Beams are ordered from best to worst.
beam_search_decoder_output: An instance of `BeamSearchDecoderOutput` that
describes the state of the beam search.
"""
pass
def _tile_batch(t, multiplier):
"""Core single-tensor implementation of tile_batch."""
t = ops.convert_to_tensor(t, name="t")
shape_t = array_ops.shape(t)
if t.shape.ndims is None or t.shape.ndims < 1:
raise ValueError("t must have statically known rank")
tiling = [1] * (t.shape.ndims + 1)
tiling[1] = multiplier
tiled_static_batch_size = (
t.shape.dims[0].value * multiplier
if t.shape.dims[0].value is not None else None)
tiled = array_ops.tile(array_ops.expand_dims(t, 1), tiling)
tiled = array_ops.reshape(tiled,
array_ops.concat(
([shape_t[0] * multiplier], shape_t[1:]), 0))
tiled.set_shape(
tensor_shape.TensorShape([tiled_static_batch_size]).concatenate(
t.shape[1:]))
return tiled
def tile_batch(t, multiplier, name=None):
"""Tile the batch dimension of a (possibly nested structure of) tensor(s) t.
For each tensor t in a (possibly nested structure) of tensors,
this function takes a tensor t shaped `[batch_size, s0, s1, ...]` composed of
minibatch entries `t[0], ..., t[batch_size - 1]` and tiles it to have a shape
`[batch_size * multiplier, s0, s1, ...]` composed of minibatch entries
`t[0], t[0], ..., t[1], t[1], ...` where each minibatch entry is repeated
`multiplier` times.
Args:
t: `Tensor` shaped `[batch_size, ...]`.
multiplier: Python int.
name: Name scope for any created operations.
Returns:
A (possibly nested structure of) `Tensor` shaped
`[batch_size * multiplier, ...]`.
Raises:
ValueError: if tensor(s) `t` do not have a statically known rank or
the rank is < 1.
"""
flat_t = nest.flatten(t)
with ops.name_scope(name, "tile_batch", flat_t + [multiplier]):
return nest.map_structure(lambda t_: _tile_batch(t_, multiplier), t)
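# Minimal usage sketch (illustrative only; shapes and beam width are
# hypothetical): tiling encoder results so each batch entry is repeated once
# per beam, as required by the BeamSearchDecoder.
def _tile_batch_usage_sketch():
  import tensorflow as tf
  beam_width = 4
  encoder_outputs = tf.placeholder(tf.float32, [None, 10, 32])
  tiled = tile_batch(encoder_outputs, multiplier=beam_width)
  # Each batch entry now appears `beam_width` times along axis 0.
  return tiled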
def gather_tree_from_array(t, parent_ids, sequence_length):
"""Calculates the full beams for `TensorArray`s.
Args:
t: A stacked `TensorArray` of size `max_time` that contains `Tensor`s of
shape `[batch_size, beam_width, s]` or `[batch_size * beam_width, s]`
where `s` is the depth shape.
parent_ids: The parent ids of shape `[max_time, batch_size, beam_width]`.
sequence_length: The sequence length of shape `[batch_size, beam_width]`.
Returns:
A `Tensor` which is a stacked `TensorArray` of the same size and type as
`t` and where beams are sorted in each `Tensor` according to `parent_ids`.
"""
max_time = parent_ids.shape.dims[0].value or array_ops.shape(parent_ids)[0]
batch_size = parent_ids.shape.dims[1].value or array_ops.shape(parent_ids)[1]
beam_width = parent_ids.shape.dims[2].value or array_ops.shape(parent_ids)[2]
# Generate beam ids that will be reordered by gather_tree.
beam_ids = array_ops.expand_dims(
array_ops.expand_dims(math_ops.range(beam_width), 0), 0)
beam_ids = array_ops.tile(beam_ids, [max_time, batch_size, 1])
max_sequence_lengths = math_ops.cast(
math_ops.reduce_max(sequence_length, axis=1), dtypes.int32)
sorted_beam_ids = beam_search_ops.gather_tree(
step_ids=beam_ids,
parent_ids=parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=beam_width + 1)
# For out of range steps, simply copy the same beam.
in_bound_steps = array_ops.transpose(
array_ops.sequence_mask(sequence_length, maxlen=max_time),
perm=[2, 0, 1])
sorted_beam_ids = array_ops.where(
in_bound_steps, x=sorted_beam_ids, y=beam_ids)
# Generate indices for gather_nd.
time_ind = array_ops.tile(array_ops.reshape(
math_ops.range(max_time), [-1, 1, 1]), [1, batch_size, beam_width])
batch_ind = array_ops.tile(array_ops.reshape(
math_ops.range(batch_size), [-1, 1, 1]), [1, max_time, beam_width])
batch_ind = array_ops.transpose(batch_ind, perm=[1, 0, 2])
indices = array_ops.stack([time_ind, batch_ind, sorted_beam_ids], -1)
# Gather from a tensor with collapsed additional dimensions.
gather_from = t
final_shape = array_ops.shape(gather_from)
gather_from = array_ops.reshape(
gather_from, [max_time, batch_size, beam_width, -1])
ordered = array_ops.gather_nd(gather_from, indices)
ordered = array_ops.reshape(ordered, final_shape)
return ordered
def _check_ndims(t):
if t.shape.ndims is None:
raise ValueError(
"Expected tensor (%s) to have known rank, but ndims == None." % t)
def _check_static_batch_beam_maybe(shape, batch_size, beam_width):
"""Raises an exception if dimensions are known statically and can not be
reshaped to [batch_size, beam_size, -1].
"""
reshaped_shape = tensor_shape.TensorShape([batch_size, beam_width, None])
if (batch_size is not None and shape.dims[0].value is not None
and (shape[0] != batch_size * beam_width
or (shape.ndims >= 2 and shape.dims[1].value is not None
and (shape[0] != batch_size or shape[1] != beam_width)))):
tf_logging.warn("TensorArray reordering expects elements to be "
"reshapable to %s which is incompatible with the "
"current shape %s. Consider setting "
"reorder_tensor_arrays to False to disable TensorArray "
"reordering during the beam search."
% (reshaped_shape, shape))
return False
return True
def _check_batch_beam(t, batch_size, beam_width):
"""Returns an Assert operation checking that the elements of the stacked
TensorArray can be reshaped to [batch_size, beam_size, -1]. At this point,
the TensorArray elements have a known rank of at least 1.
"""
error_message = ("TensorArray reordering expects elements to be "
"reshapable to [batch_size, beam_size, -1] which is "
"incompatible with the dynamic shape of %s elements. "
"Consider setting reorder_tensor_arrays to False to disable "
"TensorArray reordering during the beam search."
% (t if context.executing_eagerly() else t.name))
rank = t.shape.ndims
shape = array_ops.shape(t)
if rank == 2:
condition = math_ops.equal(shape[1], batch_size * beam_width)
else:
condition = math_ops.logical_or(
math_ops.equal(shape[1], batch_size * beam_width),
math_ops.logical_and(
math_ops.equal(shape[1], batch_size),
math_ops.equal(shape[2], beam_width)))
return control_flow_ops.Assert(condition, [error_message])
class BeamSearchDecoderMixin(object):
"""BeamSearchDecoderMixin contains the common methods for BeamSearchDecoder.
It is expected to be used as a base class for concrete BeamSearchDecoders.
Since this is a mixin class, it is expected to be combined with another
class as a base.
"""
def __init__(self,
cell,
beam_width,
output_layer=None,
length_penalty_weight=0.0,
coverage_penalty_weight=0.0,
reorder_tensor_arrays=True,
**kwargs):
"""Initialize the BeamSearchDecoderMixin.
Args:
cell: An `RNNCell` instance.
beam_width: Python integer, the number of beams.
output_layer: (Optional) An instance of `tf.keras.layers.Layer`, i.e.,
`tf.keras.layers.Dense`. Optional layer to apply to the RNN output
prior to storing the result or sampling.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
coverage_penalty_weight: Float weight to penalize the coverage of source
sentence. Disabled with 0.0.
reorder_tensor_arrays: If `True`, `TensorArray`s' elements within the cell
state will be reordered according to the beam search path. If the
`TensorArray` can be reordered, the stacked form will be returned.
Otherwise, the `TensorArray` will be returned as is. Set this flag to
`False` if the cell state contains `TensorArray`s that are not amenable
to reordering.
**kwargs: Dict, other keyword arguments for parent class.
Raises:
TypeError: if `cell` is not an instance of `RNNCell`,
or `output_layer` is not an instance of `tf.keras.layers.Layer`.
"""
rnn_cell_impl.assert_like_rnncell("cell", cell) # pylint: disable=protected-access
if (output_layer is not None and
not isinstance(output_layer, layers.Layer)):
raise TypeError(
"output_layer must be a Layer, received: %s" % type(output_layer))
self._cell = cell
self._output_layer = output_layer
self._reorder_tensor_arrays = reorder_tensor_arrays
self._start_tokens = None
self._end_token = None
self._batch_size = None
self._beam_width = beam_width
self._length_penalty_weight = length_penalty_weight
self._coverage_penalty_weight = coverage_penalty_weight
super(BeamSearchDecoderMixin, self).__init__(**kwargs)
@property
def batch_size(self):
return self._batch_size
def _rnn_output_size(self):
"""Get the output shape from the RNN layer."""
size = self._cell.output_size
if self._output_layer is None:
return size
else:
# To use layer's compute_output_shape, we need to convert the
# RNNCell's output_size entries into shapes with an unknown
# batch size. We then pass this through the layer's
# compute_output_shape and read off all but the first (batch)
# dimensions to get the output size of the rnn with the layer
# applied to the top.
output_shape_with_unknown_batch = nest.map_structure(
lambda s: tensor_shape.TensorShape([None]).concatenate(s), size)
layer_output_shape = self._output_layer.compute_output_shape(
output_shape_with_unknown_batch)
return nest.map_structure(lambda s: s[1:], layer_output_shape)
@property
def tracks_own_finished(self):
"""The BeamSearchDecoder shuffles its beams and their finished state.
For this reason, it conflicts with the `dynamic_decode` function's
tracking of finished states. Setting this property to true avoids
early stopping of decoding due to mismanagement of the finished state
in `dynamic_decode`.
Returns:
`True`.
"""
return True
@property
def output_size(self):
# Return the cell output and the id
return BeamSearchDecoderOutput(
scores=tensor_shape.TensorShape([self._beam_width]),
predicted_ids=tensor_shape.TensorShape([self._beam_width]),
parent_ids=tensor_shape.TensorShape([self._beam_width]))
def finalize(self, outputs, final_state, sequence_lengths):
"""Finalize and return the predicted_ids.
Args:
outputs: An instance of BeamSearchDecoderOutput.
final_state: An instance of BeamSearchDecoderState. Passed through to the
output.
sequence_lengths: An `int64` tensor shaped `[batch_size, beam_width]`.
The sequence lengths determined for each beam during decode.
**NOTE** These are ignored; the updated sequence lengths are stored in
`final_state.lengths`.
Returns:
outputs: An instance of `FinalBeamSearchDecoderOutput` where the
predicted_ids are the result of calling _gather_tree.
final_state: The same input instance of `BeamSearchDecoderState`.
"""
del sequence_lengths
# Get max_sequence_length across all beams for each batch.
max_sequence_lengths = math_ops.cast(
math_ops.reduce_max(final_state.lengths, axis=1), dtypes.int32)
predicted_ids = beam_search_ops.gather_tree(
outputs.predicted_ids,
outputs.parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=self._end_token)
if self._reorder_tensor_arrays:
final_state = final_state._replace(cell_state=nest.map_structure(
lambda t: self._maybe_sort_array_beams(
t, outputs.parent_ids, final_state.lengths),
final_state.cell_state))
outputs = FinalBeamSearchDecoderOutput(
beam_search_decoder_output=outputs, predicted_ids=predicted_ids)
return outputs, final_state
def _merge_batch_beams(self, t, s=None):
"""Merges the tensor from a batch of beams into a batch by beams.
More exactly, t is a tensor of dimension [batch_size, beam_width, s]. We
reshape this into [batch_size*beam_width, s]
Args:
t: Tensor of dimension [batch_size, beam_width, s]
s: (Possibly known) depth shape.
Returns:
A reshaped version of t with dimension [batch_size * beam_width, s].
"""
if isinstance(s, ops.Tensor):
s = tensor_shape.as_shape(tensor_util.constant_value(s))
else:
s = tensor_shape.TensorShape(s)
t_shape = array_ops.shape(t)
static_batch_size = tensor_util.constant_value(self._batch_size)
batch_size_beam_width = (
None
if static_batch_size is None else static_batch_size * self._beam_width)
reshaped_t = array_ops.reshape(
t,
array_ops.concat(([self._batch_size * self._beam_width], t_shape[2:]),
0))
reshaped_t.set_shape(
(tensor_shape.TensorShape([batch_size_beam_width]).concatenate(s)))
return reshaped_t
def _split_batch_beams(self, t, s=None):
"""Splits the tensor from a batch by beams into a batch of beams.
More exactly, t is a tensor of dimension [batch_size*beam_width, s]. We
reshape this into [batch_size, beam_width, s]
Args:
t: Tensor of dimension [batch_size*beam_width, s].
s: (Possibly known) depth shape.
Returns:
A reshaped version of t with dimension [batch_size, beam_width, s].
Raises:
ValueError: If, after reshaping, the new tensor is not shaped
`[batch_size, beam_width, s]` (assuming batch_size and beam_width
are known statically).
"""
if isinstance(s, ops.Tensor):
s = tensor_shape.TensorShape(tensor_util.constant_value(s))
else:
s = tensor_shape.TensorShape(s)
t_shape = array_ops.shape(t)
reshaped_t = array_ops.reshape(
t,
array_ops.concat(([self._batch_size, self._beam_width], t_shape[1:]),
0))
static_batch_size = tensor_util.constant_value(self._batch_size)
expected_reshaped_shape = tensor_shape.TensorShape(
[static_batch_size, self._beam_width]).concatenate(s)
if not reshaped_t.shape.is_compatible_with(expected_reshaped_shape):
raise ValueError("Unexpected behavior when reshaping between beam width "
"and batch size. The reshaped tensor has shape: %s. "
"We expected it to have shape "
"(batch_size, beam_width, depth) == %s. Perhaps you "
"forgot to create a zero_state with "
"batch_size=encoder_batch_size * beam_width?" %
(reshaped_t.shape, expected_reshaped_shape))
reshaped_t.set_shape(expected_reshaped_shape)
return reshaped_t
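# Sketch (illustrative only, hypothetical shapes): the merge/split pair
# above is in effect a reshape between [batch_size, beam_width, depth] and
# [batch_size * beam_width, depth] plus static-shape bookkeeping:
#
#   t = array_ops.zeros([2, 3, 5])                # [batch, beam, depth]
#   merged = array_ops.reshape(t, [2 * 3, 5])     # as in _merge_batch_beams
#   split = array_ops.reshape(merged, [2, 3, 5])  # as in _split_batch_beams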
def _maybe_split_batch_beams(self, t, s):
"""Maybe splits the tensor from a batch by beams into a batch of beams.
We do this so that we can use nest and not run into problems with shapes.
Args:
t: `Tensor`, either scalar or shaped `[batch_size * beam_width] + s`.
s: `Tensor`, Python int, or `TensorShape`.
Returns:
If `t` is a matrix or higher order tensor, then the return value is
`t` reshaped to `[batch_size, beam_width] + s`. Otherwise `t` is
returned unchanged.
Raises:
ValueError: If the rank of `t` is not statically known.
"""
if isinstance(t, tensor_array_ops.TensorArray):
return t
_check_ndims(t)
if t.shape.ndims >= 1:
return self._split_batch_beams(t, s)
else:
return t
def _maybe_merge_batch_beams(self, t, s):
"""Splits the tensor from a batch by beams into a batch of beams.
More exactly, `t` is a tensor of dimension `[batch_size * beam_width] + s`,
then we reshape it to `[batch_size, beam_width] + s`.
Args:
t: `Tensor` of dimension `[batch_size * beam_width] + s`.
s: `Tensor`, Python int, or `TensorShape`.
Returns:
A reshaped version of t with shape `[batch_size, beam_width] + s`.
Raises:
ValueError: If the rank of `t` is not statically known.
"""
if isinstance(t, tensor_array_ops.TensorArray):
return t
_check_ndims(t)
if t.shape.ndims >= 2:
return self._merge_batch_beams(t, s)
else:
return t
def _maybe_sort_array_beams(self, t, parent_ids, sequence_length):
"""Maybe sorts beams within a `TensorArray`.
Args:
t: A `TensorArray` of size `max_time` that contains `Tensor`s of shape
`[batch_size, beam_width, s]` or `[batch_size * beam_width, s]` where
`s` is the depth shape.
parent_ids: The parent ids of shape `[max_time, batch_size, beam_width]`.
sequence_length: The sequence length of shape `[batch_size, beam_width]`.
Returns:
A `TensorArray` where beams are sorted in each `Tensor` or `t` itself if
it is not a `TensorArray` or does not meet shape requirements.
"""
if not isinstance(t, tensor_array_ops.TensorArray):
return t
if t.element_shape.ndims is None or t.element_shape.ndims < 1:
tf_logging.warn("The TensorArray %s in the cell state is not amenable to "
"sorting based on the beam search result. For a "
"TensorArray to be sorted, its elements shape must be "
"defined and have at least a rank of 1, but saw shape: %s"
% (t.handle.name, t.element_shape))
return t
if not _check_static_batch_beam_maybe(
t.element_shape, tensor_util.constant_value(self._batch_size),
self._beam_width):
return t
t = t.stack()
with ops.control_dependencies(
[_check_batch_beam(t, self._batch_size, self._beam_width)]):
return gather_tree_from_array(t, parent_ids, sequence_length)
def step(self, time, inputs, state, name=None):
"""Perform a decoding step.
Args:
time: scalar `int32` tensor.
inputs: A (structure of) input tensors.
state: A (structure of) state tensors and TensorArrays.
name: Name scope for any created operations.
Returns:
`(outputs, next_state, next_inputs, finished)`.
"""
batch_size = self._batch_size
beam_width = self._beam_width
end_token = self._end_token
length_penalty_weight = self._length_penalty_weight
coverage_penalty_weight = self._coverage_penalty_weight
with ops.name_scope(name, "BeamSearchDecoderStep", (time, inputs, state)):
cell_state = state.cell_state
inputs = nest.map_structure(
lambda inp: self._merge_batch_beams(inp, s=inp.shape[2:]), inputs)
cell_state = nest.map_structure(self._maybe_merge_batch_beams, cell_state,
self._cell.state_size)
cell_outputs, next_cell_state = self._cell(inputs, cell_state)
cell_outputs = nest.map_structure(
lambda out: self._split_batch_beams(out, out.shape[1:]), cell_outputs)
next_cell_state = nest.map_structure(
self._maybe_split_batch_beams, next_cell_state, self._cell.state_size)
if self._output_layer is not None:
cell_outputs = self._output_layer(cell_outputs)
beam_search_output, beam_search_state = _beam_search_step(
time=time,
logits=cell_outputs,
next_cell_state=next_cell_state,
beam_state=state,
batch_size=batch_size,
beam_width=beam_width,
end_token=end_token,
length_penalty_weight=length_penalty_weight,
coverage_penalty_weight=coverage_penalty_weight)
finished = beam_search_state.finished
sample_ids = beam_search_output.predicted_ids
next_inputs = control_flow_ops.cond(
math_ops.reduce_all(finished), lambda: self._start_inputs,
lambda: self._embedding_fn(sample_ids))
return (beam_search_output, beam_search_state, next_inputs, finished)
class BeamSearchDecoder(BeamSearchDecoderMixin, decoder.Decoder):
# Note that the inheritance hierarchy is important here. The Mixin has to be
# the first parent class since we will use super().__init__(), and the Mixin,
# which ultimately derives from object, will properly invoke the __init__
# method of the other parent class.
"""BeamSearch sampling decoder.
**NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped in
`AttentionWrapper`, then you must ensure that:
- The encoder output has been tiled to `beam_width` via
`tf.contrib.seq2seq.tile_batch` (NOT `tf.tile`).
- The `batch_size` argument passed to the `zero_state` method of this
wrapper is equal to `true_batch_size * beam_width`.
- The initial state created with `zero_state` above contains a
`cell_state` value containing properly tiled final state from the
encoder.
An example:
```
tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(
encoder_outputs, multiplier=beam_width)
tiled_encoder_final_state = tf.contrib.seq2seq.tile_batch(
encoder_final_state, multiplier=beam_width)
tiled_sequence_length = tf.contrib.seq2seq.tile_batch(
sequence_length, multiplier=beam_width)
attention_mechanism = MyFavoriteAttentionMechanism(
num_units=attention_depth,
memory=tiled_encoder_outputs,
memory_sequence_length=tiled_sequence_length)
attention_cell = AttentionWrapper(cell, attention_mechanism, ...)
decoder_initial_state = attention_cell.zero_state(
dtype, batch_size=true_batch_size * beam_width)
decoder_initial_state = decoder_initial_state.clone(
cell_state=tiled_encoder_final_state)
```
Meanwhile, when using `AttentionWrapper`, it is suggested to enable the
coverage penalty when computing scores (https://arxiv.org/pdf/1609.08144.pdf).
It encourages the decoder to cover all inputs.
"""
def __init__(self,
cell,
embedding,
start_tokens,
end_token,
initial_state,
beam_width,
output_layer=None,
length_penalty_weight=0.0,
coverage_penalty_weight=0.0,
reorder_tensor_arrays=True):
"""Initialize the BeamSearchDecoder.
Args:
cell: An `RNNCell` instance.
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
initial_state: A (possibly nested tuple of...) tensors and TensorArrays.
beam_width: Python integer, the number of beams.
output_layer: (Optional) An instance of `tf.keras.layers.Layer`, i.e.,
`tf.keras.layers.Dense`. Optional layer to apply to the RNN output
prior to storing the result or sampling.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
coverage_penalty_weight: Float weight to penalize the coverage of source
sentence. Disabled with 0.0.
reorder_tensor_arrays: If `True`, `TensorArray`s' elements within the cell
state will be reordered according to the beam search path. If the
`TensorArray` can be reordered, the stacked form will be returned.
Otherwise, the `TensorArray` will be returned as is. Set this flag to
`False` if the cell state contains `TensorArray`s that are not amenable
to reordering.
Raises:
TypeError: if `cell` is not an instance of `RNNCell`,
or `output_layer` is not an instance of `tf.keras.layers.Layer`.
ValueError: If `start_tokens` is not a vector or
`end_token` is not a scalar.
"""
super(BeamSearchDecoder, self).__init__(
cell,
beam_width,
output_layer=output_layer,
length_penalty_weight=length_penalty_weight,
coverage_penalty_weight=coverage_penalty_weight,
reorder_tensor_arrays=reorder_tensor_arrays)
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._start_tokens = ops.convert_to_tensor(
start_tokens, dtype=dtypes.int32, name="start_tokens")
if self._start_tokens.get_shape().ndims != 1:
raise ValueError("start_tokens must be a vector")
self._end_token = ops.convert_to_tensor(
end_token, dtype=dtypes.int32, name="end_token")
if self._end_token.get_shape().ndims != 0:
raise ValueError("end_token must be a scalar")
self._batch_size = array_ops.size(start_tokens)
self._initial_cell_state = nest.map_structure(
self._maybe_split_batch_beams, initial_state, self._cell.state_size)
self._start_tokens = array_ops.tile(
array_ops.expand_dims(self._start_tokens, 1), [1, self._beam_width])
self._start_inputs = self._embedding_fn(self._start_tokens)
self._finished = array_ops.one_hot(
array_ops.zeros([self._batch_size], dtype=dtypes.int32),
depth=self._beam_width,
on_value=False,
off_value=True,
dtype=dtypes.bool)
def initialize(self, name=None):
"""Initialize the decoder.
Args:
name: Name scope for any created operations.
Returns:
`(finished, start_inputs, initial_state)`.
"""
finished, start_inputs = self._finished, self._start_inputs
dtype = nest.flatten(self._initial_cell_state)[0].dtype
log_probs = array_ops.one_hot( # shape(batch_sz, beam_sz)
array_ops.zeros([self._batch_size], dtype=dtypes.int32),
depth=self._beam_width,
on_value=ops.convert_to_tensor(0.0, dtype=dtype),
off_value=ops.convert_to_tensor(-np.Inf, dtype=dtype),
dtype=dtype)
init_attention_probs = get_attention_probs(
self._initial_cell_state, self._coverage_penalty_weight)
if init_attention_probs is None:
init_attention_probs = ()
initial_state = BeamSearchDecoderState(
cell_state=self._initial_cell_state,
log_probs=log_probs,
finished=finished,
lengths=array_ops.zeros(
[self._batch_size, self._beam_width], dtype=dtypes.int64),
accumulated_attention_probs=init_attention_probs)
return (finished, start_inputs, initial_state)
@property
def output_dtype(self):
# Assume the dtype of the cell output matches the dtype of the initial
# cell state's first component.
# Return that structure and int32 (the id).
dtype = nest.flatten(self._initial_cell_state)[0].dtype
return BeamSearchDecoderOutput(
scores=nest.map_structure(lambda _: dtype, self._rnn_output_size()),
predicted_ids=dtypes.int32,
parent_ids=dtypes.int32)
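# Example usage (a sketch; `attention_cell`, `embedding_matrix`,
# `decoder_initial_state`, `true_batch_size`, GO_ID and EOS_ID are
# placeholders following the class docstring above, not names defined in
# this module):
#
#   bsd = BeamSearchDecoder(
#       cell=attention_cell,
#       embedding=embedding_matrix,
#       start_tokens=array_ops.fill([true_batch_size], GO_ID),
#       end_token=EOS_ID,
#       initial_state=decoder_initial_state,
#       beam_width=beam_width)
#   outputs, final_state, final_lengths = decoder.dynamic_decode(
#       bsd, maximum_iterations=maximum_iterations)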
class BeamSearchDecoderV2(BeamSearchDecoderMixin, decoder.BaseDecoder):
# Note that the inheritance hierarchy is important here. The Mixin has to be
# the first parent class since we will use super().__init__(), and the Mixin,
# which ultimately derives from object, will properly invoke the __init__
# method of the other parent class.
"""BeamSearch sampling decoder.
**NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped in
`AttentionWrapper`, then you must ensure that:
- The encoder output has been tiled to `beam_width` via
`tf.contrib.seq2seq.tile_batch` (NOT `tf.tile`).
- The `batch_size` argument passed to the `zero_state` method of this
wrapper is equal to `true_batch_size * beam_width`.
- The initial state created with `zero_state` above contains a
`cell_state` value containing properly tiled final state from the
encoder.
An example:
```
tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(
encoder_outputs, multiplier=beam_width)
tiled_encoder_final_state = tf.contrib.seq2seq.tile_batch(
encoder_final_state, multiplier=beam_width)
tiled_sequence_length = tf.contrib.seq2seq.tile_batch(
sequence_length, multiplier=beam_width)
attention_mechanism = MyFavoriteAttentionMechanism(
num_units=attention_depth,
memory=tiled_encoder_outputs,
memory_sequence_length=tiled_sequence_length)
attention_cell = AttentionWrapper(cell, attention_mechanism, ...)
decoder_initial_state = attention_cell.zero_state(
dtype, batch_size=true_batch_size * beam_width)
decoder_initial_state = decoder_initial_state.clone(
cell_state=tiled_encoder_final_state)
```
Meanwhile, when using `AttentionWrapper`, it is suggested to enable the
coverage penalty when computing scores (https://arxiv.org/pdf/1609.08144.pdf).
It encourages the decoder to cover all inputs.
"""
def __init__(self,
cell,
beam_width,
embedding_fn=None,
output_layer=None,
length_penalty_weight=0.0,
coverage_penalty_weight=0.0,
reorder_tensor_arrays=True,
**kwargs):
"""Initialize the BeamSearchDecoderV2.
Args:
cell: An `RNNCell` instance.
beam_width: Python integer, the number of beams.
embedding_fn: A callable that takes a vector tensor of `ids` (argmax ids).
output_layer: (Optional) An instance of `tf.keras.layers.Layer`, i.e.,
`tf.keras.layers.Dense`. Optional layer to apply to the RNN output
prior to storing the result or sampling.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
coverage_penalty_weight: Float weight to penalize the coverage of source
sentence. Disabled with 0.0.
reorder_tensor_arrays: If `True`, `TensorArray`s' elements within the cell
state will be reordered according to the beam search path. If the
`TensorArray` can be reordered, the stacked form will be returned.
Otherwise, the `TensorArray` will be returned as is. Set this flag to
`False` if the cell state contains `TensorArray`s that are not amenable
to reordering.
**kwargs: Dict, other keyword arguments for initialization.
Raises:
TypeError: if `cell` is not an instance of `RNNCell`,
or `output_layer` is not an instance of `tf.keras.layers.Layer`.
"""
super(BeamSearchDecoderV2, self).__init__(
cell,
beam_width,
output_layer=output_layer,
length_penalty_weight=length_penalty_weight,
coverage_penalty_weight=coverage_penalty_weight,
reorder_tensor_arrays=reorder_tensor_arrays,
**kwargs)
if embedding_fn is None or callable(embedding_fn):
self._embedding_fn = embedding_fn
else:
raise ValueError("embedding_fn is expected to be a callable, got %s" %
type(embedding_fn))
def initialize(self,
embedding,
start_tokens,
end_token,
initial_state):
"""Initialize the decoder.
Args:
embedding: A tensor from the embedding layer output, which is the
`params` argument for `embedding_lookup`.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
initial_state: A (possibly nested tuple of...) tensors and TensorArrays.
Returns:
`(finished, start_inputs, initial_state)`.
Raises:
ValueError: If `start_tokens` is not a vector or `end_token` is not a
scalar.
"""
if embedding is not None and self._embedding_fn is not None:
raise ValueError(
"embedding and embedding_fn cannot be provided at same time")
elif embedding is not None:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._start_tokens = ops.convert_to_tensor(
start_tokens, dtype=dtypes.int32, name="start_tokens")
if self._start_tokens.get_shape().ndims != 1:
raise ValueError("start_tokens must be a vector")
self._end_token = ops.convert_to_tensor(
end_token, dtype=dtypes.int32, name="end_token")
if self._end_token.get_shape().ndims != 0:
raise ValueError("end_token must be a scalar")
self._batch_size = array_ops.size(start_tokens)
self._initial_cell_state = nest.map_structure(
self._maybe_split_batch_beams, initial_state, self._cell.state_size)
self._start_tokens = array_ops.tile(
array_ops.expand_dims(self._start_tokens, 1), [1, self._beam_width])
self._start_inputs = self._embedding_fn(self._start_tokens)
self._finished = array_ops.one_hot(
array_ops.zeros([self._batch_size], dtype=dtypes.int32),
depth=self._beam_width,
on_value=False,
off_value=True,
dtype=dtypes.bool)
finished, start_inputs = self._finished, self._start_inputs
dtype = nest.flatten(self._initial_cell_state)[0].dtype
log_probs = array_ops.one_hot( # shape(batch_sz, beam_sz)
array_ops.zeros([self._batch_size], dtype=dtypes.int32),
depth=self._beam_width,
on_value=ops.convert_to_tensor(0.0, dtype=dtype),
off_value=ops.convert_to_tensor(-np.Inf, dtype=dtype),
dtype=dtype)
init_attention_probs = get_attention_probs(
self._initial_cell_state, self._coverage_penalty_weight)
if init_attention_probs is None:
init_attention_probs = ()
initial_state = BeamSearchDecoderState(
cell_state=self._initial_cell_state,
log_probs=log_probs,
finished=finished,
lengths=array_ops.zeros(
[self._batch_size, self._beam_width], dtype=dtypes.int64),
accumulated_attention_probs=init_attention_probs)
return (finished, start_inputs, initial_state)
@property
def output_dtype(self):
# Assume the dtype of the cell output matches the dtype of the initial
# cell state's first component.
# Return that structure and int32 (the id).
dtype = nest.flatten(self._initial_cell_state)[0].dtype
return BeamSearchDecoderOutput(
scores=nest.map_structure(lambda _: dtype, self._rnn_output_size()),
predicted_ids=dtypes.int32,
parent_ids=dtypes.int32)
def call(self, embedding, start_tokens, end_token, initial_state, **kwargs):
init_kwargs = kwargs
init_kwargs["start_tokens"] = start_tokens
init_kwargs["end_token"] = end_token
init_kwargs["initial_state"] = initial_state
return decoder.dynamic_decode(self,
output_time_major=self.output_time_major,
impute_finished=self.impute_finished,
maximum_iterations=self.maximum_iterations,
parallel_iterations=self.parallel_iterations,
swap_memory=self.swap_memory,
decoder_init_input=embedding,
decoder_init_kwargs=init_kwargs)
def _beam_search_step(time, logits, next_cell_state, beam_state, batch_size,
beam_width, end_token, length_penalty_weight,
coverage_penalty_weight):
"""Performs a single step of Beam Search Decoding.
Args:
time: Beam search time step, should start at 0. At time 0 we assume
that all beams are equal and consider only the first beam for
continuations.
logits: Logits at the current time step. A tensor of shape
`[batch_size, beam_width, vocab_size]`
next_cell_state: The next state from the cell, e.g. an instance of
AttentionWrapperState if the cell is attentional.
beam_state: Current state of the beam search.
An instance of `BeamSearchDecoderState`.
batch_size: The batch size for this input.
beam_width: Python int. The size of the beams.
end_token: The int32 end token.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
coverage_penalty_weight: Float weight to penalize the coverage of source
sentence. Disabled with 0.0.
Returns:
A tuple `(output, next_state)`, where `output` is a
`BeamSearchDecoderOutput` for this step and `next_state` is the new
`BeamSearchDecoderState`.
"""
static_batch_size = tensor_util.constant_value(batch_size)
# Calculate the current lengths of the predictions
prediction_lengths = beam_state.lengths
previously_finished = beam_state.finished
not_finished = math_ops.logical_not(previously_finished)
# Calculate the total log probs for the new hypotheses
# Final Shape: [batch_size, beam_width, vocab_size]
step_log_probs = nn_ops.log_softmax(logits)
step_log_probs = _mask_probs(step_log_probs, end_token, previously_finished)
total_probs = array_ops.expand_dims(beam_state.log_probs, 2) + step_log_probs
# Calculate the continuation lengths by adding to all continuing beams.
vocab_size = logits.shape.dims[-1].value or array_ops.shape(logits)[-1]
lengths_to_add = array_ops.one_hot(
indices=array_ops.fill([batch_size, beam_width], end_token),
depth=vocab_size,
on_value=math_ops.to_int64(0),
off_value=math_ops.to_int64(1),
dtype=dtypes.int64)
add_mask = math_ops.cast(not_finished, dtypes.int64)
lengths_to_add *= array_ops.expand_dims(add_mask, 2)
new_prediction_lengths = (
lengths_to_add + array_ops.expand_dims(prediction_lengths, 2))
# Calculate the accumulated attention probabilities if coverage penalty is
# enabled.
accumulated_attention_probs = None
attention_probs = get_attention_probs(
next_cell_state, coverage_penalty_weight)
if attention_probs is not None:
attention_probs *= array_ops.expand_dims(
math_ops.cast(not_finished, dtypes.float32), 2)
accumulated_attention_probs = (
beam_state.accumulated_attention_probs + attention_probs)
# Calculate the scores for each beam
scores = _get_scores(
log_probs=total_probs,
sequence_lengths=new_prediction_lengths,
length_penalty_weight=length_penalty_weight,
coverage_penalty_weight=coverage_penalty_weight,
finished=previously_finished,
accumulated_attention_probs=accumulated_attention_probs)
time = ops.convert_to_tensor(time, name="time")
# During the first time step we only consider the initial beam
scores_flat = array_ops.reshape(scores, [batch_size, -1])
# Pick the next beams according to the specified successors function
next_beam_size = ops.convert_to_tensor(
beam_width, dtype=dtypes.int32, name="beam_width")
next_beam_scores, word_indices = nn_ops.top_k(scores_flat, k=next_beam_size)
next_beam_scores.set_shape([static_batch_size, beam_width])
word_indices.set_shape([static_batch_size, beam_width])
# Pick out the probs, beam_ids, and states according to the chosen predictions
next_beam_probs = _tensor_gather_helper(
gather_indices=word_indices,
gather_from=total_probs,
batch_size=batch_size,
range_size=beam_width * vocab_size,
gather_shape=[-1],
name="next_beam_probs")
# Note: just doing the following
# math_ops.cast(
# word_indices % vocab_size,
# dtypes.int32,
# name="next_beam_word_ids")
# would be a lot cleaner but for reasons unclear, that hides the results of
# the op which prevents capturing it with tfdbg debug ops.
raw_next_word_ids = math_ops.mod(
word_indices, vocab_size, name="next_beam_word_ids")
next_word_ids = math_ops.cast(raw_next_word_ids, dtypes.int32)
next_beam_ids = math_ops.cast(
word_indices / vocab_size, dtypes.int32, name="next_beam_parent_ids")
# Append new ids to current predictions
previously_finished = _tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=previously_finished,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[-1])
next_finished = math_ops.logical_or(
previously_finished,
math_ops.equal(next_word_ids, end_token),
name="next_beam_finished")
# Calculate the length of the next predictions.
# 1. Finished beams remain unchanged.
# 2. Beams that are now finished (EOS predicted) have their length
# increased by 1.
# 3. Beams that are not yet finished have their length increased by 1.
lengths_to_add = math_ops.cast(
math_ops.logical_not(previously_finished), dtypes.int64)
next_prediction_len = _tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=beam_state.lengths,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[-1])
next_prediction_len += lengths_to_add
next_accumulated_attention_probs = ()
if accumulated_attention_probs is not None:
next_accumulated_attention_probs = _tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=accumulated_attention_probs,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[batch_size * beam_width, -1],
name="next_accumulated_attention_probs")
# Pick out the cell_states according to the next_beam_ids. We use a
# different gather_shape here because the cell_state tensors, i.e.
# the tensors that would be gathered from, all have dimension
# greater than two and we need to preserve those dimensions.
# pylint: disable=g-long-lambda
next_cell_state = nest.map_structure(
lambda gather_from: _maybe_tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=gather_from,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[batch_size * beam_width, -1]),
next_cell_state)
# pylint: enable=g-long-lambda
next_state = BeamSearchDecoderState(
cell_state=next_cell_state,
log_probs=next_beam_probs,
lengths=next_prediction_len,
finished=next_finished,
accumulated_attention_probs=next_accumulated_attention_probs)
output = BeamSearchDecoderOutput(
scores=next_beam_scores,
predicted_ids=next_word_ids,
parent_ids=next_beam_ids)
return output, next_state
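# Worked illustration of the flat top-k above (hypothetical sizes): with
# beam_width=3 and vocab_size=5, `scores_flat` has 15 columns per batch
# entry, and a flat index w recovers word id `w % 5` and parent beam
# `w // 5`, which is exactly the mod/div pair computed for next_word_ids
# and next_beam_ids.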
def get_attention_probs(next_cell_state, coverage_penalty_weight):
"""Get attention probabilities from the cell state.
Args:
next_cell_state: The next state from the cell, e.g. an instance of
AttentionWrapperState if the cell is attentional.
coverage_penalty_weight: Float weight to penalize the coverage of source
sentence. Disabled with 0.0.
Returns:
The attention probabilities with shape `[batch_size, beam_width, max_time]`
if coverage penalty is enabled. Otherwise, returns None.
Raises:
ValueError: If no cell is attentional but coverage penalty is enabled.
"""
if coverage_penalty_weight == 0.0:
return None
# Attention probabilities of each attention layer. Each with shape
# `[batch_size, beam_width, max_time]`.
probs_per_attn_layer = []
if isinstance(next_cell_state, attention_wrapper.AttentionWrapperState):
probs_per_attn_layer = [attention_probs_from_attn_state(next_cell_state)]
elif isinstance(next_cell_state, tuple):
for state in next_cell_state:
if isinstance(state, attention_wrapper.AttentionWrapperState):
probs_per_attn_layer.append(attention_probs_from_attn_state(state))
if not probs_per_attn_layer:
raise ValueError(
"coverage_penalty_weight must be 0.0 if no cell is attentional.")
if len(probs_per_attn_layer) == 1:
attention_probs = probs_per_attn_layer[0]
else:
# Calculate the average attention probabilities from all attention layers.
attention_probs = [
array_ops.expand_dims(prob, -1) for prob in probs_per_attn_layer]
attention_probs = array_ops.concat(attention_probs, -1)
attention_probs = math_ops.reduce_mean(attention_probs, -1)
return attention_probs
def _get_scores(log_probs, sequence_lengths, length_penalty_weight,
coverage_penalty_weight, finished, accumulated_attention_probs):
"""Calculates scores for beam search hypotheses.
Args:
log_probs: The log probabilities with shape
`[batch_size, beam_width, vocab_size]`.
sequence_lengths: The array of sequence lengths.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
coverage_penalty_weight: Float weight to penalize the coverage of source
sentence. Disabled with 0.0.
finished: A boolean tensor of shape `[batch_size, beam_width]` that
specifies which elements in the beam are finished already.
accumulated_attention_probs: Accumulated attention probabilities up to the
current time step, with shape `[batch_size, beam_width, max_time]` if
coverage_penalty_weight is not 0.0.
Returns:
The scores normalized by the length_penalty and coverage_penalty.
Raises:
ValueError: accumulated_attention_probs is None when coverage penalty is
enabled.
"""
length_penalty_ = _length_penalty(
sequence_lengths=sequence_lengths, penalty_factor=length_penalty_weight)
length_penalty_ = math_ops.cast(length_penalty_, dtype=log_probs.dtype)
scores = log_probs / length_penalty_
coverage_penalty_weight = ops.convert_to_tensor(
coverage_penalty_weight, name="coverage_penalty_weight")
if coverage_penalty_weight.shape.ndims != 0:
raise ValueError("coverage_penalty_weight should be a scalar, "
"but saw shape: %s" % coverage_penalty_weight.shape)
if tensor_util.constant_value(coverage_penalty_weight) == 0.0:
return scores
if accumulated_attention_probs is None:
raise ValueError(
"accumulated_attention_probs can be None only if coverage penalty is "
"disabled.")
# Add source sequence length mask before computing coverage penalty.
accumulated_attention_probs = array_ops.where(
math_ops.equal(accumulated_attention_probs, 0.0),
array_ops.ones_like(accumulated_attention_probs),
accumulated_attention_probs)
# coverage penalty =
# sum over `max_time` {log(min(accumulated_attention_probs, 1.0))}
coverage_penalty = math_ops.reduce_sum(
math_ops.log(math_ops.minimum(accumulated_attention_probs, 1.0)), 2)
# Apply coverage penalty to finished predictions.
coverage_penalty *= math_ops.cast(finished, dtypes.float32)
weighted_coverage_penalty = coverage_penalty * coverage_penalty_weight
# Reshape from [batch_size, beam_width] to [batch_size, beam_width, 1]
weighted_coverage_penalty = array_ops.expand_dims(
weighted_coverage_penalty, 2)
return scores + weighted_coverage_penalty
def attention_probs_from_attn_state(attention_state):
"""Calculates the average attention probabilities.
Args:
attention_state: An instance of `AttentionWrapperState`.
Returns:
The attention probabilities in the given AttentionWrapperState.
If there are multiple attention mechanisms, returns the average value from
all of them.
"""
# Attention probabilities over time steps, with shape
# `[batch_size, beam_width, max_time]`.
attention_probs = attention_state.alignments
if isinstance(attention_probs, tuple):
attention_probs = [
array_ops.expand_dims(prob, -1) for prob in attention_probs]
attention_probs = array_ops.concat(attention_probs, -1)
attention_probs = math_ops.reduce_mean(attention_probs, -1)
return attention_probs
def _length_penalty(sequence_lengths, penalty_factor):
"""Calculates the length penalty. See https://arxiv.org/abs/1609.08144.
Returns the length penalty tensor:
```
[(5+sequence_lengths)/6]**penalty_factor
```
where all operations are performed element-wise.
Args:
sequence_lengths: `Tensor`, the sequence lengths of each hypothesis.
penalty_factor: A scalar that weights the length penalty.
Returns:
If the penalty is `0`, returns the scalar `1.0`. Otherwise returns
the length penalty factor, a tensor with the same shape as
`sequence_lengths`.
"""
penalty_factor = ops.convert_to_tensor(penalty_factor, name="penalty_factor")
penalty_factor.set_shape(()) # penalty should be a scalar.
static_penalty = tensor_util.constant_value(penalty_factor)
if static_penalty is not None and static_penalty == 0:
return 1.0
return math_ops.div(
(5. + math_ops.cast(sequence_lengths, dtypes.float32))**penalty_factor,
(5. + 1.)**penalty_factor)
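# Worked example (values computed from the formula above and rounded):
# with penalty_factor=0.6, a hypothesis of length 7 receives penalty
# ((5 + 7) / 6) ** 0.6 ~= 1.52, while one of length 13 receives
# ((5 + 13) / 6) ** 0.6 ~= 1.93, so longer hypotheses are divided by a
# larger factor when scores are formed in _get_scores.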
def _mask_probs(probs, eos_token, finished):
"""Masks log probabilities.
The result is that finished beams allocate all probability mass to eos and
unfinished beams remain unchanged.
Args:
probs: Log probabilities of shape `[batch_size, beam_width, vocab_size]`
eos_token: An int32 id corresponding to the EOS token to allocate
probability to.
finished: A boolean tensor of shape `[batch_size, beam_width]` that
specifies which elements in the beam are finished already.
Returns:
A tensor of shape `[batch_size, beam_width, vocab_size]`, where unfinished
beams stay unchanged and finished beams are replaced with a tensor with all
probability on the EOS token.
"""
vocab_size = array_ops.shape(probs)[2]
# All finished examples are replaced with a vector that has all
# probability on EOS
finished_row = array_ops.one_hot(
eos_token,
vocab_size,
dtype=probs.dtype,
on_value=ops.convert_to_tensor(0., dtype=probs.dtype),
off_value=probs.dtype.min)
finished_probs = array_ops.tile(
array_ops.reshape(finished_row, [1, 1, -1]),
array_ops.concat([array_ops.shape(finished), [1]], 0))
finished_mask = array_ops.tile(
array_ops.expand_dims(finished, 2), [1, 1, vocab_size])
return array_ops.where(finished_mask, finished_probs, probs)
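# For instance (illustrative): with vocab_size=3 and eos_token=1, each
# finished beam's row becomes [dtype.min, 0., dtype.min], so every
# continuation other than EOS scores dtype.min while unfinished beams pass
# through unchanged.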
def _maybe_tensor_gather_helper(gather_indices, gather_from, batch_size,
range_size, gather_shape):
"""Maybe applies _tensor_gather_helper.
This applies _tensor_gather_helper when gather_from's rank is at least as
large as the length of gather_shape. This is used in conjunction with nest so
that we don't apply _tensor_gather_helper to inapplicable values like scalars.
Args:
gather_indices: The tensor indices that we use to gather.
gather_from: The tensor that we are gathering from.
batch_size: The batch size.
range_size: The number of values in each range. Likely equal to beam_width.
gather_shape: What we should reshape gather_from to in order to preserve the
correct values. An example is when gather_from is the attention from an
AttentionWrapperState with shape [batch_size, beam_width, attention_size].
There, we want to preserve the attention_size elements, so gather_shape is
[batch_size * beam_width, -1]. Then, upon reshape, we still have the
attention_size as desired.
Returns:
output: Gathered tensor of shape tf.shape(gather_from)[:1+len(gather_shape)]
or the original tensor if its dimensions are too small.
"""
if isinstance(gather_from, tensor_array_ops.TensorArray):
return gather_from
_check_ndims(gather_from)
if gather_from.shape.ndims >= len(gather_shape):
return _tensor_gather_helper(
gather_indices=gather_indices,
gather_from=gather_from,
batch_size=batch_size,
range_size=range_size,
gather_shape=gather_shape)
else:
return gather_from
def _tensor_gather_helper(gather_indices,
gather_from,
batch_size,
range_size,
gather_shape,
name=None):
"""Helper for gathering the right indices from the tensor.
This works by reshaping gather_from to gather_shape (e.g. [-1]) and then
gathering from that according to the gather_indices, which are offset by
the right amounts in order to preserve the batch order.
Args:
gather_indices: The tensor indices that we use to gather.
gather_from: The tensor that we are gathering from.
batch_size: The input batch size.
range_size: The number of values in each range. Likely equal to beam_width.
gather_shape: What we should reshape gather_from to in order to preserve the
correct values. An example is when gather_from is the attention from an
AttentionWrapperState with shape [batch_size, beam_width, attention_size].
There, we want to preserve the attention_size elements, so gather_shape is
[batch_size * beam_width, -1]. Then, upon reshape, we still have the
attention_size as desired.
name: The tensor name for set of operations. By default this is
'tensor_gather_helper'. The final output is named 'output'.
Returns:
output: Gathered tensor of shape tf.shape(gather_from)[:1+len(gather_shape)]
"""
with ops.name_scope(name, "tensor_gather_helper"):
range_ = array_ops.expand_dims(math_ops.range(batch_size) * range_size, 1)
gather_indices = array_ops.reshape(gather_indices + range_, [-1])
output = array_ops.gather(
array_ops.reshape(gather_from, gather_shape), gather_indices)
final_shape = array_ops.shape(gather_from)[:1 + len(gather_shape)]
static_batch_size = tensor_util.constant_value(batch_size)
final_static_shape = (
tensor_shape.TensorShape([static_batch_size]).concatenate(
gather_from.shape[1:1 + len(gather_shape)]))
output = array_ops.reshape(output, final_shape, name="output")
output.set_shape(final_static_shape)
return output
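# Worked illustration (hypothetical values): with batch_size=2, range_size=3
# and gather_indices=[[0, 2], [1, 1]], `range_` above is [[0], [3]] and the
# flattened indices become [0, 2, 4, 4], so each batch entry gathers only
# from its own contiguous block of the reshaped tensor.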
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Kafka Dataset.
@@KafkaDataset
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.kafka.python.ops.kafka_dataset_ops import KafkaDataset
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
"KafkaDataset",
]
remove_undocumented(__name__)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/kafka/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for KafkaDataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.kafka.python.ops import kafka_dataset_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class KafkaDatasetTest(test.TestCase):
def setUp(self):
# The Kafka server has to be set up before the test
# and torn down after the test manually.
# The docker engine has to be installed.
#
# To set up the Kafka server:
# $ bash kafka_test.sh start kafka
#
# To tear down the Kafka server:
# $ bash kafka_test.sh stop kafka
pass
def testKafkaDataset(self):
topics = array_ops.placeholder(dtypes.string, shape=[None])
num_epochs = array_ops.placeholder(dtypes.int64, shape=[])
batch_size = array_ops.placeholder(dtypes.int64, shape=[])
repeat_dataset = kafka_dataset_ops.KafkaDataset(
topics, group="test", eof=True).repeat(num_epochs)
batch_dataset = repeat_dataset.batch(batch_size)
iterator = iterator_ops.Iterator.from_structure(
dataset_ops.get_legacy_output_types(batch_dataset))
init_op = iterator.make_initializer(repeat_dataset)
init_batch_op = iterator.make_initializer(batch_dataset)
get_next = iterator.get_next()
with self.cached_session() as sess:
# Basic test: read the first five messages from the topic.
sess.run(init_op, feed_dict={topics: ["test:0:0:4"], num_epochs: 1})
for i in range(5):
self.assertEqual("D" + str(i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Basic test: read messages from offset 5 onwards.
sess.run(init_op, feed_dict={topics: ["test:0:5:-1"], num_epochs: 1})
for i in range(5):
self.assertEqual("D" + str(i + 5), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Basic test: read from both subscription ranges.
sess.run(
init_op,
feed_dict={
topics: ["test:0:0:4", "test:0:5:-1"],
num_epochs: 1
})
for j in range(2):
for i in range(5):
self.assertEqual("D" + str(i + j * 5), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test repeated iteration through both subscription ranges.
sess.run(
init_op,
feed_dict={
topics: ["test:0:0:4", "test:0:5:-1"],
num_epochs: 10
})
for _ in range(10):
for j in range(2):
for i in range(5):
self.assertEqual("D" + str(i + j * 5), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test batched and repeated iteration through both subscription ranges.
sess.run(
init_batch_op,
feed_dict={
topics: ["test:0:0:4", "test:0:5:-1"],
num_epochs: 10,
batch_size: 5
})
for _ in range(10):
self.assertAllEqual(["D" + str(i) for i in range(5)],
sess.run(get_next))
self.assertAllEqual(["D" + str(i + 5) for i in range(5)],
sess.run(get_next))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/kafka/python/kernel_tests/kafka_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Kafka Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.kafka.python.ops import gen_dataset_ops
from tensorflow.contrib.kafka.python.ops import kafka_op_loader # pylint: disable=unused-import
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.util import deprecation
class KafkaDataset(dataset_ops.DatasetSource):
"""A Kafka Dataset that consumes the message.
"""
@deprecation.deprecated(
None,
"tf.contrib.kafka will be removed in 2.0, the support for Apache Kafka "
"will continue to be provided through the tensorflow/io GitHub project.")
def __init__(self,
topics,
servers="localhost",
group="",
eof=False,
timeout=1000):
"""Create a KafkaReader.
Args:
topics: A `tf.string` tensor containing one or more subscriptions,
in the format of [topic:partition:offset:length];
by default, length is -1 for unlimited.
servers: A list of bootstrap servers.
group: The consumer group id.
eof: If True, the kafka reader will stop on EOF.
timeout: The timeout value for the Kafka Consumer to wait
(in milliseconds).
"""
self._topics = ops.convert_to_tensor(
topics, dtype=dtypes.string, name="topics")
self._servers = ops.convert_to_tensor(
servers, dtype=dtypes.string, name="servers")
self._group = ops.convert_to_tensor(
group, dtype=dtypes.string, name="group")
self._eof = ops.convert_to_tensor(eof, dtype=dtypes.bool, name="eof")
self._timeout = ops.convert_to_tensor(
timeout, dtype=dtypes.int64, name="timeout")
super(KafkaDataset, self).__init__(self._as_variant_tensor())
def _as_variant_tensor(self):
return gen_dataset_ops.kafka_dataset(self._topics, self._servers,
self._group, self._eof, self._timeout)
@property
def element_spec(self):
return tensor_spec.TensorSpec([], dtypes.string)
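# Example usage (a sketch; the subscription string and server address are
# placeholders, not values defined in this module):
#
#   dataset = KafkaDataset(["test:0:0:-1"], servers="localhost:9092",
#                          group="demo", eof=True)
#   iterator = dataset_ops.make_one_shot_iterator(dataset)
#   message = iterator.get_next()  # one scalar `tf.string` tensor per record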
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/kafka/python/ops/kafka_dataset_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python helper for loading kafka ops and kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
_dataset_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("../../_dataset_ops.so"))
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/kafka/python/ops/kafka_op_loader.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Exposes the python wrapper for TensorRT graph transforms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.tensorrt.python import *
# pylint: enable=unused-import,wildcard-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensorrt/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Exposes the Python wrapper conversion to trt_graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compiler.tensorrt import trt_convert
def create_inference_graph(
input_graph_def,
outputs,
max_batch_size=1,
max_workspace_size_bytes=trt_convert.DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES,
precision_mode=trt_convert.TrtPrecisionMode.FP32,
minimum_segment_size=3,
is_dynamic_op=False,
maximum_cached_engines=1,
input_saved_model_dir=None,
input_saved_model_tags=None,
output_saved_model_dir=None,
session_config=None):
return trt_convert.create_inference_graph(
input_graph_def=input_graph_def,
outputs=outputs,
max_batch_size=max_batch_size,
max_workspace_size_bytes=max_workspace_size_bytes,
precision_mode=precision_mode,
minimum_segment_size=minimum_segment_size,
is_dynamic_op=is_dynamic_op,
maximum_cached_engines=maximum_cached_engines,
input_saved_model_dir=input_saved_model_dir,
input_saved_model_tags=input_saved_model_tags,
output_saved_model_dir=output_saved_model_dir,
session_config=session_config)
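# Example usage (a sketch; `frozen_graph_def` and the output node name are
# placeholders):
#
#   trt_graph_def = create_inference_graph(
#       input_graph_def=frozen_graph_def,
#       outputs=["logits"],
#       max_batch_size=8,
#       precision_mode=trt_convert.TrtPrecisionMode.FP16)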
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensorrt/python/trt_convert.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Exposes the python wrapper for TensorRT graph transforms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long
from tensorflow.contrib.tensorrt.python.trt_convert import create_inference_graph
# pylint: enable=unused-import,line-too-long
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensorrt/python/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sequence File Dataset.
@@SequenceFileDataset
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.hadoop.python.ops.hadoop_dataset_ops import SequenceFileDataset
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
"SequenceFileDataset",
]
remove_undocumented(__name__)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/hadoop/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for SequenceFileDataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.hadoop.python.ops import hadoop_dataset_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
class SequenceFileDatasetTest(test.TestCase):
def test_sequence_file_dataset(self):
"""Test case for SequenceFileDataset.
The file is generated with `org.apache.hadoop.io.Text` for key/value.
There are 25 records in the file with the format of:
key = XXX
value = VALUEXXX
where XXX is the line number (starting at 001).
"""
filename = os.path.join(resource_loader.get_data_files_path(),
"testdata", "string.seq")
filenames = constant_op.constant([filename], dtypes.string)
num_repeats = 2
dataset = hadoop_dataset_ops.SequenceFileDataset(filenames).repeat(
num_repeats)
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(num_repeats): # Dataset is repeated.
for i in range(25): # 25 records.
v0 = b"%03d" % (i + 1)
v1 = b"VALUE%03d" % (i + 1)
self.assertEqual((v0, v1), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/hadoop/python/kernel_tests/hadoop_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python helper for loading hadoop ops and kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
_dataset_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("../../_dataset_ops.so"))
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/hadoop/python/ops/hadoop_op_loader.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SequenceFile Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.hadoop.python.ops import gen_dataset_ops
from tensorflow.contrib.hadoop.python.ops import hadoop_op_loader # pylint: disable=unused-import
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.util import deprecation
class SequenceFileDataset(dataset_ops.DatasetSource):
"""A Sequence File Dataset that reads the sequence file."""
@deprecation.deprecated(
None,
"tf.contrib.hadoop will be removed in 2.0, the support for Apache Hadoop "
"will continue to be provided through the tensorflow/io GitHub project.")
def __init__(self, filenames):
"""Create a `SequenceFileDataset`.
    `SequenceFileDataset` allows a user to read data from a Hadoop sequence
    file. A sequence file consists of (key, value) pairs stored sequentially.
    At the moment, `org.apache.hadoop.io.Text` is the only supported
    serialization type, and there is no compression support.
For example:
```python
tf.compat.v1.enable_eager_execution()
dataset = tf.contrib.hadoop.SequenceFileDataset("/foo/bar.seq")
# Prints the (key, value) pairs inside a hadoop sequence file.
for key, value in dataset:
print(key, value)
```
Args:
filenames: A `tf.string` tensor containing one or more filenames.
"""
self._filenames = ops.convert_to_tensor(
filenames, dtype=dtypes.string, name="filenames")
variant_tensor = gen_dataset_ops.sequence_file_dataset(
self._filenames, self._flat_types)
super(SequenceFileDataset, self).__init__(variant_tensor)
@property
def element_spec(self):
return (tensor_spec.TensorSpec([], dtypes.string),
tensor_spec.TensorSpec([], dtypes.string))
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/hadoop/python/ops/hadoop_dataset_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""hooks: A module containing `SessionRunHook`s for use with `MonitoredSession`.
@@ProfilerHook
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.hooks.python.training import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['ProfilerHook']
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/hooks/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental `SessionRunHooks` for use with `MonitoredSession`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.hooks.python.training import *
# pylint: enable=wildcard-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/hooks/python/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""hooks: A module containing `SessionRunHook`s for use with `MonitoredSession`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.hooks.python.training.profiler_hook import ProfilerHook
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/hooks/python/training/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Placeholder of ProfilerHook for backward compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training import basic_session_run_hooks
ProfilerHook = basic_session_run_hooks.ProfilerHook # pylint: disable=invalid-name
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/hooks/python/training/profiler_hook.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops and modules related to batch.
@@batch_function_v1
@@batch_function
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.batching.python.ops.batch_ops import batch_function
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/batching/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for automatic batching and unbatching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_batch_ops
# pylint: disable=unused-import
from tensorflow.python.ops.batch_ops import batch
from tensorflow.python.ops.batch_ops import batch_function
from tensorflow.python.ops.batch_ops import unbatch
# pylint: enable=unused-import
@ops.RegisterGradient("Batch")
def _BatchGrad(op, *out_grads): # pylint: disable=invalid-name
"""Gradient for batch op."""
gradients = []
for i in range(len(op.inputs)):
gradients.append(
gen_batch_ops.unbatch(
out_grads[i],
op.outputs[-2],
op.outputs[-1],
timeout_micros=op.get_attr("grad_timeout_micros"),
shared_name="batch_gradient_{}_{}".format(op.name, i)))
return gradients
@ops.RegisterGradient("Unbatch")
def _UnbatchGrad(op, grad): # pylint: disable=invalid-name
return [
gen_batch_ops.unbatch_grad(
op.inputs[0],
op.inputs[1],
grad,
op.inputs[2],
shared_name="unbatch_gradient_{}".format(op.name)), None, None
]
def batch_function_v1(num_batch_threads,
max_batch_size,
batch_timeout_micros,
allowed_batch_sizes=None,
grad_timeout_micros=60 * 1000 * 1000,
unbatch_timeout_micros=60 * 1000 * 1000,
max_enqueued_batches=10):
"""Batches the computation done by the decorated function.
  This is the older version of `batch_function()`. Please use
  `batch_function` instead.
Args:
num_batch_threads: Number of scheduling threads for processing batches
of work. Determines the number of batches processed in parallel.
max_batch_size: Batch sizes will never be bigger than this.
batch_timeout_micros: Maximum number of microseconds to wait before
outputting an incomplete batch.
allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
does nothing. Otherwise, supplies a list of batch sizes, causing the op
to pad batches up to one of those sizes. The entries must increase
monotonically, and the final entry must equal max_batch_size.
grad_timeout_micros: The timeout to use for the gradient. See the
documentation of the unbatch op for more details. Defaults to 60s.
unbatch_timeout_micros: The timeout to use for unbatching. See the
documentation of the unbatch op for more details. Defaults to 60s.
max_enqueued_batches: The maximum depth of the batch queue. Defaults to 10.
Returns:
The decorated function will return the unbatched computation output Tensors.
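  For example, a minimal sketch (the sizes and timeouts here are illustrative
  only):
  ```python
  @batch_function_v1(num_batch_threads=1,
                     max_batch_size=32,
                     batch_timeout_micros=100000)
  def times_two(in_t):
    return in_t + in_t
  # Concurrent session.run calls on `times_two` may have their inputs
  # grouped into a single batched evaluation of the body.
  ```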
"""
def decorator(f): # pylint: disable=missing-docstring
def decorated(*args):
with ops.name_scope("batch") as name:
for a in args:
if not isinstance(a, ops.Tensor):
raise ValueError("All arguments to functions decorated with "
"`batch_function` are supposed to be Tensors; "
"found %s" % repr(a))
batched_tensors, batch_index, id_t = gen_batch_ops.batch(
args,
num_batch_threads=num_batch_threads,
max_batch_size=max_batch_size,
batch_timeout_micros=batch_timeout_micros,
max_enqueued_batches=max_enqueued_batches,
allowed_batch_sizes=allowed_batch_sizes,
grad_timeout_micros=grad_timeout_micros,
shared_name=name)
outputs = f(*batched_tensors)
if isinstance(outputs, ops.Tensor):
outputs_list = [outputs]
else:
outputs_list = outputs
with ops.name_scope("unbatch") as unbatch_name:
unbatched = [
gen_batch_ops.unbatch(t, batch_index, id_t,
timeout_micros=unbatch_timeout_micros,
shared_name=unbatch_name + "/" + t.name)
for t in outputs_list]
if isinstance(outputs, ops.Tensor):
return unbatched[0]
return unbatched
return decorated
return decorator
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/batching/python/ops/batch_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the currently experimental in-graph batch ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from tensorflow.contrib.batching.python.ops import batch_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.platform import test
def delayed_plus1(x):
"""Sleeps for 100ms then returns x+1."""
time.sleep(0.1)
return x + 1
class BatchOpsTest(test.TestCase):
"""Tests for batch_ops.{un,}batch."""
def testBasicUnbatchV1Decorated(self):
"""Tests that the batch_function_v1 decorator works."""
with self.cached_session() as sess:
@batch_ops.batch_function_v1(1, 10, 100000)
def computation(in_t):
return in_t + 1
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
result = computation(inp)
thread_results = []
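      # Run the op from a worker thread and the main thread concurrently so
      # the batcher can group the two inputs into one batch.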
def worker():
thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([result], feed_dict={inp: [2]})
worker_thread.join()
self.assertEqual(thread_results[0], [2])
self.assertEqual(main_results[0], [3])
def testUnbatchGrad(self):
"""Tests that batch and unbatch are differentiable."""
with self.cached_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.float32, shape=[1])
batched, index, id_t = batch_ops.batch(
[inp], num_batch_threads=1, max_batch_size=2,
batch_timeout_micros=36000000, grad_timeout_micros=1000000,
batching_queue="")
computation = batched[0] * batched[0]
result = batch_ops.unbatch(computation, index, id_t,
timeout_micros=1000000, shared_name="unbatch")
grad = gradients_impl.gradients(result, inp)
thread_results = []
def worker():
thread_results.extend(sess.run([grad], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([grad], feed_dict={inp: [2]})
worker_thread.join()
self.assertEqual(thread_results[0], [2])
self.assertEqual(main_results[0], [4])
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/batching/python/ops/batch_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to copy elements between graphs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.copy_graph.python.util import copy_elements
# pylint: disable=wildcard-import
from tensorflow.contrib.copy_graph.python.util.copy_elements import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__, doc_string_modules=[copy_elements])
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/copy_graph/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for copying elements from one graph to another.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/copy_graph/python/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for copying elements from one graph to another.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/copy_graph/python/util/__init__.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Functions for copying elements from one graph to another.
These functions allow for recursive copying of elements (ops and variables)
from one graph to another. The copied elements are initialized inside a
user-specified scope in the other graph. There are separate functions to
copy ops and variables.
There is also a function to retrieve the copied version of an op from the
first graph inside a scope in the second graph.
@@copy_op_to_graph
@@copy_variable_to_graph
@@get_copied_op
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import deepcopy
from tensorflow.python.ops.variables import VariableV1
from tensorflow.python.client.session import Session
from tensorflow.python.framework import ops
__all__ = ['copy_op_to_graph', 'copy_variable_to_graph', 'get_copied_op']
def copy_variable_to_graph(org_instance, to_graph, scope=''):
"""Given a `Variable` instance from one `Graph`, initializes and returns
a copy of it from another `Graph`, under the specified scope
(default `""`).
Args:
org_instance: A `Variable` from some `Graph`.
to_graph: The `Graph` to copy the `Variable` to.
scope: A scope for the new `Variable` (default `""`).
Returns:
The copied `Variable` from `to_graph`.
Raises:
TypeError: If `org_instance` is not a `Variable`.
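  For example, a minimal sketch (`graph1` and `graph2` are placeholder
  `Graph` instances, not part of this API):
  ```python
  graph1, graph2 = ops.Graph(), ops.Graph()
  with graph1.as_default():
    v = VariableV1(2, name='v')
  # Copies `v` (and its current initial value) into `graph2`.
  v_copy = copy_variable_to_graph(v, graph2, scope='copied')
  ```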
"""
if not isinstance(org_instance, VariableV1):
raise TypeError(str(org_instance) + ' is not a Variable')
#The name of the new variable
if scope != '':
new_name = (scope + '/' + org_instance.name[:org_instance.name.index(':')])
else:
new_name = org_instance.name[:org_instance.name.index(':')]
#Get the collections that the new instance needs to be added to.
#The new collections will also be a part of the given scope,
#except the special ones required for variable initialization and
#training.
collections = []
for name, collection in org_instance.graph._collections.items():
if org_instance in collection:
if (name == ops.GraphKeys.GLOBAL_VARIABLES or
name == ops.GraphKeys.TRAINABLE_VARIABLES or scope == ''):
collections.append(name)
else:
collections.append(scope + '/' + name)
#See if it's trainable.
trainable = (
org_instance in org_instance.graph.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES))
  #Get the initial value (close the temporary session to avoid leaking it)
  with org_instance.graph.as_default():
    with Session() as temp_session:
      init_value = temp_session.run(org_instance.initialized_value())
#Initialize the new variable
with to_graph.as_default():
new_var = VariableV1(
init_value,
trainable,
name=new_name,
collections=collections,
validate_shape=False)
return new_var
def copy_op_to_graph(org_instance, to_graph, variables, scope=''):
"""Returns a copy of an operation from another Graph under a specified scope.
Given an `Operation` `org_instance` from one `Graph`,
initializes and returns a copy of it from another `Graph`,
under the specified scope (default `""`).
The copying is done recursively, so any `Operation` whose output
is required to evaluate the `org_instance`, is also copied (unless
already done).
Since `Variable` instances are copied separately, those required
to evaluate `org_instance` must be provided as input.
Args:
org_instance: An `Operation` from some `Graph`. Could be a
`Placeholder` as well.
to_graph: The `Graph` to copy `org_instance` to.
    variables: An iterable of `Variable` instances that have already been
      copied to `to_graph` and on which `org_instance` depends.
    scope: A scope for the new `Operation` (default `""`).
Returns:
The copied `Operation` from `to_graph`.
Raises:
TypeError: If `org_instance` is not an `Operation` or `Tensor`.
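  For example, a minimal sketch (continuing the `copy_variable_to_graph`
  example; `a`, `y` and `graph2` are placeholders):
  ```python
  # y was built in the source graph from Variable `a`; copy `a` first.
  a_copy = copy_variable_to_graph(a, graph2)
  y_copy = copy_op_to_graph(y, graph2, [a_copy])
  ```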
"""
#The name of the new instance
if scope != '':
new_name = scope + '/' + org_instance.name
else:
new_name = org_instance.name
#Extract names of variables
copied_variables = dict((x.name, x) for x in variables)
#If a variable by the new name already exists, return the
  #corresponding tensor that will act as an input
if new_name in copied_variables:
return to_graph.get_tensor_by_name(copied_variables[new_name].name)
#If an instance of the same name exists, return appropriately
try:
already_present = to_graph.as_graph_element(
new_name, allow_tensor=True, allow_operation=True)
return already_present
  except (KeyError, ValueError, TypeError):
pass
#Get the collections that the new instance needs to be added to.
#The new collections will also be a part of the given scope.
collections = []
for name, collection in org_instance.graph._collections.items():
if org_instance in collection:
if scope == '':
collections.append(name)
else:
collections.append(scope + '/' + name)
#Take action based on the class of the instance
if isinstance(org_instance, ops.Tensor):
#If it's a Tensor, it is one of the outputs of the underlying
#op. Therefore, copy the op itself and return the appropriate
#output.
op = org_instance.op
new_op = copy_op_to_graph(op, to_graph, variables, scope)
output_index = op.outputs.index(org_instance)
new_tensor = new_op.outputs[output_index]
#Add to collections if any
for collection in collections:
to_graph.add_to_collection(collection, new_tensor)
return new_tensor
elif isinstance(org_instance, ops.Operation):
op = org_instance
#If it has an original_op parameter, copy it
if op._original_op is not None:
new_original_op = copy_op_to_graph(op._original_op, to_graph, variables,
scope)
else:
new_original_op = None
#If it has control inputs, call this function recursively on each.
new_control_inputs = [
copy_op_to_graph(x, to_graph, variables, scope)
for x in op.control_inputs
]
#If it has inputs, call this function recursively on each.
new_inputs = [
copy_op_to_graph(x, to_graph, variables, scope) for x in op.inputs
]
#Make a new node_def based on that of the original.
#An instance of tensorflow.core.framework.node_def_pb2.NodeDef, it
#stores String-based info such as name, device and type of the op.
#Unique to every Operation instance.
new_node_def = deepcopy(op.node_def)
#Change the name
new_node_def.name = new_name
#Copy the other inputs needed for initialization
output_types = op._output_types[:]
input_types = op._input_types[:]
#Make a copy of the op_def too.
    #It's unique to every _type_ of Operation.
op_def = deepcopy(op.op_def)
#Initialize a new Operation instance
new_op = ops.Operation(new_node_def, to_graph, new_inputs, output_types,
new_control_inputs, input_types, new_original_op,
op_def)
#Use Graph's hidden methods to add the op
to_graph._record_op_seen_by_control_dependencies(new_op)
# pylint: disable=protected-access
for device_function in to_graph._device_functions_outer_to_inner:
new_op._set_device(device_function(new_op))
# pylint: enable=protected-access
return new_op
else:
raise TypeError('Could not copy instance: ' + str(org_instance))
def get_copied_op(org_instance, graph, scope=''):
"""Given an `Operation` instance from some `Graph`, returns
its namesake from `graph`, under the specified scope
(default `""`).
If a copy of `org_instance` is present in `graph` under the given
`scope`, it will be returned.
Args:
org_instance: An `Operation` from some `Graph`.
    graph: The `Graph` to be searched for a copy of `org_instance`.
scope: The scope `org_instance` is present in.
Returns:
The `Operation` copy from `graph`.
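  For example, a sketch (`x` and `graph2` are placeholders from the
  `copy_op_to_graph` example):
  ```python
  x_copy = get_copied_op(x, graph2)
  ```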
"""
#The name of the copied instance
if scope != '':
new_name = scope + '/' + org_instance.name
else:
new_name = org_instance.name
return graph.as_graph_element(
new_name, allow_tensor=True, allow_operation=True)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/copy_graph/python/util/copy_elements.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.copy_graph.python.util.copy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.copy_graph.python.util import copy_elements
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class CopyVariablesTest(test.TestCase):
def setUp(self):
self.graph1 = ops.Graph()
self.graph2 = ops.Graph()
def testVariableCopy(self):
with self.graph1.as_default():
#Define a Variable in graph1
some_var = variables.VariableV1(2)
#Initialize session
sess1 = session_lib.Session()
#Initialize the Variable
variables.global_variables_initializer().run(session=sess1)
      #Make a copy of some_var in the default scope in graph2
copy1 = copy_elements.copy_variable_to_graph(some_var, self.graph2)
#Make another copy with different scope
copy2 = copy_elements.copy_variable_to_graph(some_var,
self.graph2,
"test_scope")
#Initialize both the copies
with self.graph2.as_default():
#Initialize Session
sess2 = session_lib.Session()
#Initialize the Variables
variables.global_variables_initializer().run(session=sess2)
#Ensure values in all three variables are the same
v1 = some_var.eval(session=sess1)
v2 = copy1.eval(session=sess2)
v3 = copy2.eval(session=sess2)
assert isinstance(copy1, variables.Variable)
assert isinstance(copy2, variables.Variable)
assert v1 == v2 == v3 == 2
class CopyOpsTest(test.TestCase):
def setUp(self):
self.graph1 = ops.Graph()
self.graph2 = ops.Graph()
def testOpsCopy(self):
with self.graph1.as_default():
#Initialize a basic expression y = ax + b
x = array_ops.placeholder("float")
a = variables.VariableV1(3.0)
b = constant_op.constant(4.0)
ax = math_ops.multiply(x, a)
y = math_ops.add(ax, b)
#Initialize session
sess1 = session_lib.Session()
#Initialize the Variable
variables.global_variables_initializer().run(session=sess1)
#First, initialize a as a Variable in graph2
a1 = copy_elements.copy_variable_to_graph(a, self.graph2)
#Initialize a1 in graph2
with self.graph2.as_default():
#Initialize session
sess2 = session_lib.Session()
#Initialize the Variable
variables.global_variables_initializer().run(session=sess2)
#Initialize a copy of y in graph2
y1 = copy_elements.copy_op_to_graph(y, self.graph2, [a1])
#Now that y has been copied, x must be copied too.
#Get that instance
x1 = copy_elements.get_copied_op(x, self.graph2)
#Compare values of y & y1 for a sample input
#and check if they match
v1 = y.eval({x: 5}, session=sess1)
v2 = y1.eval({x1: 5}, session=sess2)
assert v1 == v2
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/copy_graph/python/util/copy_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for cloud ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# pylint: disable=line-too-long,wildcard-import,g-import-not-at-top
from tensorflow.contrib.cloud.python.ops.bigquery_reader_ops import *
from tensorflow.contrib.cloud.python.ops.gcs_config_ops import *
if os.name != 'nt':
from tensorflow.contrib.bigtable.python.ops.bigtable_api import BigtableClient
from tensorflow.contrib.bigtable.python.ops.bigtable_api import BigtableTable
del os
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'BigQueryReader',
'BigtableClient',
'BigtableTable',
'BlockCacheParams',
'configure_colab_session',
'configure_gcs',
'ConfigureGcsHook',
]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/cloud/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for BigQueryReader Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import re
import socket
import threading
from six.moves import SimpleHTTPServer
from six.moves import socketserver
from tensorflow.contrib.cloud.python.ops import bigquery_reader_ops as cloud
from tensorflow.core.example import example_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
_PROJECT = "test-project"
_DATASET = "test-dataset"
_TABLE = "test-table"
# List representation of the test rows in the 'test-table' in BigQuery.
# The schema for each row is: [int64, string, float].
# The values for rows are generated such that some columns have null values. The
# general formula here is:
# - The int64 column is present in every row.
# - The string column is only available in even rows.
# - The float column is only available in every third row.
_ROWS = [[0, "s_0", 0.1], [1, None, None], [2, "s_2", None], [3, None, 3.1],
[4, "s_4", None], [5, None, None], [6, "s_6", 6.1], [7, None, None],
[8, "s_8", None], [9, None, 9.1]]
# Schema for 'test-table'.
# The schema currently has three columns: int64, string, and float
_SCHEMA = {
"kind": "bigquery#table",
"id": "test-project:test-dataset.test-table",
"schema": {
"fields": [{
"name": "int64_col",
"type": "INTEGER",
"mode": "NULLABLE"
}, {
"name": "string_col",
"type": "STRING",
"mode": "NULLABLE"
}, {
"name": "float_col",
"type": "FLOAT",
"mode": "NULLABLE"
}]
}
}
def _ConvertRowToExampleProto(row):
"""Converts the input row to an Example proto.
Args:
row: Input Row instance.
Returns:
An Example proto initialized with row values.
"""
example = example_pb2.Example()
example.features.feature["int64_col"].int64_list.value.append(row[0])
if row[1] is not None:
example.features.feature["string_col"].bytes_list.value.append(
compat.as_bytes(row[1]))
if row[2] is not None:
example.features.feature["float_col"].float_list.value.append(row[2])
return example
class IPv6TCPServer(socketserver.TCPServer):
address_family = socket.AF_INET6
class FakeBigQueryServer(threading.Thread):
"""Fake http server to return schema and data for sample table."""
def __init__(self, address, port):
"""Creates a FakeBigQueryServer.
Args:
address: Server address
      port: Server port. Pass 0 to automatically pick an unused port.
"""
threading.Thread.__init__(self)
self.handler = BigQueryRequestHandler
try:
self.httpd = socketserver.TCPServer((address, port), self.handler)
self.host_port = "{}:{}".format(*self.httpd.server_address)
except IOError:
self.httpd = IPv6TCPServer((address, port), self.handler)
self.host_port = "[{}]:{}".format(*self.httpd.server_address)
def run(self):
self.httpd.serve_forever()
def shutdown(self):
self.httpd.shutdown()
self.httpd.socket.close()
class BigQueryRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""Responds to BigQuery HTTP requests.
Attributes:
    num_rows: Number of rows in the underlying table served by this class.
"""
num_rows = 0
def do_GET(self):
if "data?maxResults=" not in self.path:
# This is a schema request.
_SCHEMA["numRows"] = self.num_rows
response = json.dumps(_SCHEMA)
else:
# This is a data request.
#
# Extract max results and start index.
max_results = int(re.findall(r"maxResults=(\d+)", self.path)[0])
start_index = int(re.findall(r"startIndex=(\d+)", self.path)[0])
# Send the rows as JSON.
rows = []
for row in _ROWS[start_index:start_index + max_results]:
row_json = {
"f": [{
"v": str(row[0])
}, {
"v": str(row[1]) if row[1] is not None else None
}, {
"v": str(row[2]) if row[2] is not None else None
}]
}
rows.append(row_json)
response = json.dumps({
"kind": "bigquery#table",
"id": "test-project:test-dataset.test-table",
"rows": rows
})
self.send_response(200)
self.end_headers()
self.wfile.write(compat.as_bytes(response))
def _SetUpQueue(reader):
"""Sets up a queue for a reader."""
queue = data_flow_ops.FIFOQueue(8, [types_pb2.DT_STRING], shapes=())
key, value = reader.read(queue)
queue.enqueue_many(reader.partitions()).run()
queue.close().run()
return key, value
class BigQueryReaderOpsTest(test.TestCase):
def setUp(self):
super(BigQueryReaderOpsTest, self).setUp()
self.server = FakeBigQueryServer("localhost", 0)
self.server.start()
logging.info("server address is %s", self.server.host_port)
# An override to bypass the GCP auth token retrieval logic
# in google_auth_provider.cc.
os.environ["GOOGLE_AUTH_TOKEN_FOR_TESTING"] = "not-used"
def tearDown(self):
self.server.shutdown()
super(BigQueryReaderOpsTest, self).tearDown()
def _ReadAndCheckRowsUsingFeatures(self, num_rows):
self.server.handler.num_rows = num_rows
with self.cached_session() as sess:
feature_configs = {
"int64_col":
parsing_ops.FixedLenFeature(
[1], dtype=dtypes.int64),
"string_col":
parsing_ops.FixedLenFeature(
[1], dtype=dtypes.string, default_value="s_default"),
}
reader = cloud.BigQueryReader(
project_id=_PROJECT,
dataset_id=_DATASET,
table_id=_TABLE,
num_partitions=4,
features=feature_configs,
timestamp_millis=1,
test_end_point=self.server.host_port)
key, value = _SetUpQueue(reader)
seen_rows = []
features = parsing_ops.parse_example(
array_ops.reshape(value, [1]), feature_configs)
for _ in range(num_rows):
int_value, str_value = sess.run(
[features["int64_col"], features["string_col"]])
# Parse values returned from the session.
self.assertEqual(int_value.shape, (1, 1))
self.assertEqual(str_value.shape, (1, 1))
int64_col = int_value[0][0]
string_col = str_value[0][0]
seen_rows.append(int64_col)
# Compare.
expected_row = _ROWS[int64_col]
self.assertEqual(int64_col, expected_row[0])
self.assertEqual(
compat.as_str(string_col), ("s_%d" % int64_col) if expected_row[1]
else "s_default")
self.assertItemsEqual(seen_rows, range(num_rows))
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
sess.run([key, value])
def testReadingSingleRowUsingFeatures(self):
self._ReadAndCheckRowsUsingFeatures(1)
def testReadingMultipleRowsUsingFeatures(self):
self._ReadAndCheckRowsUsingFeatures(10)
def testReadingMultipleRowsUsingColumns(self):
num_rows = 10
self.server.handler.num_rows = num_rows
with self.cached_session() as sess:
reader = cloud.BigQueryReader(
project_id=_PROJECT,
dataset_id=_DATASET,
table_id=_TABLE,
num_partitions=4,
columns=["int64_col", "float_col", "string_col"],
timestamp_millis=1,
test_end_point=self.server.host_port)
key, value = _SetUpQueue(reader)
seen_rows = []
for row_index in range(num_rows):
returned_row_id, example_proto = sess.run([key, value])
example = example_pb2.Example()
example.ParseFromString(example_proto)
self.assertIn("int64_col", example.features.feature)
feature = example.features.feature["int64_col"]
self.assertEqual(len(feature.int64_list.value), 1)
int64_col = feature.int64_list.value[0]
seen_rows.append(int64_col)
# Create our expected Example.
expected_example = example_pb2.Example()
expected_example = _ConvertRowToExampleProto(_ROWS[int64_col])
# Compare.
self.assertProtoEquals(example, expected_example)
self.assertEqual(row_index, int(returned_row_id))
self.assertItemsEqual(seen_rows, range(num_rows))
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
sess.run([key, value])
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/cloud/python/ops/bigquery_reader_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the gcs_config_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.cloud.python.ops import gcs_config_ops
from tensorflow.python.platform import test
class GcsConfigOpsTest(test.TestCase):
def testSetBlockCache(self):
cfg = gcs_config_ops.BlockCacheParams(max_bytes=1024*1024*1024)
with self.cached_session() as sess:
gcs_config_ops.configure_gcs(sess, block_cache=cfg)
def testConfigureGcsHook(self):
creds = {'client_id': 'fake_client',
'refresh_token': 'fake_token',
'client_secret': 'fake_secret',
'type': 'authorized_user'}
hook = gcs_config_ops.ConfigureGcsHook(credentials=creds)
hook.begin()
with self.cached_session() as sess:
sess.run = lambda _, feed_dict=None, options=None, run_metadata=None: None
hook.after_create_session(sess, None)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/cloud/python/ops/gcs_config_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""BigQuery reading support for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.cloud.python.ops import gen_bigquery_reader_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
class BigQueryReader(io_ops.ReaderBase):
"""A Reader that outputs keys and tf.Example values from a BigQuery table.
Example use:
```python
  # Assume a BigQuery table has the following schema:
# name STRING,
# age INT,
# state STRING
# Create the parse_examples list of features.
features = dict(
name=tf.io.FixedLenFeature([1], tf.string),
      age=tf.io.FixedLenFeature([1], tf.int64),
state=tf.io.FixedLenFeature([1], dtype=tf.string, default_value="UNK"))
# Create a Reader.
reader = bigquery_reader_ops.BigQueryReader(project_id=PROJECT,
dataset_id=DATASET,
table_id=TABLE,
timestamp_millis=TIME,
num_partitions=NUM_PARTITIONS,
features=features)
# Populate a queue with the BigQuery Table partitions.
queue = tf.compat.v1.train.string_input_producer(reader.partitions())
# Read and parse examples.
row_id, examples_serialized = reader.read(queue)
examples = tf.io.parse_example(examples_serialized, features=features)
# Process the Tensors examples["name"], examples["age"], etc...
```
Note that to create a reader a snapshot timestamp is necessary. This
will enable the reader to look at a consistent snapshot of the table.
For more information, see 'Table Decorators' in BigQuery docs.
See ReaderBase for supported methods.
"""
def __init__(self,
project_id,
dataset_id,
table_id,
timestamp_millis,
num_partitions,
features=None,
columns=None,
test_end_point=None,
name=None):
"""Creates a BigQueryReader.
Args:
project_id: GCP project ID.
dataset_id: BigQuery dataset ID.
table_id: BigQuery table ID.
timestamp_millis: timestamp to snapshot the table in milliseconds since
the epoch. Relative (negative or zero) snapshot times are not allowed.
For more details, see 'Table Decorators' in BigQuery docs.
num_partitions: Number of non-overlapping partitions to read from.
features: parse_example compatible dict from keys to `VarLenFeature` and
`FixedLenFeature` objects. Keys are read as columns from the db.
columns: list of columns to read, can be set iff features is None.
test_end_point: Used only for testing purposes (optional).
name: a name for the operation (optional).
Raises:
      TypeError: If features is neither None nor a dict; if columns is
        neither None nor a list; or if features and columns are both None
        or both set.
"""
if (features is None) == (columns is None):
raise TypeError("exactly one of features and columns must be set.")
if features is not None:
if not isinstance(features, dict):
raise TypeError("features must be a dict.")
self._columns = list(features.keys())
elif columns is not None:
if not isinstance(columns, list):
raise TypeError("columns must be a list.")
self._columns = columns
self._project_id = project_id
self._dataset_id = dataset_id
self._table_id = table_id
self._timestamp_millis = timestamp_millis
self._num_partitions = num_partitions
self._test_end_point = test_end_point
reader = gen_bigquery_reader_ops.big_query_reader(
name=name,
project_id=self._project_id,
dataset_id=self._dataset_id,
table_id=self._table_id,
timestamp_millis=self._timestamp_millis,
columns=self._columns,
test_end_point=self._test_end_point)
super(BigQueryReader, self).__init__(reader)
def partitions(self, name=None):
"""Returns serialized BigQueryTablePartition messages.
These messages represent a non-overlapping division of a table for a
bulk read.
Args:
name: a name for the operation (optional).
Returns:
`1-D` string `Tensor` of serialized `BigQueryTablePartition` messages.
"""
return gen_bigquery_reader_ops.generate_big_query_reader_partitions(
name=name,
project_id=self._project_id,
dataset_id=self._dataset_id,
table_id=self._table_id,
timestamp_millis=self._timestamp_millis,
num_partitions=self._num_partitions,
test_end_point=self._test_end_point,
columns=self._columns)
ops.NotDifferentiable("BigQueryReader")
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/cloud/python/ops/bigquery_reader_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GCS file system configuration for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from tensorflow.contrib.cloud.python.ops import gen_gcs_config_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.training import training
# @tf_export('contrib.cloud.BlockCacheParams')
class BlockCacheParams(object):
"""BlockCacheParams is a struct used for configuring the GCS Block Cache."""
def __init__(self, block_size=None, max_bytes=None, max_staleness=None):
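    # Defaults applied below: a 128 MiB block size, a cache of two blocks,
    # and a max_staleness of 0.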
self._block_size = block_size or 128 * 1024 * 1024
self._max_bytes = max_bytes or 2 * self._block_size
self._max_staleness = max_staleness or 0
@property
def block_size(self):
return self._block_size
@property
def max_bytes(self):
return self._max_bytes
@property
def max_staleness(self):
return self._max_staleness
# @tf_export('contrib.cloud.ConfigureGcsHook')
class ConfigureGcsHook(training.SessionRunHook):
"""ConfigureGcsHook configures GCS when used with Estimator/TPUEstimator.
Warning: GCS `credentials` may be transmitted over the network unencrypted.
Please ensure that the network is trusted before using this function. For
users running code entirely within Google Cloud, your data is protected by
encryption in between data centers. For more information, please take a look
at https://cloud.google.com/security/encryption-in-transit/.
Example:
```
sess = tf.compat.v1.Session()
refresh_token = raw_input("Refresh token: ")
client_secret = raw_input("Client secret: ")
client_id = "<REDACTED>"
creds = {
"client_id": client_id,
"refresh_token": refresh_token,
"client_secret": client_secret,
"type": "authorized_user",
}
tf.contrib.cloud.configure_gcs(sess, credentials=creds)
```
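  The hook itself attaches like any other `SessionRunHook`; a minimal sketch
  (assuming `creds` is built as above):
  ```
  hook = ConfigureGcsHook(credentials=creds)
  with tf.compat.v1.train.MonitoredSession(hooks=[hook]) as sess:
    pass  # GCS is configured before the first use of the session.
  ```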
"""
  def _verify_dictionary(self, creds_dict):
    return 'refresh_token' in creds_dict or 'private_key' in creds_dict
def __init__(self, credentials=None, block_cache=None):
"""Constructs a ConfigureGcsHook.
Args:
      credentials: A JSON-formatted string or a dict with the equivalent
        contents.
      block_cache: A `BlockCacheParams` instance.
Raises:
ValueError: If credentials is improperly formatted or block_cache is not a
BlockCacheParams.
"""
if credentials is not None:
if isinstance(credentials, str):
try:
data = json.loads(credentials)
except ValueError as e:
raise ValueError('credentials was not a well formed JSON string.', e)
if not self._verify_dictionary(data):
raise ValueError(
'credentials has neither a "refresh_token" nor a "private_key" '
'field.')
elif isinstance(credentials, dict):
if not self._verify_dictionary(credentials):
raise ValueError('credentials has neither a "refresh_token" nor a '
'"private_key" field.')
credentials = json.dumps(credentials)
else:
raise ValueError('credentials is of an unknown type')
self._credentials = credentials
if block_cache and not isinstance(block_cache, BlockCacheParams):
raise ValueError('block_cache must be an instance of BlockCacheParams.')
self._block_cache = block_cache
def begin(self):
if self._credentials:
self._credentials_placeholder = array_ops.placeholder(dtypes.string)
self._credentials_op = gen_gcs_config_ops.gcs_configure_credentials(
self._credentials_placeholder)
else:
self._credentials_op = None
if self._block_cache:
self._block_cache_op = gen_gcs_config_ops.gcs_configure_block_cache(
max_cache_size=self._block_cache.max_bytes,
block_size=self._block_cache.block_size,
max_staleness=self._block_cache.max_staleness)
else:
self._block_cache_op = None
def after_create_session(self, session, coord):
del coord
if self._credentials_op:
session.run(
self._credentials_op,
feed_dict={self._credentials_placeholder: self._credentials})
if self._block_cache_op:
session.run(self._block_cache_op)
def configure_gcs(session, credentials=None, block_cache=None, device=None):
"""Configures the GCS file system for a given a session.
Warning: GCS `credentials` may be transmitted over the network unencrypted.
Please ensure that the network is trusted before using this function. For
users running code entirely within Google Cloud, your data is protected by
encryption in between data centers. For more information, please take a look
at https://cloud.google.com/security/encryption-in-transit/.
Args:
session: A `tf.compat.v1.Session` session that should be used to configure
the GCS file system.
    credentials: [Optional.] A JSON string or dict of GCS credentials.
    block_cache: [Optional.] A `BlockCacheParams` to configure the block cache.
device: [Optional.] The device to place the configure ops.
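  For example, a minimal sketch that configures only the block cache (the
  size is illustrative):
  ```python
  sess = tf.compat.v1.Session()
  configure_gcs(sess, block_cache=BlockCacheParams(max_bytes=1024 * 1024 * 1024))
  ```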
"""
def configure(credentials, block_cache):
"""Helper function to actually configure GCS."""
if credentials:
if isinstance(credentials, dict):
credentials = json.dumps(credentials)
placeholder = array_ops.placeholder(dtypes.string)
op = gen_gcs_config_ops.gcs_configure_credentials(placeholder)
session.run(op, feed_dict={placeholder: credentials})
if block_cache:
op = gen_gcs_config_ops.gcs_configure_block_cache(
max_cache_size=block_cache.max_bytes,
block_size=block_cache.block_size,
max_staleness=block_cache.max_staleness)
session.run(op)
if device:
with ops.device(device):
return configure(credentials, block_cache)
return configure(credentials, block_cache)
def configure_colab_session(session):
"""ConfigureColabSession configures the GCS file system in Colab.
Args:
session: A `tf.compat.v1.Session` session.
"""
# Read from the application default credentials (adc).
adc_filename = os.environ.get(
'GOOGLE_APPLICATION_CREDENTIALS', '/content/adc.json')
with open(adc_filename) as f:
data = json.load(f)
configure_gcs(session, credentials=data)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/cloud/python/ops/gcs_config_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for representing Bayesian computation.
Use [tfp](/probability/api_docs/python/tfp) instead.
## This package provides classes for Bayesian computation with TensorFlow.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long
from tensorflow.contrib.bayesflow.python.ops import monte_carlo
# pylint: enable=unused-import,line-too-long
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'monte_carlo',
]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/bayesflow/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ops module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/bayesflow/python/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Monte Carlo Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.bayesflow.python.ops import monte_carlo_impl as monte_carlo_lib
from tensorflow.contrib.bayesflow.python.ops.monte_carlo_impl import _get_samples
from tensorflow.contrib.distributions.python.ops import mvn_diag as mvn_diag_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import distribution as distribution_lib
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.platform import test
layers = layers_lib
mc = monte_carlo_lib
class ExpectationImportanceSampleTest(test.TestCase):
def test_normal_integral_mean_and_var_correctly_estimated(self):
n = int(1e6)
with self.cached_session():
mu_p = constant_op.constant([-1.0, 1.0], dtype=dtypes.float64)
mu_q = constant_op.constant([0.0, 0.0], dtype=dtypes.float64)
sigma_p = constant_op.constant([0.5, 0.5], dtype=dtypes.float64)
sigma_q = constant_op.constant([1.0, 1.0], dtype=dtypes.float64)
p = normal_lib.Normal(loc=mu_p, scale=sigma_p)
q = normal_lib.Normal(loc=mu_q, scale=sigma_q)
# Compute E_p[X].
e_x = mc.expectation_importance_sampler(
f=lambda x: x, log_p=p.log_prob, sampling_dist_q=q, n=n, seed=42)
# Compute E_p[X^2].
e_x2 = mc.expectation_importance_sampler(
f=math_ops.square, log_p=p.log_prob, sampling_dist_q=q, n=n, seed=42)
stddev = math_ops.sqrt(e_x2 - math_ops.square(e_x))
      # Relative tolerance (rtol) chosen 2 times as large as the minimum
      # needed to pass.
# Convergence of mean is +- 0.003 if n = 100M
# Convergence of stddev is +- 0.00001 if n = 100M
self.assertEqual(p.batch_shape, e_x.get_shape())
self.assertAllClose(p.mean().eval(), e_x.eval(), rtol=0.01)
self.assertAllClose(p.stddev().eval(), stddev.eval(), rtol=0.02)
def test_multivariate_normal_prob_positive_product_of_components(self):
    # Test that importance sampling can correctly estimate the probability
    # that the product of components of a MultivariateNormal sample is > 0.
n = 1000
with self.cached_session():
p = mvn_diag_lib.MultivariateNormalDiag(
loc=[0.], scale_diag=[1.0, 1.0])
q = mvn_diag_lib.MultivariateNormalDiag(
loc=[0.5], scale_diag=[3., 3.])
# Compute E_p[X_1 * X_2 > 0], with X_i the ith component of X ~ p(x).
# Should equal 1/2 because p is a spherical Gaussian centered at (0, 0).
def indicator(x):
x1_times_x2 = math_ops.reduce_prod(x, axis=[-1])
return 0.5 * (math_ops.sign(x1_times_x2) + 1.0)
prob = mc.expectation_importance_sampler(
f=indicator, log_p=p.log_prob, sampling_dist_q=q, n=n, seed=42)
      # Relative tolerance (rtol) chosen 2 times as large as the minimum
      # needed to pass.
# Convergence is +- 0.004 if n = 100k.
self.assertEqual(p.batch_shape, prob.get_shape())
self.assertAllClose(0.5, prob.eval(), rtol=0.05)
class ExpectationImportanceSampleLogspaceTest(test.TestCase):
def test_normal_distribution_second_moment_estimated_correctly(self):
# Test the importance sampled estimate against an analytical result.
n = int(1e6)
with self.cached_session():
mu_p = constant_op.constant([0.0, 0.0], dtype=dtypes.float64)
mu_q = constant_op.constant([-1.0, 1.0], dtype=dtypes.float64)
sigma_p = constant_op.constant([1.0, 2 / 3.], dtype=dtypes.float64)
sigma_q = constant_op.constant([1.0, 1.0], dtype=dtypes.float64)
p = normal_lib.Normal(loc=mu_p, scale=sigma_p)
q = normal_lib.Normal(loc=mu_q, scale=sigma_q)
# Compute E_p[X^2].
# Should equal [1, (2/3)^2]
log_e_x2 = mc.expectation_importance_sampler_logspace(
log_f=lambda x: math_ops.log(math_ops.square(x)),
log_p=p.log_prob,
sampling_dist_q=q,
n=n,
seed=42)
e_x2 = math_ops.exp(log_e_x2)
      # Relative tolerance (rtol) chosen 2 times as large as the minimum
      # needed to pass.
self.assertEqual(p.batch_shape, e_x2.get_shape())
self.assertAllClose([1., (2 / 3.)**2], e_x2.eval(), rtol=0.02)
class GetSamplesTest(test.TestCase):
"""Test the private method 'get_samples'."""
def test_raises_if_both_z_and_n_are_none(self):
with self.cached_session():
dist = normal_lib.Normal(loc=0., scale=1.)
z = None
n = None
seed = None
with self.assertRaisesRegexp(ValueError, 'exactly one'):
_get_samples(dist, z, n, seed)
def test_raises_if_both_z_and_n_are_not_none(self):
with self.cached_session():
dist = normal_lib.Normal(loc=0., scale=1.)
z = dist.sample(seed=42)
n = 1
seed = None
with self.assertRaisesRegexp(ValueError, 'exactly one'):
_get_samples(dist, z, n, seed)
def test_returns_n_samples_if_n_provided(self):
with self.cached_session():
dist = normal_lib.Normal(loc=0., scale=1.)
z = None
n = 10
seed = None
z = _get_samples(dist, z, n, seed)
self.assertEqual((10,), z.get_shape())
def test_returns_z_if_z_provided(self):
with self.cached_session():
dist = normal_lib.Normal(loc=0., scale=1.)
z = dist.sample(10, seed=42)
n = None
seed = None
z = _get_samples(dist, z, n, seed)
self.assertEqual((10,), z.get_shape())
class ExpectationTest(test.TestCase):
def test_works_correctly(self):
with self.cached_session() as sess:
x = constant_op.constant([-1e6, -100, -10, -1, 1, 10, 100, 1e6])
p = normal_lib.Normal(loc=x, scale=1.)
      # We use the prefix "efx" to mean "E_p[f(X)]".
f = lambda u: u
efx_true = x
samples = p.sample(int(1e5), seed=1)
efx_reparam = mc.expectation(f, samples, p.log_prob)
efx_score = mc.expectation(f, samples, p.log_prob,
use_reparametrization=False)
[
efx_true_,
efx_reparam_,
efx_score_,
efx_true_grad_,
efx_reparam_grad_,
efx_score_grad_,
] = sess.run([
efx_true,
efx_reparam,
efx_score,
gradients_impl.gradients(efx_true, x)[0],
gradients_impl.gradients(efx_reparam, x)[0],
gradients_impl.gradients(efx_score, x)[0],
])
self.assertAllEqual(np.ones_like(efx_true_grad_), efx_true_grad_)
self.assertAllClose(efx_true_, efx_reparam_, rtol=0.005, atol=0.)
self.assertAllClose(efx_true_, efx_score_, rtol=0.005, atol=0.)
self.assertAllEqual(np.ones_like(efx_true_grad_, dtype=np.bool),
np.isfinite(efx_reparam_grad_))
self.assertAllEqual(np.ones_like(efx_true_grad_, dtype=np.bool),
np.isfinite(efx_score_grad_))
self.assertAllClose(efx_true_grad_, efx_reparam_grad_,
rtol=0.03, atol=0.)
# Variance is too high to be meaningful, so we'll only check those which
# converge.
self.assertAllClose(efx_true_grad_[2:-2],
efx_score_grad_[2:-2],
rtol=0.05, atol=0.)
def test_docstring_example_normal(self):
with self.cached_session() as sess:
num_draws = int(1e5)
mu_p = constant_op.constant(0.)
mu_q = constant_op.constant(1.)
p = normal_lib.Normal(loc=mu_p, scale=1.)
q = normal_lib.Normal(loc=mu_q, scale=2.)
exact_kl_normal_normal = kullback_leibler.kl_divergence(p, q)
approx_kl_normal_normal = monte_carlo_lib.expectation(
f=lambda x: p.log_prob(x) - q.log_prob(x),
samples=p.sample(num_draws, seed=42),
log_prob=p.log_prob,
use_reparametrization=(p.reparameterization_type
== distribution_lib.FULLY_REPARAMETERIZED))
[exact_kl_normal_normal_, approx_kl_normal_normal_] = sess.run([
exact_kl_normal_normal, approx_kl_normal_normal])
      self.assertTrue(
          p.reparameterization_type == distribution_lib.FULLY_REPARAMETERIZED)
self.assertAllClose(exact_kl_normal_normal_, approx_kl_normal_normal_,
rtol=0.01, atol=0.)
# Compare gradients. (Not present in `docstring`.)
gradp = lambda fp: gradients_impl.gradients(fp, mu_p)[0]
gradq = lambda fq: gradients_impl.gradients(fq, mu_q)[0]
[
gradp_exact_kl_normal_normal_,
gradq_exact_kl_normal_normal_,
gradp_approx_kl_normal_normal_,
gradq_approx_kl_normal_normal_,
] = sess.run([
gradp(exact_kl_normal_normal),
gradq(exact_kl_normal_normal),
gradp(approx_kl_normal_normal),
gradq(approx_kl_normal_normal),
])
self.assertAllClose(gradp_exact_kl_normal_normal_,
gradp_approx_kl_normal_normal_,
rtol=0.01, atol=0.)
self.assertAllClose(gradq_exact_kl_normal_normal_,
gradq_approx_kl_normal_normal_,
rtol=0.01, atol=0.)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/bayesflow/python/kernel_tests/monte_carlo_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Monte Carlo integration and helpers.
Use [tfp.monte_carlo](/probability/api_docs/python/tfp/monte_carlo) instead.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.bayesflow.python.ops.monte_carlo_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'expectation',
'expectation_importance_sampler',
'expectation_importance_sampler_logspace',
]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/bayesflow/python/ops/monte_carlo.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Monte Carlo integration and helpers.
@@expectation
@@expectation_importance_sampler
@@expectation_importance_sampler_logspace
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util import deprecation
__all__ = [
'expectation',
'expectation_importance_sampler',
'expectation_importance_sampler_logspace',
]
def expectation_importance_sampler(f,
log_p,
sampling_dist_q,
z=None,
n=None,
seed=None,
name='expectation_importance_sampler'):
r"""Monte Carlo estimate of \\(E_p[f(Z)] = E_q[f(Z) p(Z) / q(Z)]\\).
  With \\(p(z) := e^{log_p(z)}\\), this `Op` returns
  \\(n^{-1} \sum_{i=1}^n [ f(z_i) p(z_i) / q(z_i) ], z_i \sim q,\\)
\\(\approx E_q[ f(Z) p(Z) / q(Z) ]\\)
\\(= E_p[f(Z)]\\)
This integral is done in log-space with max-subtraction to better handle the
often extreme values that `f(z) p(z) / q(z)` can take on.
If `f >= 0`, it is up to 2x more efficient to exponentiate the result of
`expectation_importance_sampler_logspace` applied to `Log[f]`.
  User supplies either a `Tensor` of samples `z`, or a number of samples to
  draw `n`.
Args:
f: Callable mapping samples from `sampling_dist_q` to `Tensors` with shape
broadcastable to `q.batch_shape`.
For example, `f` works "just like" `q.log_prob`.
log_p: Callable mapping samples from `sampling_dist_q` to `Tensors` with
shape broadcastable to `q.batch_shape`.
For example, `log_p` works "just like" `sampling_dist_q.log_prob`.
sampling_dist_q: The sampling distribution.
`tfp.distributions.Distribution`.
`float64` `dtype` recommended.
`log_p` and `q` should be supported on the same set.
z: `Tensor` of samples from `q`, produced by `q.sample` for some `n`.
n: Integer `Tensor`. Number of samples to generate if `z` is not provided.
seed: Python integer to seed the random number generator.
name: A name to give this `Op`.
Returns:
The importance sampling estimate. `Tensor` with `shape` equal
to batch shape of `q`, and `dtype` = `q.dtype`.
"""
q = sampling_dist_q
with ops.name_scope(name, values=[z, n]):
z = _get_samples(q, z, n, seed)
log_p_z = log_p(z)
q_log_prob_z = q.log_prob(z)
def _importance_sampler_positive_f(log_f_z):
# Same as expectation_importance_sampler_logspace, but using Tensors
# rather than samples and functions. Allows us to sample once.
log_values = log_f_z + log_p_z - q_log_prob_z
return _logspace_mean(log_values)
# With \\(f_{plus}(z) = max(0, f(z)), f_{minus}(z) = max(0, -f(z))\\),
# \\(E_p[f(Z)] = E_p[f_{plus}(Z)] - E_p[f_{minus}(Z)]\\)
# \\( = E_p[f_{plus}(Z) + 1] - E_p[f_{minus}(Z) + 1]\\)
    # Without incurring bias, 1 is added to each to prevent zeros in logspace.
    # The logarithm is approximately linear around 1, so this also behaves
    # well for small values of `f(z)`.
f_z = f(z)
log_f_plus_z = math_ops.log(nn.relu(f_z) + 1.)
log_f_minus_z = math_ops.log(nn.relu(-1. * f_z) + 1.)
log_f_plus_integral = _importance_sampler_positive_f(log_f_plus_z)
log_f_minus_integral = _importance_sampler_positive_f(log_f_minus_z)
return math_ops.exp(log_f_plus_integral) - math_ops.exp(log_f_minus_integral)
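# Illustrative sketch (not part of the library) of the sign-splitting trick
# above, in plain NumPy: E[f] is recovered as E[f_plus + 1] - E[f_minus + 1]
# because the added 1's cancel.
#
#   import numpy as np
#   f_z = np.array([2.0, -3.0, 0.5])
#   f_plus, f_minus = np.maximum(0., f_z), np.maximum(0., -f_z)
#   lhs = f_z.mean()                                     # -0.1666...
#   rhs = (f_plus + 1.).mean() - (f_minus + 1.).mean()   # same value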
def expectation_importance_sampler_logspace(
log_f,
log_p,
sampling_dist_q,
z=None,
n=None,
seed=None,
name='expectation_importance_sampler_logspace'):
r"""Importance sampling with a positive function, in log-space.
  With \\(p(z) := e^{log_p(z)}\\) and \\(f(z) := e^{log_f(z)}\\),
  this `Op` returns
  \\(Log[ n^{-1} \sum_{i=1}^n [ f(z_i) p(z_i) / q(z_i) ] ], z_i \sim q,\\)
\\(\approx Log[ E_q[ f(Z) p(Z) / q(Z) ] ]\\)
\\(= Log[E_p[f(Z)]]\\)
This integral is done in log-space with max-subtraction to better handle the
often extreme values that `f(z) p(z) / q(z)` can take on.
In contrast to `expectation_importance_sampler`, this `Op` returns values in
log-space.
  User supplies either a `Tensor` of samples `z`, or a number of samples to
  draw `n`.
Args:
log_f: Callable mapping samples from `sampling_dist_q` to `Tensors` with
shape broadcastable to `q.batch_shape`.
For example, `log_f` works "just like" `sampling_dist_q.log_prob`.
log_p: Callable mapping samples from `sampling_dist_q` to `Tensors` with
shape broadcastable to `q.batch_shape`.
For example, `log_p` works "just like" `q.log_prob`.
sampling_dist_q: The sampling distribution.
`tfp.distributions.Distribution`.
`float64` `dtype` recommended.
`log_p` and `q` should be supported on the same set.
z: `Tensor` of samples from `q`, produced by `q.sample` for some `n`.
n: Integer `Tensor`. Number of samples to generate if `z` is not provided.
seed: Python integer to seed the random number generator.
name: A name to give this `Op`.
Returns:
Logarithm of the importance sampling estimate. `Tensor` with `shape` equal
to batch shape of `q`, and `dtype` = `q.dtype`.
"""
q = sampling_dist_q
with ops.name_scope(name, values=[z, n]):
z = _get_samples(q, z, n, seed)
log_values = log_f(z) + log_p(z) - q.log_prob(z)
return _logspace_mean(log_values)
def _logspace_mean(log_values):
"""Evaluate `Log[E[values]]` in a stable manner.
Args:
log_values: `Tensor` holding `Log[values]`.
Returns:
`Tensor` of same `dtype` as `log_values`, reduced across dim 0.
`Log[Mean[values]]`.
"""
# center = Max[Log[values]], with stop-gradient
  # The center hopefully keeps the exponentiated terms small. It cancels out
  # of the final result, so stopping the gradient through it does not change
  # the result; it only eliminates unnecessary gradient computation.
center = array_ops.stop_gradient(_sample_max(log_values))
  # centered_values = exp{Log[values] - center}
  centered_values = math_ops.exp(log_values - center)
  # log_mean_of_values = Log[ E[centered_values] ] + center
  #                    = Log[ E[exp{Log[values] - center}] ] + center
  #                    = Log[ E[values] * exp{-center} ] + center
  #                    = Log[ E[values] ] - center + center
  #                    = Log[ E[values] ]
log_mean_of_values = math_ops.log(_sample_mean(centered_values)) + center
return log_mean_of_values
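# Illustrative sketch (not part of the library): the same max-subtraction
# stabilization in plain NumPy. A naive log(mean(exp(.))) underflows for very
# negative log-values, while centering by the max does not.
#
#   import numpy as np
#   log_values = np.array([-1000., -1001., -1002.])
#   naive = np.log(np.mean(np.exp(log_values)))              # -inf (underflow)
#   center = log_values.max()
#   stable = np.log(np.mean(np.exp(log_values - center))) + center
#   # stable ~= -1000.69, i.e. logsumexp(log_values) - log(3)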
@deprecation.deprecated(
'2018-10-01',
'The tf.contrib.bayesflow library has moved to '
'TensorFlow Probability (https://github.com/tensorflow/probability). '
'Use `tfp.monte_carlo.expectation` instead.',
warn_once=True)
def expectation(f, samples, log_prob=None, use_reparametrization=True,
axis=0, keep_dims=False, name=None):
r"""Computes the Monte-Carlo approximation of \\(E_p[f(X)]\\).
This function computes the Monte-Carlo approximation of an expectation, i.e.,
  \\(E_p[f(X)] \approx m^{-1} \sum_{j=1}^m f(x_j),\quad x_j \overset{iid}{\sim} p(X)\\)
where:
- `x_j = samples[j, ...]`,
- `log(p(samples)) = log_prob(samples)` and
- `m = prod(shape(samples)[axis])`.
Tricks: Reparameterization and Score-Gradient
When p is "reparameterized", i.e., a diffeomorphic transformation of a
parameterless distribution (e.g.,
`Normal(Y; m, s) <=> Y = sX + m, X ~ Normal(0,1)`), we can swap gradient and
expectation, i.e.,
  grad[ Avg{ \\(s_i : i=1...n\\) } ] = Avg{ grad[\\(s_i\\)] : i=1...n },
  where \\(s_i = f(x_i)\\) and \\(x_i \sim p\\).
However, if p is not reparameterized, TensorFlow's gradient will be incorrect
since the chain-rule stops at samples of non-reparameterized distributions.
(The non-differentiated result, `approx_expectation`, is the same regardless
of `use_reparametrization`.) In this circumstance using the Score-Gradient
trick results in an unbiased gradient, i.e.,
```none
grad[ E_p[f(X)] ]
= grad[ int dx p(x) f(x) ]
= int dx grad[ p(x) f(x) ]
= int dx [ p'(x) f(x) + p(x) f'(x) ]
= int dx p(x) [p'(x) / p(x) f(x) + f'(x) ]
= int dx p(x) grad[ f(x) p(x) / stop_grad[p(x)] ]
= E_p[ grad[ f(x) p(x) / stop_grad[p(x)] ] ]
```
  When p is reparameterized, it is usually preferable to set
  `use_reparametrization=True`.
Warning: users are responsible for verifying `p` is a "reparameterized"
distribution.
Example Use:
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Monte-Carlo approximation of a reparameterized distribution, e.g., Normal.
num_draws = int(1e5)
p = tfd.Normal(loc=0., scale=1.)
q = tfd.Normal(loc=1., scale=2.)
exact_kl_normal_normal = tfd.kl_divergence(p, q)
# ==> 0.44314718
approx_kl_normal_normal = tfp.monte_carlo.expectation(
f=lambda x: p.log_prob(x) - q.log_prob(x),
samples=p.sample(num_draws, seed=42),
log_prob=p.log_prob,
use_reparametrization=(p.reparameterization_type
                             == tfd.FULLY_REPARAMETERIZED))
# ==> 0.44632751
# Relative Error: <1%
# Monte-Carlo approximation of non-reparameterized distribution, e.g., Gamma.
num_draws = int(1e5)
  p = tfd.Gamma(concentration=1., rate=1.)
  q = tfd.Gamma(concentration=2., rate=3.)
exact_kl_gamma_gamma = tfd.kl_divergence(p, q)
# ==> 0.37999129
approx_kl_gamma_gamma = tfp.monte_carlo.expectation(
f=lambda x: p.log_prob(x) - q.log_prob(x),
samples=p.sample(num_draws, seed=42),
log_prob=p.log_prob,
use_reparametrization=(p.reparameterization_type
                             == tfd.FULLY_REPARAMETERIZED))
# ==> 0.37696719
# Relative Error: <1%
# For comparing the gradients, see `monte_carlo_test.py`.
```
Note: The above example is for illustration only. To compute approximate
KL-divergence, the following is preferred:
```python
approx_kl_p_q = tfp.vi.monte_carlo_csiszar_f_divergence(
      f=tfp.vi.kl_reverse,
p_log_prob=q.log_prob,
q=p,
num_draws=num_draws)
```
Args:
f: Python callable which can return `f(samples)`.
samples: `Tensor` of samples used to form the Monte-Carlo approximation of
\\(E_p[f(X)]\\). A batch of samples should be indexed by `axis`
dimensions.
log_prob: Python callable which can return `log_prob(samples)`. Must
correspond to the natural-logarithm of the pdf/pmf of each sample. Only
required/used if `use_reparametrization=False`.
Default value: `None`.
use_reparametrization: Python `bool` indicating that the approximation
should use the fact that the gradient of samples is unbiased. Whether
`True` or `False`, this arg only affects the gradient of the resulting
`approx_expectation`.
Default value: `True`.
axis: The dimensions to average. If `None`, averages all
dimensions.
Default value: `0` (the left-most dimension).
keep_dims: If True, retains averaged dimensions using size `1`.
Default value: `False`.
name: A `name_scope` for operations created by this function.
Default value: `None` (which implies "expectation").
Returns:
approx_expectation: `Tensor` corresponding to the Monte-Carlo approximation
of \\(E_p[f(X)]\\).
Raises:
ValueError: if `f` is not a Python `callable`.
ValueError: if `use_reparametrization=False` and `log_prob` is not a Python
`callable`.
"""
with ops.name_scope(name, 'expectation', [samples]):
if not callable(f):
raise ValueError('`f` must be a callable function.')
if use_reparametrization:
return math_ops.reduce_mean(f(samples), axis=axis, keepdims=keep_dims)
else:
if not callable(log_prob):
raise ValueError('`log_prob` must be a callable function.')
stop = array_ops.stop_gradient # For readability.
x = stop(samples)
logpx = log_prob(x)
fx = f(x) # Call `f` once in case it has side-effects.
      # We now rewrite f(x) so that:
      #   `grad[f(x)] := grad[f(x)] + f(x) * grad[logpx]`.
# To achieve this, we use a trick that
# `h(x) - stop(h(x)) == zeros_like(h(x))`
# but its gradient is grad[h(x)].
# Note that IEEE754 specifies that `x - x == 0.` and `x + 0. == x`, hence
# this trick loses no precision. For more discussion regarding the
# relevant portions of the IEEE754 standard, see the StackOverflow
# question,
# "Is there a floating point value of x, for which x-x == 0 is false?"
# http://stackoverflow.com/q/2686644
fx += stop(fx) * (logpx - stop(logpx)) # Add zeros_like(logpx).
return math_ops.reduce_mean(fx, axis=axis, keepdims=keep_dims)
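# Illustrative sketch (not part of the library) of the `h(x) - stop(h(x))`
# trick used above: the expression evaluates to zero but still carries the
# gradient of h.
#
#   import tensorflow.compat.v1 as tf
#   x = tf.constant(2.0)
#   h = x * x
#   z = h - tf.stop_gradient(h)     # value: 0.0
#   g = tf.gradients(z, x)[0]       # gradient: 4.0 == d(x^2)/dx at x = 2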
def _sample_mean(values):
"""Mean over sample indices. In this module this is always [0]."""
return math_ops.reduce_mean(values, axis=[0])
def _sample_max(values):
"""Max over sample indices. In this module this is always [0]."""
return math_ops.reduce_max(values, axis=[0])
def _get_samples(dist, z, n, seed):
"""Check args and return samples."""
with ops.name_scope('get_samples', values=[z, n]):
if (n is None) == (z is None):
raise ValueError(
'Must specify exactly one of arguments "n" and "z". Found: '
'n = %s, z = %s' % (n, z))
if n is not None:
return dist.sample(n, seed=seed)
else:
return ops.convert_to_tensor(z, name='z')
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/bayesflow/python/ops/monte_carlo_impl.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tensorboard module containing volatile or experimental code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Add projects here, they will show up under tf.contrib.tensorboard.
from tensorflow.contrib.tensorboard import plugins
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensorboard/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tensorboard plugins module containing volatile or experimental code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Add projects here, they will show up under tf.contrib.tensorboard.plugins
from tensorflow.contrib.tensorboard.plugins import projector
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensorboard/plugins/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Public API for the Embedding Projector.
@@ProjectorPluginAsset
@@ProjectorConfig
@@EmbeddingInfo
@@EmbeddingMetadata
@@SpriteMetadata
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf import text_format
from tensorflow.contrib.tensorboard.plugins.projector import projector_config_pb2
# pylint: disable=wildcard-import
from tensorflow.contrib.tensorboard.plugins.projector.projector_config_pb2 import *
# pylint: enable=wildcard-import
from tensorflow.python.lib.io import file_io
def visualize_embeddings(summary_writer, config):
"""Stores a config file used by the embedding projector.
Args:
summary_writer: The summary writer used for writing events.
config: `tf.contrib.tensorboard.plugins.projector.ProjectorConfig`
proto that holds the configuration for the projector such as paths to
checkpoint files and metadata files for the embeddings. If
      `config.model_checkpoint_path` is `None`, it defaults to the
      `logdir` used by the `summary_writer`.
Raises:
ValueError: If the summary writer does not have a `logdir`.
"""
logdir = summary_writer.get_logdir()
# Sanity checks.
if logdir is None:
raise ValueError('Summary writer must have a logdir')
# Saving the config file in the logdir.
config_pbtxt = text_format.MessageToString(config)
# FYI - the 'projector_config.pbtxt' string is hardcoded in the projector
# plugin.
# TODO(dandelion): Restore this to a reference to the projector plugin
file_io.write_string_to_file(
os.path.join(logdir, 'projector_config.pbtxt'), config_pbtxt)
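# Illustrative usage sketch (hedged; `logdir` and `embedding_var` are
# placeholders, not part of this module):
#
#   from tensorflow.contrib.tensorboard.plugins import projector
#
#   writer = tf.summary.FileWriter(logdir)
#   config = projector.ProjectorConfig()
#   embedding = config.embeddings.add()
#   embedding.tensor_name = embedding_var.name
#   embedding.metadata_path = 'metadata.tsv'
#   projector.visualize_embeddings(writer, config)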
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensorboard/plugins/projector/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""API tests for the projector plugin in TensorBoard."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from google.protobuf import text_format
from tensorflow.contrib.tensorboard.plugins import projector
from tensorflow.contrib.tensorboard.plugins.projector import projector_config_pb2
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer as writer_lib
class ProjectorApiTest(test.TestCase):
def testVisualizeEmbeddings(self):
# Create a dummy configuration.
config = projector_config_pb2.ProjectorConfig()
config.model_checkpoint_path = 'test'
emb1 = config.embeddings.add()
emb1.tensor_name = 'tensor1'
emb1.metadata_path = 'metadata1'
# Call the API method to save the configuration to a temporary dir.
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
writer = writer_lib.FileWriter(temp_dir)
projector.visualize_embeddings(writer, config)
# Read the configurations from disk and make sure it matches the original.
with gfile.GFile(os.path.join(temp_dir, 'projector_config.pbtxt')) as f:
config2 = projector_config_pb2.ProjectorConfig()
text_format.Parse(f.read(), config2)
self.assertEqual(config, config2)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/tensorboard/plugins/projector/projector_api_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module containing optimization routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.opt.python.training.adam_gs_optimizer import *
from tensorflow.contrib.opt.python.training.adamax import *
from tensorflow.contrib.opt.python.training.addsign import *
from tensorflow.contrib.opt.python.training.agn_optimizer import *
from tensorflow.contrib.opt.python.training.drop_stale_gradient_optimizer import *
from tensorflow.contrib.opt.python.training.elastic_average_optimizer import *
from tensorflow.contrib.opt.python.training.external_optimizer import *
from tensorflow.contrib.opt.python.training.lars_optimizer import *
from tensorflow.contrib.opt.python.training.ggt import *
from tensorflow.contrib.opt.python.training.lazy_adam_optimizer import *
from tensorflow.contrib.opt.python.training.lazy_adam_gs_optimizer import *
from tensorflow.contrib.opt.python.training.model_average_optimizer import *
from tensorflow.contrib.opt.python.training.moving_average_optimizer import *
from tensorflow.contrib.opt.python.training.multitask_optimizer_wrapper import *
from tensorflow.contrib.opt.python.training.nadam_optimizer import *
from tensorflow.contrib.opt.python.training.reg_adagrad_optimizer import *
from tensorflow.contrib.opt.python.training.shampoo import *
from tensorflow.contrib.opt.python.training.weight_decay_optimizers import *
from tensorflow.contrib.opt.python.training.powersign import *
from tensorflow.contrib.opt.python.training.variable_clipping_optimizer import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'AdaMaxOptimizer',
'AdamGSOptimizer',
'PowerSignOptimizer',
'AddSignOptimizer',
'DelayCompensatedGradientDescentOptimizer',
'DropStaleGradientOptimizer',
'ExternalOptimizerInterface',
'LARSOptimizer',
'LazyAdamGSOptimizer',
'LazyAdamOptimizer',
'NadamOptimizer',
'MovingAverageOptimizer',
'MomentumWOptimizer',
'AdamWOptimizer',
'DecoupledWeightDecayExtension',
'extend_with_decoupled_weight_decay',
'ScipyOptimizerInterface',
'VariableClippingOptimizer',
'MultitaskOptimizerWrapper',
'clip_gradients_by_global_norm',
'AGNOptimizer',
'AGNCustomGetter',
'ElasticAverageOptimizer',
'ElasticAverageCustomGetter',
'ModelAverageOptimizer',
'ModelAverageCustomGetter',
'GGTOptimizer',
'ShampooOptimizer',
'RegAdagradOptimizer',
]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DropStaleGradientOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import portpicker
from tensorflow.contrib.opt.python.training import drop_stale_gradient_optimizer
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import server_lib
from tensorflow.python.training import training_util
# Creates the workers and returns their sessions, graphs, and train ops.
def _get_workers(num_workers, staleness):
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
cluster_dict = {
'worker': ['localhost:%s' % port for port in worker_ports],
'ps': ['localhost:%s' % portpicker.pick_unused_port()]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs, job_name='worker', task_index=ix, start=True)
for ix in range(num_workers)
]
server_lib.Server(cs, job_name='ps', task_index=0, start=True)
sessions = []
graphs = []
train_ops = []
  # To simulate staleness we maintain two queues, one for computing gradients
  # and one for applying them. In the computation phase, all workers except
  # the chief compute gradients together, and the chief computes only after
  # all the other workers have finished. In the application phase, the chief
  # applies its gradients first, then the other workers apply theirs one by
  # one. Therefore the chief always has staleness 0, while each other worker
  # has a unique staleness value in [1, num_workers).
for worker_id in range(num_workers):
graph = ops.Graph()
with graph.as_default():
global_step = training_util.create_global_step()
var_0 = variables.VariableV1(0.0, name='v0')
var_1 = variables.VariableV1(1.0, name='v1')
compute_gradients_queue = data_flow_ops.FIFOQueue(
-1, global_step.dtype.base_dtype, shapes=(),
name='compute_gradients_queue', shared_name='compute_gradients_queue')
apply_gradients_queue = data_flow_ops.FIFOQueue(
-1, global_step.dtype.base_dtype, shapes=(),
name='apply_gradients_queue', shared_name='apply_gradients_queue')
      # The gradient of the loss w.r.t. var_0 and var_1 is -1.0, so one SGD
      # step with learning rate 1.0 increases each variable by 1.0.
loss = 0 - var_0 - var_1
sgd_opt = gradient_descent.GradientDescentOptimizer(1.0)
stale_check_opt = (
drop_stale_gradient_optimizer.DropStaleGradientOptimizer(
sgd_opt, staleness))
# Compute gradients.
if worker_id == 0:
with ops.control_dependencies(
[compute_gradients_queue.dequeue_many(num_workers - 1)]):
grad_and_vars = stale_check_opt.compute_gradients(loss)
else:
grad_and_vars = stale_check_opt.compute_gradients(loss)
with ops.control_dependencies([t[0] for t in grad_and_vars]):
worker_enqueue_op = compute_gradients_queue.enqueue(global_step)
# Apply gradients.
if worker_id == 0:
with ops.control_dependencies(
[stale_check_opt.apply_gradients(grad_and_vars, global_step)]):
train_op = apply_gradients_queue.enqueue(global_step)
else:
with ops.control_dependencies([worker_enqueue_op]):
with ops.control_dependencies([apply_gradients_queue.dequeue()]):
with ops.control_dependencies(
[stale_check_opt.apply_gradients(
grad_and_vars, global_step)]):
train_op = apply_gradients_queue.enqueue(global_step)
sess = session.Session(workers[worker_id].target)
sessions.append(sess)
graphs.append(graph)
train_ops.append(train_op)
return sessions, graphs, train_ops
class DropStaleGradientOptimizerTest(test.TestCase):
def _run(self, train_op, sess):
sess.run(train_op)
def test1Worker(self):
num_workers = 1
sessions, graphs, train_ops = _get_workers(num_workers, 0)
with graphs[0].as_default():
sessions[0].run(variables.global_variables_initializer())
global_step = training_util.get_global_step(graphs[0])
var_0 = graphs[0].get_tensor_by_name('v0:0')
var_1 = graphs[0].get_tensor_by_name('v1:0')
stale_counter = graphs[0].get_tensor_by_name('stale_counter:0')
# Verify the initialized value.
self.assertAllEqual(0.0, sessions[0].run(var_0))
self.assertAllEqual(1.0, sessions[0].run(var_1))
self.assertAllEqual(0.0, sessions[0].run(stale_counter))
self.assertAllEqual(0, sessions[0].run(global_step))
sessions[0].run(train_ops[0])
# Verify the updated value after 1 step.
self.assertAllEqual(1, sessions[0].run(global_step))
self.assertAllEqual(0.0 + 1.0, sessions[0].run(var_0))
self.assertAllEqual(1.0 + 1.0, sessions[0].run(var_1))
self.assertAllEqual(1, sessions[0].run(global_step))
def test1WorkerNegativeStaleness(self):
num_workers = 1
sessions, graphs, train_ops = _get_workers(num_workers, -1)
with graphs[0].as_default():
sessions[0].run(variables.global_variables_initializer())
global_step = training_util.get_global_step(graphs[0])
var_0 = graphs[0].get_tensor_by_name('v0:0')
var_1 = graphs[0].get_tensor_by_name('v1:0')
stale_counter = graphs[0].get_tensor_by_name('stale_counter:0')
# Verify the initialized value.
self.assertAllEqual(0.0, sessions[0].run(var_0))
self.assertAllEqual(1.0, sessions[0].run(var_1))
self.assertAllEqual(0.0, sessions[0].run(stale_counter))
self.assertAllEqual(0, sessions[0].run(global_step))
sessions[0].run(train_ops[0])
# Verify no updates because max staleness is negative.
self.assertAllEqual(0, sessions[0].run(global_step))
self.assertAllEqual(1.0, sessions[0].run(stale_counter))
self.assertAllEqual(0.0, sessions[0].run(var_0))
self.assertAllEqual(1.0, sessions[0].run(var_1))
def test2WorkersStaleness0(self):
num_workers = 2
sessions, graphs, train_ops = _get_workers(num_workers, 0)
with graphs[0].as_default():
sessions[0].run(variables.global_variables_initializer())
global_step = training_util.get_global_step(graphs[0])
var_0 = graphs[0].get_tensor_by_name('v0:0')
var_1 = graphs[0].get_tensor_by_name('v1:0')
stale_counter = graphs[0].get_tensor_by_name('stale_counter:0')
# Verify the initialized value.
self.assertAllEqual(0.0, sessions[0].run(var_0))
self.assertAllEqual(1.0, sessions[0].run(var_1))
self.assertAllEqual(0.0, sessions[0].run(stale_counter))
self.assertAllEqual(0, sessions[0].run(global_step))
thread_0 = self.checkedThread(
target=self._run, args=(train_ops[0], sessions[0]))
thread_1 = self.checkedThread(
target=self._run, args=(train_ops[1], sessions[1]))
thread_0.start()
thread_1.start()
thread_0.join()
thread_1.join()
# With 2 workers and max staleness set to 0, only chief worker will update
# var_0 and var_1.
self.assertAllEqual(1, sessions[0].run(global_step))
self.assertAllEqual(1.0, sessions[0].run(stale_counter))
self.assertAllEqual(0.0 + 1.0, sessions[0].run(var_0))
self.assertAllEqual(1.0 + 1.0, sessions[0].run(var_1))
def test2WorkersStaleness1(self):
num_workers = 2
sessions, graphs, train_ops = _get_workers(num_workers, 1)
with graphs[0].as_default():
sessions[0].run(variables.global_variables_initializer())
global_step = training_util.get_global_step(graphs[0])
var_0 = graphs[0].get_tensor_by_name('v0:0')
var_1 = graphs[0].get_tensor_by_name('v1:0')
stale_counter = graphs[0].get_tensor_by_name('stale_counter:0')
# Verify the initialized value.
self.assertAllEqual(0.0, sessions[0].run(var_0))
self.assertAllEqual(1.0, sessions[0].run(var_1))
self.assertAllEqual(0.0, sessions[0].run(stale_counter))
self.assertAllEqual(0, sessions[0].run(global_step))
thread_0 = self.checkedThread(
target=self._run, args=(train_ops[0], sessions[0]))
thread_1 = self.checkedThread(
target=self._run, args=(train_ops[1], sessions[1]))
thread_0.start()
thread_1.start()
thread_0.join()
thread_1.join()
# With 2 workers and max staleness set to 1, both workers will update
# var_0 and var_1.
self.assertAllEqual(2, sessions[0].run(global_step))
self.assertAllEqual(0.0, sessions[0].run(stale_counter))
self.assertAllEqual(0.0 + 2.0, sessions[0].run(var_0))
self.assertAllEqual(1.0 + 2.0, sessions[0].run(var_1))
def test3WorkersStaleness0(self):
num_workers = 3
sessions, graphs, train_ops = _get_workers(num_workers, 0)
with graphs[0].as_default():
sessions[0].run(variables.global_variables_initializer())
global_step = training_util.get_global_step(graphs[0])
var_0 = graphs[0].get_tensor_by_name('v0:0')
var_1 = graphs[0].get_tensor_by_name('v1:0')
stale_counter = graphs[0].get_tensor_by_name('stale_counter:0')
# Verify the initialized value.
self.assertAllEqual(0.0, sessions[0].run(var_0))
self.assertAllEqual(1.0, sessions[0].run(var_1))
self.assertAllEqual(0.0, sessions[0].run(stale_counter))
self.assertAllEqual(0, sessions[0].run(global_step))
thread_0 = self.checkedThread(
target=self._run, args=(train_ops[0], sessions[0]))
thread_1 = self.checkedThread(
target=self._run, args=(train_ops[1], sessions[1]))
thread_2 = self.checkedThread(
target=self._run, args=(train_ops[2], sessions[2]))
thread_0.start()
thread_1.start()
thread_2.start()
thread_0.join()
thread_1.join()
thread_2.join()
# With 3 workers and max staleness set to 0, only chief worker will update
# var_0 and var_1.
self.assertAllEqual(1, sessions[0].run(global_step))
self.assertAllEqual(2.0, sessions[0].run(stale_counter))
self.assertAllEqual(0.0 + 1.0, sessions[0].run(var_0))
self.assertAllEqual(1.0 + 1.0, sessions[0].run(var_1))
def test3WorkersStaleness1(self):
num_workers = 3
sessions, graphs, train_ops = _get_workers(num_workers, 1)
with graphs[0].as_default():
sessions[0].run(variables.global_variables_initializer())
global_step = training_util.get_global_step(graphs[0])
var_0 = graphs[0].get_tensor_by_name('v0:0')
var_1 = graphs[0].get_tensor_by_name('v1:0')
stale_counter = graphs[0].get_tensor_by_name('stale_counter:0')
# Verify the initialized value.
self.assertAllEqual(0.0, sessions[0].run(var_0))
self.assertAllEqual(1.0, sessions[0].run(var_1))
self.assertAllEqual(0.0, sessions[0].run(stale_counter))
self.assertAllEqual(0, sessions[0].run(global_step))
thread_0 = self.checkedThread(
target=self._run, args=(train_ops[0], sessions[0]))
thread_1 = self.checkedThread(
target=self._run, args=(train_ops[1], sessions[1]))
thread_2 = self.checkedThread(
target=self._run, args=(train_ops[2], sessions[2]))
thread_0.start()
thread_1.start()
thread_2.start()
thread_0.join()
thread_1.join()
thread_2.join()
# With 3 workers and max staleness set to 1, chief worker and only one of
# the two other workers will update var_0 and var_1.
self.assertAllEqual(2, sessions[0].run(global_step))
self.assertAllEqual(1.0, sessions[0].run(stale_counter))
self.assertAllEqual(0.0 + 2.0, sessions[0].run(var_0))
self.assertAllEqual(1.0 + 2.0, sessions[0].run(var_1))
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/drop_stale_gradient_optimizer_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper optimizer for checking and dropping stale gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_util
class DropStaleGradientOptimizer(optimizer.Optimizer):
"""Wrapper optimizer that checks and drops stale gradient.
This optimizer records the global step for each worker before computing
gradients and compares it with the global step at the time of applying the
gradients. If the difference is larger than a threshold, it will drop all
the computed gradients.
"""
def __init__(self,
opt,
staleness,
use_locking=False,
name="DropStaleGradient"):
"""Constructs a new DropStaleGradientOptimizer.
Args:
opt: The actual optimizer that will be used to compute and apply the
gradients. Must be one of the Optimizer classes.
staleness: The maximum staleness allowed for the optimizer.
use_locking: If `True` use locks for clip update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "DropStaleGradient".
"""
super(DropStaleGradientOptimizer, self).__init__(use_locking, name)
self._opt = opt
self._staleness = staleness
def compute_gradients(self, loss, *args, **kwargs):
    # Record the current global step for this worker; the `+ 0` creates a new
    # tensor so the step value is captured at gradient-computation time.
with ops.colocate_with(loss):
self._local_step = training_util.get_global_step() + 0
with ops.control_dependencies([self._local_step]):
loss = gen_array_ops.identity(loss)
return self._opt.compute_gradients(loss, *args, **kwargs)
def get_slot(self, *args, **kwargs):
return self._opt.get_slot(*args, **kwargs)
def get_slot_names(self, *args, **kwargs):
return self._opt.get_slot_names(*args, **kwargs)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
gradients = []
# Number of stale gradients.
with ops.colocate_with(global_step):
stale_counter = variable_scope.get_variable(
"stale_counter", [],
initializer=init_ops.zeros_initializer(),
trainable=False)
def _AcceptGradientOp():
with ops.control_dependencies(
[self._opt.apply_gradients(
grads_and_vars, global_step=global_step, name=name)]):
return gen_array_ops.identity(0.0)
def _DropGradientOp():
return gen_array_ops.identity(1.0)
for grad_and_var in grads_and_vars:
grad = grad_and_var[0]
if isinstance(grad, ops.Tensor):
gradients.append(grad)
elif grad is not None:
gradients.append(grad.op)
with ops.control_dependencies(gradients), ops.colocate_with(global_step):
staleness = gen_array_ops.reshape(
global_step - self._local_step, shape=())
conditional_update = stale_counter.assign_add(control_flow_ops.cond(
gen_math_ops.less_equal(staleness, self._staleness),
_AcceptGradientOp, _DropGradientOp))
summary.scalar(
"Gradient staleness percentage",
stale_counter / (math_ops.cast(global_step + 1, dtypes.float32)))
return conditional_update
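# Illustrative usage sketch (hedged; `loss` is a placeholder): wrap a base
# optimizer so that gradients whose recorded step lags the global step by
# more than `staleness` are dropped instead of applied. The global step must
# exist before `compute_gradients` is called.
#
#   global_step = tf.train.get_or_create_global_step()
#   base_opt = tf.train.GradientDescentOptimizer(learning_rate=0.1)
#   opt = DropStaleGradientOptimizer(base_opt, staleness=1)
#   train_op = opt.minimize(loss, global_step=global_step)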
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/drop_stale_gradient_optimizer.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LazyAdamGSOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.opt.python.training import lazy_adam_gs_optimizer
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def adam_update_numpy(param,
g_t,
t,
m,
v,
alpha=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8):
alpha_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
param_t = param - alpha_t * m_t / (np.sqrt(v_t) + epsilon)
return param_t, m_t, v_t
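# The reference update above mirrors Adam (Kingma & Ba, 2015):
#   m_t     = beta1 * m_{t-1} + (1 - beta1) * g_t
#   v_t     = beta2 * v_{t-1} + (1 - beta2) * g_t^2
#   alpha_t = alpha * sqrt(1 - beta2^t) / (1 - beta1^t)
#   param_t = param_{t-1} - alpha_t * m_t / (sqrt(v_t) + epsilon)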
class LazyAdamGSOptimizerTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters([False, True])
def testSparse(self, use_resource):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
global_step = resource_variable_ops.ResourceVariable(
array_ops.zeros([], dtypes.int64))
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
else:
global_step = variables.Variable(array_ops.zeros([], dtypes.int64))
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0_np_indices = np.array([0, 1], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np),
constant_op.constant(grads0_np_indices), constant_op.constant([2]))
grads1_np_indices = np.array([0, 1], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np),
constant_op.constant(grads1_np_indices), constant_op.constant([2]))
opt = lazy_adam_gs_optimizer.LazyAdamGSOptimizer(
global_step=global_step)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
global_step=global_step)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
@parameterized.parameters([False, True])
def testSparseDevicePlacement(self, use_resource):
for index_dtype in [dtypes.int32, dtypes.int64]:
with self.cached_session(force_gpu=test.is_gpu_available()):
# If a GPU is available, tests that all optimizer ops can be placed on
# it (i.e. they have GPU kernels).
if use_resource:
global_step = resource_variable_ops.ResourceVariable(
array_ops.zeros([], dtypes.int64))
var = resource_variable_ops.ResourceVariable([[1.0], [2.0]])
else:
global_step = variables.Variable(array_ops.zeros([], dtypes.int64))
var = variables.Variable([[1.0], [2.0]])
indices = constant_op.constant([0, 1], dtype=index_dtype)
gathered_sum = math_ops.reduce_sum(array_ops.gather(var, indices))
optimizer = lazy_adam_gs_optimizer.LazyAdamGSOptimizer(
global_step=global_step, learning_rate=3.0)
minimize_op = optimizer.minimize(gathered_sum, global_step=global_step)
variables.global_variables_initializer().run()
minimize_op.run()
@parameterized.parameters([False, True])
def testSparseRepeatedIndices(self, use_resource):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
if use_resource:
repeated_index_global_step = resource_variable_ops.ResourceVariable(
array_ops.zeros([], dtypes.int64))
aggregated_global_step = resource_variable_ops.ResourceVariable(
array_ops.zeros([], dtypes.int64))
repeated_index_update_var = resource_variable_ops.ResourceVariable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = resource_variable_ops.ResourceVariable(
[[1.0], [2.0]], dtype=dtype)
else:
repeated_index_global_step = variables.Variable(
array_ops.zeros([], dtypes.int64))
aggregated_global_step = variables.Variable(
array_ops.zeros([], dtypes.int64))
repeated_index_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update_opt = lazy_adam_gs_optimizer.LazyAdamGSOptimizer(
global_step=repeated_index_global_step)
repeated_update = repeated_update_opt.apply_gradients(
[(grad_repeated_index, repeated_index_update_var)],
global_step=repeated_index_global_step)
aggregated_update_opt = lazy_adam_gs_optimizer.LazyAdamGSOptimizer(
global_step=aggregated_global_step)
aggregated_update = aggregated_update_opt.apply_gradients(
[(grad_aggregated, aggregated_update_var)],
global_step=aggregated_global_step)
variables.global_variables_initializer().run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
def doTestBasic(self, use_resource=False, use_callable_params=False):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.session(graph=ops.Graph()):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
global_step = resource_variable_ops.ResourceVariable(
array_ops.zeros([], dtypes.int64), name="global_step_%d" % i)
var0 = resource_variable_ops.ResourceVariable(
var0_np, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
var1_np, name="var1_%d" % i)
else:
global_step = variables.Variable(array_ops.zeros([], dtypes.int64))
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = lambda: 0.001
beta1 = lambda: 0.9
beta2 = lambda: 0.999
epsilon = lambda: 1e-8
if not use_callable_params:
learning_rate = learning_rate()
beta1 = beta1()
beta2 = beta2()
epsilon = epsilon()
opt = lazy_adam_gs_optimizer.LazyAdamGSOptimizer(
global_step=global_step, learning_rate=learning_rate)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
global_step=global_step)
opt_variables = opt.variables()
beta1_power, beta2_power = opt._get_beta_accumulators()
self.assertIsNotNone(beta1_power)
        self.assertIsNotNone(beta2_power)
self.assertNotIn(beta1_power, opt_variables)
self.assertNotIn(beta2_power, opt_variables)
if not context.executing_eagerly():
with ops.Graph().as_default():
# Shouldn't return non-slot variables from other graphs.
self.assertEqual(0, len(opt.variables()))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of Adam
for t in range(1, 4):
if not context.executing_eagerly():
self.evaluate(update)
self.assertAllCloseAccordingToType(
0.9**(t + 1), self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(
0.999**(t + 1), self.evaluate(beta2_power))
else:
if t > 1:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
global_step=global_step)
beta1_power, beta2_power = opt._get_beta_accumulators()
self.assertAllCloseAccordingToType(
0.9**t, self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(
0.999**t, self.evaluate(beta2_power))
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
if use_resource:
self.assertEqual("var0_%d/Adam:0" % (i,),
opt.get_slot(var=var0, name="m").name)
def testBasic(self):
with self.cached_session():
self.doTestBasic(use_resource=False)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testResourceBasic(self):
self.doTestBasic(use_resource=True)
def testBasicCallableParams(self):
with context.eager_mode():
self.doTestBasic(use_resource=True, use_callable_params=True)
def testTensorLearningRate(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
global_step = variables.Variable(array_ops.zeros([], dtypes.int64))
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = lazy_adam_gs_optimizer.LazyAdamGSOptimizer(
global_step=global_step, learning_rate=constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
global_step=global_step)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
global_step = variables.Variable(array_ops.zeros([], dtypes.int64))
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = lazy_adam_gs_optimizer.LazyAdamGSOptimizer(
global_step=global_step)
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
global_step=global_step)
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
global_step=global_step)
variables.global_variables_initializer().run()
beta1_power, beta2_power = opt._get_beta_accumulators()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 3 steps of intertwined Adam1 and Adam2.
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
if t % 2 == 0:
update1.run()
else:
update2.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testTwoSessions(self):
optimizer = lazy_adam_gs_optimizer.LazyAdamGSOptimizer()
with context.eager_mode():
var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = constant_op.constant(np.array([0.1, 0.1]))
optimizer.apply_gradients([(grads0, var0)])
g = ops.Graph()
with g.as_default():
with self.session(graph=g):
var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = constant_op.constant(np.array([0.1, 0.1]))
optimizer.apply_gradients([(grads0, var0)])
gg = ops.Graph()
with gg.as_default():
with self.session(graph=gg):
var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = constant_op.constant(np.array([0.1, 0.1]))
# If the optimizer saves any state not keyed by graph the following line
# fails.
optimizer.apply_gradients([(grads0, var0)])
def testSlotsUniqueEager(self):
with context.eager_mode():
v1 = resource_variable_ops.ResourceVariable(1.)
v2 = resource_variable_ops.ResourceVariable(1.)
opt = lazy_adam_gs_optimizer.LazyAdamGSOptimizer(1.)
opt.minimize(lambda: v1 + v2)
# There should be two non-slot variables, and two unique slot variables
# for v1 and v2 respectively.
self.assertLen(set(opt.variables()), 4)
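# The numpy reference `adam_update_numpy` used throughout these tests is
# defined earlier in this file. As an illustrative aid only, the hypothetical
# sketch below restates the standard bias-corrected Adam step the assertions
# compare against, assuming the default hyperparameters (lr=0.001, beta1=0.9,
# beta2=0.999, epsilon=1e-8); it is not used by the tests.
def _adam_step_sketch(param, g_t, t, m, v,
                      lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
  """One bias-corrected Adam step on numpy arrays (illustrative only)."""
  lr_t = lr * np.sqrt(1 - beta2**t) / (1 - beta1**t)  # bias correction
  m = beta1 * m + (1 - beta1) * g_t                   # first-moment estimate
  v = beta2 * v + (1 - beta2) * g_t * g_t             # second-moment estimate
  return param - lr_t * m / (np.sqrt(v) + epsilon), m, v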
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/lazy_adam_gs_optimizer_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper optimizer for Elastic Average SGD """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import optimizer
from tensorflow.python.training import saver
from tensorflow.python.training import session_run_hook
from tensorflow.python.training.saving import saveable_object_util
LOCAL_VARIABLE_NAME = 'local_center_variable'
GLOBAL_VARIABLE_NAME = 'global_center_variable'
GLOBAL_STEP = 'global_step'
class ElasticAverageCustomGetter(object):
"""Custom_getter class is used to do:
1. Change trainable variables to local collection and place them at worker
device
2. Generate global variables(global center variables)
3. Generate local variables(local center variables) which record the global
variables and place them at worker device
Notice that the class should be used with tf.replica_device_setter,
so that the global center variables and global step variable can be placed
at ps device. Besides, use 'tf.compat.v1.get_variable' instead of
'tf.Variable' to
use this custom getter.
For example,
ea_custom_getter = ElasticAverageCustomGetter(worker_device)
with tf.device(
tf.compat.v1.train.replica_device_setter(
worker_device=worker_device,
ps_device="/job:ps",
cluster=cluster)),
tf.compat.v1.variable_scope('',custom_getter=ea_custom_getter):
...
create your model here
...
with tf.device(worker_device):
opt = tf.compat.v1.train.MomentumOptimizer(...)
optimizer = ElasticAverageOptimizer(
opt,
num_worker=2,
moving_rate=0.01, # or use default value
communication_period=20,
ea_custom_getter=ea_custom_getter)
...
train_op = optimizer.apply_gradients(
grads_vars,
global_step=global_step)
...
hooks = [optimizer.make_session_run_hook(is_chief, task_index)]
...
with tf.compat.v1.train.MonitoredTrainingSession(master=server.target,
is_chief=is_chief,
checkpoint_dir=("...),
save_checkpoint_secs=600,
hooks=hooks) as mon_sess:
"""
def __init__(self, worker_device):
"""Create a new `ElasticAverageCustomGetter`.
Args:
worker_device: String. Name of the `worker` job.
"""
self._worker_device = worker_device
self._local_map = {}
self._global_map = {}
def __call__(self, getter, name, trainable, collections, *args, **kwargs):
if trainable:
with ops.device(self._worker_device):
local_var = getter(
name,
trainable=True,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
*args,
**kwargs)
      if kwargs['reuse'] is True:
return local_var
global_center_variable = getter(
name='%s/%s' % (GLOBAL_VARIABLE_NAME, name),
trainable=False,
collections=[ops.GraphKeys.GLOBAL_VARIABLES],
*args,
**kwargs)
with ops.device(self._worker_device):
local_center_variable = getter(
name='%s/%s' % (LOCAL_VARIABLE_NAME, name),
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
*args,
**kwargs)
if kwargs['partitioner'] is None:
self._local_map[local_var] = local_center_variable
self._global_map[local_var] = global_center_variable
else:
v_list = list(local_var)
for i in range(len(v_list)):
self._local_map[v_list[i]] \
= list(local_center_variable)[i]
self._global_map[v_list[i]] \
= list(global_center_variable)[i]
return local_var
else:
kwargs['trainable'] = trainable
kwargs['collections'] = collections
if ops.GraphKeys.LOCAL_VARIABLES in collections:
with ops.device(self._worker_device):
return getter(name, *args, **kwargs)
else:
return getter(name, *args, **kwargs)
class ElasticAverageOptimizer(optimizer.Optimizer):
"""Wrapper optimizer that implements the Elastic Average SGD algorithm.
  This is an async optimizer. During training, each worker updates its local
  variables and maintains its own local_step, which starts from 0 and is
  incremented by 1 after each update of the local variables. Whenever the
  communication period divides the local step, the worker requests the current
  global center variables, computes the elastic difference between the global
  center variables and its local variables, and then uses that difference to
  update both the local variables and the global variables.
"""
# Default value as paper described
BETA = 0.9
def __init__(self,
opt,
num_worker,
ea_custom_getter,
communication_period=10,
moving_rate=None,
rho=None,
use_locking=True,
synchronous=False,
name='ElasticAverageOptimizer'):
"""Construct a new gradient descent optimizer.
Args:
opt: The actual optimizer that will be used to update local variables.
Must be one of the Optimizer classes.
      num_worker: The number of workers.
      ea_custom_getter: The ElasticAverageCustomGetter.
      communication_period: An integer value that controls how frequently each
        worker communicates with the ps.
      moving_rate: A floating point value that controls the elastic difference.
      rho: The amount of exploration allowed in the model. Defaults to
        moving_rate/learning_rate; rho=0.0 is suggested in async mode.
      use_locking: If True, use locks for update operations.
      synchronous: Whether to add sync queues and a barrier.
        True: all workers wait for each other before starting training.
        False: a worker can start training as soon as its initialization is
          done, without waiting for every other worker to be ready; if a
          worker is restarted, it can rejoin and continue training without
          being blocked.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "ElasticAverageOptimizer".
"""
super(ElasticAverageOptimizer, self).__init__(use_locking, name)
self._opt = opt
self._num_worker = num_worker
self._period = communication_period
self._local_map = ea_custom_getter._local_map
self._global_map = ea_custom_getter._global_map
self._synchronous = synchronous
if moving_rate is None:
self._moving_rate = self.BETA / communication_period / num_worker
else:
self._moving_rate = moving_rate
if rho is None:
self._rho = self._moving_rate / self._opt._learning_rate
else:
self._rho = rho
self._local_step = variable_scope.get_variable(
initializer=0,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name='local_step')
self._opt._prepare()
def compute_gradients(self,
loss,
var_list=None,
gate_gradients=optimizer.Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
grad_loss=None):
"""Compute gradients of `loss` for the variables in `var_list`.
Add rho*elastic_difference to loss to control the exploration
This is the first part of `minimize()`. It returns a list
of (gradient, variable) pairs where "gradient" is the gradient
for "variable". Note that "gradient" can be a `Tensor`, an
`IndexedSlices`, or `None` if there is no gradient for the
given variable.
Args:
loss: A Tensor containing the value to minimize.
var_list: Optional list or tuple of `tf.Variable` to update to minimize
`loss`. Defaults to the list of variables collected in the graph under
the key `GraphKey.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with the
corresponding op.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
Returns:
A list of (gradient, variable) pairs. Variable is always present, but
gradient can be `None`.
Raises:
TypeError: If `var_list` contains anything else than `Variable` objects.
ValueError: If some arguments are invalid.
"""
if not var_list:
var_list = variables.trainable_variables()
elastic_difference = [
math_ops.subtract(v, lv)
for v, lv in zip(variables.trainable_variables(),
[self._local_map[var] for var in var_list])
]
distance_loss = self._rho * math_ops.add_n(
[gen_nn_ops.l2_loss(ed) for ed in elastic_difference])
total_loss = loss + distance_loss
return self._opt.compute_gradients(total_loss, var_list, gate_gradients,
aggregation_method,
colocate_gradients_with_ops, grad_loss)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients to global variables.
This is the second part of `minimize()`. It returns an `Operation` that
applies gradients.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
`compute_gradients()`.
global_step: Optional `Variable` to increment by one after the variables
have been updated.
name: Optional name for the returned operation. Default to the name
passed to the `Optimizer` constructor.
Returns:
An `Operation` that applies the specified gradients. If `global_step`
was not None, that operation also increments `global_step`.
Raises:
TypeError: If `grads_and_vars` is malformed.
ValueError: If none of the variables have gradients.
"""
global_old = set(n.op.name for n in variables.global_variables())
apply_updates = self._opt.apply_gradients(grads_and_vars)
global_new = set(n.op.name for n in variables.global_variables())
with ops.control_dependencies([apply_updates]):
local_update = state_ops.assign_add(
self._local_step, 1, name='local_step_update').op
    # Place variables created by the inner optimizer into the local collection,
    # e.g., AdamOptimizer creates its beta accumulators as global variables.
def _adjust_optimizer_variable_collection(opt_vars):
g = ops.get_default_graph()
idx = 0
for _ in range(len(g._collections[ops.GraphKeys.GLOBAL_VARIABLES])):
var = g.get_collection_ref(ops.GraphKeys.GLOBAL_VARIABLES)[idx]
name = var.op.name
if name in opt_vars:
ops.add_to_collection(ops.GraphKeys.LOCAL_VARIABLES, var)
del g.get_collection_ref(ops.GraphKeys.GLOBAL_VARIABLES)[idx]
else:
idx += 1
_adjust_optimizer_variable_collection(global_new - global_old)
# update global variables.
def _Update_global_variables():
local_vars = [v for g, v in grads_and_vars if g is not None]
global_center_vars = [self._global_map[var] for var in local_vars]
local_center_vars = [self._local_map[var] for var in local_vars]
local_center_vars_update = []
for lvar, var in zip(local_center_vars, global_center_vars):
local_center_vars_update.append(lvar.assign(var))
update_ops = []
differences = []
with ops.control_dependencies(local_center_vars_update):
for v, lv in zip(local_vars, local_center_vars):
with ops.device(v.device):
differences.append(math_ops.subtract(v, lv))
for lvar, diff in zip(local_vars, differences):
with ops.device(lvar.device):
update_ops.append(
state_ops.assign_sub(lvar,
math_ops.multiply(self._moving_rate,
diff)))
for var, diff in zip(global_center_vars, differences):
with ops.device(var.device):
update_ops.append(
state_ops.assign_add(var,
math_ops.multiply(self._moving_rate,
diff)))
if global_step:
with ops.colocate_with(global_step):
update_ops.append(state_ops.assign_add(global_step, 1))
variable_update = control_flow_ops.group(*(update_ops))
return variable_update
with ops.control_dependencies([local_update]):
condition = math_ops.equal(
math_ops.mod(self._local_step, self._period), 0)
conditional_update = control_flow_ops.cond(condition,
_Update_global_variables,
control_flow_ops.no_op)
return conditional_update
def get_init_op(self, task_index):
"""Returns the op to let all the local variables and local center
variables equal to the global center variables before the training begins
"""
def _Add_sync_queues_and_barrier(enqueue_after_list):
"""Adds ops to enqueue on all worker queues"""
sync_queues = [
data_flow_ops.FIFOQueue(
self._num_worker, [dtypes.bool],
shapes=[[]],
shared_name='%s%s' % ('variable_init_sync_queue', i))
for i in range(self._num_worker)
]
queue_ops = []
# For each other worker, add an entry in a queue
token = constant_op.constant(False)
with ops.control_dependencies(enqueue_after_list):
for i, q in enumerate(sync_queues):
if i == task_index:
queue_ops.append(control_flow_ops.no_op())
else:
queue_ops.append(q.enqueue(token))
queue_ops.append(
sync_queues[task_index].dequeue_many(len(sync_queues) - 1))
return control_flow_ops.group(*queue_ops)
init_ops = []
local_vars = variables.trainable_variables()
global_center_vars = [self._global_map[var] for var in local_vars]
local_center_vars = [self._local_map[var] for var in local_vars]
if not (local_vars and global_center_vars and local_center_vars):
      raise ValueError('The lists of local_variables, global_center_variables '
                       'and local_center_variables should not be empty.')
for lvar, gc_var, lc_var in zip(local_vars, global_center_vars,
local_center_vars):
init_ops.append(state_ops.assign(lvar, gc_var))
init_ops.append(state_ops.assign(lc_var, gc_var))
init_op = control_flow_ops.group(*(init_ops))
    if not self._synchronous:
return init_op
sync_queue_op = _Add_sync_queues_and_barrier([init_op])
return sync_queue_op
def make_session_run_hook(self, is_chief, task_index):
"""Creates a hook to handle ElasticAverageOptimizerHook ops such as initialization."""
return _ElasticAverageOptimizerHook(self, is_chief, task_index)
def swapping_saver(self, var_list=None, name='swapping_saver', **kwargs):
"""Create a saver copy global_center_variable to trainable variables
Please call this function after all your variables created with
ElasticAverageCustomGetter. For evaluations or inference, use this saver
during training. It will save the global_center_variable of the trained
parameters under the original parameter names.
Args:
var_list: List of variables to save, as per `Saver()`. If set to None,
save all the trainable_variables that have been created before this
call.
name: The name of the saver.
**kwargs: Keyword arguments of `Saver()`.
Returns:
A `tf.compat.v1.train.Saver` object.
Raises:
      RuntimeError: global_center_variable is empty; please make sure
        this is called after the model is created and
        ElasticAverageCustomGetter is used when declaring your model.
    """
    if not self._global_map:
      raise RuntimeError('global_center_variable is empty, please make sure '
                         'this is called after the model is created and '
                         'ElasticAverageCustomGetter is used when declaring '
                         'your model')
if var_list is None:
var_list = variables.trainable_variables()
if not isinstance(var_list, dict):
var_list = saveable_object_util.op_list_to_dict(var_list)
swapped_var_list = {}
for key, var in var_list.items():
tensor = var
if not isinstance(var, list):
for tvar in variables.trainable_variables():
if tvar.op.name == var.op.name:
tensor = self._global_map.get(tvar, var)
break
      else:  # partitioned variable
tensor = [self._global_map.get(lvar, lvar) for lvar in var]
swapped_var_list[key] = tensor
return saver.Saver(swapped_var_list, name=name, **kwargs)
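# Illustrative-only sketch of the elastic update performed in
# `_Update_global_variables` above: every `communication_period` local steps,
# the worker variable and the global center variable are pulled toward each
# other by `moving_rate` times their elastic difference. The helper name is
# hypothetical and is not used by this module.
def _elastic_round_sketch(local, center, moving_rate):
  """One EASGD communication round on plain floats (illustrative only)."""
  diff = local - center
  return local - moving_rate * diff, center + moving_rate * diff
# E.g. _elastic_round_sketch(1.0, 0.0, 0.1) returns (0.9, 0.1): the worker
# moves toward the center and the center toward the worker.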
class _ElasticAverageOptimizerHook(session_run_hook.SessionRunHook):
def __init__(self, ea_optimizer, is_chief, task_index):
"""Creates hook to handle ElasticAverageOptimizer initialization ops.
Args:
ea_optimizer: `ElasticAverageOptimizer` which this hook will initialize.
      is_chief: `Bool`, whether this is a chief replica or not.
      task_index: `Int`, the task index of this worker.
    """
self._ea_optimizer = ea_optimizer
self._is_chief = is_chief
self._task_index = task_index
def begin(self):
self._local_init_op = variables.local_variables_initializer()
self._global_init_op = None
if self._is_chief:
self._global_init_op = variables.global_variables_initializer()
self._variable_init_op = self._ea_optimizer.get_init_op(self._task_index)
def after_create_session(self, session, coord):
"""Run initialization ops"""
session.run(self._variable_init_op)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/elastic_average_optimizer.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the sign decay functions used in PowerSign and AddSign.
See [Bello et al., ICML 2017] Neural Optimizer Search with Reinforcement
Learning for details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
def get_linear_decay_fn(decay_steps):
"""Returns a function that computes a linear decay.
This decay computes linear annealing:
max(0, (decay_steps - global_step) / decay_steps)
Example usage:
```
decay_steps = 1000
linear_decay_fn = get_linear_decay_fn(decay_steps)
decayed = linear_decay_fn(global_step)
x *= decayed
```
Args:
decay_steps: number of steps to decay over.
Returns:
linear_decay_fn: a function that computes the linear decay.
"""
# pylint:disable=missing-docstring
def linear_decay_fn(global_step):
if global_step is None:
raise ValueError("global_step is required for linear_decay.")
global_step = math_ops.minimum(global_step, decay_steps)
remaining_steps = math_ops.cast(
decay_steps, dtypes.int32) - math_ops.cast(global_step, dtypes.int32)
decayed = (math_ops.cast(remaining_steps, dtypes.float32) /
math_ops.cast(decay_steps, dtypes.float32))
return math_ops.maximum(0.0, decayed)
# pylint:enable=missing-docstring
return linear_decay_fn
def get_cosine_decay_fn(decay_steps, num_periods=0.5, zero_after=None):
"""Returns a function that computes a cosine decay.
This decay computes cosine annealing:
0.5 * (1.0 + cos(2.0 * pi * num_periods * global_step / decay_steps))
This decay can be used to decay the sign quantity in the AddSign and PowerSign
optimizers discovered in
[Bello et al., ICML 2017] Neural Optimizer Search with RL.
Example usage:
```
decay_steps = 1000
num_periods = 2
cosine_decay_fn = get_cosine_decay_fn(decay_steps, num_periods=num_periods)
decayed = cosine_decay_fn(global_step)
x *= decayed
```
Args:
decay_steps: number of steps to decay over.
num_periods: number of periods for cosine signal. 0.5 by default,
which maps the last decay step to 0.
zero_after: if not None, number after which the decay function
will just return 0.
Returns:
cosine_decay_fn: a function that computes the cosine decay.
"""
# pylint:disable=missing-docstring
def cosine_decay_fn(global_step):
if global_step is None:
raise ValueError("global_step is required for cosine_decay.")
global_step = math_ops.minimum(global_step, decay_steps)
completed_fraction = (math_ops.cast(global_step, dtypes.float32) /
math_ops.cast(decay_steps, dtypes.float32))
fraction = 2.0 * num_periods * completed_fraction
decayed = 0.5 * (
1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
if zero_after is not None:
decayed = array_ops.where(
math_ops.greater_equal(fraction, 2 * zero_after), 0.0, decayed)
return decayed
# pylint:enable=missing-docstring
return cosine_decay_fn
def get_restart_decay_fn(decay_steps, num_periods=1, zero_after=None):
"""Returns a function that computes a restart decay.
This decay computes
  0.5 * (1.0 + cos(pi * ((num_periods * global_step) % decay_steps)
                   / decay_steps))
This is a simplified version of the restart decay introduced in
"SGDR: Stochastic Gradient Descent with Warm Restarts"
by Ilya Loshchilov & Frank Hutter, Proceedings of
ICLR'2017, available at https://arxiv.org/pdf/1608.03983.pdf
This decay can be used to decay the sign quantity in the AddSign and PowerSign
optimizers discovered in
[Bello et al., ICML 2017] Neural Optimizer Search with RL.
Example usage:
```
decay_steps = 1000
num_periods = 2.0
restart_decay_fn = get_restart_decay_fn(decay_steps,
num_periods=num_periods)
decayed = restart_decay_fn(global_step)
x *= decayed
```
Args:
decay_steps: number of steps to decay over.
num_periods: number of periods for cosine signal. 1 by default,
which maps the last decay step to 0.
zero_after: if not None, number after which the decay function
will return 0.
Returns:
restart_decay_fn: a function that computes the restart decay.
"""
# pylint:disable=missing-docstring
def restart_decay_fn(global_step):
if global_step is None:
raise ValueError("global_step is required for cosine_decay.")
global_step = math_ops.minimum(global_step, decay_steps)
num = math_ops.mod(num_periods * math_ops.cast(global_step, dtypes.float32),
decay_steps)
fraction = num / math_ops.cast(decay_steps, dtypes.float32)
decayed = 0.5 * (
1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
if zero_after is not None:
tmp = (math_ops.cast(num_periods * global_step, dtypes.float32) /
math_ops.cast(decay_steps, dtypes.float32))
decayed = array_ops.where(
math_ops.greater_equal(tmp, zero_after), 0.0, decayed)
return decayed
# pylint:enable=missing-docstring
return restart_decay_fn
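# Illustrative-only pure-python spot check of the three schedules above with
# their default `num_periods`, evaluated at the midpoint of a 1000-step
# schedule; the helper name is hypothetical and unused by this module.
def _decay_midpoint_sketch(step=500, decay_steps=1000):
  """Evaluates the three decay formulas in pure python (illustrative only)."""
  linear = max(0.0, float(decay_steps - step) / decay_steps)        # -> 0.5
  cosine = 0.5 * (1.0 + math.cos(2.0 * math.pi * 0.5 * step
                                 / decay_steps))                    # -> 0.5
  restart = 0.5 * (1.0 + math.cos(
      math.pi * ((1 * step) % decay_steps) / float(decay_steps)))   # -> 0.5
  return linear, cosine, restart  # each equals 0.5 at the midpoint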
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/sign_decay.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An optimizer wrapper for stateful optimizers with multitask loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import types
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
__all__ = ['MultitaskOptimizerWrapper', 'clip_gradients_by_global_norm']
def _is_all_zeros(grad):
all_zeros = math_ops.equal(math_ops.count_nonzero(grad), 0)
return all_zeros
def _get_wrapper(fn, opt):
def wrapper(self, grad, *args, **kwargs): # pylint: disable=unused-argument
all_zeros = _is_all_zeros(grad)
def call_fn():
with ops.control_dependencies([fn(grad, *args, **kwargs)]):
return control_flow_ops.no_op()
return control_flow_ops.cond(all_zeros, control_flow_ops.no_op, call_fn)
wrapper = types.MethodType(wrapper, opt)
return wrapper
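# Illustrative-only sketch of the guard `_get_wrapper` installs above: the
# wrapped apply method becomes a no-op whenever the incoming gradient is all
# zeros, so optimizer slots (e.g. a momentum accumulator) are left untouched.
# The helper below is hypothetical and mirrors that decision in pure python
# for a single momentum slot.
def _guarded_momentum_step_sketch(params, slot, grads, lr=0.1, momentum=0.9):
  if all(g == 0.0 for g in grads):  # same test as _is_all_zeros: skip update
    return params, slot
  slot = [momentum * s + g for s, g in zip(slot, grads)]
  return [p - lr * s for p, s in zip(params, slot)], slot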
class MultitaskOptimizerWrapper(object):
"""Optimizer wrapper making all-zero gradients harmless.
  This might be useful when a multi-task loss is used
  and some components of the loss might not be
  present (e.g. masked out) in some training batches.
  Technically their gradient would be zero,
  which would normally affect the optimizer state
  (e.g. push the running average to zero).
  However, this is not the desired behaviour,
  since the missing loss component
  should be treated as unknown rather than zero.
This wrapper filters out all-zero gradient tensors,
therefore preserving the optimizer state.
If gradient clipping by global norm is used,
the provided function clip_gradients_by_global_norm
should be used (and specified explicitly by the user).
Otherwise the global norm would be underestimated
because of all-zero tensors that should be ignored.
The gradient calculation and application
are delegated to an underlying optimizer.
The gradient application is altered only for all-zero tensors.
Example:
```python
momentum_optimizer = tf.compat.v1.train.MomentumOptimizer(
learning_rate, momentum=0.9)
multitask_momentum_optimizer = tf.contrib.opt.MultitaskOptimizerWrapper(
momentum_optimizer)
gradvars = multitask_momentum_optimizer.compute_gradients(
loss)
gradvars_clipped, _ = tf.contrib.opt.clip_gradients_by_global_norm(
gradvars, 15.0)
train_op = multitask_momentum_optimizer.apply_gradients(
gradvars_clipped, global_step=batch)
```
"""
def __init__(self, opt):
"""Constructor.
Args:
opt: an instance of a class that implements tf.train.Optimizer.
"""
if not isinstance(opt, optimizer.Optimizer):
raise TypeError(
'Supplied optimizer must be an instance of tf.train.Optimizer')
self._opt = opt
overridden_methods = ('_apply_dense', '_resource_apply_dense',
'_apply_sparse', '_resource_apply_sparse')
for name in overridden_methods:
fn = getattr(self._opt, name)
wrapper = _get_wrapper(fn, self._opt)
setattr(self._opt, name, wrapper)
def __getattr__(self, name):
return getattr(self._opt, name)
def clip_gradients_by_global_norm(gradients_variables, clip_norm=20.):
"""Clips gradients of a multitask loss by their global norm.
Ignores all-zero tensors when computing the global norm.
Args:
gradients_variables: a list of pairs (gradient, variable).
clip_norm: a float Tensor, the global norm to clip on. Default is 20.0.
Returns:
    list: A list of pairs of the same type as gradients_variables.
fixed_global_norm: A 0-D (scalar) Tensor representing the global norm.
"""
gradients, variables = six.moves.zip(*gradients_variables)
def _replace_nonexisting_grad(grad):
if grad is None:
return grad
all_zeros = _is_all_zeros(grad)
return control_flow_ops.cond(
all_zeros,
lambda: array_ops.zeros([], dtype=dtypes.as_dtype(grad.dtype)),
lambda: grad)
nonzero_gradients = [_replace_nonexisting_grad(g) for g in gradients]
fixed_global_norm = clip_ops.global_norm(nonzero_gradients)
gradients, _ = clip_ops.clip_by_global_norm(
gradients, clip_norm, use_norm=fixed_global_norm)
return list(six.moves.zip(gradients, variables)), fixed_global_norm
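# Illustrative-only numeric note on the clipping above: the global norm of
# gradients [3.0, 4.0] is sqrt(3**2 + 4**2) = 5.0, so clipping with
# clip_norm=2.5 rescales every gradient by 2.5 / 5.0 = 0.5. The helper is
# hypothetical and unused by this module.
def _global_norm_clip_sketch(grads, clip_norm):
  norm = sum(g * g for g in grads) ** 0.5
  scale = clip_norm / max(norm, clip_norm)  # no-op when norm <= clip_norm
  return [g * scale for g in grads], norm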
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/multitask_optimizer_wrapper.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sign_decay."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.opt.python.training import sign_decay
from tensorflow.python.platform import test
def py_linear_decay_fn(decay_steps):
def linear_decay(step):
step = min(step, decay_steps)
return float(decay_steps - step) / decay_steps
return linear_decay
def py_cosine_decay_fn(decay_steps, num_periods=0.5, zero_after=None):
def cosine_decay(step):
step = min(step, decay_steps)
fraction = 2.0 * num_periods * step / float(decay_steps)
if zero_after is not None and fraction >= 2 * zero_after:
return 0.0
return 0.5 * (1.0 + math.cos(math.pi * fraction))
return cosine_decay
def py_restart_decay_fn(decay_steps, num_periods=1, zero_after=None):
def restart_decay(step):
step = min(step, decay_steps)
tmp = num_periods * step / float(decay_steps)
fraction = (
num_periods * step % decay_steps) / float(decay_steps)
if zero_after is not None and tmp >= zero_after:
return 0
return 0.5 * (1.0 + math.cos(math.pi * fraction))
return restart_decay
class SignDecaysTest(test.TestCase):
def testLinearDecay(self):
num_training_steps = 1000
linear_decay_fn = sign_decay.get_linear_decay_fn(num_training_steps)
for step in range(0, 1000, 100):
with self.cached_session():
tf_decayed = linear_decay_fn(step).eval()
py_decayed = py_linear_decay_fn(num_training_steps)(step)
self.assertAlmostEqual(tf_decayed, py_decayed, places=4)
def testCosineDecay(self):
num_training_steps = 1000
cosine_decay_fn = sign_decay.get_cosine_decay_fn(num_training_steps)
cosine_decay_2_fn = sign_decay.get_cosine_decay_fn(
num_training_steps, num_periods=5, zero_after=2)
for step in range(0, 1000, 100):
with self.cached_session():
tf_decayed = cosine_decay_fn(step).eval()
py_decayed = py_cosine_decay_fn(num_training_steps)(step)
self.assertAlmostEqual(tf_decayed, py_decayed, places=4)
tf_decayed = cosine_decay_2_fn(step).eval()
py_decayed = py_cosine_decay_fn(
num_training_steps, num_periods=5, zero_after=2)(step)
self.assertAlmostEqual(tf_decayed, py_decayed, places=4)
def testRestartDecay(self):
num_training_steps = 1000
restart_decay_fn = sign_decay.get_restart_decay_fn(num_training_steps)
restart_decay_2_fn = sign_decay.get_restart_decay_fn(
num_training_steps, num_periods=5, zero_after=2)
for step in range(0, 1000, 100):
with self.cached_session():
tf_decayed = restart_decay_fn(step).eval()
py_decayed = py_restart_decay_fn(num_training_steps)(step)
self.assertAlmostEqual(tf_decayed, py_decayed, places=4)
tf_decayed = restart_decay_2_fn(step).eval()
py_decayed = py_restart_decay_fn(
num_training_steps, num_periods=5, zero_after=2)(step)
self.assertAlmostEqual(tf_decayed, py_decayed, places=4)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/sign_decay_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for PowerSign."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.contrib.opt.python.training import powersign
from tensorflow.contrib.opt.python.training import sign_decay
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def py_linear_decay_fn(decay_steps):
def linear_decay(step):
step = min(step, decay_steps)
return float(decay_steps - step) / decay_steps
return linear_decay
def powersign_update_numpy(params,
g_t,
m,
lr,
base=math.e,
beta=0.9,
py_sign_decay_fn=None,
t=None):
m_t = beta * m + (1 - beta) * g_t
if py_sign_decay_fn is None:
sign_decayed = 1.0
else:
sign_decayed = py_sign_decay_fn(t-1)
multiplier = base ** (sign_decayed * np.sign(g_t) * np.sign(m_t))
params_t = params - lr * multiplier * g_t
return params_t, m_t
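# Illustrative-only use of the reference update above: with m = 0 and
# g_t = 0.1 everywhere, the moment becomes m_t = 0.01, sign(g_t) * sign(m_t)
# is +1 and no sign decay applies, so the multiplier is base**1 = e and the
# step is lr * e * g_t. The `_sketch_*` names are hypothetical.
_sketch_params, _sketch_m = powersign_update_numpy(
    np.array([1.0, 2.0]), np.array([0.1, 0.1]), 0.0, 0.1)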
class PowerSignTest(test.TestCase):
def _testDense(self,
use_resource=False,
learning_rate=0.1,
sign_decay_fn=None,
py_sign_decay_fn=None,
base=math.e,
beta=0.9):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, m1 = 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
global_step = resource_variable_ops.ResourceVariable(
0, trainable=False)
else:
var0 = variables.VariableV1(var0_np)
var1 = variables.VariableV1(var1_np)
global_step = variables.VariableV1(
0, trainable=False)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = powersign.PowerSignOptimizer(
learning_rate=learning_rate,
base=base,
beta=beta,
sign_decay_fn=sign_decay_fn,
)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
global_step=global_step)
neg_update = opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),
global_step=global_step)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 7 steps of powersign
# first 4 steps with positive gradient
# last 3 steps with negative gradient (sign(gm) should be -1)
for t in range(1, 8):
if t < 5:
if not context.executing_eagerly():
self.evaluate(update)
elif t > 1:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
global_step=global_step)
else:
if not context.executing_eagerly():
self.evaluate(neg_update)
elif t > 1:
opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),
global_step=global_step)
var0_np, m0 = powersign_update_numpy(
var0_np,
grads0_np if t < 5 else -grads0_np,
m0,
learning_rate,
base=base,
beta=beta,
py_sign_decay_fn=py_sign_decay_fn,
t=t,
)
var1_np, m1 = powersign_update_numpy(
var1_np,
grads1_np if t < 5 else -grads1_np,
m1,
learning_rate,
base=base,
beta=beta,
py_sign_decay_fn=py_sign_decay_fn,
t=t,
)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testDense(self):
decay_steps = 10
sign_decay_fn = sign_decay.get_linear_decay_fn(decay_steps)
py_sign_decay_fn = py_linear_decay_fn(decay_steps)
self._testDense(use_resource=False)
self._testDense(use_resource=False,
learning_rate=0.1,
base=10.0,
beta=0.8)
self._testDense(use_resource=False,
sign_decay_fn=sign_decay_fn,
py_sign_decay_fn=py_sign_decay_fn)
self._testDense(use_resource=True)
self._testDense(use_resource=True, learning_rate=0.1, base=10.0, beta=0.8)
self._testDense(use_resource=True,
sign_decay_fn=sign_decay_fn,
py_sign_decay_fn=py_sign_decay_fn)
def _testSparse(self,
use_resource=False,
learning_rate=0.1,
sign_decay_fn=None,
py_sign_decay_fn=None,
base=math.e,
beta=0.9):
with self.session(use_gpu=True):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
# Initialize variables for numpy implementation.
m0, m1 = 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
global_step = resource_variable_ops.ResourceVariable(
0, trainable=False)
else:
var0 = variables.VariableV1(var0_np)
var1 = variables.VariableV1(var1_np)
global_step = variables.VariableV1(
0, trainable=False)
grads0_np_indices = np.array([0, 1], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np),
constant_op.constant(grads0_np_indices), constant_op.constant([2]))
grads1_np_indices = np.array([0, 1], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np),
constant_op.constant(grads1_np_indices), constant_op.constant([2]))
opt = powersign.PowerSignOptimizer(
learning_rate=learning_rate,
base=base,
beta=beta,
sign_decay_fn=sign_decay_fn,
)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
global_step=global_step)
neg_update = opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),
global_step=global_step)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 7 steps of powersign
# first 4 steps with positive gradient
# last 3 steps with negative gradient (sign(gm) should be -1)
for t in range(1, 8):
if t < 5:
update.run()
else:
neg_update.run()
var0_np, m0 = powersign_update_numpy(
var0_np,
grads0_np if t < 5 else -grads0_np,
m0,
learning_rate,
base=base,
beta=beta,
py_sign_decay_fn=py_sign_decay_fn,
t=t,
)
var1_np, m1 = powersign_update_numpy(
var1_np,
grads1_np if t < 5 else -grads1_np,
m1,
learning_rate,
base=base,
beta=beta,
py_sign_decay_fn=py_sign_decay_fn,
t=t,
)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testSparse(self):
decay_steps = 10
sign_decay_fn = sign_decay.get_linear_decay_fn(decay_steps)
py_sign_decay_fn = py_linear_decay_fn(decay_steps)
self._testSparse(use_resource=False)
self._testSparse(use_resource=False,
learning_rate=0.01,
base=2.0,
beta=0.8)
self._testSparse(use_resource=False,
sign_decay_fn=sign_decay_fn,
py_sign_decay_fn=py_sign_decay_fn)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/powersign_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RegAdagrad for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import math_ops
from tensorflow.python.training import adagrad
from tensorflow.python.training import training_ops
from tensorflow.python.util import tf_contextlib
class RegAdagradOptimizer(adagrad.AdagradOptimizer):
"""RegAdagrad: Adagrad with updates that optionally skip updating the slots.
This is meant to address the problem of additional regularization terms in the
loss function affecting learning rate decay and causing hyper-param
entanglement. Example usage:
loss = tf.nn.cross_entropy(x, labels)
reg_loss = reg_strength * tf.reduce_sum(x * x)
opt = tf.contrib.opt.RegAdagradOptimizer(learning_rate)
loss_update = opt.minimize(loss)
with opt.avoid_updating_slots():
reg_update = opt.minimize(reg_loss)
total_update = tf.group([loss_update, reg_update])
# ...
sess.run(total_update, ...)
"""
def __init__(self,
learning_rate,
initial_accumulator_value=0.1,
use_locking=False,
name="RegAdagrad"):
super(RegAdagradOptimizer, self).__init__(
learning_rate,
initial_accumulator_value=initial_accumulator_value,
use_locking=use_locking,
name=name)
self._should_update_slots = True
@tf_contextlib.contextmanager
def avoid_updating_slots(self):
old = self._should_update_slots
self._should_update_slots = False
try:
yield
finally:
self._should_update_slots = old
def _apply_dense(self, grad, var):
acc = self.get_slot(var, "accumulator")
return training_ops.apply_adagrad(
var,
acc,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad,
use_locking=self._use_locking,
update_slots=self._should_update_slots)
def _resource_apply_dense(self, grad, var, update_slots=True):
acc = self.get_slot(var, "accumulator")
return training_ops.resource_apply_adagrad(
var.handle,
acc.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
grad,
use_locking=self._use_locking,
update_slots=self._should_update_slots)
def _apply_sparse(self, grad, var, update_slots=True):
acc = self.get_slot(var, "accumulator")
return training_ops.sparse_apply_adagrad(
var,
acc,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad.values,
grad.indices,
use_locking=self._use_locking,
update_slots=self._should_update_slots)
def _resource_apply_sparse(self, grad, var, indices, update_slots=True):
acc = self.get_slot(var, "accumulator")
return training_ops.resource_sparse_apply_adagrad(
var.handle,
acc.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
grad,
indices,
use_locking=self._use_locking,
update_slots=self._should_update_slots)
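# Illustrative-only sketch of the behaviour toggled by `avoid_updating_slots`
# above: a plain Adagrad step either accumulates the squared gradient into its
# accumulator slot or, with update_slots=False, reuses the existing
# accumulator, so a regularizer's gradient does not accelerate the effective
# learning-rate decay. The helper is hypothetical and unused by this module.
def _adagrad_step_sketch(param, acc, grad, lr, update_slots=True):
  if update_slots:
    acc = acc + grad * grad  # normal Adagrad accumulator update
  return param - lr * grad / (acc ** 0.5), acc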
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/reg_adagrad_optimizer.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for GGTOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.opt.python.training.ggt import GGTOptimizer
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def ggt_update_numpy(param,
g_t,
lr,
grad_buffer,
m,
window,
t,
beta1=0.9,
eps=1e-4,
svd_eps=1e-6,
sigma_eps=1e-2):
"""Tests the correctness of one step of GGT."""
m_t = m * beta1 + (1 - beta1) * g_t
grad_buffer[((t - 1) % window), :] = m_t
m_matrix = np.transpose(grad_buffer / np.sqrt(np.minimum(t, window)))
mm = np.dot(np.transpose(m_matrix), m_matrix)
damping = np.eye(window) * svd_eps
u, sigma, _ = np.linalg.svd(mm + damping)
sigma_sqrt_inv = np.power(np.sqrt(sigma) + sigma_eps, -3)
new_step = np.linalg.multi_dot([
m_matrix, u,
np.diag(sigma_sqrt_inv),
np.transpose(u),
np.transpose(m_matrix), m_t
])
sigma_sqrt_min = np.sqrt(sigma).min()
if sigma_sqrt_min > eps:
new_step += (m_t - np.linalg.multi_dot([
m_matrix, u,
np.diag(1.0 / sigma),
np.transpose(u),
np.transpose(m_matrix), m_t
])) * (1.0 / sigma_sqrt_min)
param_t = param - lr * new_step
return param_t, m_t, grad_buffer
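# Illustrative-only use of the reference implementation above, mirroring the
# shapes used by doTestBasic below: one GGT step on a 4-dimensional flat
# parameter with window=3 writes the updated first moment into row 0 of the
# gradient buffer. The `_sketch_step` name is hypothetical.
_sketch_step = ggt_update_numpy(
    param=np.array([1.0, 2.0, 3.0, 4.0]),
    g_t=np.array([0.1, 0.1, 0.01, 0.01]),
    lr=0.001,
    grad_buffer=np.zeros((3, 4)),
    m=0.0,
    window=3,
    t=1)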
class GGTOptimizerTest(test.TestCase):
def doTestBasic(self, use_resource=False):
# SVD does not support float16
for i, dtype in enumerate([dtypes.float32, dtypes.float64]):
with self.session(graph=ops.Graph()):
# Initialize variables for numpy implementation.
m0 = 0.0
window = 3
grad_buffer = np.zeros((window, 4), dtype=dtype.as_numpy_dtype)
lr = 0.001
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(
var0_np, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
var1_np, name="var1_%d" % i)
else:
var0 = variables.Variable(var0_np, name="var0")
var1 = variables.Variable(var1_np, name="var1")
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = GGTOptimizer(learning_rate=lr, window=window)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
opt_variables = opt.variables()
m_t = opt._get_moment1()
grad_buffer_t = opt._get_grad_buffer()
g_t = opt._get_flat_grad()
        self.assertIsNotNone(m_t)
        self.assertIsNotNone(grad_buffer_t)
        self.assertIsNotNone(g_t)
self.assertIn(m_t, opt_variables)
self.assertIn(grad_buffer_t, opt_variables)
self.assertIn(g_t, opt_variables)
with ops.Graph().as_default():
# Shouldn't return non-slot variables from other graphs.
self.assertEqual(0, len(opt.variables()))
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
m_t = opt._get_moment1()
grad_buffer_t = opt._get_grad_buffer()
g_t = opt._get_flat_grad()
# Run 3 steps of GGT
for t in range(1, 4):
if not context.executing_eagerly():
self.evaluate(update)
elif t > 1:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
if t == 1:
self.assertAllCloseAccordingToType(
np.array([0.01, 0.01, 0.001, 0.001]), self.evaluate(m_t))
self.assertAllCloseAccordingToType(
np.array([[0.01, 0.01, 0.001, 0.001], [0., 0., 0., 0.],
[0., 0., 0., 0.]]), self.evaluate(grad_buffer_t))
elif t == 2:
self.assertAllCloseAccordingToType(
np.array([0.019, 0.019, 0.0019, 0.0019]), self.evaluate(m_t))
self.assertAllCloseAccordingToType(
np.array([[0.01, 0.01, 0.001, 0.001],
[0.019, 0.019, 0.0019, 0.0019], [0., 0., 0., 0.]]),
self.evaluate(grad_buffer_t))
else:
self.assertAllCloseAccordingToType(
np.array([0.0271, 0.0271, 0.00271, 0.00271]),
self.evaluate(m_t))
self.assertAllCloseAccordingToType(
np.array([[0.01, 0.01, 0.001,
0.001], [0.019, 0.019, 0.0019, 0.0019],
[0.0271, 0.0271, 0.00271, 0.00271]]),
self.evaluate(grad_buffer_t))
self.assertAllCloseAccordingToType([0.1, 0.1, 0.01, 0.01],
self.evaluate(g_t))
var_np = np.append(var0_np, var1_np)
grads_np = np.append(grads0_np, grads1_np)
var_np, m0, grad_buffer = ggt_update_numpy(var_np, grads_np, lr,
grad_buffer, m0, window, t)
var0_np = var_np[:2]
var1_np = var_np[2:]
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testBasic(self):
with self.cached_session():
self.doTestBasic(use_resource=False)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testResourceBasic(self):
self.doTestBasic(use_resource=True)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/ggt_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for external_optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.opt.python.training import external_optimizer
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# pylint: disable=g-import-not-at-top,unused-import
try:
import __builtin__ as builtins
except ImportError:
import builtins
class MockOptimizerInterface(external_optimizer.ExternalOptimizerInterface):
NUM_STEP_CALLS = 5
NUM_LOSS_CALLS = 2
def _minimize(self, initial_val, loss_grad_func, step_callback,
optimizer_kwargs, **unused_kwargs):
"""Minimize (x - x0)**2 / 2 with respect to x."""
for _ in range(self.NUM_LOSS_CALLS):
loss_grad_func(initial_val)
for _ in range(self.NUM_STEP_CALLS):
step_callback(initial_val)
_, grad = loss_grad_func(initial_val)
return initial_val - grad
class TestCase(test.TestCase):
def assertAllClose(self, array1, array2, rtol=1e-5, atol=1e-5):
array1 = np.asarray(array1)
array2 = np.asarray(array2)
if not array1.shape:
array1 = np.array([array1])
if not array2.shape:
array2 = np.array([array2])
super(TestCase, self).assertAllClose(array1, array2, rtol=rtol, atol=atol)
class ExternalOptimizerInterfaceTest(TestCase):
def test_optimize(self):
    scalar = variables.VariableV1(random_ops.random_normal([]), name='scalar')
    vector = variables.VariableV1(random_ops.random_normal([2]), name='vector')
    matrix = variables.VariableV1(
        random_ops.random_normal([2, 3]), name='matrix')
minimum_location = constant_op.constant(np.arange(9), dtype=dtypes.float32)
loss = math_ops.reduce_sum(
math_ops.square(vector - minimum_location[:2])) / 2.
loss += math_ops.reduce_sum(
math_ops.square(scalar - minimum_location[2])) / 2.
loss += math_ops.reduce_sum(
math_ops.square(
matrix - array_ops.reshape(minimum_location[3:], [2, 3]))) / 2.
optimizer = MockOptimizerInterface(loss)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
optimizer.minimize(sess)
self.assertAllClose(np.arange(2), sess.run(vector))
self.assertAllClose(np.arange(1) + 2, sess.run(scalar))
self.assertAllClose(np.arange(6).reshape(2, 3) + 3, sess.run(matrix))
def test_callbacks(self):
vector_val = np.array([7., -2.], dtype=np.float32)
    vector = variables.VariableV1(vector_val, name='vector')
minimum_location_val = np.arange(2)
minimum_location = constant_op.constant(
minimum_location_val, dtype=dtypes.float32)
loss = math_ops.reduce_sum(math_ops.square(vector - minimum_location)) / 2.
loss_val = ((vector_val - minimum_location_val)**2).sum() / 2.
optimizer = MockOptimizerInterface(loss)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
initial_vector_val = sess.run(vector)
extra_fetches = [loss]
step_callback = test.mock.Mock()
loss_callback = test.mock.Mock()
optimizer.minimize(
sess,
fetches=extra_fetches,
loss_callback=loss_callback,
step_callback=step_callback)
call = test.mock.call(loss_val)
loss_calls = [call] * MockOptimizerInterface.NUM_LOSS_CALLS
loss_callback.assert_has_calls(loss_calls)
args, _ = step_callback.call_args
self.assertAllClose(initial_vector_val, args[0])
class ScipyOptimizerInterfaceTest(TestCase):
def _objective(self, x):
"""Rosenbrock function. (Carl Edward Rasmussen, 2001-07-21).
f(x) = sum_{i=1:D-1} 100*(x(i+1) - x(i)^2)^2 + (1-x(i))^2
Args:
x: a Variable
Returns:
f: a tensor (objective value)
"""
d = array_ops.size(x)
s = math_ops.add(
100 * math_ops.square(
math_ops.subtract(
array_ops.strided_slice(x, [1], [d]),
math_ops.square(array_ops.strided_slice(x, [0], [d - 1])))),
math_ops.square(
math_ops.subtract(1.0, array_ops.strided_slice(x, [0], [d - 1]))))
return math_ops.reduce_sum(s)
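  # Illustrative note (not in the original file): the Rosenbrock function has
  # its global minimum f(x) = 0 at x = (1, ..., 1), which is why the tests
  # below assert convergence to np.ones(dimension).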
def _test_optimization_method(self,
method,
options,
rtol=1e-5,
atol=1e-5,
dimension=5):
x = variables.VariableV1(array_ops.zeros(dimension))
optimizer = external_optimizer.ScipyOptimizerInterface(
self._objective(x), method=method, options=options)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
optimizer.minimize(sess)
self.assertAllClose(np.ones(dimension), sess.run(x), rtol=rtol, atol=atol)
def test_unconstrained(self):
dimension = 5
x = variables.VariableV1(array_ops.zeros(dimension))
optimizer = external_optimizer.ScipyOptimizerInterface(self._objective(x))
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
optimizer.minimize(sess)
self.assertAllClose(np.ones(dimension), sess.run(x))
def test_nelder_mead_method2(self):
self._test_optimization_method(
method='Nelder-Mead', options={}, rtol=1e-4, atol=1e-4)
def test_newton_cg_method(self):
self._test_optimization_method(
method='Newton-CG',
options={'eps': 1e-03,
'xtol': 1e-05},
rtol=1e-3,
atol=1e-3)
def test_newton_tnc_method(self):
self._test_optimization_method(
method='TNC',
options={'gtol': -5,
'maxiter': 1000},
rtol=1e-1,
atol=1e-1)
def test_cobyla_method(self):
    # COBYLA does not reach the global optimum.
self._test_optimization_method(
method='COBYLA',
options={
'maxiter': 9000,
},
rtol=1e-1,
atol=1e-1,
dimension=2)
def test_slsqp_method(self):
self._test_optimization_method(
method='SLSQP', options={}, rtol=1e-3, atol=1e-3)
def test_cg_method(self):
self._test_optimization_method(
method='CG', options={'gtol': 1e-03}, rtol=1e-3, atol=1e-3)
def test_other_optimization_methods(self):
    # These methods do not require special options to converge on the
    # Rosenbrock function.
methods = ['Powell', 'BFGS', 'L-BFGS-B']
for method in methods:
self._test_optimization_method(method=method, options={})
def test_nonlinear_programming(self):
vector_initial_value = [7., 7.]
vector = variables.VariableV1(vector_initial_value, 'vector')
# Make norm as small as possible.
loss = math_ops.reduce_sum(math_ops.square(vector))
# Ensure y = 1.
equalities = [vector[1] - 1.]
# Ensure x >= 1. Thus optimum should be at (1, 1).
inequalities = [vector[0] - 1.]
optimizer = external_optimizer.ScipyOptimizerInterface(
loss, equalities=equalities, inequalities=inequalities, method='SLSQP')
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
optimizer.minimize(sess)
self.assertAllClose(np.ones(2), sess.run(vector))
def test_scalar_bounds(self):
vector_initial_value = [7., 7.]
vector = variables.VariableV1(vector_initial_value, 'vector')
# Make norm as small as possible.
loss = math_ops.reduce_sum(math_ops.square(vector))
# Make the minimum value of each component be 1.
var_to_bounds = {vector: (1., np.infty)}
optimizer = external_optimizer.ScipyOptimizerInterface(
loss, var_to_bounds=var_to_bounds)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
optimizer.minimize(sess)
self.assertAllClose(np.ones(2), sess.run(vector))
def test_vector_bounds(self):
vector_initial_value = [7., 7.]
vector = variables.VariableV1(vector_initial_value, 'vector')
# Make norm as small as possible.
loss = math_ops.reduce_sum(math_ops.square(vector))
var_to_bounds = {vector: ([None, 2.], None)}
optimizer = external_optimizer.ScipyOptimizerInterface(
loss, var_to_bounds=var_to_bounds)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
optimizer.minimize(sess)
self.assertAllClose([0., 2.], sess.run(vector))
def test_optimizer_kwargs(self):
    # Checks that the 'method' argument is still present
# after running optimizer.minimize().
# Bug reference: b/64065260
vector_initial_value = [7., 7.]
vector = variables.VariableV1(vector_initial_value, 'vector')
loss = math_ops.reduce_sum(math_ops.square(vector))
optimizer = external_optimizer.ScipyOptimizerInterface(
loss, method='SLSQP')
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
optimizer.minimize(sess)
method = optimizer.optimizer_kwargs.get('method')
self.assertEqual('SLSQP', method)
def test_callbacks(self):
vector_val = np.array([7., -2.], dtype=np.float32)
vector = variables.VariableV1(vector_val, 'vector')
minimum_location_val = np.arange(2)
minimum_location = constant_op.constant(
minimum_location_val, dtype=dtypes.float32)
loss = math_ops.reduce_sum(math_ops.square(vector - minimum_location)) / 2.
loss_val_first = ((vector_val - minimum_location_val)**2).sum() / 2.
optimizer = external_optimizer.ScipyOptimizerInterface(loss, method='SLSQP')
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
initial_vector_val = sess.run(vector)
extra_fetches = [loss]
step_callback = test.mock.Mock()
loss_callback = test.mock.Mock()
optimizer.minimize(
sess,
fetches=extra_fetches,
loss_callback=loss_callback,
step_callback=step_callback)
loss_val_last = sess.run(loss)
call_first = test.mock.call(loss_val_first)
call_last = test.mock.call(loss_val_last)
loss_calls = [call_first, call_last]
loss_callback.assert_has_calls(loss_calls, any_order=True)
args, _ = step_callback.call_args
self.assertAllClose(minimum_location_val, args[0])
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/external_optimizer_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ElasticAverageOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import portpicker
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import device_setter
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import saver
from tensorflow.python.training import server_lib
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.contrib.opt.python.training.elastic_average_optimizer import \
ElasticAverageOptimizer, ElasticAverageCustomGetter, GLOBAL_VARIABLE_NAME
def create_local_cluster(num_workers, num_ps, protocol="grpc"):
"""Create local GRPC servers and return them."""
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs, job_name="worker", protocol=protocol, task_index=ix, start=True)
for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs, job_name="ps", protocol=protocol, task_index=ix, start=True)
for ix in range(num_ps)
]
return cluster_dict, workers, ps_servers
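# Illustrative sketch (not part of the original file): a typical call is
#   cluster, workers, ps_servers = create_local_cluster(num_workers=2, num_ps=1)
# after which each workers[i].target is a grpc:// address usable as the master
# for a Session or MonitoredTrainingSession.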
# Creates the workers and returns their sessions, graphs, and train_ops.
# The chief worker updates last.
def _get_workers(num_workers, period, workers, moving_rate, num_ps=1):
sessions = []
graphs = []
train_ops = []
savers = []
for worker_id in range(num_workers):
graph = ops.Graph()
is_chief = (worker_id == 0)
with graph.as_default():
worker_device = "/job:worker/task:%d/cpu:0" % (worker_id)
ea_custom = ElasticAverageCustomGetter(worker_device=worker_device)
with variable_scope.variable_scope(
"", custom_getter=ea_custom), ops.device(
device_setter.replica_device_setter(
worker_device=worker_device,
ps_device="/job:ps/task:0/cpu:0",
ps_tasks=1)):
global_step = training_util.get_or_create_global_step()
var_0 = variable_scope.get_variable(initializer=0.0, name="v0")
var_1 = variable_scope.get_variable(initializer=1.0, name="v1")
if num_ps > 1:
with variable_scope.variable_scope(
"",
partitioner=partitioned_variables.fixed_size_partitioner(
num_ps, axis=0),
custom_getter=ea_custom), ops.device(
device_setter.replica_device_setter(
worker_device=worker_device,
ps_device="/job:ps/task:0/cpu:0",
ps_tasks=num_ps)):
partition_var = variable_scope.get_variable(
'partition_var',
shape=[2, 4],
initializer=init_ops.ones_initializer)
part_0 = list(partition_var)[0]
part_1 = list(partition_var)[1]
with ops.device("/job:worker/task:" + str(worker_id)):
grads_0 = constant_op.constant(-1.0)
grads_1 = constant_op.constant(-1.0)
grads_part_0 = constant_op.constant([[-1., -1., -1., -1.]])
grads_part_1 = constant_op.constant([[-1., -1., -1., -1.]])
sgd_opt = gradient_descent.GradientDescentOptimizer(1.0)
opt = ElasticAverageOptimizer(
opt=sgd_opt,
num_worker=num_workers,
moving_rate=moving_rate,
communication_period=period,
ea_custom_getter=ea_custom)
if num_ps == 1:
train_op = [
opt.apply_gradients(([grads_0, var_0], [grads_1, var_1]),
global_step)
]
else:
train_op = [
opt.apply_gradients(([grads_0, var_0],
[grads_1, var_1],
[grads_part_0, part_0],
[grads_part_1, part_1]),
global_step)
]
easgd_hook = opt.make_session_run_hook(is_chief, worker_id)
saver = opt.swapping_saver()
# Creates MonitoredSession
sess = training.MonitoredTrainingSession(
workers[worker_id].target, hooks=[easgd_hook])
sessions.append(sess)
graphs.append(graph)
train_ops.append(train_op)
savers.append(saver)
return sessions, graphs, train_ops, savers
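# Note (illustrative, not in the original file): each worker builds its own
# graph and MonitoredTrainingSession; worker 0 is the chief, and the EASGD
# session hook it creates handles the global variable initialization.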
class ElasticAverageOptimizerTest(test.TestCase):
def _run(self, train_op, sess):
sess.run(train_op)
def test1Workers2Period(self):
num_workers = 1
communication_period = 2
num_ps = 1
cluster, workers, _ = create_local_cluster(
num_workers=num_workers, num_ps=num_ps)
sessions, graphs, train_ops, savers = _get_workers(
num_workers, communication_period, workers, 1.0)
var_0 = graphs[0].get_tensor_by_name("v0:0")
var_1 = graphs[0].get_tensor_by_name("v1:0")
global_step = training_util.get_global_step(graphs[0])
var_0_g = graphs[0].get_tensor_by_name(GLOBAL_VARIABLE_NAME + "/v0:0")
var_1_g = graphs[0].get_tensor_by_name(GLOBAL_VARIABLE_NAME + "/v1:0")
# Verify the initialized value.
self.assertAllEqual(0.0, sessions[0].run(var_0))
self.assertAllEqual(1.0, sessions[0].run(var_1))
self.assertAllEqual(0.0, sessions[0].run(var_0_g))
self.assertAllEqual(1.0, sessions[0].run(var_1_g))
self.assertAllEqual(0, sessions[0].run(global_step))
sessions[0].run(train_ops[0])
self.assertAllEqual(1.0, sessions[0].run(var_0))
self.assertAllEqual(2.0, sessions[0].run(var_1))
self.assertAllEqual(0.0, sessions[0].run(var_0_g))
self.assertAllEqual(1.0, sessions[0].run(var_1_g))
self.assertAllEqual(0, sessions[0].run(global_step))
# iteration 2, global variable update
sessions[0].run(train_ops[0])
self.assertAllEqual(0.0, sessions[0].run(var_0))
self.assertAllEqual(1.0, sessions[0].run(var_1))
self.assertAllEqual(2.0, sessions[0].run(var_0_g))
self.assertAllEqual(3.0, sessions[0].run(var_1_g))
self.assertAllEqual(1, sessions[0].run(global_step))
# iteration 3
sessions[0].run(train_ops[0])
self.assertAllEqual(1.0, sessions[0].run(var_0))
self.assertAllEqual(2.0, sessions[0].run(var_1))
self.assertAllEqual(2.0, sessions[0].run(var_0_g))
self.assertAllEqual(3.0, sessions[0].run(var_1_g))
self.assertAllEqual(1, sessions[0].run(global_step))
sessions[0].run(train_ops[0])
    # Save; the saved values will be the global center values.
outfile = os.path.join(test.get_temp_dir(), "model")
savers[0].save(sessions[0]._sess._sess._sess._sess,
save_path=outfile)
ops.reset_default_graph() # restore on a new graph
with session.Session() as sess:
v0 = variable_scope.get_variable(initializer=0.0, name="v0")
v1 = variable_scope.get_variable(initializer=1.0, name="v1")
sess.run(variables.local_variables_initializer())
saver_opt = saver.Saver(var_list=[v1, v0])
saver_opt.restore(sess, outfile)
self.assertAllEqual(2.0, sess.run(v0))
self.assertAllEqual(3.0, sess.run(v1))
def test2Worker1Period(self):
num_workers = 2
communication_period = 1
num_ps = 2
cluster, workers, _ = create_local_cluster(
num_workers=num_workers, num_ps=num_ps)
sessions, graphs, train_ops, savers = _get_workers(
num_workers, communication_period, workers, 0.5, num_ps=2)
var_0 = graphs[0].get_tensor_by_name("v0:0")
var_1 = graphs[0].get_tensor_by_name("v1:0")
var_0_1 = graphs[1].get_tensor_by_name("v0:0")
var_1_1 = graphs[1].get_tensor_by_name("v1:0")
var_0_g = graphs[0].get_tensor_by_name(GLOBAL_VARIABLE_NAME + "/v0:0")
var_1_g = graphs[0].get_tensor_by_name(GLOBAL_VARIABLE_NAME + "/v1:0")
part_0_g = graphs[0].get_tensor_by_name(
GLOBAL_VARIABLE_NAME + "/partition_var/part_0:0")
# Verify the initialized value.
self.assertAllEqual(0.0, sessions[0].run(var_0))
self.assertAllEqual(1.0, sessions[0].run(var_1))
self.assertAllEqual(0.0, sessions[1].run(var_0_1))
self.assertAllEqual(1.0, sessions[1].run(var_1_1))
self.assertAllEqual(0.0, sessions[0].run(var_0_g))
self.assertAllEqual(1.0, sessions[0].run(var_1_g))
sessions[0].run(train_ops[0])
sessions[1].run(train_ops[1])
self.assertAllEqual(0.5, sessions[0].run(var_0))
self.assertAllEqual(1.5, sessions[0].run(var_1))
self.assertAllEqual(0.75, sessions[0].run(var_0_g))
self.assertAllEqual(1.75, sessions[0].run(var_1_g))
self.assertAllEqual(0.75, sessions[1].run(var_0_1))
self.assertAllEqual(1.75, sessions[1].run(var_1_1))
# part_0 of global_center copy
part_0_g = sessions[0].run(part_0_g)
outfile = os.path.join(test.get_temp_dir(), "model")
savers[0].save(sessions[0]._sess._sess._sess._sess,
save_path=outfile)
# verify restore of partitioned_variables
ops.reset_default_graph() # restore on a new graph
g = ops.get_default_graph()
with session.Session() as sess, g.as_default():
with variable_scope.variable_scope(
"",
partitioner=partitioned_variables.fixed_size_partitioner(
num_ps, axis=0)):
partition_var = variable_scope.get_variable(
'partition_var',
shape=[2, 4],
initializer=init_ops.ones_initializer)
s = saver.Saver(var_list=[partition_var])
s.restore(sess, outfile)
part_0 = g.get_tensor_by_name('partition_var/part_0:0')
self.assertAllEqual(part_0_g, sess.run(part_0))
def testPS2TasksWithClusterSpecClass(self):
cluster_spec = server_lib.ClusterSpec({
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
})
ea_custom = ElasticAverageCustomGetter(worker_device="/job:worker/task:0")
from tensorflow.python.training import device_setter
with ops.device(
device_setter.replica_device_setter(cluster=cluster_spec,
worker_device="/job:worker/task:0",
ps_device="/job:ps")), \
variable_scope.variable_scope("", custom_getter=ea_custom):
v = variable_scope.get_variable(initializer=[1, 2], name="v")
w = variable_scope.get_variable(initializer=[2, 1], name="w")
v_g, w_g = ea_custom._global_map[v], ea_custom._global_map[w]
self.assertDeviceEqual("/job:worker/task:0", v.device)
self.assertDeviceEqual("job:ps/task:0", v_g.device)
self.assertDeviceEqual("/job:worker/task:0", w.device)
self.assertDeviceEqual("job:ps/task:1", w_g.device)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/elastic_average_optimizer_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import optimizer
from tensorflow.python.training import session_run_hook
GLOBAL_VARIABLE_NAME = 'global_center_variable'
GRAD_VARIABLE_NAME = 'grad_variable'
class AGNCustomGetter(object):
"""Custom_getter class is used to do:
1. Change trainable variables to local collection and place them at worker
device
2. Generate global variables(global center variables)
3. Generate grad variables(gradients) which record the gradients sum
and place them at worker device
Notice that the class should be used with tf.replica_device_setter,
so that the global center variables and global step variable can be placed
at ps device.
"""
def __init__(self, worker_device):
"""
Args:
worker_device: put the grad_variables on worker device
"""
self._worker_device = worker_device
self._global_map = {}
self._grad_map = {}
def __call__(self, getter, name, trainable, collections, *args, **kwargs):
if trainable:
with ops.device(self._worker_device):
local_var = getter(
name,
trainable=True,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
*args,
**kwargs)
      if kwargs['reuse'] is True:
return local_var
global_center_variable = getter(
name='%s/%s' % (GLOBAL_VARIABLE_NAME, name),
trainable=False,
collections=[ops.GraphKeys.GLOBAL_VARIABLES],
*args,
**kwargs)
with ops.device(self._worker_device):
grad_variable = getter(
name='%s/%s' % (GRAD_VARIABLE_NAME, name),
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
*args,
**kwargs)
if kwargs['partitioner'] is None:
self._grad_map[local_var] = grad_variable
self._global_map[local_var] = global_center_variable
else:
v_list = list(local_var)
for i in range(len(v_list)):
self._grad_map[v_list[i]] = list(grad_variable)[i]
self._global_map[v_list[i]] = list(global_center_variable)[i]
return local_var
else:
return getter(
name, trainable=trainable, collections=collections, *args, **kwargs)
class AGNOptimizer(optimizer.Optimizer):
"""Wrapper that implements the Accumulated GradientNormalization algorithm.
Reference:
Accumulated Gradient Normalization: Joeri Hermans ACML2017
https://arxiv.org/abs/1710.02368
"""
def __init__(self,
optimizer,
num_worker,
custom_getter,
communication_period=10,
use_locking=True,
name='AGNOptimizer'):
"""Construct a new AGN optimizer.
Args:
optimizer: input optimizer, can be sgd/momentum/adam etc.
num_worker: The number of workers
custom_getter: The AGNCustomGetter
      communication_period: An integer value that controls how frequently each
        worker communicates with the ps.
use_locking: If True use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "AGNOptimizer".
"""
super(AGNOptimizer, self).__init__(use_locking, name)
self._opt = optimizer
self._num_worker = num_worker
self._period = communication_period
self._global_map = custom_getter._global_map
self._grad_map = custom_getter._grad_map
self._local_step = variable_scope.get_variable(
initializer=0,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name='local_step')
self._opt._prepare()
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients to global variables.
This is the second part of `minimize()`. It returns an `Operation` that
applies gradients.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
`compute_gradients()`.
global_step: Optional `Variable` to increment by one after the variables
have been updated.
      name: Optional name for the returned operation. Defaults to the name
passed to the `Optimizer` constructor.
Returns:
An `Operation` that applies the specified gradients. If `global_step`
was not None, that operation also increments `global_step`.
"""
local_vars = [v for g, v in grads_and_vars if g is not None]
grads = [g for g, v in grads_and_vars if g is not None]
def _variable_creator(next_creator, collections, **kwargs):
if not collections:
collections = [ops.GraphKeys.LOCAL_VARIABLES]
elif ops.GraphKeys.GLOBAL_VARIABLES in collections:
collections = list(collections)
collections.append(ops.GraphKeys.LOCAL_VARIABLES)
collections.remove(ops.GraphKeys.GLOBAL_VARIABLES)
return next_creator(collections=collections, **kwargs)
# theta = theta - lr * grad
with variable_scope.variable_creator_scope(_variable_creator):
local_update_op = self._opt.apply_gradients(grads_and_vars)
# a = a + grad
update_ops = []
update_ops.append(local_update_op)
grad_vars = [self._grad_map[var] for var in local_vars]
for g, grad_var in zip(grads, grad_vars):
update_ops.append(state_ops.assign_add(grad_var, g))
global_center_vars = [self._global_map[var] for var in local_vars]
# update global variables.
def _Update_global_variables():
global_norm = []
# a = a / t
for g in grad_vars:
global_norm.append(state_ops.assign(g, g / self._period))
# apply
with ops.control_dependencies(global_norm):
apply_global_op = self._opt.apply_gradients(
zip(grad_vars, global_center_vars))
# pull
with ops.control_dependencies([apply_global_op]):
update_ops = []
if global_step:
with ops.colocate_with(global_step):
update_ops.append(state_ops.assign_add(global_step, 1))
for lvar in local_vars:
g_val = self._global_map[lvar].read_value()
update_ops.append(state_ops.assign(lvar, g_val))
for grad_var in grad_vars:
update_ops.append(
state_ops.assign(grad_var, array_ops.zeros_like(grad_var)))
variable_update = control_flow_ops.group(*(update_ops))
return variable_update
local_update = state_ops.assign_add(
self._local_step, 1, name='local_step_update').op
with ops.control_dependencies([local_update]):
condition = math_ops.equal(
math_ops.mod(self._local_step, self._period), 0)
with ops.control_dependencies(update_ops):
conditional_update = control_flow_ops.cond(
condition, _Update_global_variables, control_flow_ops.no_op)
return conditional_update
def get_init_op(self, task_index):
"""Returns the op to let all the local variables and local center
variables equal to the global center variables before the training begins
"""
init_ops = []
local_vars = variables.trainable_variables()
global_center_vars = [self._global_map[var] for var in local_vars]
grad_vars = [self._grad_map[var] for var in local_vars]
if not (local_vars and global_center_vars and grad_vars):
      raise ValueError('The lists of local_variables, global_center_variables, '
                       'and grad_variables should not be empty')
for lvar, gc_var in zip(local_vars, global_center_vars):
init_ops.append(state_ops.assign(lvar, gc_var))
for g in grad_vars:
init_ops.append(state_ops.assign(g, array_ops.zeros_like(g)))
init_op = control_flow_ops.group(*(init_ops))
return init_op
def make_session_run_hook(self, is_chief, task_index):
"""Creates a hook to handle AGNOptimizerHook ops such as initialization."""
return _AGNOptimizerHook(self, is_chief, task_index)
class _AGNOptimizerHook(session_run_hook.SessionRunHook):
def __init__(self, agn_optimizer, is_chief, task_index):
"""Creates hook to handle AGNOptimizer initialization ops.
Args:
agn_optimizer: `AGNOptimizer` which this hook will initialize.
      is_chief: `Bool`, whether this is the chief replica or not.
      task_index: int, the task index of the worker.
"""
self._agn_optimizer = agn_optimizer
self._is_chief = is_chief
self._task_index = task_index
def begin(self):
self._local_init_op = variables.local_variables_initializer()
self._global_init_op = None
if self._is_chief:
self._global_init_op = variables.global_variables_initializer()
self._variable_init_op = self._agn_optimizer.get_init_op(self._task_index)
def after_create_session(self, session, coord):
"""Run initialization ops"""
session.run(self._variable_init_op)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/agn_optimizer.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for AdaMax."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.opt.python.training import adamax
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def adamax_update_numpy(param,
g_t,
t,
m,
v,
alpha=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8):
m_t = beta1 * m + (1 - beta1) * g_t
v_t = np.maximum(beta2 * v, np.abs(g_t))
param_t = param - (alpha / (1 - beta1**t)) * (m_t / (v_t + epsilon))
return param_t, m_t, v_t
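# Illustrative check (not part of the original test): one AdaMax step from zero
# state with param=1.0 and g_t=0.1 gives m_t = 0.01, v_t = max(0, |0.1|) = 0.1,
# and param_t = 1.0 - (0.001 / (1 - 0.9)) * (0.01 / (0.1 + 1e-8)) ~= 0.999.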
def adamax_sparse_update_numpy(param,
indices,
g_t,
t,
m,
v,
alpha=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8):
m_t, v_t, param_t = np.copy(m), np.copy(v), np.copy(param)
m_t_slice = beta1 * m[indices] + (1 - beta1) * g_t
v_t_slice = np.maximum(beta2 * v[indices], np.abs(g_t))
param_t_slice = param[indices] - ((alpha / (1 - beta1**t)) *
(m_t_slice / (v_t_slice + epsilon)))
m_t[indices] = m_t_slice
v_t[indices] = v_t_slice
param_t[indices] = param_t_slice
return param_t, m_t, v_t
class AdaMaxOptimizerTest(test.TestCase):
def doTestSparse(self, use_resource=False):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
# Initialize variables for numpy implementation.
zero_slots = lambda: np.zeros((3), dtype=dtype.as_numpy_dtype)
m0, v0, m1, v1 = zero_slots(), zero_slots(), zero_slots(), zero_slots()
var0_np = np.array([1.0, 2.0, 3.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([4.0, 5.0, 6.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
else:
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0_np_indices = np.array([0, 1], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np),
constant_op.constant(grads0_np_indices), constant_op.constant([2]))
grads1_np_indices = np.array([2, 1], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np),
constant_op.constant(grads1_np_indices), constant_op.constant([2]))
opt = adamax.AdaMaxOptimizer()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0, 3.0], var0.eval())
self.assertAllClose([4.0, 5.0, 6.0], var1.eval())
beta1_power = opt._get_beta_accumulators()
# Run 3 steps of AdaMax
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
update.run()
var0_np, m0, v0 = adamax_sparse_update_numpy(
var0_np, grads0_np_indices, grads0_np, t, m0, v0)
var1_np, m1, v1 = adamax_sparse_update_numpy(
var1_np, grads1_np_indices, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testSparse(self):
self.doTestSparse(use_resource=False)
def testResourceSparse(self):
self.doTestSparse(use_resource=True)
def testSparseDevicePlacement(self):
for index_dtype in [dtypes.int32, dtypes.int64]:
with self.cached_session(force_gpu=test.is_gpu_available()):
# If a GPU is available, tests that all optimizer ops can be placed on
# it (i.e. they have GPU kernels).
var = variables.Variable([[1.0], [2.0]])
indices = constant_op.constant([0, 1], dtype=index_dtype)
gathered_sum = math_ops.reduce_sum(array_ops.gather(var, indices))
optimizer = adamax.AdaMaxOptimizer(3.0)
minimize_op = optimizer.minimize(gathered_sum)
variables.global_variables_initializer().run()
minimize_op.run()
def testSparseRepeatedIndices(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
repeated_index_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update = adamax.AdaMaxOptimizer().apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adamax.AdaMaxOptimizer().apply_gradients(
[(grad_aggregated, aggregated_update_var)])
variables.global_variables_initializer().run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
def doTestBasic(self, use_resource=False):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.session(graph=ops.Graph()):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(
var0_np, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
var1_np, name="var1_%d" % i)
else:
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adamax.AdaMaxOptimizer()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
opt_variables = opt.variables()
beta1_power = opt._get_beta_accumulators()
self.assertTrue(beta1_power is not None)
self.assertIn(beta1_power, opt_variables)
if not context.executing_eagerly():
with ops.Graph().as_default():
# Shouldn't return non-slot variables from other graphs.
self.assertEqual(0, len(opt.variables()))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power = opt._get_beta_accumulators()
# Run 3 steps of AdaMax
for t in range(1, 4):
if not context.executing_eagerly():
self.evaluate(update)
elif t > 1:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta1_power))
var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0),
rtol=1e-2)
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1),
rtol=1e-2)
if use_resource:
self.assertEqual("var0_%d/AdaMax:0" % (i,),
opt.get_slot(var=var0, name="m").name)
def testBasic(self):
with self.cached_session():
self.doTestBasic(use_resource=False)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testResourceBasic(self):
self.doTestBasic(use_resource=True)
def testTensorLearningRate(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adamax.AdaMaxOptimizer(constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
beta1_power = opt._get_beta_accumulators()
# Run 3 steps of AdaMax
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
update.run()
var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adamax.AdaMaxOptimizer()
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
beta1_power = opt._get_beta_accumulators()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 3 steps of intertwined AdaMax1 and AdaMax2.
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
if t % 2 == 0:
update1.run()
else:
update2.run()
var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testTwoSessions(self):
optimizer = adamax.AdaMaxOptimizer()
g = ops.Graph()
with g.as_default():
with session.Session():
var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = constant_op.constant(np.array([0.1, 0.1]))
optimizer.apply_gradients([(grads0, var0)])
gg = ops.Graph()
with gg.as_default():
with session.Session():
var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = constant_op.constant(np.array([0.1, 0.1]))
# If the optimizer saves any state not keyed by graph the following line
# fails.
optimizer.apply_gradients([(grads0, var0)])
def testSlotsUniqueEager(self):
with context.eager_mode():
v1 = resource_variable_ops.ResourceVariable(1.)
v2 = resource_variable_ops.ResourceVariable(1.)
opt = adamax.AdaMaxOptimizer(1.)
opt.minimize(lambda: v1 + v2)
# There should be two non-slot variables, and two unique slot variables
# for v1 and v2 respectively.
self.assertEqual(5, len(set(opt.variables())))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/adamax_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow interface for third-party optimizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
__all__ = ['ExternalOptimizerInterface', 'ScipyOptimizerInterface']
class ExternalOptimizerInterface(object):
"""Base class for interfaces with external optimization algorithms.
Subclass this and implement `_minimize` in order to wrap a new optimization
algorithm.
`ExternalOptimizerInterface` should not be instantiated directly; instead use
e.g. `ScipyOptimizerInterface`.
@@__init__
@@minimize
"""
def __init__(self,
loss,
var_list=None,
equalities=None,
inequalities=None,
var_to_bounds=None,
**optimizer_kwargs):
"""Initialize a new interface instance.
Args:
loss: A scalar `Tensor` to be minimized.
var_list: Optional `list` of `Variable` objects to update to minimize
`loss`. Defaults to the list of variables collected in the graph
under the key `GraphKeys.TRAINABLE_VARIABLES`.
equalities: Optional `list` of equality constraint scalar `Tensor`s to be
held equal to zero.
inequalities: Optional `list` of inequality constraint scalar `Tensor`s
to be held nonnegative.
var_to_bounds: Optional `dict` where each key is an optimization
`Variable` and each corresponding value is a length-2 tuple of
`(low, high)` bounds. Although enforcing this kind of simple constraint
could be accomplished with the `inequalities` arg, not all optimization
algorithms support general inequality constraints, e.g. L-BFGS-B. Both
`low` and `high` can either be numbers or anything convertible to a
NumPy array that can be broadcast to the shape of `var` (using
`np.broadcast_to`). To indicate that there is no bound, use `None` (or
`+/- np.infty`). For example, if `var` is a 2x3 matrix, then any of
the following corresponding `bounds` could be supplied:
* `(0, np.infty)`: Each element of `var` held positive.
* `(-np.infty, [1, 2])`: First column less than 1, second column less
than 2.
* `(-np.infty, [[1], [2], [3]])`: First row less than 1, second row less
than 2, etc.
* `(-np.infty, [[1, 2, 3], [4, 5, 6]])`: Entry `var[0, 0]` less than 1,
`var[0, 1]` less than 2, etc.
**optimizer_kwargs: Other subclass-specific keyword arguments.
"""
self._loss = loss
self._equalities = equalities or []
self._inequalities = inequalities or []
if var_list is None:
self._vars = variables.trainable_variables()
else:
self._vars = list(var_list)
packed_bounds = None
if var_to_bounds is not None:
left_packed_bounds = []
right_packed_bounds = []
for var in self._vars:
shape = var.get_shape().as_list()
bounds = (-np.infty, np.infty)
if var in var_to_bounds:
bounds = var_to_bounds[var]
left_packed_bounds.extend(list(np.broadcast_to(bounds[0], shape).flat))
right_packed_bounds.extend(list(np.broadcast_to(bounds[1], shape).flat))
packed_bounds = list(zip(left_packed_bounds, right_packed_bounds))
self._packed_bounds = packed_bounds
self._update_placeholders = [
array_ops.placeholder(var.dtype) for var in self._vars
]
self._var_updates = [
var.assign(array_ops.reshape(placeholder, _get_shape_tuple(var)))
for var, placeholder in zip(self._vars, self._update_placeholders)
]
loss_grads = _compute_gradients(loss, self._vars)
equalities_grads = [
_compute_gradients(equality, self._vars)
for equality in self._equalities
]
inequalities_grads = [
_compute_gradients(inequality, self._vars)
for inequality in self._inequalities
]
self.optimizer_kwargs = optimizer_kwargs
self._packed_var = self._pack(self._vars)
self._packed_loss_grad = self._pack(loss_grads)
self._packed_equality_grads = [
self._pack(equality_grads) for equality_grads in equalities_grads
]
self._packed_inequality_grads = [
self._pack(inequality_grads) for inequality_grads in inequalities_grads
]
dims = [_prod(_get_shape_tuple(var)) for var in self._vars]
accumulated_dims = list(_accumulate(dims))
self._packing_slices = [
slice(start, end)
for start, end in zip(accumulated_dims[:-1], accumulated_dims[1:])
]
def minimize(self,
session=None,
feed_dict=None,
fetches=None,
step_callback=None,
loss_callback=None,
**run_kwargs):
"""Minimize a scalar `Tensor`.
Variables subject to optimization are updated in-place at the end of
optimization.
Note that this method does *not* just return a minimization `Op`, unlike
`Optimizer.minimize()`; instead it actually performs minimization by
executing commands to control a `Session`.
Args:
session: A `Session` instance.
feed_dict: A feed dict to be passed to calls to `session.run`.
fetches: A list of `Tensor`s to fetch and supply to `loss_callback`
as positional arguments.
step_callback: A function to be called at each optimization step;
arguments are the current values of all optimization variables
flattened into a single vector.
loss_callback: A function to be called every time the loss and gradients
are computed, with evaluated fetches supplied as positional arguments.
**run_kwargs: kwargs to pass to `session.run`.
"""
session = session or ops.get_default_session()
feed_dict = feed_dict or {}
fetches = fetches or []
loss_callback = loss_callback or (lambda *fetches: None)
step_callback = step_callback or (lambda xk: None)
# Construct loss function and associated gradient.
loss_grad_func = self._make_eval_func([self._loss,
self._packed_loss_grad], session,
feed_dict, fetches, loss_callback)
# Construct equality constraint functions and associated gradients.
equality_funcs = self._make_eval_funcs(self._equalities, session, feed_dict,
fetches)
equality_grad_funcs = self._make_eval_funcs(self._packed_equality_grads,
session, feed_dict, fetches)
# Construct inequality constraint functions and associated gradients.
inequality_funcs = self._make_eval_funcs(self._inequalities, session,
feed_dict, fetches)
inequality_grad_funcs = self._make_eval_funcs(self._packed_inequality_grads,
session, feed_dict, fetches)
# Get initial value from TF session.
initial_packed_var_val = session.run(self._packed_var)
# Perform minimization.
packed_var_val = self._minimize(
initial_val=initial_packed_var_val,
loss_grad_func=loss_grad_func,
equality_funcs=equality_funcs,
equality_grad_funcs=equality_grad_funcs,
inequality_funcs=inequality_funcs,
inequality_grad_funcs=inequality_grad_funcs,
packed_bounds=self._packed_bounds,
step_callback=step_callback,
optimizer_kwargs=self.optimizer_kwargs)
var_vals = [
packed_var_val[packing_slice] for packing_slice in self._packing_slices
]
# Set optimization variables to their new values.
session.run(
self._var_updates,
feed_dict=dict(zip(self._update_placeholders, var_vals)),
**run_kwargs)
def _minimize(self, initial_val, loss_grad_func, equality_funcs,
equality_grad_funcs, inequality_funcs, inequality_grad_funcs,
packed_bounds, step_callback, optimizer_kwargs):
"""Wrapper for a particular optimization algorithm implementation.
It would be appropriate for a subclass implementation of this method to
raise `NotImplementedError` if unsupported arguments are passed: e.g. if an
algorithm does not support constraints but `len(equality_funcs) > 0`.
Args:
initial_val: A NumPy vector of initial values.
loss_grad_func: A function accepting a NumPy packed variable vector and
returning two outputs, a loss value and the gradient of that loss with
respect to the packed variable vector.
equality_funcs: A list of functions each of which specifies a scalar
quantity that an optimizer should hold exactly zero.
equality_grad_funcs: A list of gradients of equality_funcs.
inequality_funcs: A list of functions each of which specifies a scalar
quantity that an optimizer should hold >= 0.
inequality_grad_funcs: A list of gradients of inequality_funcs.
packed_bounds: A list of bounds for each index, or `None`.
step_callback: A callback function to execute at each optimization step,
supplied with the current value of the packed variable vector.
optimizer_kwargs: Other key-value arguments available to the optimizer.
Returns:
The optimal variable vector as a NumPy vector.
"""
raise NotImplementedError(
'To use ExternalOptimizerInterface, subclass from it and implement '
'the _minimize() method.')
@classmethod
def _pack(cls, tensors):
"""Pack a list of `Tensor`s into a single, flattened, rank-1 `Tensor`."""
if not tensors:
return None
elif len(tensors) == 1:
return array_ops.reshape(tensors[0], [-1])
else:
flattened = [array_ops.reshape(tensor, [-1]) for tensor in tensors]
return array_ops.concat(flattened, 0)
def _make_eval_func(self, tensors, session, feed_dict, fetches,
callback=None):
"""Construct a function that evaluates a `Tensor` or list of `Tensor`s."""
if not isinstance(tensors, list):
tensors = [tensors]
num_tensors = len(tensors)
def eval_func(x):
"""Function to evaluate a `Tensor`."""
augmented_feed_dict = {
var: x[packing_slice].reshape(_get_shape_tuple(var))
for var, packing_slice in zip(self._vars, self._packing_slices)
}
augmented_feed_dict.update(feed_dict)
augmented_fetches = tensors + fetches
augmented_fetch_vals = session.run(
augmented_fetches, feed_dict=augmented_feed_dict)
if callable(callback):
callback(*augmented_fetch_vals[num_tensors:])
return augmented_fetch_vals[:num_tensors]
return eval_func
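  # Note (illustrative, not in the original file): feeding each Variable
  # directly in the feed_dict overrides its value for that single run, so the
  # loss and gradients are evaluated at the candidate point x without
  # assigning to the variables.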
def _make_eval_funcs(self,
tensors,
session,
feed_dict,
fetches,
callback=None):
return [
self._make_eval_func(tensor, session, feed_dict, fetches, callback)
for tensor in tensors
]
class ScipyOptimizerInterface(ExternalOptimizerInterface):
"""Wrapper allowing `scipy.optimize.minimize` to operate a `tf.compat.v1.Session`.
Example:
```python
vector = tf.Variable([7., 7.], 'vector')
# Make vector norm as small as possible.
loss = tf.reduce_sum(tf.square(vector))
optimizer = ScipyOptimizerInterface(loss, options={'maxiter': 100})
with tf.compat.v1.Session() as session:
optimizer.minimize(session)
# The value of vector should now be [0., 0.].
```
Example with simple bound constraints:
```python
vector = tf.Variable([7., 7.], 'vector')
# Make vector norm as small as possible.
loss = tf.reduce_sum(tf.square(vector))
optimizer = ScipyOptimizerInterface(
loss, var_to_bounds={vector: ([1, 2], np.infty)})
with tf.compat.v1.Session() as session:
optimizer.minimize(session)
# The value of vector should now be [1., 2.].
```
Example with more complicated constraints:
```python
vector = tf.Variable([7., 7.], 'vector')
# Make vector norm as small as possible.
loss = tf.reduce_sum(tf.square(vector))
# Ensure the vector's y component is = 1.
equalities = [vector[1] - 1.]
# Ensure the vector's x component is >= 1.
inequalities = [vector[0] - 1.]
# Our default SciPy optimization algorithm, L-BFGS-B, does not support
# general constraints. Thus we use SLSQP instead.
optimizer = ScipyOptimizerInterface(
loss, equalities=equalities, inequalities=inequalities, method='SLSQP')
with tf.compat.v1.Session() as session:
optimizer.minimize(session)
# The value of vector should now be [1., 1.].
```
"""
_DEFAULT_METHOD = 'L-BFGS-B'
def _minimize(self, initial_val, loss_grad_func, equality_funcs,
equality_grad_funcs, inequality_funcs, inequality_grad_funcs,
packed_bounds, step_callback, optimizer_kwargs):
def loss_grad_func_wrapper(x):
# SciPy's L-BFGS-B Fortran implementation requires gradients as doubles.
loss, gradient = loss_grad_func(x)
return loss, gradient.astype('float64')
optimizer_kwargs = dict(optimizer_kwargs.items())
method = optimizer_kwargs.pop('method', self._DEFAULT_METHOD)
constraints = []
for func, grad_func in zip(equality_funcs, equality_grad_funcs):
constraints.append({'type': 'eq', 'fun': func, 'jac': grad_func})
for func, grad_func in zip(inequality_funcs, inequality_grad_funcs):
constraints.append({'type': 'ineq', 'fun': func, 'jac': grad_func})
minimize_args = [loss_grad_func_wrapper, initial_val]
minimize_kwargs = {
'jac': True,
'callback': step_callback,
'method': method,
'constraints': constraints,
'bounds': packed_bounds,
}
for kwarg in minimize_kwargs:
if kwarg in optimizer_kwargs:
if kwarg == 'bounds':
          # Special handling for the 'bounds' kwarg since the ability to
          # specify bounds was added after this module was publicly released.
raise ValueError(
'Bounds must be set using the var_to_bounds argument')
raise ValueError(
'Optimizer keyword arg \'{}\' is set '
'automatically and cannot be injected manually'.format(kwarg))
minimize_kwargs.update(optimizer_kwargs)
import scipy.optimize # pylint: disable=g-import-not-at-top
result = scipy.optimize.minimize(*minimize_args, **minimize_kwargs)
message_lines = [
'Optimization terminated with:',
' Message: %s',
' Objective function value: %f',
]
message_args = [result.message, result.fun]
if hasattr(result, 'nit'):
      # Some optimization methods might not provide information such as nit
      # and nfev in the result. Log only the information that is available.
message_lines.append(' Number of iterations: %d')
message_args.append(result.nit)
if hasattr(result, 'nfev'):
      message_lines.append(' Number of function evaluations: %d')
message_args.append(result.nfev)
logging.info('\n'.join(message_lines), *message_args)
return result['x']
def _accumulate(list_):
total = 0
yield total
for x in list_:
total += x
yield total
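# Illustrative note (not in the original file): _accumulate yields running
# offsets, e.g. list(_accumulate([2, 3])) == [0, 2, 5]; consecutive offsets
# become the slices used to unpack the flat packed variable vector.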
def _get_shape_tuple(tensor):
return tuple(tensor.get_shape().as_list())
def _prod(array):
prod = 1
for value in array:
prod *= value
return prod
def _compute_gradients(tensor, var_list):
grads = gradients.gradients(tensor, var_list)
# tf.gradients sometimes returns `None` when it should return 0.
return [
grad if grad is not None else array_ops.zeros_like(var)
for var, grad in zip(var_list, grads)
]
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/external_optimizer.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layer-wise Adaptive Rate Scaling optimizer for large-batch training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class LARSOptimizer(optimizer.Optimizer):
"""Layer-wise Adaptive Rate Scaling for large batch training.
Introduced by "Large Batch Training of Convolutional Networks" by Y. You,
I. Gitman, and B. Ginsburg. (https://arxiv.org/abs/1708.03888)
Implements the LARS learning rate scheme presented in the paper above. This
optimizer is useful when scaling the batch size to up to 32K without
significant performance degradation. It is recommended to use the optimizer
in conjunction with:
- Gradual learning rate warm-up
- Linear learning rate scaling
- Poly rule learning rate decay
  Note that LARS scaling is currently only enabled for dense tensors; sparse
  tensors fall back to the default momentum optimizer.
"""
def __init__(
self,
learning_rate,
momentum=0.9,
weight_decay=0.0001,
# The LARS coefficient is a hyperparameter
eeta=0.001,
epsilon=0.0,
name="LARSOptimizer",
# Enable skipping variables from LARS scaling.
# TODO(sameerkm): Enable a direct mechanism to pass a
# subset of variables to the optimizer.
skip_list=None,
use_nesterov=False):
"""Construct a new LARS Optimizer.
Args:
learning_rate: A `Tensor` or floating point value. The base learning rate.
momentum: A floating point value. Momentum hyperparameter.
weight_decay: A floating point value. Weight decay hyperparameter.
      eeta: LARS coefficient as used in the paper. Defaults to the value from
        the paper. (eeta / weight_decay) determines the highest scaling factor
        in LARS.
epsilon: Optional epsilon parameter to be set in models that have very
small gradients. Default set to 0.0.
name: Optional name prefix for variables and ops created by LARSOptimizer.
skip_list: List of strings to enable skipping variables from LARS scaling.
If any of the strings in skip_list is a subset of var.name, variable
'var' is skipped from LARS scaling. For a typical classification model
with batch normalization, the skip_list is ['batch_normalization',
'bias']
      use_nesterov: When set to True, Nesterov momentum is enabled.
Raises:
ValueError: If a hyperparameter is set to a non-sensical value.
"""
    if momentum < 0.0:
      raise ValueError("momentum should be non-negative: %s" % momentum)
    if weight_decay < 0.0:
      raise ValueError("weight_decay should be non-negative: %s" % weight_decay)
super(LARSOptimizer, self).__init__(use_locking=False, name=name)
self._learning_rate = learning_rate
self._momentum = momentum
self._weight_decay = weight_decay
self._eeta = eeta
self._epsilon = epsilon
self._name = name
self._skip_list = skip_list
self._use_nesterov = use_nesterov
def _create_slots(self, var_list):
for v in var_list:
self._zeros_slot(v, "momentum", self._name)
def compute_lr(self, grad, var):
scaled_lr = self._learning_rate
if self._skip_list is None or not any(v in var.name
for v in self._skip_list):
w_norm = linalg_ops.norm(var, ord=2)
g_norm = linalg_ops.norm(grad, ord=2)
trust_ratio = array_ops.where(
math_ops.greater(w_norm, 0),
array_ops.where(
math_ops.greater(g_norm, 0),
(self._eeta * w_norm /
(g_norm + self._weight_decay * w_norm + self._epsilon)), 1.0),
1.0)
scaled_lr = self._learning_rate * trust_ratio
# Add the weight regularization gradient
grad = grad + self._weight_decay * var
return scaled_lr, grad
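  # Worked example (illustrative, not in the original file): with eeta=0.001,
  # weight_decay=0.0001, ||w|| = 10 and ||g|| = 1, compute_lr's trust ratio is
  # 0.001 * 10 / (1 + 0.0001 * 10 + 0) ~= 0.00999, so each layer's step is
  # rescaled by its own weight and gradient norms.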
def _apply_dense(self, grad, var):
scaled_lr, grad = self.compute_lr(grad, var)
mom = self.get_slot(var, "momentum")
return training_ops.apply_momentum(
var,
mom,
math_ops.cast(1.0, var.dtype.base_dtype),
grad * scaled_lr,
self._momentum,
use_locking=False,
use_nesterov=self._use_nesterov)
def _resource_apply_dense(self, grad, var):
scaled_lr, grad = self.compute_lr(grad, var)
mom = self.get_slot(var, "momentum")
return training_ops.resource_apply_momentum(
var.handle,
mom.handle,
math_ops.cast(1.0, var.dtype.base_dtype),
grad * scaled_lr,
self._momentum,
use_locking=False,
use_nesterov=self._use_nesterov)
# Fallback to momentum optimizer for sparse tensors
def _apply_sparse(self, grad, var):
mom = self.get_slot(var, "momentum")
return training_ops.sparse_apply_momentum(
var,
mom,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad.values,
grad.indices,
math_ops.cast(self._momentum_tensor, var.dtype.base_dtype),
use_locking=self._use_locking,
use_nesterov=self._use_nesterov).op
def _resource_apply_sparse(self, grad, var, indices):
mom = self.get_slot(var, "momentum")
return training_ops.resource_sparse_apply_momentum(
var.handle,
mom.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
grad,
indices,
math_ops.cast(self._momentum_tensor, grad.dtype),
use_locking=self._use_locking,
use_nesterov=self._use_nesterov)
def _prepare(self):
learning_rate = self._learning_rate
if callable(learning_rate):
learning_rate = learning_rate()
self._learning_rate_tensor = ops.convert_to_tensor(
learning_rate, name="learning_rate")
momentum = self._momentum
if callable(momentum):
momentum = momentum()
self._momentum_tensor = ops.convert_to_tensor(momentum, name="momentum")
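# What follows is an editor-added usage sketch, not part of the original
# library file: a minimal example, assuming TF 1.x graph mode (tf.compat.v1),
# that takes a single LARS step on a toy quadratic loss.
if __name__ == "__main__":
  import tensorflow.compat.v1 as tf
  tf.disable_eager_execution()
  toy_w = tf.get_variable("toy_w", initializer=tf.ones([4]))
  toy_loss = tf.reduce_sum(tf.square(toy_w))
  # "bias" in skip_list is illustrative: variables whose names contain it
  # are updated with plain momentum, without LARS scaling.
  toy_opt = LARSOptimizer(learning_rate=0.1, skip_list=["bias"])
  toy_train_op = toy_opt.minimize(toy_loss)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print("loss before:", sess.run(toy_loss))
    sess.run(toy_train_op)
    print("loss after one LARS step:", sess.run(toy_loss))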
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/lars_optimizer.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper optimizer for Model Average."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import optimizer
from tensorflow.python.training import session_run_hook
GLOBAL_VARIABLE_NAME = "global_center_variable"
class ModelAverageCustomGetter(object):
"""Custom_getter class is used to do.
1. Change trainable variables to local collection and place them at worker
device
2. Generate global variables
  Note that the class should be used with tf.replica_device_setter, so that
  the global center variables and the global step variable can be placed on
  the ps device. Also, use 'tf.compat.v1.get_variable' instead of
  'tf.Variable' so that this custom getter takes effect.
For example,
ma_custom_getter = ModelAverageCustomGetter(worker_device)
with tf.device(
tf.compat.v1.train.replica_device_setter(
worker_device=worker_device,
ps_device="/job:ps/cpu:0",
cluster=cluster)),
      tf.compat.v1.variable_scope('', custom_getter=ma_custom_getter):
hid_w = tf.compat.v1.get_variable(
initializer=tf.random.truncated_normal(
[IMAGE_PIXELS * IMAGE_PIXELS, FLAGS.hidden_units],
stddev=1.0 / IMAGE_PIXELS),
name="hid_w")
    hid_b = tf.compat.v1.get_variable(
        initializer=tf.zeros([FLAGS.hidden_units]), name="hid_b")
"""
def __init__(self, worker_device):
"""Create a new `ModelAverageCustomGetter`.
Args:
worker_device: String. Name of the `worker` job.
"""
self._worker_device = worker_device
self._local_2_global = {}
def __call__(self, getter, name, trainable, collections, *args, **kwargs):
if trainable:
with ops.device(self._worker_device):
local_var = getter(
name,
trainable=True,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
*args,
**kwargs)
global_variable = variable_scope.variable(
name="%s/%s" % (GLOBAL_VARIABLE_NAME, name),
initial_value=local_var.initialized_value(),
trainable=False,
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
self._local_2_global[local_var] = global_variable
return local_var
else:
kwargs["trainable"] = trainable
kwargs["collections"] = collections
if ops.GraphKeys.LOCAL_VARIABLES in collections:
with ops.device(self._worker_device):
return getter(name, *args, **kwargs)
else:
return getter(name, *args, **kwargs)
class ModelAverageOptimizer(optimizer.Optimizer):
"""Wrapper optimizer that implements the Model Average algorithm.
  This is a sync optimizer. During training, each worker updates its local
  variables and maintains its own local_step, which starts from 0 and is
  incremented by 1 after each update of the local variables. Whenever
  interval_steps divides the local step, the local variables from all the
  workers are averaged and assigned to the global center variables. Then the
  local variables are assigned the values of the global center variables.
"""
def __init__(self,
opt,
num_worker,
is_chief,
ma_custom_getter,
interval_steps=100,
use_locking=True,
name="ModelAverageOptimizer"):
"""Construct a new model average optimizer.
Args:
      opt: The actual optimizer that will be used to update local variables.
      num_worker: The number of workers.
      is_chief: Whether this is the chief worker.
      ma_custom_getter: A `ModelAverageCustomGetter`.
      interval_steps: An int value that controls the frequency of averaging
        local variables.
      use_locking: If True use locks for update operations.
      name: String. Optional name of the returned operation.
"""
super(ModelAverageOptimizer, self).__init__(use_locking, name)
self._opt = opt
self._num_worker = num_worker
self._is_chief = is_chief
self._local_2_global = ma_custom_getter._local_2_global # pylint:disable=protected-access
self._interval_steps = interval_steps
self._accumulator_list = []
self._chief_init_op = None
self._local_step = variable_scope.get_variable(
initializer=0,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="local_step")
self._opt._prepare() # pylint:disable=protected-access
def compute_gradients(self, *args, **kwargs):
"""Compute gradients of "loss" for the variables in "var_list".
This simply wraps the compute_gradients() from the real optimizer.
Args:
*args: Arguments for compute_gradients().
**kwargs: Keyword arguments for compute_gradients().
Returns:
A list of (gradient, variable) pairs.
"""
return self._opt.compute_gradients(*args, **kwargs)
def _local_vars_update(self, var_list):
"""Get the update ops for the local variables in "var_list".
Args:
var_list: Optional list or tuple of 'tf.Variable' to update
Returns:
An update op
Raises:
ValueError: if var_list is empty.
"""
if not var_list:
raise ValueError("The list of local_variables should not be empty")
update_ops = []
global_center_vars = [self._local_2_global[var] for var in var_list]
for lvar, gvar in zip(var_list, global_center_vars):
with ops.device(lvar.device):
update_ops.append(state_ops.assign(lvar, gvar.read_value()))
return control_flow_ops.group(*(update_ops))
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients to variables.
    This contains most of the synchronization implementation and also wraps
    the apply_gradients() from the real optimizer. The chief worker updates
    the global variables.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
compute_gradients().
global_step: Optional Variable to increment by one after the variables
have been updated.
name: Optional name for the returned operation. Default to the name
passed to the Optimizer constructor.
Returns:
      A conditional 'Operation' that updates both local and global variables
      or just local variables.
Raises:
      ValueError: If grads_and_vars is empty.
      ValueError: If global_step is not provided, since without it the
        staleness cannot be checked.
"""
# update local variables
if not grads_and_vars:
raise ValueError("Must supply at least one variable")
if global_step is None:
raise ValueError("Global step is required")
apply_updates = self._opt.apply_gradients(grads_and_vars)
with ops.control_dependencies([apply_updates]):
local_update = state_ops.assign_add(
self._local_step, 1, name="local_step_update").op
# update global variables.
def _update_global_variables(): # pylint: disable=missing-docstring
local_vars = [v for g, v in grads_and_vars if g is not None]
global_vars = [self._local_2_global[v] for v in local_vars]
# sync queue
with ops.colocate_with(global_step):
sync_queue = data_flow_ops.FIFOQueue(
-1, [dtypes.bool], shapes=[[]], shared_name="sync_queue")
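      # Protocol sketch: every worker pushes its local variables into the
      # per-variable accumulators built below; the chief takes the averaged
      # values, writes them into the global center variables, and enqueues
      # num_worker - 1 tokens, while each non-chief worker dequeues one token
      # before copying the new centers back into its local variables.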
train_ops = []
aggregated_vars = []
with ops.name_scope(None, self._name + "/global"):
for var, gvar in zip(local_vars, global_vars):
# pylint: disable=protected-access
with ops.device(gvar.device):
if isinstance(var._ref(), ops.Tensor):
var_accum = data_flow_ops.ConditionalAccumulator(
var.dtype,
shape=var.get_shape(),
shared_name=gvar.name + "/var_accum")
train_ops.append(
var_accum.apply_grad(var._ref(), local_step=global_step))
aggregated_vars.append(var_accum.take_grad(self._num_worker))
else:
raise ValueError("Unknown local variable type!")
self._accumulator_list.append((var_accum, gvar.device))
# chief worker updates global vars and enqueues tokens to the sync queue
if self._is_chief:
update_ops = []
with ops.control_dependencies(train_ops):
for avg_var, gvar in zip(aggregated_vars, global_vars):
with ops.device(gvar.device):
update_ops.append(state_ops.assign(gvar, avg_var))
with ops.device(global_step.device):
update_ops.append(state_ops.assign_add(global_step, 1))
with ops.control_dependencies(update_ops), ops.device(
global_step.device):
tokens = array_ops.fill([self._num_worker - 1],
constant_op.constant(False))
sync_op = sync_queue.enqueue_many(tokens)
else:
with ops.control_dependencies(train_ops), ops.device(
global_step.device):
sync_op = sync_queue.dequeue()
with ops.control_dependencies([sync_op]):
local_update_op = self._local_vars_update(local_vars)
return local_update_op
with ops.control_dependencies([local_update]):
condition = math_ops.equal(
math_ops.mod(self._local_step, self._interval_steps), 0)
conditional_update = control_flow_ops.cond(condition,
_update_global_variables,
control_flow_ops.no_op)
chief_init_ops = []
for accum, dev in self._accumulator_list:
with ops.device(dev):
chief_init_ops.append(
accum.set_global_step(global_step, name="SetGlobalStep"))
self._chief_init_op = control_flow_ops.group(*(chief_init_ops))
return conditional_update
def get_init_op(self):
"""Returns the op.
This method lets all the local variables equal to the global
variables before the training begins.
"""
return self._local_vars_update(variables.trainable_variables())
def make_session_run_hook(self):
"""Creates a hook to handle ModelAverage ops such as initialization."""
return _ModelAverageOptimizerHook(self, self._is_chief)
class _ModelAverageOptimizerHook(session_run_hook.SessionRunHook): # pylint: disable=missing-docstring
def __init__(self, ma_optimizer, is_chief):
"""Creates hook to handle ModelAverageOptimizer initialization ops.
Args:
ma_optimizer: `ModelAverageOptimizer` which this hook will initialize.
      is_chief: `Bool`, whether this is a chief replica or not.
"""
self._ma_optimizer = ma_optimizer
self._is_chief = is_chief
def begin(self):
self._local_init_op = variables.local_variables_initializer()
self._global_init_op = None
if self._is_chief:
self._global_init_op = variables.global_variables_initializer()
self._chief_init_op = self._ma_optimizer._chief_init_op # pylint: disable=protected-access
self._variable_init_op = self._ma_optimizer.get_init_op()
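# What follows is an editor-added sketch, not part of the original file: a
# minimal single-process example, assuming TF 1.x graph mode and no ps/worker
# cluster, showing how the custom getter reroutes a trainable variable.
if __name__ == "__main__":
  import tensorflow.compat.v1 as tf
  tf.disable_eager_execution()
  ma_getter = ModelAverageCustomGetter(worker_device="/cpu:0")
  with tf.variable_scope("demo", custom_getter=ma_getter):
    local_w = tf.get_variable("w", initializer=tf.zeros([3]))
  # The trainable variable was placed in the LOCAL_VARIABLES collection, and
  # a non-trainable global center copy was created alongside it.
  global_w = ma_getter._local_2_global[local_w]  # pylint: disable=protected-access
  print("local:", local_w.name, "-> global center:", global_w.name)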
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/model_average_optimizer.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class to make optimizers weight decay ready."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.opt.python.training import shampoo
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import adam
from tensorflow.python.training import momentum as momentum_opt
from tensorflow.python.training import optimizer
from tensorflow.python.util.tf_export import tf_export
from tensorflow.python.ops import array_ops
class DecoupledWeightDecayExtension(object):
"""This class allows to extend optimizers with decoupled weight decay.
It implements the decoupled weight decay described by Loshchilov & Hutter
(https://arxiv.org/pdf/1711.05101.pdf), in which the weight decay is
decoupled from the optimization steps w.r.t. to the loss function.
For SGD variants, this simplifies hyperparameter search since it decouples
the settings of weight decay and learning rate.
For adaptive gradient algorithms, it regularizes variables with large
gradients more than L2 regularization would, which was shown to yield better
training loss and generalization error in the paper above.
This class alone is not an optimizer but rather extends existing
optimizers with decoupled weight decay. We explicitly define the two examples
used in the above paper (SGDW and AdamW), but in general this can extend
any OptimizerX by using
`extend_with_weight_decay(OptimizerX, weight_decay=weight_decay)`.
In order for it to work, it must be the first class the Optimizer with
weight decay inherits from, e.g.
```python
class AdamWOptimizer(DecoupledWeightDecayExtension, adam.AdamOptimizer):
def __init__(self, weight_decay, *args, **kwargs):
      super(AdamWOptimizer, self).__init__(weight_decay, *args, **kwargs)
```
Note that this extension decays weights BEFORE applying the update based
on the gradient, i.e. this extension only has the desired behaviour for
  optimizers which do not depend on the value of 'var' in the update step!
Note: when applying a decay to the learning rate, be sure to manually apply
the decay to the `weight_decay` as well. For example:
```python
  schedule = tf.compat.v1.train.piecewise_constant(
      tf.compat.v1.train.get_global_step(), [10000, 15000], [1e-0, 1e-1, 1e-2])
  lr = 1e-1 * schedule
  wd = lambda: 1e-4 * schedule
# ...
optimizer = tf.contrib.opt.MomentumWOptimizer(learning_rate=lr,
weight_decay=wd,
momentum=0.9,
use_nesterov=True)
```
"""
def __init__(self, weight_decay, **kwargs):
"""Construct the extension class that adds weight decay to an optimizer.
Args:
weight_decay: A `Tensor` or a floating point value, the factor by which a
variable is decayed in the update step.
      **kwargs: Keyword arguments passed on to the constructor of the base
        optimizer class.
"""
self._decay_var_list = None # is set in minimize or apply_gradients
self._weight_decay = weight_decay
# The tensors are initialized in call to _prepare
self._weight_decay_tensor = None
super(DecoupledWeightDecayExtension, self).__init__(**kwargs)
def minimize(self,
loss,
global_step=None,
var_list=None,
gate_gradients=optimizer.Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
name=None,
grad_loss=None,
decay_var_list=None):
"""Add operations to minimize `loss` by updating `var_list` with decay.
    This function is the same as Optimizer.minimize except that it allows
    specifying the variables that should be decayed using decay_var_list.
If decay_var_list is None, all variables in var_list are decayed.
For more information see the documentation of Optimizer.minimize.
Args:
loss: A `Tensor` containing the value to minimize.
global_step: Optional `Variable` to increment by one after the variables
have been updated.
var_list: Optional list or tuple of `Variable` objects to update to
minimize `loss`. Defaults to the list of variables collected in the
graph under the key `GraphKeys.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with the
corresponding op.
name: Optional name for the returned operation.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
decay_var_list: Optional list of decay variables.
Returns:
An Operation that updates the variables in `var_list`. If `global_step`
was not `None`, that operation also increments `global_step`.
"""
self._decay_var_list = set(decay_var_list) if decay_var_list else False
return super(DecoupledWeightDecayExtension, self).minimize(
loss,
global_step=global_step,
var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
name=name,
grad_loss=grad_loss)
def apply_gradients(self,
grads_and_vars,
global_step=None,
name=None,
decay_var_list=None):
"""Apply gradients to variables and decay the variables.
    This function is the same as Optimizer.apply_gradients except that it
    allows specifying the variables that should be decayed using
    decay_var_list. If decay_var_list is None, all variables in var_list
    are decayed.
For more information see the documentation of Optimizer.apply_gradients.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
`compute_gradients()`.
global_step: Optional `Variable` to increment by one after the variables
have been updated.
name: Optional name for the returned operation. Default to the name
passed to the `Optimizer` constructor.
decay_var_list: Optional list of decay variables.
Returns:
An `Operation` that applies the specified gradients. If `global_step`
was not None, that operation also increments `global_step`.
"""
self._decay_var_list = set(decay_var_list) if decay_var_list else False
return super(DecoupledWeightDecayExtension, self).apply_gradients(
grads_and_vars, global_step=global_step, name=name)
def _prepare(self):
weight_decay = self._weight_decay
if callable(weight_decay):
weight_decay = weight_decay()
self._weight_decay_tensor = ops.convert_to_tensor(
weight_decay, name="weight_decay")
# Call the optimizers _prepare function.
super(DecoupledWeightDecayExtension, self)._prepare()
def _decay_weights_op(self, var):
if not self._decay_var_list or var in self._decay_var_list:
return var.assign_sub(self._weight_decay * var, self._use_locking)
return control_flow_ops.no_op()
def _decay_weights_sparse_op(self, var, indices, scatter_add):
if not self._decay_var_list or var in self._decay_var_list:
update = -self._weight_decay * array_ops.gather(var, indices)
return scatter_add(var, indices, update, self._use_locking)
return control_flow_ops.no_op()
# Here, we overwrite the apply functions that the base optimizer calls.
# super().apply_x resolves to the apply_x function of the BaseOptimizer.
def _apply_dense(self, grad, var):
with ops.control_dependencies([self._decay_weights_op(var)]):
return super(DecoupledWeightDecayExtension, self)._apply_dense(grad, var)
def _resource_apply_dense(self, grad, var):
with ops.control_dependencies([self._decay_weights_op(var)]):
return super(DecoupledWeightDecayExtension,
self)._resource_apply_dense(grad, var)
def _apply_sparse(self, grad, var):
scatter_add = state_ops.scatter_add
decay_op = self._decay_weights_sparse_op(var, grad.indices, scatter_add)
with ops.control_dependencies([decay_op]):
return super(DecoupledWeightDecayExtension, self)._apply_sparse(grad, var)
def _resource_scatter_add(self, x, i, v, _=None):
# last argument allows for one overflow argument, to have the same function
# signature as state_ops.scatter_add
with ops.control_dependencies(
[resource_variable_ops.resource_scatter_add(x.handle, i, v)]):
return x.value()
def _resource_apply_sparse(self, grad, var, indices):
scatter_add = self._resource_scatter_add
decay_op = self._decay_weights_sparse_op(var, indices, scatter_add)
with ops.control_dependencies([decay_op]):
return super(DecoupledWeightDecayExtension,
self)._resource_apply_sparse(grad, var, indices)
def extend_with_decoupled_weight_decay(base_optimizer):
"""Factory function returning an optimizer class with decoupled weight decay.
Returns an optimizer class. An instance of the returned class computes the
update step of `base_optimizer` and additionally decays the weights.
E.g., the class returned by
`extend_with_decoupled_weight_decay(tf.compat.v1.train.AdamOptimizer)` is
equivalent to
`tf.contrib.opt.AdamWOptimizer`.
The API of the new optimizer class slightly differs from the API of the
base optimizer:
- The first argument to the constructor is the weight decay rate.
- `minimize` and `apply_gradients` accept the optional keyword argument
`decay_var_list`, which specifies the variables that should be decayed.
If `None`, all variables that are optimized are decayed.
Usage example:
```python
# MyAdamW is a new class
MyAdamW = extend_with_decoupled_weight_decay(tf.compat.v1.train.AdamOptimizer)
# Create a MyAdamW object
optimizer = MyAdamW(weight_decay=0.001, learning_rate=0.001)
  sess.run(optimizer.minimize(loss, decay_var_list=[var1, var2]))
  ```
  Note that this extension decays weights BEFORE applying the update based
  on the gradient, i.e. this extension only has the desired behaviour for
  optimizers which do not depend on the value of 'var' in the update step!
Args:
base_optimizer: An optimizer class that inherits from tf.train.Optimizer.
Returns:
A new optimizer class that inherits from DecoupledWeightDecayExtension
and base_optimizer.
"""
class OptimizerWithDecoupledWeightDecay(DecoupledWeightDecayExtension,
base_optimizer):
"""Base_optimizer with decoupled weight decay.
This class computes the update step of `base_optimizer` and
additionally decays the variable with the weight decay being decoupled from
  the optimization steps w.r.t. the loss function, as described by
Loshchilov & Hutter (https://arxiv.org/pdf/1711.05101.pdf).
For SGD variants, this simplifies hyperparameter search since
it decouples the settings of weight decay and learning rate.
For adaptive gradient algorithms, it regularizes variables with large
gradients more than L2 regularization would, which was shown to yield
better training loss and generalization error in the paper above.
"""
def __init__(self, weight_decay, *args, **kwargs):
# super delegation is necessary here
# pylint: disable=useless-super-delegation
super(OptimizerWithDecoupledWeightDecay,
self).__init__(weight_decay, *args, **kwargs)
# pylint: enable=useless-super-delegation
return OptimizerWithDecoupledWeightDecay
@tf_export("contrib.opt.MomentumWOptimizer")
class MomentumWOptimizer(DecoupledWeightDecayExtension,
momentum_opt.MomentumOptimizer):
"""Optimizer that implements the Momentum algorithm with weight_decay.
This is an implementation of the SGDW optimizer described in "Fixing
Weight Decay Regularization in Adam" by Loshchilov & Hutter
(https://arxiv.org/abs/1711.05101)
  ([pdf](https://arxiv.org/pdf/1711.05101.pdf)).
It computes the update step of `train.MomentumOptimizer` and additionally
decays the variable. Note that this is different from adding
L2 regularization on the variables to the loss. Decoupling the weight decay
from other hyperparameters (in particular the learning rate) simplifies
hyperparameter search.
For further information see the documentation of the Momentum Optimizer.
Note that this optimizer can also be instantiated as
```python
  MomentumW = extend_with_decoupled_weight_decay(
      tf.compat.v1.train.MomentumOptimizer)
```
"""
def __init__(self,
weight_decay,
learning_rate,
momentum,
use_locking=False,
name="MomentumW",
use_nesterov=False):
"""Construct a new MomentumW optimizer.
For further information see the documentation of the Momentum Optimizer.
Args:
weight_decay: A `Tensor` or a floating point value. The weight decay.
learning_rate: A `Tensor` or a floating point value. The learning rate.
momentum: A `Tensor` or a floating point value. The momentum.
use_locking: If `True` use locks for update operations.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "MomentumW".
use_nesterov: If `True` use Nesterov Momentum. See [Sutskever et al.,
2013](
http://jmlr.org/proceedings/papers/v28/sutskever13.pdf). This
implementation always computes gradients at the value of the
variable(s) passed to the optimizer. Using Nesterov Momentum makes the
variable(s) track the values called `theta_t + mu*v_t` in the paper.
@compatibility(eager) When eager execution is enabled, learning_rate,
weight_decay and momentum can each be a callable that takes no
arguments and returns the actual value to use. This can be useful for
changing these values across different invocations of optimizer
functions. @end_compatibility
"""
super(MomentumWOptimizer, self).__init__(
weight_decay,
learning_rate=learning_rate,
momentum=momentum,
use_locking=use_locking,
name=name,
use_nesterov=use_nesterov)
@tf_export("contrib.opt.AdamWOptimizer")
class AdamWOptimizer(DecoupledWeightDecayExtension, adam.AdamOptimizer):
"""Optimizer that implements the Adam algorithm with weight decay.
This is an implementation of the AdamW optimizer described in ["Fixing
Weight Decay Regularization in Adam" by Loshchilov & Hutter]
(https://arxiv.org/abs/1711.05101)
([pdf](https://arxiv.org/pdf/1711.05101.pdf)).
It computes the update step of `train.AdamOptimizer` and additionally decays
the variable. Note that this is different from adding L2 regularization on
the variables to the loss: it regularizes variables with large
gradients more than L2 regularization would, which was shown to yield better
training loss and generalization error in the paper above.
For further information see the documentation of the Adam Optimizer.
Note that this optimizer can also be instantiated as
```python
  AdamW = extend_with_decoupled_weight_decay(
      tf.compat.v1.train.AdamOptimizer)
```
"""
def __init__(self,
weight_decay,
learning_rate=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8,
use_locking=False,
name="AdamW"):
"""Construct a new AdamW optimizer.
For further information see the documentation of the Adam Optimizer.
Args:
weight_decay: A `Tensor` or a floating point value. The weight decay.
learning_rate: A Tensor or a floating point value. The learning rate.
beta1: A float value or a constant float tensor. The exponential decay
rate for the 1st moment estimates.
beta2: A float value or a constant float tensor. The exponential decay
rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper.
use_locking: If True use locks for update operations.
      name: Optional name for the operations created when applying gradients.
        Defaults to "AdamW".
"""
super(AdamWOptimizer, self).__init__(
weight_decay,
learning_rate=learning_rate,
beta1=beta1,
beta2=beta2,
epsilon=epsilon,
use_locking=use_locking,
name=name)
@tf_export("contrib.opt.ShampooWOptimizer")
class ShampooWOptimizer(DecoupledWeightDecayExtension,
shampoo.ShampooOptimizer):
"""Optimizer that implements the Shampoo algorithm with weight decay.
For further information see the documentation of the Shampoo Optimizer.
"""
def __init__(self,
weight_decay,
global_step,
max_matrix_size=768,
gbar_decay=0.0,
gbar_weight=1.0,
mat_gbar_decay=1.0,
mat_gbar_weight=1.0,
learning_rate=1.0,
svd_interval=1,
precond_update_interval=1,
epsilon=1e-4,
alpha=0.5,
use_iterative_root=False,
use_locking=False,
name="ShampooW"):
"""Construct a new ShampooW optimizer.
For further information see the documentation of the Shampoo Optimizer.
Args:
weight_decay: A `Tensor` or a floating point value. The weight decay.
global_step: tensorflow variable indicating the step.
max_matrix_size: We do not perform SVD for matrices larger than this.
      gbar_decay: Decay rate for the gradient moving average (see below).
      gbar_weight: Used to update gbar: gbar[t] = gbar_decay[t] * gbar[t-1] +
        gbar_weight[t] * g[t]
      mat_gbar_decay: Decay rate for the matrix gradient moving average (see
        below).
      mat_gbar_weight: Used to update mat_gbar: mat_gbar_j[t] =
        mat_gbar_decay[t] * mat_gbar_j[t-1] + mat_gbar_weight[t] * gg_j[t]
      learning_rate: Similar to SGD.
      svd_interval: Do the SVD after this many steps. Default = 1, i.e. every
        step. Usually 20 leads to no loss of accuracy, and 50 or 100 is also
        OK. You may want it more often early and less often later; set it in
        the caller as, for example:
        "svd_interval = lambda T: tf.cond(
            T < 2000, lambda: 20.0, lambda: 1000.0)"
precond_update_interval: We should update the preconditioners after this
many steps. Default = 1. Usually less than svd_interval.
epsilon: epsilon * I_n is added to each mat_gbar_j for stability
alpha: total power of the preconditioners.
use_iterative_root: should the optimizer use SVD (faster) or the iterative
root method (for TPU) for finding the roots of PSD matrices.
use_locking: If `True` use locks for update operations.
name: name of optimizer.
"""
super(ShampooWOptimizer, self).__init__(
weight_decay,
global_step=global_step,
max_matrix_size=max_matrix_size,
gbar_decay=gbar_decay,
gbar_weight=gbar_weight,
        mat_gbar_decay=mat_gbar_decay,
        mat_gbar_weight=mat_gbar_weight,
learning_rate=learning_rate,
svd_interval=svd_interval,
precond_update_interval=precond_update_interval,
epsilon=epsilon,
alpha=alpha,
use_iterative_root=use_iterative_root,
use_locking=use_locking,
name=name)
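# What follows is an editor-added usage sketch, not part of the original
# file: it assumes TF 1.x graph mode and extends plain gradient descent with
# decoupled weight decay via the factory above, then takes one step on a toy
# quadratic loss. All variables are decayed since decay_var_list is unset.
if __name__ == "__main__":
  import tensorflow.compat.v1 as tf
  tf.disable_eager_execution()
  SGDWOptimizer = extend_with_decoupled_weight_decay(
      tf.train.GradientDescentOptimizer)
  toy_w = tf.get_variable("toy_w", initializer=tf.ones([4]))
  toy_loss = tf.reduce_sum(tf.square(toy_w))
  toy_train_op = SGDWOptimizer(
      weight_decay=1e-4, learning_rate=0.1).minimize(toy_loss)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(toy_train_op)
    print("w after one SGDW step:", sess.run(toy_w))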
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/weight_decay_optimizers.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for AdaMoo optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.opt.python.training import shampoo
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
TOLERANCE = 1e-3
RIDGE_EPSILON = 1e-4
def np_power(mat_g, alpha):
"""Computes mat_g^alpha for a square symmetric matrix mat_g."""
mat_u, diag_d, mat_v = np.linalg.svd(mat_g)
diag_d = np.power(diag_d, alpha)
return np.dot(np.dot(mat_u, np.diag(diag_d)), mat_v)
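# Illustrative note (editor's addition): for a diagonal PSD matrix the SVD is
# trivial, so np_power(np.diag([4., 9.]), -0.5) equals np.diag([0.5, 1. / 3.]),
# i.e. the inverse matrix square root used in the Shampoo updates below.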
class ShampooTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('Var', False), ('ResourceVar', True))
def testBasicVector(self, use_resource_var):
"""Similar to the full Adagrad update."""
size = 20
init_var_np = np.zeros(size)
grad_np = np.random.rand(size)
grad_np_2 = np.random.rand(size)
with self.cached_session() as sess:
global_step = variables.VariableV1(
0, dtype=dtypes.int64, use_resource=use_resource_var)
var = variables.VariableV1(
init_var_np, dtype=dtypes.float32, use_resource=use_resource_var)
grad = constant_op.constant(grad_np, dtype=dtypes.float32)
grad_2 = constant_op.constant(grad_np_2, dtype=dtypes.float32)
opt = shampoo.ShampooOptimizer(global_step)
update = opt.apply_gradients(zip([grad], [var]),
global_step=global_step)
update_2 = opt.apply_gradients(zip([grad_2], [var]),
global_step=global_step)
variables.global_variables_initializer().run()
init_val = sess.run(var)
self.assertAllCloseAccordingToType(init_var_np, init_val)
# Run a step of Shampoo
update.run()
new_val = sess.run(var)
      # let us compute this in numpy
# Update rule is var = var - lr * mat_g^{-0.5} * grad
# lr = 1
mat_g = np.outer(grad_np, grad_np) / grad_np.shape[0]
mat_h = np_power(mat_g + RIDGE_EPSILON * np.eye(size), -0.5)
new_val_np = init_var_np - np.dot(mat_h, grad_np)
self.assertAllCloseAccordingToType(new_val_np, new_val,
atol=TOLERANCE, rtol=TOLERANCE)
# Run another step of Shampoo
update_2.run()
new_val = sess.run(var)
mat_g += np.outer(grad_np_2, grad_np_2) / grad_np.shape[0]
mat_h = np_power(mat_g + RIDGE_EPSILON * np.eye(size), -0.5)
new_val_np -= np.dot(mat_h, grad_np_2)
self.assertAllCloseAccordingToType(new_val_np, new_val,
atol=TOLERANCE, rtol=TOLERANCE)
@parameterized.named_parameters(('Var', False), ('ResourceVar', True))
def testBasicMatrix(self, use_resource_var):
"""Check update when gradient is a matrix."""
size = [10, 5]
init_var_np = np.zeros(size)
grad_np = np.random.rand(size[0], size[1])
grad_np_2 = np.random.rand(size[0], size[1])
with self.cached_session() as sess:
global_step = variables.VariableV1(
0, dtype=dtypes.int64, use_resource=use_resource_var)
var = variables.VariableV1(
init_var_np, dtype=dtypes.float32, use_resource=use_resource_var)
grad = constant_op.constant(grad_np, dtype=dtypes.float32)
grad_2 = constant_op.constant(grad_np_2, dtype=dtypes.float32)
opt = shampoo.ShampooOptimizer(global_step)
update = opt.apply_gradients(zip([grad], [var]),
global_step=global_step)
update_2 = opt.apply_gradients(zip([grad_2], [var]),
global_step=global_step)
variables.global_variables_initializer().run()
init_val = sess.run(var)
self.assertAllCloseAccordingToType(init_var_np, init_val)
# Run a step of Shampoo
update.run()
new_val = sess.run(var)
      # let us compute this in numpy
# Update rule is var = var - lr * mat_g1^{-0.25} * grad * mat_g2^{-0.25}
# lr = 1
mat_g1 = np.dot(grad_np, grad_np.transpose()) / grad_np.shape[0]
mat_left = np_power(mat_g1 + RIDGE_EPSILON * np.eye(size[0]), -0.25)
mat_g2 = np.dot(grad_np.transpose(), grad_np) / grad_np.shape[1]
mat_right = np_power(mat_g2 + RIDGE_EPSILON * np.eye(size[1]), -0.25)
new_val_np = init_var_np - np.dot(np.dot(mat_left, grad_np), mat_right)
self.assertAllCloseAccordingToType(new_val_np, new_val,
atol=TOLERANCE, rtol=TOLERANCE)
# Run another step of Shampoo
update_2.run()
new_val = sess.run(var)
mat_g1 += np.dot(grad_np_2, grad_np_2.transpose()) / grad_np_2.shape[0]
mat_left = np_power(mat_g1 + RIDGE_EPSILON * np.eye(size[0]), -0.25)
mat_g2 += np.dot(grad_np_2.transpose(), grad_np_2) / grad_np_2.shape[1]
mat_right = np_power(mat_g2 + RIDGE_EPSILON * np.eye(size[1]), -0.25)
new_val_np -= np.dot(np.dot(mat_left, grad_np_2), mat_right)
self.assertAllCloseAccordingToType(new_val_np, new_val,
atol=TOLERANCE, rtol=TOLERANCE)
def _testBasicTensor(self, use_iterative_root, use_resource_var):
"""Check update when gradient is a tensor.
Args:
use_iterative_root: use iterative power method or SVD to find nth roots.
use_resource_var: use resource var as variables.
"""
size = [10, 5, 7]
init_var_np = np.zeros(size)
grad_np = np.random.rand(size[0], size[1], size[2])
grad_np_2 = np.random.rand(size[0], size[1], size[2])
with self.cached_session() as sess:
global_step = variables.VariableV1(
0, dtype=dtypes.int64, use_resource=use_resource_var)
var = variables.VariableV1(
init_var_np, dtype=dtypes.float32, use_resource=use_resource_var)
grad = constant_op.constant(grad_np, dtype=dtypes.float32)
grad_2 = constant_op.constant(grad_np_2, dtype=dtypes.float32)
opt = shampoo.ShampooOptimizer(global_step,
use_iterative_root=use_iterative_root)
update = opt.apply_gradients(zip([grad], [var]),
global_step=global_step)
update_2 = opt.apply_gradients(zip([grad_2], [var]),
global_step=global_step)
variables.global_variables_initializer().run()
init_val = sess.run(var)
self.assertAllCloseAccordingToType(init_var_np, init_val)
# Run a step of Shampoo
update.run()
new_val = sess.run(var)
      # let us compute this in numpy
# Update rule is var = var - lr * Prod_i mat_g_i^{-0.5/3} grad
# lr = 1
mat_g1 = (
np.tensordot(grad_np, grad_np, axes=([1, 2], [1, 2])) /
grad_np.shape[0])
mat_g1_a = np_power(mat_g1 + RIDGE_EPSILON * np.eye(size[0]), -0.5 / 3.0)
mat_g2 = (
np.tensordot(grad_np, grad_np, axes=([0, 2], [0, 2])) /
grad_np.shape[1])
mat_g2_a = np_power(mat_g2 + RIDGE_EPSILON * np.eye(size[1]), -0.5 / 3.0)
mat_g3 = (
np.tensordot(grad_np, grad_np, axes=([0, 1], [0, 1])) /
grad_np.shape[2])
mat_g3_a = np_power(mat_g3 + RIDGE_EPSILON * np.eye(size[2]), -0.5 / 3.0)
precond_grad = np.tensordot(grad_np, mat_g1_a, axes=([0], [0]))
precond_grad = np.tensordot(precond_grad, mat_g2_a, axes=([0], [0]))
precond_grad = np.tensordot(precond_grad, mat_g3_a, axes=([0], [0]))
new_val_np = init_var_np - precond_grad
self.assertAllCloseAccordingToType(new_val_np, new_val,
atol=TOLERANCE, rtol=TOLERANCE)
# Run another step of Shampoo
update_2.run()
new_val = sess.run(var)
mat_g1 += (
np.tensordot(grad_np_2, grad_np_2, axes=([1, 2], [1, 2])) /
grad_np_2.shape[0])
mat_g1_a = np_power(mat_g1 + RIDGE_EPSILON * np.eye(size[0]), -0.5 / 3.0)
mat_g2 += (
np.tensordot(grad_np_2, grad_np_2, axes=([0, 2], [0, 2])) /
grad_np_2.shape[1])
mat_g2_a = np_power(mat_g2 + RIDGE_EPSILON * np.eye(size[1]), -0.5 / 3.0)
mat_g3 += (
np.tensordot(grad_np_2, grad_np_2, axes=([0, 1], [0, 1])) /
grad_np_2.shape[2])
mat_g3_a = np_power(mat_g3 + RIDGE_EPSILON * np.eye(size[2]), -0.5 / 3.0)
precond_grad = np.tensordot(grad_np_2, mat_g1_a, axes=([0], [0]))
precond_grad = np.tensordot(precond_grad, mat_g2_a, axes=([0], [0]))
precond_grad = np.tensordot(precond_grad, mat_g3_a, axes=([0], [0]))
new_val_np -= precond_grad
self.assertAllCloseAccordingToType(new_val_np, new_val,
atol=TOLERANCE, rtol=TOLERANCE)
@parameterized.named_parameters(
('SVDWithVar', False, False),
('SVDWithResourceVar', False, True),
('IterRootWithVar', True, False),
('IterRootWithResourceVar', True, True),
)
def testBasicTensor(self, use_iterative_root, use_resource_var):
self._testBasicTensor(use_iterative_root, use_resource_var)
@parameterized.named_parameters(('Var', False), ('ResourceVar', True))
def testLargeVector(self, use_resource_var):
"""This is just the diagonal Adagrad update."""
size = 2000
init_var_np = np.zeros(size)
grad_np = np.random.rand(size)
grad_np_2 = np.random.rand(size)
with self.cached_session() as sess:
global_step = variables.VariableV1(
0, dtype=dtypes.int64, use_resource=use_resource_var)
var = variables.VariableV1(
init_var_np, dtype=dtypes.float32, use_resource=use_resource_var)
grad = constant_op.constant(grad_np, dtype=dtypes.float32)
grad_2 = constant_op.constant(grad_np_2, dtype=dtypes.float32)
opt = shampoo.ShampooOptimizer(global_step)
update = opt.apply_gradients(zip([grad], [var]),
global_step=global_step)
update_2 = opt.apply_gradients(zip([grad_2], [var]),
global_step=global_step)
variables.global_variables_initializer().run()
init_val = sess.run(var)
self.assertAllCloseAccordingToType(init_var_np, init_val)
# Run a step of Shampoo
update.run()
new_val = sess.run(var)
      # let us compute this in numpy
# Update rule is var = var - lr * gg^{-0.5} * grad
# lr = 1
mat_g = (grad_np * grad_np)
new_val_np = init_var_np - np.power(mat_g, -0.5) * grad_np
self.assertAllCloseAccordingToType(
new_val_np, new_val, atol=TOLERANCE, rtol=TOLERANCE)
# Run another step of Shampoo
update_2.run()
new_val = sess.run(var)
mat_g += (grad_np_2 * grad_np_2)
new_val_np -= np.power(mat_g, -0.5) * grad_np_2
self.assertAllCloseAccordingToType(
new_val_np, new_val, atol=TOLERANCE, rtol=TOLERANCE)
@parameterized.named_parameters(('Var', False), ('ResourceVar', True))
def testLargeMatrix(self, use_resource_var):
"""Gradient is a matrix, one of whose dimensions is large.
We do diagonal updates for large dimensions.
Args:
use_resource_var: use resource var as variables.
"""
size = [2000, 3]
init_var_np = np.zeros(size)
grad_np = np.random.rand(size[0], size[1])
grad_np_2 = np.random.rand(size[0], size[1])
with self.cached_session() as sess:
global_step = variables.VariableV1(
0, dtype=dtypes.int64, use_resource=use_resource_var)
var = variables.VariableV1(
init_var_np, dtype=dtypes.float32, use_resource=use_resource_var)
grad = constant_op.constant(grad_np, dtype=dtypes.float32)
grad_2 = constant_op.constant(grad_np_2, dtype=dtypes.float32)
opt = shampoo.ShampooOptimizer(global_step)
update = opt.apply_gradients(zip([grad], [var]),
global_step=global_step)
update_2 = opt.apply_gradients(zip([grad_2], [var]),
global_step=global_step)
variables.global_variables_initializer().run()
init_val = sess.run(var)
self.assertAllCloseAccordingToType(init_var_np, init_val)
# Run a step of Shampoo
update.run()
new_val = sess.run(var)
      # let us compute this in numpy
# Update rule is var = var - lr * mat_left * grad * mat_right
# where the mat_left * grad is just element-wise product,
# with broadcasting
# lr = 1
mat_g1 = np.sum(
grad_np * grad_np, axis=1, keepdims=True) / grad_np.shape[0]
mat_left = np.power(mat_g1, -0.25)
mat_g2 = np.dot(grad_np.transpose(), grad_np) / grad_np.shape[1]
mat_right = np_power(mat_g2 + RIDGE_EPSILON * np.eye(size[1]), -0.25)
new_val_np = init_var_np - np.dot(grad_np * mat_left, mat_right)
self.assertAllCloseAccordingToType(new_val_np, new_val,
atol=TOLERANCE, rtol=TOLERANCE)
# Run another step of Shampoo
update_2.run()
new_val = sess.run(var)
mat_g1 += np.sum(
grad_np_2 * grad_np_2, axis=1, keepdims=True) / grad_np_2.shape[0]
mat_left = np.power(mat_g1, -0.25)
mat_g2 += np.dot(grad_np_2.transpose(), grad_np_2) / grad_np_2.shape[1]
mat_right = np_power(mat_g2 + RIDGE_EPSILON * np.eye(size[1]), -0.25)
new_val_np -= np.dot(grad_np_2 * mat_left, mat_right)
self.assertAllCloseAccordingToType(new_val_np, new_val,
atol=TOLERANCE, rtol=TOLERANCE)
@parameterized.named_parameters(('Var', False))
def testSparseUpdateLarge(self, use_resource_var):
"""Check update when gradient is of type IndexSlices.
We do diagonal updates for the first dimension, unless it is very small.
Args:
use_resource_var: use resource var as variables.
"""
size = [2000, 3]
sample_size_1 = 100
init_var_np = np.zeros(size)
grad_indices = np.sort(np.random.choice(np.arange(size[0]), sample_size_1,
replace=False))
grad_np = np.random.rand(sample_size_1, size[1])
sample_size_2 = 7
grad_indices_2 = np.sort(np.random.choice(np.arange(size[0]), sample_size_2,
replace=False))
grad_np_2 = np.random.rand(sample_size_2, size[1])
with self.cached_session() as sess:
global_step = variables.VariableV1(
0, dtype=dtypes.int64, use_resource=use_resource_var)
var = variables.VariableV1(
init_var_np, dtype=dtypes.float32, use_resource=use_resource_var)
grad = ops.IndexedSlices(
constant_op.constant(grad_np, dtype=dtypes.float32),
constant_op.constant(grad_indices),
constant_op.constant(size))
grad_2 = ops.IndexedSlices(
constant_op.constant(grad_np_2, dtype=dtypes.float32),
constant_op.constant(grad_indices_2),
constant_op.constant(size))
opt = shampoo.ShampooOptimizer(global_step)
update = opt.apply_gradients(zip([grad], [var]),
global_step=global_step)
update_2 = opt.apply_gradients(zip([grad_2], [var]),
global_step=global_step)
variables.global_variables_initializer().run()
init_val = sess.run(var)
self.assertAllCloseAccordingToType(init_var_np, init_val)
# Run a step of Shampoo
update.run()
new_val = sess.run(var)
      # let us compute this in numpy
# Update rule is var = var - lr * mat_left * grad * mat_right
# where the mat_left * grad is just element-wise product,
# with broadcasting
# lr = 1
      # In this case the update lr * mat_left * grad * mat_right is
      # of size 100 x 3, so only the sampled indices of var need to be
      # updated.
mat_g1 = np.sum(grad_np * grad_np, axis=1, keepdims=True)
mat_g1_acc = np.zeros((size[0], 1))
mat_g1_acc[grad_indices] += mat_g1
mat_left = np.power(mat_g1 + RIDGE_EPSILON, -0.25)
mat_g2 = np.dot(grad_np.transpose(), grad_np) / grad_np.shape[1]
mat_right = np_power(mat_g2 + RIDGE_EPSILON * np.eye(size[1]), -0.25)
new_val_np = init_var_np
new_val_np[grad_indices, :] -= np.dot(grad_np * mat_left, mat_right)
self.assertAllCloseAccordingToType(new_val_np, new_val,
atol=TOLERANCE, rtol=TOLERANCE)
# Run another step of Shampoo
update_2.run()
new_val = sess.run(var)
mat_g1 = np.sum(grad_np_2 * grad_np_2, axis=1, keepdims=True)
mat_g1_acc[grad_indices_2] += mat_g1
mat_left = np.power(mat_g1_acc[grad_indices_2] + RIDGE_EPSILON, -0.25)
mat_g2 += np.dot(grad_np_2.transpose(), grad_np_2) / grad_np_2.shape[1]
mat_right = np_power(mat_g2 + RIDGE_EPSILON * np.eye(size[1]), -0.25)
new_val_np[grad_indices_2, :] -= np.dot(grad_np_2 * mat_left, mat_right)
self.assertAllCloseAccordingToType(new_val_np, new_val,
atol=TOLERANCE, rtol=TOLERANCE)
def _testSparseUpdateSmall(self, use_iterative_root, use_resource_var):
"""Gradient is of type IndexSlices, but the first dimension is small.
We create dense gradient and do the full update with SVD etc.
Args:
use_iterative_root: use iterative power method or SVD to find nth roots.
use_resource_var: use resource var as variables.
"""
size = [100, 3, 5]
sample_size = 10
init_var_np = np.zeros(size)
grad_indices = np.sort(np.random.choice(np.arange(size[0]), sample_size,
replace=False))
grad_np = np.random.rand(sample_size, size[1], size[2])
with self.cached_session() as sess:
global_step = variables.VariableV1(
0, dtype=dtypes.int64, use_resource=use_resource_var)
var = variables.VariableV1(
init_var_np, dtype=dtypes.float32, use_resource=use_resource_var)
grad = ops.IndexedSlices(
constant_op.constant(grad_np, dtype=dtypes.float32),
constant_op.constant(grad_indices),
constant_op.constant(size))
opt = shampoo.ShampooOptimizer(global_step,
use_iterative_root=use_iterative_root)
update = opt.apply_gradients(zip([grad], [var]),
global_step=global_step)
variables.global_variables_initializer().run()
init_val = sess.run(var)
self.assertAllCloseAccordingToType(init_var_np, init_val)
# Run a step of Shampoo
update.run()
new_val = sess.run(var)
      # let us compute this in numpy
# Update rule is var = var - lr * Prod_i mat_g_i^{-0.125} grad
# lr = 1
grad_dense = np.zeros_like(init_var_np)
grad_dense[grad_indices] = grad_np
mat_g1 = np.tensordot(
grad_dense, grad_dense, axes=([1, 2], [1, 2])) / grad_dense.shape[0]
mat_g1_a = np_power(mat_g1 + RIDGE_EPSILON * np.eye(size[0]), -0.5 / 3.0)
mat_g2 = np.tensordot(
grad_dense, grad_dense, axes=([0, 2], [0, 2])) / grad_dense.shape[1]
mat_g2_a = np_power(mat_g2 + RIDGE_EPSILON * np.eye(size[1]), -0.5 / 3.0)
mat_g3 = np.tensordot(
grad_dense, grad_dense, axes=([0, 1], [0, 1])) / grad_dense.shape[2]
mat_g3_a = np_power(mat_g3 + RIDGE_EPSILON * np.eye(size[2]), -0.5 / 3.0)
precond_grad = np.tensordot(grad_dense, mat_g1_a, axes=([0], [0]))
precond_grad = np.tensordot(precond_grad, mat_g2_a, axes=([0], [0]))
precond_grad = np.tensordot(precond_grad, mat_g3_a, axes=([0], [0]))
new_val_np = init_var_np - precond_grad
self.assertAllCloseAccordingToType(new_val_np, new_val,
atol=TOLERANCE, rtol=TOLERANCE)
@parameterized.named_parameters(
('SVDWithVar', False, False),
('SVDWithResourceVar', False, True),
('IterRootWithVar', True, False),
('IterRootWithResourceVar', True, True),
)
def testSparseUpdateSmall(self, use_iterative_root, use_resource_var):
self._testSparseUpdateSmall(use_iterative_root, use_resource_var)
def _testBasicTensorWithMomentum(self, use_iterative_root, use_resource_var):
"""Check update with momentum when gradient is a tensor.
Args:
use_iterative_root: use iterative power method or SVD to find nth roots.
use_resource_var: use resource var as variables.
"""
size = [10, 5, 7]
init_var_np = np.zeros(size)
grad_np = np.random.rand(size[0], size[1], size[2])
grad_np_2 = np.random.rand(size[0], size[1], size[2])
gbar_decay = 0.9
gbar_weight = 0.1
with self.cached_session() as sess:
global_step = variables.VariableV1(
0, dtype=dtypes.int64, use_resource=use_resource_var)
var = variables.VariableV1(
init_var_np, dtype=dtypes.float32, use_resource=use_resource_var)
grad = constant_op.constant(grad_np, dtype=dtypes.float32)
grad_2 = constant_op.constant(grad_np_2, dtype=dtypes.float32)
opt = shampoo.ShampooOptimizer(global_step, gbar_decay=gbar_decay,
gbar_weight=gbar_weight,
use_iterative_root=use_iterative_root)
update = opt.apply_gradients(zip([grad], [var]),
global_step=global_step)
update_2 = opt.apply_gradients(zip([grad_2], [var]),
global_step=global_step)
variables.global_variables_initializer().run()
# Run a step of Shampoo
update.run()
new_val = sess.run(var)
      # let us compute this in numpy
# Update rule is var = var - lr * Prod_i mat_g_i^{-0.5/3} grad
# lr = 1
mat_g1 = np.tensordot(
grad_np, grad_np, axes=([1, 2], [1, 2])) / grad_np.shape[0]
mat_g1_a = np_power(mat_g1 + RIDGE_EPSILON * np.eye(size[0]), -0.5 / 3.0)
mat_g2 = np.tensordot(
grad_np, grad_np, axes=([0, 2], [0, 2])) / grad_np.shape[1]
mat_g2_a = np_power(mat_g2 + RIDGE_EPSILON * np.eye(size[1]), -0.5 / 3.0)
mat_g3 = np.tensordot(
grad_np, grad_np, axes=([0, 1], [0, 1])) / grad_np.shape[2]
mat_g3_a = np_power(mat_g3 + RIDGE_EPSILON * np.eye(size[2]), -0.5 / 3.0)
gbar_np = gbar_weight * grad_np
precond_grad = np.tensordot(gbar_np, mat_g1_a, axes=([0], [0]))
precond_grad = np.tensordot(precond_grad, mat_g2_a, axes=([0], [0]))
precond_grad = np.tensordot(precond_grad, mat_g3_a, axes=([0], [0]))
new_val_np = init_var_np - precond_grad
self.assertAllCloseAccordingToType(new_val_np, new_val,
atol=TOLERANCE, rtol=TOLERANCE)
# Run another step of Shampoo
update_2.run()
new_val = sess.run(var)
mat_g1 += np.tensordot(
grad_np_2, grad_np_2, axes=([1, 2], [1, 2])) / grad_np_2.shape[0]
mat_g1_a = np_power(mat_g1 + RIDGE_EPSILON * np.eye(size[0]), -0.5 / 3.0)
mat_g2 += np.tensordot(
grad_np_2, grad_np_2, axes=([0, 2], [0, 2])) / grad_np_2.shape[1]
mat_g2_a = np_power(mat_g2 + RIDGE_EPSILON * np.eye(size[1]), -0.5 / 3.0)
mat_g3 += np.tensordot(
grad_np_2, grad_np_2, axes=([0, 1], [0, 1])) / grad_np_2.shape[2]
mat_g3_a = np_power(mat_g3 + RIDGE_EPSILON * np.eye(size[2]), -0.5 / 3.0)
gbar_np_2 = gbar_decay * gbar_np + gbar_weight * grad_np_2
precond_grad = np.tensordot(gbar_np_2, mat_g1_a, axes=([0], [0]))
precond_grad = np.tensordot(precond_grad, mat_g2_a, axes=([0], [0]))
precond_grad = np.tensordot(precond_grad, mat_g3_a, axes=([0], [0]))
new_val_np -= precond_grad
self.assertAllCloseAccordingToType(new_val_np, new_val,
atol=TOLERANCE, rtol=TOLERANCE)
@parameterized.named_parameters(
('SVDWithVar', False, False),
('SVDWithResourceVar', False, True),
('IterRootWithVar', True, False),
('IterRootWithResourceVar', True, True),
)
def testBasicTensorWithMomentum(self, use_iterative_root, use_resource_var):
self._testBasicTensorWithMomentum(use_iterative_root, use_resource_var)
def _testDelayedSVD(self, use_iterative_root, use_resource_var):
"""Performing the SVD every nth step.
Args:
use_iterative_root: use iterative power method or SVD to find nth roots.
use_resource_var: use resource var as variables.
"""
size = [10, 5, 7]
init_var_np = np.zeros(size).astype(np.float32)
iterations = 20
svd_interval = 5
grad_np = np.random.rand(
iterations, size[0], size[1], size[2]).astype(np.float32)
mat_g1_a = np.eye(size[0])
mat_g1 = np.zeros_like(mat_g1_a)
mat_g2_a = np.eye(size[1])
mat_g2 = np.zeros_like(mat_g2_a)
mat_g3_a = np.eye(size[2])
mat_g3 = np.zeros_like(mat_g3_a)
with self.cached_session() as sess:
global_step = variables.VariableV1(
0, dtype=dtypes.int64, use_resource=use_resource_var)
var = variables.VariableV1(
init_var_np, dtype=dtypes.float32, use_resource=use_resource_var)
grad = array_ops.placeholder(dtypes.float32, shape=size)
opt = shampoo.ShampooOptimizer(global_step, svd_interval=svd_interval,
use_iterative_root=use_iterative_root)
update = opt.apply_gradients(zip([grad], [var]),
global_step=global_step)
variables.global_variables_initializer().run()
init_val = sess.run(var)
self.assertAllCloseAccordingToType(init_var_np, init_val)
new_val_np = init_var_np
# Run n steps of Shampoo
for i in range(iterations):
_ = sess.run(update, feed_dict={grad: grad_np[i]})
new_val = sess.run(var)
        # let us compute this in numpy
# Update rule is var = var - lr * Prod_i mat_g_i^{-0.5/3} grad
# lr = 1
mat_g1 += np.tensordot(
grad_np[i], grad_np[i], axes=([1, 2], [1, 2])) / grad_np[i].shape[0]
mat_g2 += np.tensordot(
grad_np[i], grad_np[i], axes=([0, 2], [0, 2])) / grad_np[i].shape[1]
mat_g3 += np.tensordot(
grad_np[i], grad_np[i], axes=([0, 1], [0, 1])) / grad_np[i].shape[2]
if (i + 1) % svd_interval == 0:
mat_g1_a = np_power(mat_g1 + RIDGE_EPSILON * np.eye(size[0]),
-0.5 / 3.0)
mat_g2_a = np_power(mat_g2 + RIDGE_EPSILON * np.eye(size[1]),
-0.5 / 3.0)
mat_g3_a = np_power(mat_g3 + RIDGE_EPSILON * np.eye(size[2]),
-0.5 / 3.0)
precond_grad = np.tensordot(grad_np[i], mat_g1_a, axes=([0], [0]))
precond_grad = np.tensordot(precond_grad, mat_g2_a, axes=([0], [0]))
precond_grad = np.tensordot(precond_grad, mat_g3_a, axes=([0], [0]))
new_val_np -= precond_grad
self.assertAllCloseAccordingToType(new_val_np, new_val,
atol=TOLERANCE, rtol=TOLERANCE)
@parameterized.named_parameters(
('SVDWithVar', False, False),
('SVDWithResourceVar', False, True),
('IterRootWithVar', True, False),
('IterRootWithResourceVar', True, True),
)
def testDelayedSVD(self, use_iterative_root, use_resource_var):
self._testDelayedSVD(use_iterative_root, use_resource_var)
def _testDelayedPrecondUpdate(self, use_iterative_root, use_resource_var):
"""Update the squared sum every nth step, drop the other steps.
Args:
use_iterative_root: use iterative power method or SVD to find nth roots.
use_resource_var: use resource var as variables.
"""
size = [10, 5, 7]
init_var_np = np.zeros(size).astype(np.float32)
iterations = 100
grad_np = np.random.rand(
iterations, size[0], size[1], size[2]).astype(np.float32)
svd_interval = 20
precond_update_interval = 5
mat_g1_a = np.eye(size[0])
mat_g1 = np.zeros_like(mat_g1_a)
mat_g2_a = np.eye(size[1])
mat_g2 = np.zeros_like(mat_g2_a)
mat_g3_a = np.eye(size[2])
mat_g3 = np.zeros_like(mat_g3_a)
with self.cached_session() as sess:
global_step = variables.VariableV1(
0, dtype=dtypes.int64, use_resource=use_resource_var)
var = variables.VariableV1(
init_var_np, dtype=dtypes.float32, use_resource=use_resource_var)
grad = array_ops.placeholder(dtypes.float32, shape=size)
opt = shampoo.ShampooOptimizer(
global_step, svd_interval=svd_interval,
precond_update_interval=precond_update_interval,
use_iterative_root=use_iterative_root)
update = opt.apply_gradients(zip([grad], [var]),
global_step=global_step)
variables.global_variables_initializer().run()
init_val = sess.run(var)
self.assertAllCloseAccordingToType(init_var_np, init_val)
new_val_np = init_var_np
# Run n steps of Shampoo
for i in range(iterations):
_ = sess.run(update, feed_dict={grad: grad_np[i]})
new_val = sess.run(var)
        # let us compute this in numpy
# Update rule is var = var - lr * Prod_i mat_g_i^{-0.5/3} grad
# lr = 1
if (i + 1) % precond_update_interval == 0:
mat_g1 += (
np.tensordot(grad_np[i], grad_np[i], axes=([1, 2], [1, 2])) /
grad_np[i].shape[0] * precond_update_interval)
mat_g2 += (
np.tensordot(grad_np[i], grad_np[i], axes=([0, 2], [0, 2])) /
grad_np[i].shape[1] * precond_update_interval)
mat_g3 += (
np.tensordot(grad_np[i], grad_np[i], axes=([0, 1], [0, 1])) /
grad_np[i].shape[2] * precond_update_interval)
if (i + 1) % svd_interval == 0:
mat_g1_a = np_power(mat_g1 + RIDGE_EPSILON * np.eye(size[0]),
-0.5 / 3.0)
mat_g2_a = np_power(mat_g2 + RIDGE_EPSILON * np.eye(size[1]),
-0.5 / 3.0)
mat_g3_a = np_power(mat_g3 + RIDGE_EPSILON * np.eye(size[2]),
-0.5 / 3.0)
precond_grad = np.tensordot(grad_np[i], mat_g1_a, axes=([0], [0]))
precond_grad = np.tensordot(precond_grad, mat_g2_a, axes=([0], [0]))
precond_grad = np.tensordot(precond_grad, mat_g3_a, axes=([0], [0]))
new_val_np -= precond_grad
self.assertAllCloseAccordingToType(new_val_np, new_val,
atol=TOLERANCE, rtol=TOLERANCE)
@parameterized.named_parameters(
('SVDWithVar', False, False),
('SVDWithResourceVar', False, True),
('IterRootWithVar', True, False),
('IterRootWithResourceVar', True, True),
)
def testDelayedPrecondUpdate(self, use_iterative_root, use_resource_var):
self._testDelayedPrecondUpdate(use_iterative_root, use_resource_var)
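
# --- Hedged sketch (editor's addition, not part of the original test file) ---
# The preconditioning step the tests above replicate, written standalone for a
# rank-2 (matrix) parameter. `np_power` and RIDGE_EPSILON are the helpers this
# file already uses; for a rank-k tensor the exponent is -1/(2k), hence -0.25
# here versus -0.5/3 in the rank-3 tests above.
def _shampoo_precondition_matrix_np(grad, mat_g1, mat_g2):
  """Applies the left/right Shampoo preconditioners to a matrix gradient."""
  left = np_power(mat_g1 + RIDGE_EPSILON * np.eye(mat_g1.shape[0]), -0.25)
  right = np_power(mat_g2 + RIDGE_EPSILON * np.eye(mat_g2.shape[0]), -0.25)
  # Mirrors the tensordot chain used for the rank-3 case in the tests above.
  return left.dot(grad).dot(right)
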
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/shampoo_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variant of the Adam optimizer that handles sparse updates more efficiently.
Compared with the original Adam optimizer, the one in this file can provide a
large improvement in model training throughput for some applications. However,
it provides slightly different semantics than the original Adam algorithm, and
may lead to different empirical results.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import adam
class LazyAdamOptimizer(adam.AdamOptimizer):
"""Variant of the Adam optimizer that handles sparse updates more efficiently.
The original Adam algorithm maintains two moving-average accumulators for
each trainable variable; the accumulators are updated at every step.
This class provides lazier handling of gradient updates for sparse variables.
It only updates moving-average accumulators for sparse variable indices that
appear in the current batch, rather than updating the accumulators for all
indices. Compared with the original Adam optimizer, it can provide large
improvements in model training throughput for some applications. However, it
provides slightly different semantics than the original Adam algorithm, and
may lead to different empirical results.
"""
def _apply_sparse(self, grad, var):
beta1_power, beta2_power = self._get_beta_accumulators()
beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
# \\(m := beta1 * m + (1 - beta1) * g_t\\)
m = self.get_slot(var, "m")
m_t = state_ops.scatter_update(m, grad.indices,
beta1_t * array_ops.gather(m, grad.indices) +
(1 - beta1_t) * grad.values,
use_locking=self._use_locking)
# \\(v := beta2 * v + (1 - beta2) * (g_t * g_t)\\)
v = self.get_slot(var, "v")
v_t = state_ops.scatter_update(v, grad.indices,
beta2_t * array_ops.gather(v, grad.indices) +
(1 - beta2_t) * math_ops.square(grad.values),
use_locking=self._use_locking)
# \\(variable -= learning_rate * m_t / (epsilon_t + sqrt(v_t))\\)
m_t_slice = array_ops.gather(m_t, grad.indices)
v_t_slice = array_ops.gather(v_t, grad.indices)
denominator_slice = math_ops.sqrt(v_t_slice) + epsilon_t
var_update = state_ops.scatter_sub(var, grad.indices,
lr * m_t_slice / denominator_slice,
use_locking=self._use_locking)
return control_flow_ops.group(var_update, m_t, v_t)
def _resource_apply_sparse(self, grad, var, indices):
beta1_power, beta2_power = self._get_beta_accumulators()
beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
# \\(m := beta1 * m + (1 - beta1) * g_t\\)
m = self.get_slot(var, "m")
m_t_slice = beta1_t * array_ops.gather(m, indices) + (1 - beta1_t) * grad
m_update_op = resource_variable_ops.resource_scatter_update(m.handle,
indices,
m_t_slice)
# \\(v := beta2 * v + (1 - beta2) * (g_t * g_t)\\)
v = self.get_slot(var, "v")
v_t_slice = (beta2_t * array_ops.gather(v, indices) +
(1 - beta2_t) * math_ops.square(grad))
v_update_op = resource_variable_ops.resource_scatter_update(v.handle,
indices,
v_t_slice)
# \\(variable -= learning_rate * m_t / (epsilon_t + sqrt(v_t))\\)
var_slice = lr * m_t_slice / (math_ops.sqrt(v_t_slice) + epsilon_t)
var_update_op = resource_variable_ops.resource_scatter_sub(var.handle,
indices,
var_slice)
return control_flow_ops.group(var_update_op, m_update_op, v_update_op)
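
# --- Hedged usage sketch (editor's addition, not part of the original file) ---
# A gradient produced by an embedding-style lookup arrives as IndexedSlices, so
# only the gathered rows of the `m` and `v` slots are touched. The variable
# names below are illustrative; the construction assumes the TF 1.x API.
if __name__ == "__main__":
  import tensorflow.compat.v1 as tf

  embedding = tf.Variable(tf.random.normal([100, 8]))
  ids = tf.constant([3, 7, 7, 42])
  loss = tf.reduce_sum(tf.gather(embedding, ids) ** 2)
  train_op = LazyAdamOptimizer(learning_rate=0.01).minimize(loss)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)  # updates only rows {3, 7, 42} of m and v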
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/lazy_adam_optimizer.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Matrix functions contains iterative methods for M^p."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
def matrix_square_root(mat_a, mat_a_size, iter_count=100, ridge_epsilon=1e-4):
"""Iterative method to get matrix square root.
Stable iterations for the matrix square root, Nicholas J. Higham
Page 231, Eq 2.6b
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.6.8799&rep=rep1&type=pdf
Args:
    mat_a: the symmetric PSD matrix whose matrix square root is to be computed
mat_a_size: size of mat_a.
iter_count: Maximum number of iterations.
ridge_epsilon: Ridge epsilon added to make the matrix positive definite.
Returns:
mat_a^0.5
"""
def _iter_condition(i, unused_mat_y, unused_old_mat_y, unused_mat_z,
unused_old_mat_z, err, old_err):
    # This method requires that we check for divergence at every step.
return math_ops.logical_and(i < iter_count, err < old_err)
def _iter_body(i, mat_y, unused_old_mat_y, mat_z, unused_old_mat_z, err,
unused_old_err):
current_iterate = 0.5 * (3.0 * identity - math_ops.matmul(mat_z, mat_y))
current_mat_y = math_ops.matmul(mat_y, current_iterate)
current_mat_z = math_ops.matmul(current_iterate, mat_z)
# Compute the error in approximation.
mat_sqrt_a = current_mat_y * math_ops.sqrt(norm)
mat_a_approx = math_ops.matmul(mat_sqrt_a, mat_sqrt_a)
residual = mat_a - mat_a_approx
current_err = math_ops.sqrt(math_ops.reduce_sum(residual * residual)) / norm
return i + 1, current_mat_y, mat_y, current_mat_z, mat_z, current_err, err
identity = linalg_ops.eye(math_ops.cast(mat_a_size, dtypes.int32))
mat_a = mat_a + ridge_epsilon * identity
norm = math_ops.sqrt(math_ops.reduce_sum(mat_a * mat_a))
mat_init_y = mat_a / norm
mat_init_z = identity
init_err = norm
_, _, prev_mat_y, _, _, _, _ = control_flow_ops.while_loop(
_iter_condition, _iter_body, [
0, mat_init_y, mat_init_y, mat_init_z, mat_init_z, init_err,
init_err + 1.0
])
return prev_mat_y * math_ops.sqrt(norm)
def matrix_inverse_pth_root(mat_g,
mat_g_size,
alpha,
iter_count=100,
epsilon=1e-6,
ridge_epsilon=1e-6):
"""Computes mat_g^alpha, where alpha = -1/p, p a positive integer.
We use an iterative Schur-Newton method from equation 3.2 on page 9 of:
A Schur-Newton Method for the Matrix p-th Root and its Inverse
by Chun-Hua Guo and Nicholas J. Higham
SIAM Journal on Matrix Analysis and Applications,
2006, Vol. 28, No. 3 : pp. 788-804
https://pdfs.semanticscholar.org/0abe/7f77433cf5908bfe2b79aa91af881da83858.pdf
Args:
    mat_g: the symmetric PSD matrix whose power is to be computed
mat_g_size: size of mat_g.
alpha: exponent, must be -1/p for p a positive integer.
iter_count: Maximum number of iterations.
epsilon: accuracy indicator, useful for early termination.
ridge_epsilon: Ridge epsilon added to make the matrix positive definite.
Returns:
mat_g^alpha
"""
identity = linalg_ops.eye(math_ops.cast(mat_g_size, dtypes.int32))
def mat_power(mat_m, p):
"""Computes mat_m^p, for p a positive integer.
Power p is known at graph compile time, so no need for loop and cond.
Args:
mat_m: a square matrix
p: a positive integer
Returns:
mat_m^p
"""
assert p == int(p) and p > 0
power = None
while p > 0:
if p % 2 == 1:
power = math_ops.matmul(mat_m, power) if power is not None else mat_m
p //= 2
mat_m = math_ops.matmul(mat_m, mat_m)
return power
def _iter_condition(i, mat_m, _):
return math_ops.logical_and(
i < iter_count,
math_ops.reduce_max(math_ops.abs(mat_m - identity)) > epsilon)
def _iter_body(i, mat_m, mat_x):
mat_m_i = (1 - alpha) * identity + alpha * mat_m
return (i + 1, math_ops.matmul(mat_power(mat_m_i, -1.0 / alpha), mat_m),
math_ops.matmul(mat_x, mat_m_i))
if mat_g_size == 1:
mat_h = math_ops.pow(mat_g + ridge_epsilon, alpha)
else:
damped_mat_g = mat_g + ridge_epsilon * identity
z = (1 - 1 / alpha) / (2 * linalg_ops.norm(damped_mat_g))
# The best value for z is
# (1 - 1/alpha) * (c_max^{-alpha} - c_min^{-alpha}) /
# (c_max^{1-alpha} - c_min^{1-alpha})
# where c_max and c_min are the largest and smallest singular values of
# damped_mat_g.
# The above estimate assumes that c_max > c_min * 2^p. (p = -1/alpha)
    # The line above can be replaced by the one below, but it is less
    # accurate and hence needs more iterations to converge.
# z = (1 - 1/alpha) / math_ops.trace(damped_mat_g)
# If we want the method to always converge, use z = 1 / norm(damped_mat_g)
# or z = 1 / math_ops.trace(damped_mat_g), but these can result in many
# extra iterations.
_, _, mat_h = control_flow_ops.while_loop(
_iter_condition, _iter_body,
[0, damped_mat_g * z, identity * math_ops.pow(z, -alpha)])
return mat_h
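
# --- Hedged demo (editor's addition, not part of the original file) ---
# A quick numerical check of the two iterations above on a small symmetric
# positive-definite matrix: Y*Y should recover A, and A^0.5 * A^-0.5 should be
# close to the identity. Sizes and the seed are illustrative.
if __name__ == "__main__":
  import numpy as np
  from tensorflow.python.client import session

  rng = np.random.RandomState(0)
  factor = rng.rand(4, 4).astype(np.float32)
  spd = factor.dot(factor.T) + 0.5 * np.eye(4, dtype=np.float32)
  sqrt_op = matrix_square_root(spd, 4, ridge_epsilon=0.0)
  inv_root_op = matrix_inverse_pth_root(spd, 4, alpha=-0.5, ridge_epsilon=0.0)
  with session.Session() as sess:
    mat_sqrt, mat_inv_root = sess.run([sqrt_op, inv_root_op])
  print(np.max(np.abs(mat_sqrt.dot(mat_sqrt) - spd)))            # ~0
  print(np.max(np.abs(mat_sqrt.dot(mat_inv_root) - np.eye(4))))  # ~0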
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/matrix_functions.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AdaMax for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import adam
from tensorflow.python.training import training_ops
class AdaMaxOptimizer(adam.AdamOptimizer):
"""Optimizer that implements the AdaMax algorithm.
  AdaMax is sometimes superior to Adam, especially in models with embeddings;
  see [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
  ([pdf](http://arxiv.org/pdf/1412.6980.pdf)).
"""
def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
use_locking=False, name="AdaMax"):
"""Construct a new AdaMax optimizer.
Initialization:
```
m_0 <- 0 (Initialize initial 1st moment vector)
v_0 <- 0 (Initialize the exponentially weighted infinity norm)
t <- 0 (Initialize timestep)
```
The update rule for `variable` with gradient `g` uses an optimization
described at the end of section 7.1 of the paper:
```
t <- t + 1
m_t <- beta1 * m_{t-1} + (1 - beta1) * g
v_t <- max(beta2 * v_{t-1}, abs(g))
variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)
```
    As in AdamOptimizer, epsilon is added for numerical stability (especially
    to avoid division by zero when v_t = 0).
    In contrast to AdamOptimizer, the sparse implementation of this algorithm
    (used when the gradient is an IndexedSlices object, typically because of
    `tf.gather` or an embedding lookup in the forward pass) only updates
    variable slices and the corresponding `m_t`, `v_t` terms when that part of
    the variable was used in the forward pass. This means the sparse behavior
    differs from the dense behavior (similar to some momentum implementations
    that ignore momentum unless a variable slice was actually used).
Args:
learning_rate: A Tensor or a floating point value. The learning rate.
beta1: A float value or a constant float tensor.
The exponential decay rate for the 1st moment estimates.
beta2: A float value or a constant float tensor.
The exponential decay rate for the exponentially weighted infinity norm.
epsilon: A small constant for numerical stability.
use_locking: If True use locks for update operations.
name: Optional name for the operations created when applying gradients.
Defaults to "AdaMax".
"""
super(AdaMaxOptimizer, self).__init__(learning_rate, beta1, beta2,
epsilon, use_locking, name)
def _get_beta_accumulators(self):
if context.executing_eagerly():
graph = None
else:
graph = ops.get_default_graph()
return self._get_non_slot_variable("beta1_power", graph=graph)
def _create_slots(self, var_list):
# Create the beta1 accumulators on the same device as the first
# variable. Sort the var_list to make sure this device is consistent across
# workers (these need to go on the same PS, otherwise some updates are
# silently ignored).
first_var = min(var_list, key=lambda x: x.name)
self._create_non_slot_variable(initial_value=self._beta1,
name="beta1_power",
colocate_with=first_var)
# Create slots for the first and second moments.
for v in var_list:
self._zeros_slot(v, "m", self._name)
self._zeros_slot(v, "v", self._name)
def _apply_dense(self, grad, var):
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
beta1_power = self._get_beta_accumulators()
return training_ops.apply_ada_max(
var, m, v,
math_ops.cast(beta1_power, var.dtype.base_dtype),
math_ops.cast(self._lr_t, var.dtype.base_dtype),
math_ops.cast(self._beta1_t, var.dtype.base_dtype),
math_ops.cast(self._beta2_t, var.dtype.base_dtype),
math_ops.cast(self._epsilon_t, var.dtype.base_dtype),
grad, use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, var):
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
beta1_power = self._get_beta_accumulators()
return training_ops.resource_apply_ada_max(
var.handle, m.handle, v.handle,
math_ops.cast(beta1_power, grad.dtype.base_dtype),
math_ops.cast(self._lr_t, grad.dtype.base_dtype),
math_ops.cast(self._beta1_t, grad.dtype.base_dtype),
math_ops.cast(self._beta2_t, grad.dtype.base_dtype),
math_ops.cast(self._epsilon_t, grad.dtype.base_dtype),
grad, use_locking=self._use_locking)
def _apply_sparse_shared(self, grad, var, indices,
scatter_add, scatter_update):
beta1_power = self._get_beta_accumulators()
beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
# m_t = beta1 * m + (1 - beta1) * g_t
m = self.get_slot(var, "m")
m_slice = array_ops.gather(m, indices)
m_t_slice = m_slice * beta1_t + grad * (1 - beta1_t)
with ops.control_dependencies([m_t_slice]):
m_t = scatter_update(m, indices, m_t_slice)
# u_t = max(beta2 * u, abs(g_t))
v = self.get_slot(var, "v")
v_slice = array_ops.gather(v, indices)
v_t_slice = math_ops.maximum(v_slice * beta2_t, math_ops.abs(grad))
with ops.control_dependencies([v_t_slice]):
v_t = scatter_update(v, indices, v_t_slice)
# theta_t = theta - lr / (1 - beta1^t) * m_t / u_t
var_slice = -lr_t / (1 - beta1_power) * (m_t_slice /
(v_t_slice + epsilon_t))
with ops.control_dependencies([var_slice]):
var_update = scatter_add(var, indices, var_slice)
return control_flow_ops.group(*[var_update, m_t, v_t])
def _apply_sparse(self, grad, var):
return self._apply_sparse_shared(
grad.values, var, grad.indices,
lambda x, i, v: state_ops.scatter_add( # pylint: disable=g-long-lambda
x, i, v, use_locking=self._use_locking),
lambda x, i, v: state_ops.scatter_update( # pylint: disable=g-long-lambda
x, i, v, use_locking=self._use_locking))
def _resource_scatter_update(self, x, i, v):
with ops.control_dependencies(
[resource_variable_ops.resource_scatter_update(
x.handle, i, v)]):
return x.value()
def _resource_apply_sparse(self, grad, var, indices):
return self._apply_sparse_shared(
grad, var, indices,
self._resource_scatter_add, self._resource_scatter_update)
def _finish(self, update_ops, name_scope):
# Update the power accumulators.
with ops.control_dependencies(update_ops):
beta1_power = self._get_beta_accumulators()
with ops.colocate_with(beta1_power):
update_beta1 = beta1_power.assign(
beta1_power * self._beta1_t, use_locking=self._use_locking)
return control_flow_ops.group(*update_ops + [update_beta1],
name=name_scope)
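
# --- Hedged sketch (editor's addition, not part of the original file) ---
# One dense AdaMax step replicated in NumPy, following the update rule in the
# constructor docstring. Constants and values are illustrative.
if __name__ == "__main__":
  import numpy as np

  lr, beta1, beta2, epsilon, t = 0.001, 0.9, 0.999, 1e-8, 1
  var, m, v = np.array([1.0, 2.0]), np.zeros(2), np.zeros(2)
  g = np.array([0.1, -0.2])
  m = beta1 * m + (1 - beta1) * g       # 1st moment estimate
  v = np.maximum(beta2 * v, np.abs(g))  # exponentially weighted infinity norm
  var -= lr / (1 - beta1 ** t) * m / (v + epsilon)
  print(var)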
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/adamax.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of AddSign."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class AddSignOptimizer(optimizer.Optimizer):
"""Optimizer that implements the AddSign update.
See [Bello et al., ICML2017],
[Neural Optimizer Search with RL](https://arxiv.org/abs/1709.07417).
"""
def __init__(self,
learning_rate=0.1,
alpha=1.0,
beta=0.9,
sign_decay_fn=None,
use_locking=False,
name='AddSignOptimizer'):
"""Constructs a new AddSignOptimizer object.
Initialization:
```
m_0 <- 0 (Initialize initial 1st moment vector)
t <- 0 (Initialize timestep)
```
Update:
```
t <- t + 1
m_t <- beta1 * m_{t-1} + (1 - beta1) * g
sign_decay <- sign_decay_fn(t)
    update <- (alpha + sign_decay * sign(g) * sign(m)) * g
variable <- variable - lr_t * update
```
Example for AddSign-ld (AddSign with linear sign decay)
```
decay_steps = 1000
linear_decay_fn = sign_decays.get_linear_decay_fn(decay_steps)
opt = AddSignOptimizer(learning_rate=0.1, sign_decay_fn=linear_decay_fn)
```
Args:
learning_rate: learning_rate used when taking a step.
alpha: alpha used in optimizer.
beta: decay used for computing the moving average m.
sign_decay_fn: decay function applied to the sign(g) sign(m) quantity.
Takes global_step as an argument. See sign_decay.py for some examples.
use_locking: If True, use locks for update operations.
name: Optional name for the operations created when applying gradients.
Defaults to "AddSignOptimizer".
"""
super(AddSignOptimizer, self).__init__(use_locking, name)
self._lr = learning_rate
self._alpha = alpha
self._beta = beta
self._sign_decay_fn = sign_decay_fn
# Tensor versions of the constructor arguments, created in _prepare().
self._lr_t = None
self._alpha_t = None
self._beta_t = None
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
if self._sign_decay_fn is not None:
self._sign_decay_t = ops.convert_to_tensor(
self._sign_decay_fn(global_step), name='sign_decay')
return super(AddSignOptimizer, self).apply_gradients(
grads_and_vars, global_step=global_step, name=name)
def _create_slots(self, var_list):
# Create slots for the first moment.
for v in var_list:
self._zeros_slot(v, 'm', self._name)
def _prepare(self):
self._lr_t = ops.convert_to_tensor(self._lr, name='learning_rate')
self._beta_t = ops.convert_to_tensor(self._beta, name='beta')
self._alpha_t = ops.convert_to_tensor(self._alpha, name='alpha')
if self._sign_decay_fn is None:
self._sign_decay_t = ops.convert_to_tensor(1.0, name='sign_decay')
def _apply_dense(self, grad, var):
m = self.get_slot(var, 'm')
return training_ops.apply_add_sign(
var,
m,
math_ops.cast(self._lr_t, var.dtype.base_dtype),
math_ops.cast(self._alpha_t, var.dtype.base_dtype),
math_ops.cast(self._sign_decay_t, var.dtype.base_dtype),
math_ops.cast(self._beta_t, var.dtype.base_dtype),
grad,
use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, var):
m = self.get_slot(var, 'm')
return training_ops.resource_apply_add_sign(
var.handle,
m.handle,
math_ops.cast(self._lr_t, var.dtype.base_dtype),
math_ops.cast(self._alpha_t, var.dtype.base_dtype),
math_ops.cast(self._sign_decay_t, var.dtype.base_dtype),
math_ops.cast(self._beta_t, var.dtype.base_dtype),
grad,
use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
alpha_t = math_ops.cast(self._alpha_t, var.dtype.base_dtype)
beta_t = math_ops.cast(self._beta_t, var.dtype.base_dtype)
m = self.get_slot(var, 'm')
m_t = state_ops.assign(
m, (m * beta_t) + (grad * (1 - beta_t)), use_locking=self._use_locking)
sign_g = ops.IndexedSlices(
math_ops.sign(grad.values), grad.indices, dense_shape=grad.dense_shape)
sign_gm = ops.IndexedSlices(
array_ops.gather(math_ops.sign(m_t), sign_g.indices) * sign_g.values,
sign_g.indices,
dense_shape=sign_g.dense_shape)
sign_decayed = math_ops.cast(
self._sign_decay_t, var.dtype.base_dtype)
multiplier_values = alpha_t + sign_decayed * sign_gm.values
multiplier = ops.IndexedSlices(
multiplier_values, sign_gm.indices, dense_shape=sign_gm.dense_shape)
final_update = ops.IndexedSlices(
lr_t * multiplier.values * grad.values,
multiplier.indices,
dense_shape=multiplier.dense_shape)
var_update = state_ops.scatter_sub(
var,
final_update.indices,
final_update.values,
use_locking=self._use_locking)
    return control_flow_ops.group(*[var_update, m_t])
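
# --- Hedged sketch (editor's addition, not part of the original file) ---
# One dense AddSign step in NumPy, mirroring the update rule in the constructor
# docstring. The constant sign_decay = 1.0 corresponds to sign_decay_fn=None.
if __name__ == "__main__":
  import numpy as np

  lr, alpha, beta, sign_decay = 0.1, 1.0, 0.9, 1.0
  var, m = np.array([1.0, -2.0]), np.zeros(2)
  g = np.array([0.5, 0.5])
  m = beta * m + (1 - beta) * g
  update = (alpha + sign_decay * np.sign(g) * np.sign(m)) * g
  var -= lr * update
  print(var)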
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/addsign.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ModelAverageOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import portpicker
from tensorflow.contrib.opt.python.training import model_average_optimizer
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import device_setter
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import server_lib
from tensorflow.python.training import training
from tensorflow.python.training import training_util
def create_local_cluster(num_workers, num_ps, protocol="grpc"):
"""Create local GRPC servers and return them."""
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs, job_name="worker", protocol=protocol, task_index=ix, start=True)
for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs, job_name="ps", protocol=protocol, task_index=ix, start=True)
for ix in range(num_ps)
]
return cluster_dict, workers, ps_servers
# Create the workers and return their sessions, graphs, and train_ops.
# The chief worker updates last.
def _get_workers(num_workers, steps, workers):
sessions = []
graphs = []
train_ops = []
for worker_id in range(num_workers):
graph = ops.Graph()
is_chief = (worker_id == 0)
with graph.as_default():
worker_device = "/job:worker/task:%d/cpu:0" % (worker_id)
      ma_custom = model_average_optimizer.ModelAverageCustomGetter(
          worker_device=worker_device)
with variable_scope.variable_scope(
"", custom_getter=ma_coustom), ops.device(
device_setter.replica_device_setter(
worker_device=worker_device,
ps_device="/job:ps/task:0/cpu:0",
ps_tasks=1)):
global_step = variables.Variable(0, name="global_step", trainable=False)
var_0 = variable_scope.get_variable(initializer=0.0, name="v0")
var_1 = variable_scope.get_variable(initializer=1.0, name="v1")
with ops.device("/job:worker/task:" + str(worker_id)):
if worker_id == 0:
grads_0 = constant_op.constant(-1.0)
grads_1 = constant_op.constant(-1.0)
else:
grads_0 = constant_op.constant(-2.0)
grads_1 = constant_op.constant(-2.0)
sgd_opt = gradient_descent.GradientDescentOptimizer(1.0)
opt = model_average_optimizer.ModelAverageOptimizer(
opt=sgd_opt,
num_worker=num_workers,
            ma_custom_getter=ma_custom,
is_chief=is_chief,
interval_steps=steps)
train_op = [
opt.apply_gradients([[grads_0, var_0], [grads_1, var_1]],
global_step)
]
ma_hook = opt.make_session_run_hook()
# Creates MonitoredSession
sess = training.MonitoredTrainingSession(
workers[worker_id].target, hooks=[ma_hook])
sessions.append(sess)
graphs.append(graph)
train_ops.append(train_op)
return sessions, graphs, train_ops
class ModelAverageOptimizerTest(test.TestCase):
def _run(self, train_op, sess):
sess.run(train_op)
def disabled_test1Workers2Period(self):
num_workers = 2
steps = 2
num_ps = 1
_, workers, _ = create_local_cluster(
num_workers=num_workers, num_ps=num_ps)
sessions, graphs, train_ops = _get_workers(num_workers, steps, workers)
var_0 = graphs[0].get_tensor_by_name("v0:0")
var_1 = graphs[0].get_tensor_by_name("v1:0")
global_step = training_util.get_global_step(graphs[0])
global_var_0 = graphs[0].get_tensor_by_name(
model_average_optimizer.GLOBAL_VARIABLE_NAME + "/v0:0")
global_var_1 = graphs[0].get_tensor_by_name(
model_average_optimizer.GLOBAL_VARIABLE_NAME + "/v1:0")
# Verify the initialized value.
self.assertAllEqual(0.0, sessions[0].run(var_0))
self.assertAllEqual(1.0, sessions[0].run(var_1))
self.assertAllEqual(0.0, sessions[0].run(global_var_0))
self.assertAllEqual(1.0, sessions[0].run(global_var_1))
self.assertAllEqual(0, sessions[0].run(global_step))
sessions[0].run(train_ops[0])
sessions[1].run(train_ops[1])
self.assertAllEqual(1.0, sessions[0].run(var_0))
self.assertAllEqual(2.0, sessions[0].run(var_1))
self.assertAllEqual(0.0, sessions[0].run(global_var_0))
self.assertAllEqual(1.0, sessions[0].run(global_var_1))
self.assertAllEqual(0, sessions[0].run(global_step))
# iteration 2, global variable update
thread_0 = self.checkedThread(
target=self._run, args=(train_ops[0], sessions[0]))
thread_1 = self.checkedThread(
target=self._run, args=(train_ops[1], sessions[1]))
thread_0.start()
thread_1.start()
thread_0.join()
thread_1.join()
self.assertAllEqual(3.0, sessions[0].run(var_0))
self.assertAllEqual(4.0, sessions[0].run(var_1))
self.assertAllEqual(3.0, sessions[0].run(global_var_0))
self.assertAllEqual(4.0, sessions[0].run(global_var_1))
self.assertAllEqual(1, sessions[0].run(global_step))
# iteration 3
sessions[0].run(train_ops[0])
self.assertAllEqual(4.0, sessions[0].run(var_0))
self.assertAllEqual(5.0, sessions[0].run(var_1))
self.assertAllEqual(3.0, sessions[0].run(global_var_0))
self.assertAllEqual(4.0, sessions[0].run(global_var_1))
self.assertAllEqual(1, sessions[0].run(global_step))
def testPS2TasksWithClusterSpecClass(self):
cluster_spec = server_lib.ClusterSpec({
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
})
worker_device = "/job:worker/task:0"
    ma_custom = model_average_optimizer.ModelAverageCustomGetter(
        worker_device=worker_device)
with ops.device(
device_setter.replica_device_setter(cluster=cluster_spec,
worker_device=worker_device,
ps_device="/job:ps")), \
variable_scope.variable_scope("", custom_getter=ma_coustom):
v = variable_scope.get_variable(initializer=[1, 2], name="v")
w = variable_scope.get_variable(initializer=[2, 1], name="w")
      v_g, w_g = ma_custom._local_2_global[v], ma_custom._local_2_global[w]
self.assertDeviceEqual("/job:worker/task:0", v.device)
self.assertDeviceEqual("job:ps/task:0", v_g.device)
self.assertDeviceEqual("/job:worker/task:0", w.device)
self.assertDeviceEqual("job:ps/task:1", w_g.device)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/model_average_optimizer_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for moving_average_optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tempfile
import six
from tensorflow.contrib.opt.python.training import moving_average_optimizer
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import saver
class MovingAverageOptimizerTest(test.TestCase):
def testRun(self):
self._helpTestRun(use_resource=False)
def testRunUseResource(self):
# Test that MovingAverageOptimizer works with resource variables.
self._helpTestRun(use_resource=True)
def testRunUsePartitionedVars(self):
# Test that MovingAverageOptimizer works with partitioned variables.
self._helpTestRun(use_partitioned_vars=True)
def testRunUseResourcePartitionedVars(self):
# Test that MovingAverageOptimizer works with resource and partitioned
# variables.
self._helpTestRun(use_partitioned_vars=True, use_resource=True)
def _helpTestRun(self, use_resource=False, use_partitioned_vars=False):
# Partitioned variables are represented as a "collection" of partitions.
    # To simplify the test and reuse as much code as possible, we employ the
# following test strategy for partitioned variables.
#
# In the case of non-partitioned variables test runs on variables with
# shape [2].
#
# In the case of partitioned variables we use shape [4] with two partitions,
# thus each partition has shape [2].
# For partitioned variables the test is run twice (for loop over
# variable_part_names), first time on the first partition of each variable,
# second time on the second partition of each variable.
variable_part_names = ['part_0', 'part_1'] if use_partitioned_vars else ['']
for sequential_update in [True, False]:
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
for var_part_name in variable_part_names:
with self.session(graph=ops.Graph()) as sess:
orig_val0 = [1.0, 2.0]
orig_val1 = [3.0, 4.0]
grads0 = [0.1, 0.1]
grads1 = [0.01, 0.01]
if use_partitioned_vars:
              # Use partitioned variables. Duplicate each initial value and
              # gradient so that each of the two partitions receives the
              # original [2]-shaped data.
partitioner = partitioned_variables.fixed_size_partitioner(
num_shards=2)
orig_val0 = orig_val0 * 2
orig_val1 = orig_val1 * 2
grads0 = grads0 * 2
grads1 = grads1 * 2
else:
# Regular (non-partitioned) variables.
partitioner = None
var0 = variable_scope.get_variable(
'var0',
initializer=constant_op.constant(orig_val0, dtype=dtype),
use_resource=use_resource,
partitioner=partitioner)
var1 = variable_scope.get_variable(
'var1',
initializer=constant_op.constant(orig_val1, dtype=dtype),
use_resource=use_resource,
partitioner=partitioner)
# Make a fake loss, such that gradient(loss, var0) == grads0
# and gradient(loss, var1) == grads1
grads0 = constant_op.constant(grads0, dtype=dtype)
grads1 = constant_op.constant(grads1, dtype=dtype)
loss = (math_ops.reduce_sum(grads0 * var0)
+ math_ops.reduce_sum(grads1 * var1))
opt = moving_average_optimizer.MovingAverageOptimizer(
gradient_descent.GradientDescentOptimizer(learning_rate=2.0),
average_decay=0.5,
sequential_update=sequential_update)
save_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(), 'run_1'))
save_path = os.path.join(save_dir, 'model')
update = opt.minimize(loss)
            # Get variables and their EMAs. In the case of partitioned
            # variables, get the proper part of each variable.
def _get_variable(var_name, part_name, ema):
"""Returns variable of it's moving average by name."""
matches = [
v for v in variables.global_variables()
if ((var_name in v.op.name)
and (part_name in v.op.name)
and (('ExponentialMovingAverage' in v.op.name) == ema))
]
self.assertEqual(len(matches), 1)
return matches[0]
var0 = _get_variable('var0', var_part_name, ema=False)
var1 = _get_variable('var1', var_part_name, ema=False)
ema_var0 = _get_variable('var0', var_part_name, ema=True)
ema_var1 = _get_variable('var1', var_part_name, ema=True)
perturb = control_flow_ops.group([
state_ops.assign_add(var0, [1.0, 1.0]),
state_ops.assign_add(var1, [2.0, 2.0]),
state_ops.assign_add(ema_var0, [3.0, 3.0]),
state_ops.assign_add(ema_var1, [4.0, 4.0])
])
# Test that saver with missing ema variables will fail.
with self.assertRaisesRegexp(ValueError, r'Variable to swap'):
opt.swapping_saver(var_list=[var0])
train_saver = opt.swapping_saver()
train_saver_subset = opt.swapping_saver(var_list=[var0, ema_var0])
inference_saver = saver.Saver()
variables.global_variables_initializer().run()
# Step 1.
update.run()
self.assertAllCloseAccordingToType([0.8, 1.8], var0.eval())
self.assertAllCloseAccordingToType([2.98, 3.98], var1.eval())
if sequential_update:
self.assertAllCloseAccordingToType([0.9, 1.9], ema_var0.eval())
self.assertAllCloseAccordingToType([2.99, 3.99], ema_var1.eval())
# Test that the swapping saver save/restore operation is identity.
train_saver.save(sess, save_path)
train_saver.restore(sess, save_path)
self.assertAllCloseAccordingToType([0.8, 1.8], var0.eval())
self.assertAllCloseAccordingToType([2.98, 3.98], var1.eval())
if sequential_update:
self.assertAllCloseAccordingToType([0.9, 1.9], ema_var0.eval())
self.assertAllCloseAccordingToType([2.99, 3.99], ema_var1.eval())
# Test that the subset saver saves the EMA variable as well.
if sequential_update:
subset_save_path = save_path + '_subset'
train_saver_subset.save(sess, subset_save_path)
perturb.run()
self.assertAllCloseAccordingToType([1.8, 2.8], var0.eval())
self.assertAllCloseAccordingToType([3.9, 4.9], ema_var0.eval())
self.assertAllCloseAccordingToType([4.98, 5.98], var1.eval())
self.assertAllCloseAccordingToType([6.99, 7.99], ema_var1.eval())
# Restoring should only restore var0 and ema_var0.
train_saver_subset.restore(sess, subset_save_path)
self.assertAllCloseAccordingToType([0.8, 1.8], var0.eval())
self.assertAllCloseAccordingToType([0.9, 1.9], ema_var0.eval())
self.assertAllCloseAccordingToType([4.98, 5.98], var1.eval())
self.assertAllCloseAccordingToType([6.99, 7.99], ema_var1.eval())
# Restore back to previous state.
train_saver.restore(sess, save_path)
# If updates are parallel,
# this is not always true after the 1st step.
if sequential_update:
# Test that the normal saver will have the averaged variables.
# We test that the average values are between the original value
# and the most recent variable values (since they are an average
# of the two).
val0 = var0.eval()
val1 = var1.eval()
train_saver.save(sess, save_path)
inference_saver.restore(sess, save_path)
avg_val0 = var0.eval()
avg_val1 = var1.eval()
for i in six.moves.range(len(val0)):
self.assertLess(val0[i], avg_val0[i])
self.assertLess(avg_val0[i], orig_val0[i])
self.assertLess(val1[i], avg_val1[i])
self.assertLess(avg_val1[i], orig_val1[i])
train_saver.restore(sess, save_path)
# Step 2.
update.run()
# Test that the normal saver will have the averaged variables.
# We test that the average values are between the original value and
# the most recent variable values (since they are an average of the
# two).
val0 = var0.eval()
val1 = var1.eval()
self.assertAllCloseAccordingToType([0.6, 1.6], val0)
self.assertAllCloseAccordingToType([2.96, 3.96], val1)
train_saver.save(sess, save_path)
inference_saver.restore(sess, save_path)
avg_val0 = var0.eval()
avg_val1 = var1.eval()
for i in six.moves.range(len(val0)):
self.assertLess(val0[i], avg_val0[i])
self.assertLess(avg_val0[i], orig_val0[i])
self.assertLess(val1[i], avg_val1[i])
self.assertLess(avg_val1[i], orig_val1[i])
def testFailWhenSaverCreatedBeforeInitialized(self):
with self.cached_session():
var = variables.Variable([1.0], name='var', dtype=dtypes.float32)
opt = moving_average_optimizer.MovingAverageOptimizer(
gradient_descent.GradientDescentOptimizer(learning_rate=2.0))
# We didn't call apply_gradients yet.
# This will raise an exception.
with self.assertRaises(RuntimeError):
_ = opt.swapping_saver([var])
def testCorrectOverride(self):
class WrapperOptimizer(gradient_descent.GradientDescentOptimizer):
def compute_gradients(self, *args, **kwargs):
self.compute_gradients_called = True
return super(WrapperOptimizer, self).compute_gradients(
*args, **kwargs)
def apply_gradients(self, *args, **kwargs):
self.apply_gradients_called = True
return super(WrapperOptimizer, self).apply_gradients(*args, **kwargs)
with self.cached_session() as sess:
var = variables.Variable([1.2], name='var', dtype=dtypes.float32)
loss = var ** 2
wrapper_opt = WrapperOptimizer(learning_rate=2.0)
opt = moving_average_optimizer.MovingAverageOptimizer(wrapper_opt)
train_op = opt.minimize(loss)
# Check that both methods are called on the underlying optimizer.
self.assertTrue(wrapper_opt.compute_gradients_called)
self.assertTrue(wrapper_opt.apply_gradients_called)
# Run train_op once, and verify that we've updated the variable.
variables.global_variables_initializer().run()
sess.run(train_op)
var_value = sess.run(var)
# Started at 1.2, gradient is 2*1.2=2.4, lr=2, so should now be -3.6.
self.assertNear(-3.6, var_value, 1e-6)
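
# --- Hedged sketch (editor's addition, not part of the original test file) ---
# The arithmetic behind the step-1 assertions in _helpTestRun: one SGD step
# with lr=2.0 followed by an EMA update with decay=0.5.
def _sgd_ema_step_np(var, grad, ema, lr=2.0, decay=0.5):
  var = var - lr * grad                  # e.g. 1.0 - 2.0 * 0.1 == 0.8
  ema = decay * ema + (1 - decay) * var  # e.g. 0.5 * 1.0 + 0.5 * 0.8 == 0.9
  return var, ema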
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/moving_average_optimizer_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Nadam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.opt.python.training import nadam_optimizer
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def nadam_update_numpy(param,
g_t,
t,
m,
v,
alpha=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8):
alpha_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
m_bar = (1 - beta1) * g_t + beta1 * m_t
param_t = param - alpha_t * m_bar / (np.sqrt(v_t) + epsilon)
return param_t, m_t, v_t
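
# --- Hedged note (editor's addition): the Nesterov correction is the `m_bar`
# line above; plain Adam would use `m_t` directly in the parameter update. A
# quick standalone check of the reference implementation:
def _nadam_numpy_demo(steps=3):
  param, m, v = np.array([1.0]), 0.0, 0.0
  for t in range(1, steps + 1):
    param, m, v = nadam_update_numpy(param, np.array([0.1]), t, m, v)
  return param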
class NadamOptimizerTest(test.TestCase):
def doTestSparse(self, use_resource=False):
    # We need to use a larger value of epsilon here so that
    # np.sqrt(v_t) + epsilon doesn't get rounded to 0 when
    # the dtype is half and np.sqrt(v_t) == 0, as is the case
    # when the gradient is 0.
sparse_epsilon = 1e-7
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
else:
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0_np_indices = np.array([0, 2], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np[grads0_np_indices]),
constant_op.constant(grads0_np_indices), constant_op.constant([3]))
grads1_np_indices = np.array([0, 2], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np[grads1_np_indices]),
constant_op.constant(grads1_np_indices), constant_op.constant([3]))
opt = nadam_optimizer.NadamOptimizer(epsilon=sparse_epsilon)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 3.0, 4.0], var1.eval())
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Nadam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
update.run()
var0_np, m0, v0 = nadam_update_numpy(var0_np, grads0_np, t, m0, v0,
epsilon=sparse_epsilon)
var1_np, m1, v1 = nadam_update_numpy(var1_np, grads1_np, t, m1, v1,
epsilon=sparse_epsilon)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testSparse(self):
self.doTestSparse(use_resource=False)
def testResourceSparse(self):
self.doTestSparse(use_resource=True)
def doTestBasic(self, use_resource=False):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
else:
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = nadam_optimizer.NadamOptimizer()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Nadam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
update.run()
var0_np, m0, v0 = nadam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = nadam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testBasic(self):
self.doTestBasic(use_resource=False)
def testResourceBasic(self):
self.doTestBasic(use_resource=True)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/nadam_optimizer_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for Layer-wise Adaptive Rate Scaling optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.opt.python.training import lars_optimizer as lo
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class LARSOptimizerTest(test.TestCase):
def testLARSGradientOneStep(self):
for _ in range(10):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session() as sess:
shape = [3, 3]
var_np = np.ones(shape)
grad_np = np.ones(shape)
lr_np = 0.1
m_np = 0.9
wd_np = 0.1
ep_np = 1e-5
eeta = 0.1
vel_np = np.zeros(shape)
var = variables.Variable(var_np, dtype=dtype)
grad = variables.Variable(grad_np, dtype=dtype)
opt = lo.LARSOptimizer(
learning_rate=lr_np,
momentum=m_np,
weight_decay=wd_np,
eeta=eeta,
epsilon=ep_np)
step = opt.apply_gradients([(grad, var)])
variables.global_variables_initializer().run()
pre_var = sess.run(var)
pre_vel = sess.run(opt.get_slot(var, 'momentum'))
self.assertAllClose(var_np, pre_var)
self.assertAllClose(vel_np, pre_vel)
step.run()
post_var = sess.run(var)
post_vel = sess.run(opt.get_slot(var, 'momentum'))
w_norm = np.linalg.norm(var_np.flatten(), ord=2)
g_norm = np.linalg.norm(grad_np.flatten(), ord=2)
trust_ratio = eeta * w_norm / (g_norm + wd_np * w_norm + ep_np)
scaled_lr = lr_np * trust_ratio
grad_np = grad_np + wd_np * var_np
vel_np = m_np * vel_np + scaled_lr * grad_np
var_np -= vel_np
self.assertAllClose(var_np, post_var)
self.assertAllClose(vel_np, post_vel)
def testLARSGradientMultiStep(self):
for _ in range(10):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session() as sess:
shape = [3, 3]
var_np = np.ones(shape)
grad_np = np.ones(shape)
lr_np = 0.1
m_np = 0.9
wd_np = 0.1
ep_np = 1e-5
eeta = 0.1
vel_np = np.zeros(shape)
var = variables.Variable(var_np, dtype=dtype)
grad = variables.Variable(grad_np, dtype=dtype)
opt = lo.LARSOptimizer(
learning_rate=lr_np,
momentum=m_np,
eeta=eeta,
weight_decay=wd_np,
epsilon=ep_np)
step = opt.apply_gradients([(grad, var)])
variables.global_variables_initializer().run()
pre_var = sess.run(var)
pre_vel = sess.run(opt.get_slot(var, 'momentum'))
self.assertAllClose(var_np, pre_var)
self.assertAllClose(vel_np, pre_vel)
for _ in range(10):
step.run()
post_var = sess.run(var)
post_vel = sess.run(opt.get_slot(var, 'momentum'))
w_norm = np.linalg.norm(var_np.flatten(), ord=2)
g_norm = np.linalg.norm(grad_np.flatten(), ord=2)
trust_ratio = eeta * w_norm / (g_norm + wd_np * w_norm + ep_np)
scaled_lr = lr_np * trust_ratio
grad_np = grad_np + wd_np * var_np
vel_np = m_np * vel_np + scaled_lr * grad_np
var_np -= vel_np
self.assertAllClose(var_np, post_var)
self.assertAllClose(vel_np, post_vel)
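
# --- Hedged sketch (editor's addition, not part of the original test file) ---
# The LARS update both tests above verify, as a standalone NumPy step: `eeta`
# scales the layer-wise trust ratio before it multiplies the learning rate.
def _lars_step_np(var, grad, vel, lr, momentum, weight_decay, eeta, epsilon):
  w_norm = np.linalg.norm(var.flatten(), ord=2)
  g_norm = np.linalg.norm(grad.flatten(), ord=2)
  trust_ratio = eeta * w_norm / (g_norm + weight_decay * w_norm + epsilon)
  grad = grad + weight_decay * var  # decoupled weight-decay term
  vel = momentum * vel + lr * trust_ratio * grad
  return var - vel, vel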
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/lars_optimizer_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LazyAdam rewrite to use global step for computing beta1 & beta2 accumulation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.opt.python.training import adam_gs_optimizer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
class LazyAdamGSOptimizer(adam_gs_optimizer.AdamGSOptimizer):
"""Variant of the Adam optimizer that handles sparse updates more efficiently.
  Branched from tf.contrib.opt.LazyAdamOptimizer. The only difference is to
  pass the global step for computing the beta1 and beta2 accumulators, instead
  of having the optimizer keep its own independent beta1 and beta2 accumulators
  as non-slot variables.
The original Adam algorithm maintains two moving-average accumulators for
each trainable variable; the accumulators are updated at every step.
This class provides lazier handling of gradient updates for sparse variables.
It only updates moving-average accumulators for sparse variable indices that
appear in the current batch, rather than updating the accumulators for all
indices. Compared with the original Adam optimizer, it can provide large
improvements in model training throughput for some applications. However, it
provides slightly different semantics than the original Adam algorithm, and
may lead to different empirical results.
"""
def _apply_sparse(self, grad, var):
beta1_power, beta2_power = self._get_beta_accumulators()
beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
# \\(m := beta1 * m + (1 - beta1) * g_t\\)
m = self.get_slot(var, "m")
m_t = state_ops.scatter_update(m, grad.indices,
beta1_t * array_ops.gather(m, grad.indices) +
(1 - beta1_t) * grad.values,
use_locking=self._use_locking)
# \\(v := beta2 * v + (1 - beta2) * (g_t * g_t)\\)
v = self.get_slot(var, "v")
v_t = state_ops.scatter_update(v, grad.indices,
beta2_t * array_ops.gather(v, grad.indices) +
(1 - beta2_t) * math_ops.square(grad.values),
use_locking=self._use_locking)
# \\(variable -= learning_rate * m_t / (epsilon_t + sqrt(v_t))\\)
m_t_slice = array_ops.gather(m_t, grad.indices)
v_t_slice = array_ops.gather(v_t, grad.indices)
denominator_slice = math_ops.sqrt(v_t_slice) + epsilon_t
var_update = state_ops.scatter_sub(var, grad.indices,
lr * m_t_slice / denominator_slice,
use_locking=self._use_locking)
return control_flow_ops.group(var_update, m_t, v_t)
def _resource_apply_sparse(self, grad, var, indices):
beta1_power, beta2_power = self._get_beta_accumulators()
beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
# \\(m := beta1 * m + (1 - beta1) * g_t\\)
m = self.get_slot(var, "m")
m_t_slice = beta1_t * array_ops.gather(m, indices) + (1 - beta1_t) * grad
m_update_op = resource_variable_ops.resource_scatter_update(m.handle,
indices,
m_t_slice)
# \\(v := beta2 * v + (1 - beta2) * (g_t * g_t)\\)
v = self.get_slot(var, "v")
v_t_slice = (beta2_t * array_ops.gather(v, indices) +
(1 - beta2_t) * math_ops.square(grad))
v_update_op = resource_variable_ops.resource_scatter_update(v.handle,
indices,
v_t_slice)
# \\(variable -= learning_rate * m_t / (epsilon_t + sqrt(v_t))\\)
var_slice = lr * m_t_slice / (math_ops.sqrt(v_t_slice) + epsilon_t)
var_update_op = resource_variable_ops.resource_scatter_sub(var.handle,
indices,
var_slice)
return control_flow_ops.group(var_update_op, m_update_op, v_update_op)
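
# --- Hedged usage sketch (editor's addition, not part of the original file) ---
# Unlike LazyAdamOptimizer, the GS variant derives its beta1/beta2 accumulators
# from the global step handed to the base AdamGSOptimizer, so the decay state
# restores exactly from a checkpoint. The constructor call below assumes the
# AdamGSOptimizer signature in this branch; names are illustrative.
if __name__ == "__main__":
  import tensorflow.compat.v1 as tf

  global_step = tf.train.get_or_create_global_step()
  embedding = tf.Variable(tf.random.normal([50, 4]))
  loss = tf.reduce_sum(tf.gather(embedding, [1, 2]) ** 2)
  opt = LazyAdamGSOptimizer(global_step=global_step, learning_rate=0.01)
  train_op = opt.minimize(loss, global_step=global_step)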
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/lazy_adam_gs_optimizer.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for VariableClippingOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import socket
import numpy as np
from tensorflow.contrib.opt.python.training import variable_clipping_optimizer
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import server_lib
class VariableClippingOptimizerTest(test.TestCase):
def _setupCluster(self):
def get_open_port():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except IOError:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
s.bind(("", 0))
port = s.getsockname()[1]
s.close()
return port
port1 = get_open_port()
port2 = get_open_port()
cs = server_lib.ClusterSpec({
"worker": ["localhost:%s" % port1],
"ps": ["localhost:%s" % port2]
})
worker = server_lib.Server(cs, job_name="worker", start=True)
ps = server_lib.Server(cs, job_name="ps", start=True)
return worker, ps
@contextlib.contextmanager
def _maybeWithDevice(self, device):
if device is not None:
with ops.device(device):
yield
else:
yield
def _setupDense(self, is_distributed, dtype):
with self._maybeWithDevice("/job:ps" if is_distributed else None):
var0 = variables.Variable([[0.0, 1.0], [2.0, 3.0]], dtype=dtype)
var1 = variables.Variable([4.0, 5.0], dtype=dtype)
with self._maybeWithDevice("/job:worker" if is_distributed else None):
grads0 = constant_op.constant([[0.1, 0.1], [0.1, 0.1]], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
sgd = gradient_descent.GradientDescentOptimizer(3.0)
clip_opt = variable_clipping_optimizer.VariableClippingOptimizer(
sgd, {var0: [1]}, 2.0)
update_op = clip_opt.apply_gradients(
list(zip([grads0, grads1], [var0, var1])))
variables.global_variables_initializer().run()
return var0, var1, update_op
def _assertDenseCorrect(self, var0, var1, update_op):
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[0.0, 1.0], [2.0, 3.0]], var0.eval())
self.assertAllCloseAccordingToType([4.0, 5.0], var1.eval())
# Run 1 step of sgd, clipping each var0[i] to max L2-norm 2.0
update_op.run()
# Validate updated params
var0_out = var0.eval()
# var0[0] has norm < 2.0, so it is not clipped.
self.assertAllCloseAccordingToType([(0.0 - 3.0 * 0.1), (1.0 - 3.0 * 0.1)],
var0_out[0])
# var0[1] has norm > 2.0, so it is clipped.
expected_unclipped = np.array([(2.0 - 3.0 * 0.1), (3.0 - 3.0 * 0.1)])
self.assertAllCloseAccordingToType(2.0 * expected_unclipped /
np.linalg.norm(expected_unclipped),
var0_out[1])
# var1 is not in the var list, so it should not be clipped
self.assertAllCloseAccordingToType([4.0 - 3.0 * 0.01, 5.0 - 3.0 * 0.01],
var1.eval())
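# A quick numpy sketch of the per-row clipping rule verified above
# (hypothetical helper, not part of the optimizer under test):
#   def clip_row(row, max_norm=2.0):
#     norm = np.linalg.norm(row)
#     return row if norm <= max_norm else row * (max_norm / norm)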
def _setupSparse(self, is_distributed, dtype):
with self._maybeWithDevice("/job:ps" if is_distributed else None):
var0 = variables.Variable(
[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]], dtype=dtype)
var1 = variables.Variable(
[[0.0, 1.0], [0.0, 3.0], [0.0, 5.0]], dtype=dtype)
with self._maybeWithDevice("/job:worker" if is_distributed else None):
grads = ops.IndexedSlices(
constant_op.constant(
[[0.1, 0.1], [0.1, 0.1]], dtype=dtype), [0, 2], [3, 2])
sgd = gradient_descent.GradientDescentOptimizer(3.0)
clip_opt = variable_clipping_optimizer.VariableClippingOptimizer(
sgd, {var0: [1],
var1: [0]}, 2.0)
update_op = clip_opt.apply_gradients(
list(zip([grads, grads], [var0, var1])))
variables.global_variables_initializer().run()
return var0, var1, update_op
def _assertSparseCorrect(self, var0, var1, update_op):
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]],
var0.eval())
self.assertAllCloseAccordingToType([[0.0, 1.0], [0.0, 3.0], [0.0, 5.0]],
var1.eval())
# Run 1 step of sgd
update_op.run()
# var1 is clipped along the sparse dimension, so it falls back to dense
# calculations. There should be a warning logged, but the numerics
# should still be correct.
var1_out = var1.eval()
# var1[:, 0] has norm < 2.0, so it is not clipped.
self.assertAllCloseAccordingToType(
[(0.0 - 3.0 * 0.1), 0.0, (0.0 - 3.0 * 0.1)], var1_out[:, 0])
# var1[:, 1] has norm > 2.0, so it is clipped.
expected_unclipped = np.array([(1.0 - 3.0 * 0.1), 3.0, (5.0 - 3.0 * 0.1)])
self.assertAllCloseAccordingToType(2.0 * expected_unclipped /
np.linalg.norm(expected_unclipped),
var1_out[:, 1])
# Validate updated params
var0_out = var0.eval()
# var0[0] has norm < 2.0, so it is not clipped.
self.assertAllCloseAccordingToType([(0.0 - 3.0 * 0.1), (1.0 - 3.0 * 0.1)],
var0_out[0])
# var0[1] has no gradients, so it should remain unchanged.
self.assertAllCloseAccordingToType([2.0, 3.0], var0_out[1])
# var0[2] has norm > 2.0, so it is clipped.
expected_unclipped = np.array([(4.0 - 3.0 * 0.1), (5.0 - 3.0 * 0.1)])
self.assertAllCloseAccordingToType(2.0 * expected_unclipped /
np.linalg.norm(expected_unclipped),
var0_out[2])
def testDenseLocal(self):
for dtype in [dtypes.float32, dtypes.float64, dtypes.half]:
with self.cached_session():
var0, var1, update_op = self._setupDense(False, dtype)
self._assertDenseCorrect(var0, var1, update_op)
def testDenseDistributed(self):
worker, unused_ps = self._setupCluster()
for dtype in [dtypes.float64, dtypes.half, dtypes.float32]:
with session.Session(worker.target):
var0, var1, update_op = self._setupDense(True, dtype)
self._assertDenseCorrect(var0, var1, update_op)
def testSparseLocal(self):
for dtype in [dtypes.float64, dtypes.float32, dtypes.half]:
with self.cached_session():
var0, var1, update_op = self._setupSparse(False, dtype)
self._assertSparseCorrect(var0, var1, update_op)
def testSparseDistributed(self):
worker, unused_ps = self._setupCluster()
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with session.Session(worker.target):
var0, var1, update_op = self._setupSparse(True, dtype)
self._assertSparseCorrect(var0, var1, update_op)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/variable_clipping_optimizer_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LazyAdamOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.opt.python.training import lazy_adam_optimizer
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def adam_update_numpy(param,
g_t,
t,
m,
v,
alpha=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8):
alpha_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
param_t = param - alpha_t * m_t / (np.sqrt(v_t) + epsilon)
return param_t, m_t, v_t
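# For example, three steps with a constant gradient (illustrative only; this
# mirrors the loops in the tests below):
#   p, m, v = 1.0, 0.0, 0.0
#   for t in range(1, 4):
#     p, m, v = adam_update_numpy(p, 0.1, t, m, v)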
class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters([False, True])
def testSparse(self, use_resource):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
else:
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0_np_indices = np.array([0, 1], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np),
constant_op.constant(grads0_np_indices), constant_op.constant([2]))
grads1_np_indices = np.array([0, 1], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np),
constant_op.constant(grads1_np_indices), constant_op.constant([2]))
opt = lazy_adam_optimizer.LazyAdamOptimizer()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
@parameterized.parameters([False, True])
def testSparseDevicePlacement(self, use_resource):
for index_dtype in [dtypes.int32, dtypes.int64]:
with self.cached_session(force_gpu=test.is_gpu_available()):
# If a GPU is available, tests that all optimizer ops can be placed on
# it (i.e. they have GPU kernels).
if use_resource:
var = resource_variable_ops.ResourceVariable([[1.0], [2.0]])
else:
var = variables.Variable([[1.0], [2.0]])
indices = constant_op.constant([0, 1], dtype=index_dtype)
gathered_sum = math_ops.reduce_sum(array_ops.gather(var, indices))
optimizer = lazy_adam_optimizer.LazyAdamOptimizer(3.0)
minimize_op = optimizer.minimize(gathered_sum)
variables.global_variables_initializer().run()
minimize_op.run()
@parameterized.parameters([False, True])
def testSparseRepeatedIndices(self, use_resource):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
if use_resource:
repeated_index_update_var = resource_variable_ops.ResourceVariable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = resource_variable_ops.ResourceVariable(
[[1.0], [2.0]], dtype=dtype)
else:
repeated_index_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update_opt = lazy_adam_optimizer.LazyAdamOptimizer()
repeated_update = repeated_update_opt.apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update_opt = lazy_adam_optimizer.LazyAdamOptimizer()
aggregated_update = aggregated_update_opt.apply_gradients(
[(grad_aggregated, aggregated_update_var)])
variables.global_variables_initializer().run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
def doTestBasic(self, use_resource=False, use_callable_params=False):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.session(graph=ops.Graph()):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(
var0_np, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
var1_np, name="var1_%d" % i)
else:
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = lambda: 0.001
beta1 = lambda: 0.9
beta2 = lambda: 0.999
epsilon = lambda: 1e-8
if not use_callable_params:
learning_rate = learning_rate()
beta1 = beta1()
beta2 = beta2()
epsilon = epsilon()
opt = lazy_adam_optimizer.LazyAdamOptimizer(learning_rate=learning_rate)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
opt_variables = opt.variables()
beta1_power, beta2_power = opt._get_beta_accumulators()
self.assertIsNotNone(beta1_power)
self.assertIsNotNone(beta2_power)
self.assertIn(beta1_power, opt_variables)
self.assertIn(beta2_power, opt_variables)
if not context.executing_eagerly():
with ops.Graph().as_default():
# Shouldn't return non-slot variables from other graphs.
self.assertEqual(0, len(opt.variables()))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
if not context.executing_eagerly():
self.evaluate(update)
elif t > 1:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta2_power))
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
if use_resource:
self.assertEqual("var0_%d/Adam:0" % (i,),
opt.get_slot(var=var0, name="m").name)
def testBasic(self):
with self.cached_session():
self.doTestBasic(use_resource=False)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testResourceBasic(self):
self.doTestBasic(use_resource=True)
def testBasicCallableParams(self):
with context.eager_mode():
self.doTestBasic(use_resource=True, use_callable_params=True)
def testTensorLearningRate(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = lazy_adam_optimizer.LazyAdamOptimizer(constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = lazy_adam_optimizer.LazyAdamOptimizer()
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
beta1_power, beta2_power = opt._get_beta_accumulators()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 3 steps of intertwined Adam1 and Adam2.
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
if t % 2 == 0:
update1.run()
else:
update2.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testTwoSessions(self):
optimizer = lazy_adam_optimizer.LazyAdamOptimizer()
with context.eager_mode():
var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = constant_op.constant(np.array([0.1, 0.1]))
optimizer.apply_gradients([(grads0, var0)])
g = ops.Graph()
with g.as_default():
with self.session(graph=g):
var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = constant_op.constant(np.array([0.1, 0.1]))
optimizer.apply_gradients([(grads0, var0)])
gg = ops.Graph()
with gg.as_default():
with self.session(graph=gg):
var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = constant_op.constant(np.array([0.1, 0.1]))
# If the optimizer saves any state not keyed by graph the following line
# fails.
optimizer.apply_gradients([(grads0, var0)])
def testSlotsUniqueEager(self):
with context.eager_mode():
v1 = resource_variable_ops.ResourceVariable(1.)
v2 = resource_variable_ops.ResourceVariable(1.)
opt = lazy_adam_optimizer.LazyAdamOptimizer(1.)
opt.minimize(lambda: v1 + v2)
# There should be two non-slot variables (the beta accumulators), and two
# slot variables (m and v) for each of v1 and v2: six variables in total.
self.assertEqual(6, len(set(opt.variables())))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/lazy_adam_optimizer_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for optimizers with weight decay."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.opt.python.training import weight_decay_optimizers
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
WEIGHT_DECAY = 0.01
def adamw_update_numpy(param, g_t, t, m, v, lr=0.001, beta1=0.9,
beta2=0.999, epsilon=1e-8):
lr_t = lr * np.sqrt(1 - beta2**t) / (1 - beta1**t)
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
param_t = (param - lr_t * m_t / (np.sqrt(v_t) + epsilon) -
(param * WEIGHT_DECAY))
return param_t, m_t, v_t
def momentumw_update_numpy(param, g_t, m, lr=0.001, momentum=0.9, **_):
# v, t are not needed for momentum optimizer
m = momentum * m + g_t
param_t = param - lr * m - param * WEIGHT_DECAY
return param_t, m, None
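# A hedged usage sketch (the helper below is hypothetical, not part of the
# test suite): iterating `adamw_update_numpy` shows the decoupled decay term
# `param * WEIGHT_DECAY` shrinking the weights on top of the plain Adam
# step, which is what the assertions below verify against the TensorFlow
# optimizers.
def _demo_adamw_steps(num_steps=3):
  param = np.array([1.0, 2.0])
  grad = np.array([0.1, 0.1])
  m, v = 0.0, 0.0
  for t in range(1, num_steps + 1):
    param, m, v = adamw_update_numpy(param, grad, t, m, v)
  return param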
class WeightDecayOptimizerTest(test.TestCase):
def doTest(self, optimizer, update_fn, optimizer_name, slot_name,
use_resource=False, do_sparse=False):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.session(graph=ops.Graph()):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(
var0_np, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
var1_np, name="var1_%d" % i)
else:
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
if do_sparse:
grads0_np_indices = np.array([0, 1], dtype=np.int32)
grads0 = ops.IndexedSlices(constant_op.constant(grads0_np),
constant_op.constant(grads0_np_indices),
constant_op.constant([2]))
grads1_np_indices = np.array([0, 1], dtype=np.int32)
grads1 = ops.IndexedSlices(constant_op.constant(grads1_np),
constant_op.constant(grads1_np_indices),
constant_op.constant([2]))
else:
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = optimizer()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
if not context.executing_eagerly():
with ops.Graph().as_default():
# Shouldn't return non-slot variables from other graphs.
self.assertEqual(0, len(opt.variables()))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of the optimizer
for t in range(1, 4):
if not context.executing_eagerly():
self.evaluate(update)
elif t > 1:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, m0, v0 = update_fn(var0_np, grads0_np, t=t, m=m0, v=v0)
var1_np, m1, v1 = update_fn(var1_np, grads1_np, t=t, m=m1, v=v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
if use_resource:
self.assertEqual("var0_%d/%s:0" % (i, optimizer_name),
opt.get_slot(var=var0, name=slot_name).name)
class AdamWOptimizerTest(WeightDecayOptimizerTest):
@staticmethod
def get_optimizer():
return weight_decay_optimizers.AdamWOptimizer(WEIGHT_DECAY)
def testSparse(self):
self.doTest(self.get_optimizer, adamw_update_numpy, "AdamW", "m",
use_resource=False, do_sparse=True)
def testResourceSparse(self):
self.doTest(self.get_optimizer, adamw_update_numpy, "AdamW", "m",
use_resource=True, do_sparse=True)
def testBasic(self):
self.doTest(self.get_optimizer, adamw_update_numpy, "AdamW", "m",
use_resource=False)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testResourceBasic(self):
self.doTest(self.get_optimizer, adamw_update_numpy, "AdamW", "m",
use_resource=True)
class MomentumWOptimizerTest(WeightDecayOptimizerTest):
@staticmethod
def get_optimizer():
return weight_decay_optimizers.MomentumWOptimizer(WEIGHT_DECAY, 0.001, 0.9)
def testSparse(self):
self.doTest(self.get_optimizer, momentumw_update_numpy, "MomentumW",
"momentum", use_resource=False, do_sparse=True)
def testResourceSparse(self):
self.doTest(self.get_optimizer, momentumw_update_numpy, "MomentumW",
"momentum", use_resource=True, do_sparse=True)
def testBasic(self):
self.doTest(self.get_optimizer, momentumw_update_numpy, "MomentumW",
"momentum", use_resource=False)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testResourceBasic(self):
self.doTest(self.get_optimizer, momentumw_update_numpy, "MomentumW",
"momentum", use_resource=True)
class ExtendWithWeightDecayTest(WeightDecayOptimizerTest):
@staticmethod
def get_optimizer():
adamw = weight_decay_optimizers.extend_with_decoupled_weight_decay(
adam.AdamOptimizer)
return adamw(WEIGHT_DECAY)
def testBasic(self):
self.doTest(self.get_optimizer, adamw_update_numpy, "Adam", "m",
use_resource=False)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testResourceBasic(self):
self.doTest(self.get_optimizer, adamw_update_numpy, "Adam", "m",
use_resource=True)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/weight_decay_optimizers_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for EAOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import portpicker
from tensorflow.contrib.opt.python.training import agn_optimizer
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import device_setter
from tensorflow.python.training import server_lib
from tensorflow.python.training import training
from tensorflow.python.training import training_util
def create_local_cluster(num_workers, num_ps, protocol="grpc"):
"""Create local GRPC servers and return them."""
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs, job_name="worker", protocol=protocol, task_index=ix, start=True)
for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs, job_name="ps", protocol=protocol, task_index=ix, start=True)
for ix in range(num_ps)
]
return cluster_dict, workers, ps_servers
# Creates the workers and returns their sessions, graphs and train_ops.
# The chief worker updates last.
def _get_workers(num_workers, period, workers, num_ps=1):
sessions = []
graphs = []
train_ops = []
for worker_id in range(num_workers):
graph = ops.Graph()
is_chief = (worker_id == 0)
with graph.as_default():
worker_device = "/job:worker/task:%d/cpu:0" % (worker_id)
ps_device = device_setter.replica_device_setter(
worker_device=worker_device,
ps_device="/job:ps/task:0/cpu:0",
ps_tasks=1)
agn_getter = agn_optimizer.AGNCustomGetter(worker_device=worker_device)
with variable_scope.variable_scope(
"", custom_getter=agn_getter), ops.device(ps_device):
global_step = training_util.get_or_create_global_step()
var_0 = variable_scope.get_variable(initializer=0.0, name="v0")
var_1 = variable_scope.get_variable(initializer=0.5, name="v1")
if num_ps > 1:
with variable_scope.variable_scope(
"",
partitioner=partitioned_variables.fixed_size_partitioner(
num_ps, axis=0),
custom_getter=agn_getter), ops.device(ps_device):
partition_var = variable_scope.get_variable(
"partition_var",
shape=[2, 4],
initializer=init_ops.zeros_initializer)
part_0 = list(partition_var)[0]
part_1 = list(partition_var)[1]
with ops.device("/job:worker/task:" + str(worker_id)):
grads_0 = constant_op.constant(-1.0)
grads_1 = constant_op.constant(-1.0)
grads_part_0 = constant_op.constant([[-1., -1., -1., -1.]])
grads_part_1 = constant_op.constant([[-1., -1., -1., -1.]])
optimizer = adam.AdamOptimizer(
learning_rate=0.1, beta1=0.0, beta2=0.0)
opt = agn_optimizer.AGNOptimizer(
optimizer,
num_worker=num_workers,
communication_period=period,
custom_getter=agn_getter)
if num_ps == 1:
train_op = [
opt.apply_gradients(([grads_0, var_0], [grads_1, var_1]),
global_step)
]
else:
train_op = [
opt.apply_gradients(
([grads_0, var_0], [grads_1, var_1], [grads_part_0, part_0],
[grads_part_1, part_1]), global_step)
]
hook = opt.make_session_run_hook(is_chief, worker_id)
# Creates MonitoredSession
sess = training.MonitoredTrainingSession(
workers[worker_id].target, hooks=[hook])
sessions.append(sess)
graphs.append(graph)
train_ops.append(train_op)
return sessions, graphs, train_ops
class AGNOptimizerTest(test.TestCase):
def _run(self, train_op, sess):
sess.run(train_op)
def test1Workers2Period(self):
num_workers = 1
communication_period = 4
num_ps = 1
_, workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
sessions, graphs, train_ops = _get_workers(num_workers,
communication_period, workers)
var_0 = graphs[0].get_tensor_by_name("v0:0")
var_1 = graphs[0].get_tensor_by_name("v1:0")
global_step = training_util.get_global_step(graphs[0])
var_0_g = graphs[0].get_tensor_by_name(
agn_optimizer.GLOBAL_VARIABLE_NAME + "/v0:0")
var_1_g = graphs[0].get_tensor_by_name(
agn_optimizer.GLOBAL_VARIABLE_NAME + "/v1:0")
# Verify that Adam/beta variables are not in the global collection.
with graphs[0].as_default():
for ele in variables.global_variables():
self.assertTrue(ele.op.name.find("beta") < 0)
if ele.op.name.find("global_center_variable") < 0:
self.assertTrue(ele.op.name.find("Adam") < 0)
# Verify the initialized value.
self.assertAllEqual(0.0, sessions[0].run(var_0))
self.assertAllEqual(0.5, sessions[0].run(var_1))
self.assertAllEqual(0.0, sessions[0].run(var_0_g))
self.assertAllEqual(0.5, sessions[0].run(var_1_g))
self.assertAllEqual(0, sessions[0].run(global_step))
# step 1
sessions[0].run(train_ops[0])
self.assertNear(0.1, sessions[0].run(var_0), 1e-6)
self.assertNear(0.6, sessions[0].run(var_1), 1e-6)
self.assertAllEqual(0.0, sessions[0].run(var_0_g))
self.assertAllEqual(0.5, sessions[0].run(var_1_g))
self.assertAllEqual(0, sessions[0].run(global_step))
# 2 & 3
sessions[0].run(train_ops[0])
sessions[0].run(train_ops[0])
self.assertNear(0.3, sessions[0].run(var_0), 1e-6)
self.assertNear(0.8, sessions[0].run(var_1), 1e-6)
# step 4
sessions[0].run(train_ops[0])
# pull: the 4th local step triggers a sync with the global copy.
self.assertAllEqual(sessions[0].run(var_0), sessions[0].run(var_0_g))
self.assertAllEqual(sessions[0].run(var_1), sessions[0].run(var_1_g))
self.assertNear(0.1, sessions[0].run(var_0), 1e-6)
self.assertNear(0.6, sessions[0].run(var_1), 1e-6)
sessions[0].run(train_ops[0])
sessions[0].run(train_ops[0])
sessions[0].run(train_ops[0])
sessions[0].run(train_ops[0])
self.assertAllEqual(sessions[0].run(var_0), sessions[0].run(var_0_g))
self.assertAllEqual(sessions[0].run(var_1), sessions[0].run(var_1_g))
self.assertNear(0.2, sessions[0].run(var_0), 1e-6)
self.assertNear(0.7, sessions[0].run(var_1), 1e-6)
def test2Worker1Period(self):
num_workers = 2
communication_period = 1
num_ps = 2
_, workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
sessions, graphs, train_ops = _get_workers(
num_workers, communication_period, workers, num_ps=2)
var_0 = graphs[0].get_tensor_by_name("v0:0")
var_1 = graphs[0].get_tensor_by_name("v1:0")
var_0_1 = graphs[1].get_tensor_by_name("v0:0")
var_1_1 = graphs[1].get_tensor_by_name("v1:0")
var_0_g = graphs[0].get_tensor_by_name(
agn_optimizer.GLOBAL_VARIABLE_NAME + "/v0:0")
var_1_g = graphs[0].get_tensor_by_name(
agn_optimizer.GLOBAL_VARIABLE_NAME + "/v1:0")
part_0_g = graphs[0].get_tensor_by_name(
agn_optimizer.GLOBAL_VARIABLE_NAME +
"/partition_var/part_0:0")
part_1_g = graphs[0].get_tensor_by_name(
agn_optimizer.GLOBAL_VARIABLE_NAME +
"/partition_var/part_1:0")
# Verify the initialized value.
self.assertAllEqual(0.0, sessions[0].run(var_0))
self.assertAllEqual(0.5, sessions[0].run(var_1))
self.assertAllEqual(0.0, sessions[1].run(var_0_1))
self.assertAllEqual(0.5, sessions[1].run(var_1_1))
self.assertAllEqual(0.0, sessions[0].run(var_0_g))
self.assertAllEqual(0.5, sessions[0].run(var_1_g))
# verify each step
sessions[0].run(train_ops[0])
self.assertNear(0.1, sessions[0].run(var_0_g), 1e-6)
self.assertNDArrayNear([0.1, 0.1, 0.1, 0.1], sessions[0].run(part_0_g),
1e-6)
self.assertNDArrayNear([0.1, 0.1, 0.1, 0.1], sessions[0].run(part_1_g),
1e-6)
sessions[1].run(train_ops[1])
self.assertNear(0.2, sessions[0].run(var_0_g), 1e-6)
self.assertNDArrayNear([0.2, 0.2, 0.2, 0.2], sessions[0].run(part_0_g),
1e-6)
self.assertNDArrayNear([0.2, 0.2, 0.2, 0.2], sessions[0].run(part_1_g),
1e-6)
sessions[0].run(train_ops[0])
sessions[1].run(train_ops[1])
sessions[0].run(train_ops[0])
sessions[1].run(train_ops[1])
self.assertNear(0.6, sessions[0].run(var_0_g), 1e-6)
self.assertNDArrayNear([0.6, 0.6, 0.6, 0.6], sessions[0].run(part_0_g),
1e-6)
self.assertNDArrayNear([0.6, 0.6, 0.6, 0.6], sessions[0].run(part_1_g),
1e-6)
def testAGNCustomGetter(self):
cluster_spec = server_lib.ClusterSpec({
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
})
agn_getter = agn_optimizer.AGNCustomGetter(
worker_device="/job:worker/task:0")
with ops.device(
device_setter.replica_device_setter(cluster=cluster_spec,
worker_device="/job:worker/task:0",
ps_device="/job:ps")), \
variable_scope.variable_scope("", custom_getter=agn_getter):
v = variable_scope.get_variable(initializer=[1, 2], name="v")
w = variable_scope.get_variable(initializer=[2, 1], name="w")
v_g, w_g = agn_getter._global_map[v], agn_getter._global_map[w]
self.assertDeviceEqual("/job:worker/task:0", v.device)
self.assertDeviceEqual("job:ps/task:0", v_g.device)
self.assertDeviceEqual("/job:worker/task:0", w.device)
self.assertDeviceEqual("job:ps/task:1", w_g.device)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/agn_optimizer_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Shampoo Optimizer.
Variant of Adagrad using one preconditioner matrix per variable dimension.
For details, see https://arxiv.org/abs/1802.09568
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.opt.python.training import matrix_functions
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging
from tensorflow.python.training import optimizer
def GetParam(var, timestep):
if callable(var):
return var(timestep)
else:
return var
class ShampooOptimizer(optimizer.Optimizer):
"""The Shampoo Optimizer
Variant of Adagrad using one preconditioner matrix per variable dimension.
For details, see https://arxiv.org/abs/1802.09568
gbar is time-weighted accumulated gradient:
gbar[t] = gbar_decay[t] * gbar[t-1] + gbar_weight[t] * g[t]
mat_gbar is time-weighted accumulated gradient square:
mat_gbar_j[t] = mat_gbar_decay[t] * mat_gbar_j[t-1]
+ mat_gbar_weight[t] * gg_j[t]
where if g[t] = g_abcd then gg_a[t] = g_abcd g_a'bcd (Einstein notation)
Update rule:
w[t+1] = w[t] - learning_rate[t] * Prod_j mat_gbar_j[t]^(-alpha/n) gbar[t]
Here, mat_gbar_j[t]^(-alpha/n) gbar[t] is a tensor contraction along the
j'th dimension of gbar[t] with the first dimension of
mat_gbar_j[t]^(-alpha/n), where alpha is a hyperparameter,
and n = rank of the variable.
Prod_j represents doing this contraction for all j in 0..n-1.
Typically learning_rate is constant, but could be time dependent by passing
a lambda function that depends on step.
"""
def __init__(self,
global_step=0,
max_matrix_size=768,
gbar_decay=0.0,
gbar_weight=1.0,
mat_gbar_decay=1.0,
mat_gbar_weight=1.0,
learning_rate=1.0,
svd_interval=1,
precond_update_interval=1,
epsilon=1e-4,
alpha=0.5,
use_iterative_root=False,
use_locking=False,
name="Shampoo"):
"""Default values of the various hyper-parameters.
gbar_decay, gbar_weight etc. can be a float or a time varying parameter.
For time-varying parameters use e.g. "lambda T: T / (T + 1.0)",
where the expression in the lambda is a TensorFlow expression.
Args:
global_step: tensorflow variable indicating the step.
max_matrix_size: We do not perform SVD for matrices larger than this.
gbar_decay:
gbar_weight: Used to update gbar:
gbar[t] = gbar_decay[t] * gbar[t-1] + gbar_weight[t] * g[t]
mat_gbar_decay:
mat_gbar_weight: Used to update mat_gbar:
mat_gbar_j[t] = mat_gbar_decay[t] * mat_gbar_j[t-1]
+ mat_gbar_weight[t] * gg_j[t]
learning_rate: Similar to SGD
svd_interval: We should do SVD after this many steps. Default = 1, i.e.
every step. Usually 20 leads to no loss of accuracy, and
50 or 100 is also OK. May also want more often early,
and less often later - set in the caller as, for example:
"svd_interval = lambda T: tf.cond(
T < 2000, lambda: 20.0, lambda: 1000.0)"
precond_update_interval: We should update the preconditioners after
this many steps. Default = 1. Usually less than
svd_interval.
epsilon: epsilon * I_n is added to each mat_gbar_j for stability for
non-diagonal version of shampoo.
alpha: total power of the preconditioners.
use_iterative_root: should the optimizer use SVD (faster) or the
iterative root method (for TPU) for finding the
roots of PSD matrices.
use_locking:
name: name of optimizer.
"""
super(ShampooOptimizer, self).__init__(use_locking, name)
self._global_step = math_ops.cast(global_step, dtypes.float32)
self._max_matrix_size = max_matrix_size
self._gbar_decay = gbar_decay
self._gbar_weight = gbar_weight
self._mat_gbar_decay = mat_gbar_decay
self._mat_gbar_weight = mat_gbar_weight
self._learning_rate = learning_rate
self._svd_interval = svd_interval
self._precond_update_interval = precond_update_interval
self._epsilon = epsilon
self._alpha = alpha
self._use_iterative_root = use_iterative_root
self._name = name
def _create_slots(self, var_list):
for v in var_list:
with ops.colocate_with(v):
_ = self._zeros_slot(v, "gbar", self._name)
shape = np.array(v.get_shape())
for i, d in enumerate(shape):
d_tensor = ops.convert_to_tensor(d)
if d <= self._max_matrix_size:
mat_g_init = array_ops.zeros_like(linalg_ops.eye(d_tensor))
if self._svd_interval > 1:
_ = self._get_or_make_slot(v, linalg_ops.eye(d_tensor),
"H_" + str(i), self._name)
else:
mat_g_init = array_ops.zeros([d_tensor])
_ = self._get_or_make_slot(v, mat_g_init, "Gbar_" + str(i),
self._name)
def _resource_apply_dense(self, grad, var):
return self._apply_dense(grad, var)
def _apply_dense(self, grad, var):
return self._apply_gradient(grad, var)
def _resource_apply_sparse(self, grad_values, var, grad_indices):
return self._apply_sparse_shared(grad_values, grad_indices, var)
def _apply_sparse(self, grad, var):
return self._apply_sparse_shared(grad.values, grad.indices, var)
def _apply_sparse_shared(self, grad_values, grad_indices, var):
if var.get_shape()[0] <= self._max_matrix_size or self._gbar_decay != 0.0:
# The leading dimension is small enough that we can densify the gradient
# and do a dense update.
dense_grad = array_ops.scatter_nd(
array_ops.expand_dims(grad_indices, axis=1), grad_values,
array_ops.shape(var, out_type=grad_indices.dtype))
return self._apply_gradient(dense_grad, var)
return self._apply_gradient(grad_values, var, grad_indices)
def _weighted_average(self, var, weight, weight_t, rest):
"""Computes exponential weighted average: var = weight_t * var + rest.
Important to ensure that var does not occur in rest, otherwise
we can get race conditions in a distributed setting.
Args:
var: variable to be updated
weight: parameter to be checked. If it is a constant, we can optimize.
weight_t: current value of parameter, used for weighting
rest: the remaining tensor to be added
Returns:
updated variable.
"""
if weight == 0.0:
return rest # no need to update var, we will never use it.
if weight == 1.0: # common case
return state_ops.assign_add(var, rest)
# The op below can cause race conditions in a distributed setting,
# since computing weight_t * var + rest can take some time, during
# which var may be set by another worker. To prevent this, it should
# be implemented as a C++ op.
return var.assign_add((weight_t - 1) * var + rest)
def _update_mat_g(self, mat_g, grad, axes, mat_gbar_decay,
mat_gbar_weight, i):
"""Updates the cumulative outer products of the gradients.
Args:
mat_g: the matrix to be updated
grad: the gradient of the variable
axes: a list of k-1 integers 0 to k-1, except i
mat_gbar_decay: constant for weighted average:
mat_g = mat_g * decay + grad * weight
mat_gbar_weight: constant for weighted average
i: index of dimension to be updated.
Returns:
updated mat_g = mat_g * mat_gbar_decay + grad_outer * mat_gbar_weight
In Einstein notation if i = 0: grad_outer_aa'= g_abcd g_a'bcd
thus grad_outer is a matrix d_i x d_i, where d_i is the size of the
i'th dimension of g.
Alternate view: If mat_i(grad) is the flattening of grad to a
d_i x (d_1d_2...d_{i-1}d_{i+1}...d_k) matrix, then
grad_outer = mat_i(grad) mat_i(grad).transpose
"""
grad_outer = math_ops.tensordot(grad, grad, axes=(axes, axes),
name="grad_outer_" + str(i))
return self._weighted_average(mat_g, self._mat_gbar_decay, mat_gbar_decay,
mat_gbar_weight * grad_outer)
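# A quick numpy check of the "alternate view" above (illustrative only):
#   g = np.random.randn(2, 3, 4)
#   np.allclose(np.tensordot(g, g, axes=([1, 2], [1, 2])),
#               g.reshape(2, -1).dot(g.reshape(2, -1).T))  # -> True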
def _compute_power_svd(self, var, mat_g, mat_g_size, alpha, mat_h_slot_name):
"""Computes mat_h = mat_g^alpha using svd. mat_g is a symmetric PSD matrix.
Args:
var: the variable we are updating.
mat_g: the symmetric PSD matrix whose power is to be computed
mat_g_size: size of mat_g
alpha: a real number
mat_h_slot_name: name of slot to store the power, if needed.
Returns:
mat_h = mat_g^alpha
Stores mat_h in the appropriate slot, if it exists.
Note that mat_g is PSD. So we could use linalg_ops.self_adjoint_eig.
"""
if mat_g_size == 1:
mat_h = math_ops.pow(mat_g + self._epsilon, alpha)
else:
damping = self._epsilon * linalg_ops.eye(
math_ops.cast(mat_g_size, dtypes.int32))
diag_d, mat_u, mat_v = linalg_ops.svd(mat_g + damping, full_matrices=True)
mat_h = math_ops.matmul(
mat_v * math_ops.pow(math_ops.maximum(diag_d, self._epsilon), alpha),
array_ops.transpose(mat_u))
if mat_h_slot_name is not None:
return state_ops.assign(self.get_slot(var, mat_h_slot_name), mat_h)
return mat_h
def _compute_power_iter(self, var, mat_g, mat_g_size, alpha, mat_h_slot_name,
iter_count=100, epsilon=1e-6):
"""Computes mat_g^alpha, where alpha = -1/p, p a positive integer."""
mat_g_sqrt = matrix_functions.matrix_square_root(mat_g, mat_g_size,
iter_count, self._epsilon)
mat_h = matrix_functions.matrix_inverse_pth_root(
mat_g_sqrt,
mat_g_size,
2 * alpha,
iter_count,
epsilon,
ridge_epsilon=0.0)
if mat_h_slot_name is not None:
return state_ops.assign(self.get_slot(var, mat_h_slot_name), mat_h)
return mat_h
def _compute_power(self, var, mat_g, mat_g_size, alpha, mat_h_slot_name=None):
"""Just a switch between the iterative power vs svd."""
with ops.name_scope("matrix_iterative_power"):
if self._use_iterative_root:
return self._compute_power_iter(var, mat_g, mat_g_size, alpha,
mat_h_slot_name)
else:
return self._compute_power_svd(var, mat_g, mat_g_size, alpha,
mat_h_slot_name)
def _apply_gradient(self, grad, var, indices=None):
"""The main function to update a variable.
Args:
grad: A Tensor containing gradient to apply.
var: A Tensor containing the variable to update.
indices: An array of integers, for sparse update.
Returns:
Updated variable var = var - learning_rate * preconditioner * grad
If the gradient is dense, var and grad have the same shape.
If the update is sparse, then the first dimension of the gradient and var
may differ, others are all the same. In this case the indices array
provides the set of indices of the variable which are to be updated with
each row of the gradient.
"""
global_step = self._global_step + 1
# Update accumulated weighted average of gradients
gbar = self.get_slot(var, "gbar")
gbar_decay_t = GetParam(self._gbar_decay, global_step)
gbar_weight_t = GetParam(self._gbar_weight, global_step)
if indices is not None:
# Note - the sparse update is not easily implemented, since the
# algorithm needs all indices of gbar to be updated
# if mat_gbar_decay != 1 or mat_gbar_decay != 0.
# One way to make mat_gbar_decay = 1 is by rescaling.
# If we want the update:
# G_{t+1} = a_{t+1} G_t + b_{t+1} w_t
# define:
# r_{t+1} = a_{t+1} * r_t
# h_t = G_t / r_t
# Then:
# h_{t+1} = h_t + (b_{t+1} / r_{t+1}) * w_t
# So we get the mat_gbar_decay = 1 as desired.
# We can implement this in a future version as needed.
# However we still need gbar_decay = 0, otherwise all indices
# of the variable will need to be updated.
if self._gbar_decay != 0.0:
tf_logging.warning("Not applying momentum for variable: %s" % var.name)
gbar_updated = grad
else:
gbar_updated = self._weighted_average(gbar, self._gbar_decay,
gbar_decay_t,
gbar_weight_t * grad)
# Update the preconditioners and compute the preconditioned gradient
shape = var.get_shape()
mat_g_list = []
for i in range(len(shape)):
mat_g_list.append(self.get_slot(var, "Gbar_" + str(i)))
mat_gbar_decay_t = GetParam(self._mat_gbar_decay, global_step)
mat_gbar_weight_t = GetParam(self._mat_gbar_weight, global_step)
preconditioned_grad = gbar_updated
v_rank = len(mat_g_list)
neg_alpha = - GetParam(self._alpha, global_step) / v_rank
svd_interval = GetParam(self._svd_interval, global_step)
precond_update_interval = GetParam(self._precond_update_interval,
global_step)
for i, mat_g in enumerate(mat_g_list):
# axes is the list of indices to reduce - everything but the current i.
axes = list(range(i)) + list(range(i+1, v_rank))
if shape[i] <= self._max_matrix_size:
# If the tensor size is sufficiently small perform full Shampoo update
# Note if precond_update_interval > 1 and mat_gbar_decay_t != 1, this
# is not strictly correct. However we will use it for now, and
# fix if needed. (G_1 = aG + bg ==> G_n = a^n G + (1+a+..+a^{n-1})bg)
# pylint: disable=g-long-lambda,cell-var-from-loop
mat_g_updated = control_flow_ops.cond(
math_ops.mod(global_step, precond_update_interval) < 1,
lambda: self._update_mat_g(
mat_g, grad, axes, mat_gbar_decay_t,
mat_gbar_weight_t * precond_update_interval, i),
lambda: mat_g)
mat_g_updated = mat_g_updated / float(shape[i].value)
if self._svd_interval == 1:
mat_h = self._compute_power(var, mat_g_updated, shape[i], neg_alpha)
else:
mat_h = control_flow_ops.cond(
math_ops.mod(global_step, svd_interval) < 1,
lambda: self._compute_power(var, mat_g_updated, shape[i],
neg_alpha, "H_" + str(i)),
lambda: self.get_slot(var, "H_" + str(i)))
# mat_h is a square matrix of size d_i x d_i
# preconditioned_grad is a d_i x ... x d_n x d_0 x ... d_{i-1} tensor
# After contraction with a d_i x d_i tensor
# it becomes a d_{i+1} x ... x d_n x d_0 x ... d_i tensor
# (the first dimension is contracted out, and the second dimension of
# mat_h is appended). After going through all the indices, it becomes
# a d_0 x ... x d_n tensor again.
preconditioned_grad = math_ops.tensordot(preconditioned_grad, mat_h,
axes=([0], [0]),
name="precond_" + str(i))
else:
# Tensor size is too large -- perform diagonal Shampoo update
# Only normalize non-vector cases.
if axes:
normalizer = 1.0 if indices is not None else float(shape[i].value)
grad_outer = math_ops.reduce_sum(grad * grad, axis=axes) / normalizer
else:
grad_outer = grad * grad
if i == 0 and indices is not None:
assert self._mat_gbar_decay == 1.0
mat_g_updated = state_ops.scatter_add(mat_g, indices,
mat_gbar_weight_t * grad_outer)
mat_g_updated_slice = array_ops.gather(mat_g_updated, indices)
mat_h = array_ops.where(
math_ops.greater(mat_g_updated_slice, 0),
math_ops.pow(mat_g_updated_slice, neg_alpha),
array_ops.zeros_like(mat_g_updated_slice))
else:
mat_g_updated = self._weighted_average(mat_g,
self._mat_gbar_decay,
mat_gbar_decay_t,
mat_gbar_weight_t * grad_outer)
mat_h = array_ops.where(
math_ops.greater(mat_g_updated, 0),
math_ops.pow(mat_g_updated, neg_alpha),
array_ops.zeros_like(mat_g_updated))
# Need to do the transpose to ensure that the tensor becomes
# a d_{i+1} x ... x d_n x d_0 x ... d_i tensor as described above.
preconditioned_grad = array_ops.transpose(
preconditioned_grad, perm=list(range(1, v_rank)) + [0]) * mat_h
# Update the variable based on the Shampoo update
learning_rate_t = GetParam(self._learning_rate, global_step)
if indices is not None:
var_updated = state_ops.scatter_add(
var, indices, -learning_rate_t * preconditioned_grad)
else:
var_updated = state_ops.assign_sub(var,
learning_rate_t * preconditioned_grad)
return var_updated
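# A minimal numpy sketch of the rank-2 full-matrix Shampoo update described
# in the class docstring (illustrative only; it assumes gbar_decay=0,
# mat_gbar_decay=1, svd_interval=1 and no per-dimension normalization, all
# of which the real optimizer generalizes; `np` is this file's numpy import).
def _shampoo_step_2d(w, g, mat_g0, mat_g1, lr=1.0, alpha=0.5, epsilon=1e-4):
  """One Shampoo step for a d0 x d1 variable, using numpy only."""
  mat_g0 += g.dot(g.T)  # gg_0 in the docstring's Einstein notation
  mat_g1 += g.T.dot(g)  # gg_1
  def _inv_root(mat, p):
    # mat^(-p) for a symmetric PSD mat, via eigendecomposition.
    d, u = np.linalg.eigh(mat + epsilon * np.eye(mat.shape[0]))
    return (u * np.maximum(d, epsilon) ** (-p)).dot(u.T)
  h0 = _inv_root(mat_g0, alpha / 2)  # contracts the first dimension of g
  h1 = _inv_root(mat_g1, alpha / 2)  # contracts the second dimension
  return w - lr * h0.dot(g).dot(h1), mat_g0, mat_g1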
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/shampoo.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Nadam for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import adam
from tensorflow.python.training import training_ops
class NadamOptimizer(adam.AdamOptimizer):
"""Optimizer that implements the Nadam algorithm.
See [Dozat, T., 2015](http://cs229.stanford.edu/proj2015/054_report.pdf).
"""
def _apply_dense(self, grad, var):
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
beta1_power, beta2_power = self._get_beta_accumulators()
return training_ops.apply_adam(
var,
m,
v,
math_ops.cast(beta1_power, var.dtype.base_dtype),
math_ops.cast(beta2_power, var.dtype.base_dtype),
math_ops.cast(self._lr_t, var.dtype.base_dtype),
math_ops.cast(self._beta1_t, var.dtype.base_dtype),
math_ops.cast(self._beta2_t, var.dtype.base_dtype),
math_ops.cast(self._epsilon_t, var.dtype.base_dtype),
grad,
use_locking=self._use_locking,
use_nesterov=True).op
def _resource_apply_dense(self, grad, var):
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
beta1_power, beta2_power = self._get_beta_accumulators()
return training_ops.resource_apply_adam(
var.handle,
m.handle,
v.handle,
math_ops.cast(beta1_power, grad.dtype.base_dtype),
math_ops.cast(beta2_power, grad.dtype.base_dtype),
math_ops.cast(self._lr_t, grad.dtype.base_dtype),
math_ops.cast(self._beta1_t, grad.dtype.base_dtype),
math_ops.cast(self._beta2_t, grad.dtype.base_dtype),
math_ops.cast(self._epsilon_t, grad.dtype.base_dtype),
grad,
use_locking=self._use_locking,
use_nesterov=True)
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
beta1_power, beta2_power = self._get_beta_accumulators()
beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
# m_t = beta1 * m + (1 - beta1) * g_t
m = self.get_slot(var, "m")
m_scaled_g_values = grad * (1 - beta1_t)
m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
with ops.control_dependencies([m_t]):
m_t = scatter_add(m, indices, m_scaled_g_values)
# m_bar = (1 - beta1) * g_t + beta1 * m_t
m_bar = m_scaled_g_values + beta1_t * array_ops.gather(m_t, indices)
# v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
v = self.get_slot(var, "v")
v_scaled_g_values = (grad * grad) * (1 - beta2_t)
v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
with ops.control_dependencies([v_t]):
v_t = scatter_add(v, indices, v_scaled_g_values)
v_t_slice = array_ops.gather(v_t, indices)
v_sqrt = math_ops.sqrt(v_t_slice)
var_update = scatter_add(var, indices, -lr * m_bar / (v_sqrt + epsilon_t))
return control_flow_ops.group(*[var_update, m_bar, v_t])
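# A compact numpy sketch of the sparse Nadam step above (illustrative only;
# the function name and signature are hypothetical). Bias correction is
# folded into the step size as in `_apply_sparse_shared`; the only change
# relative to plain Adam is the Nesterov lookahead `m_bar`, which mixes the
# fresh gradient back into the already-updated first moment.
import numpy as np
def nadam_sparse_step(var, m, v, grad, indices, t,
                      lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
  """One Nadam step on the rows of `var` in `indices` (assumed unique)."""
  lr_t = lr * np.sqrt(1 - beta2**t) / (1 - beta1**t)
  m[indices] = beta1 * m[indices] + (1 - beta1) * grad
  m_bar = (1 - beta1) * grad + beta1 * m[indices]
  v[indices] = beta2 * v[indices] + (1 - beta2) * grad * grad
  var[indices] -= lr_t * m_bar / (np.sqrt(v[indices]) + epsilon)
  return var, m, v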
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/nadam_optimizer.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Moving average optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import moving_averages
from tensorflow.python.training import optimizer
from tensorflow.python.training import saver
from tensorflow.python.training.saving import saveable_object_util
class MovingAverageOptimizer(optimizer.Optimizer):
"""Optimizer that computes a moving average of the variables.
Empirically it has been found that using the moving average of the trained
parameters of a deep network is better than using its trained parameters
directly. This optimizer allows you to compute this moving average and swap
the variables at save time so that any code outside of the training loop will
use by default the averaged values instead of the original ones.
Example of usage:
```python
# Encapsulate your favorite optimizer (here the momentum one)
# inside the MovingAverageOptimizer.
opt = tf.compat.v1.train.MomentumOptimizer(learning_rate, FLAGS.momentum)
opt = tf.contrib.opt.MovingAverageOptimizer(opt)
# Then create your model and all its variables.
model = build_model()
# Add the training op that optimizes using opt.
# This needs to be called before swapping_saver().
opt.minimize(cost, var_list)
# Then create your saver like this:
saver = opt.swapping_saver()
# Pass it to your training loop.
slim.learning.train(
model,
...
saver=saver)
```
Note that for evaluation, the normal saver should be used instead of
swapping_saver().
"""
def __init__(self, opt, average_decay=0.9999, num_updates=None,
sequential_update=True):
"""Construct a new MovingAverageOptimizer.
Args:
opt: A tf.Optimizer that will be used to compute and apply gradients.
average_decay: Float. Decay to use to maintain the moving averages
of trained variables.
See tf.train.ExponentialMovingAverage for details.
num_updates: Optional count of number of updates applied to variables.
See tf.train.ExponentialMovingAverage for details.
sequential_update: Bool. If False, will compute the moving average at the
same time as the model is updated, potentially doing
benign data races.
If True, will update the moving average after gradient
updates.
"""
self._optimizer = opt
self._ema = moving_averages.ExponentialMovingAverage(
average_decay, num_updates=num_updates)
self._swapped_variable_name_map = None
self._sequential_update = sequential_update
def compute_gradients(self, *args, **kwargs):
return self._optimizer.compute_gradients(*args, **kwargs)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
train_op = self._optimizer.apply_gradients(
grads_and_vars, global_step=global_step, name=name)
var_list = [x[1] for x in grads_and_vars if x[0] is not None]
self._swapped_variable_name_map = {}
if self._sequential_update:
with ops.control_dependencies([train_op]):
ma_op = self._ema.apply(var_list)
else:
ma_op = self._ema.apply(var_list)
for v in var_list:
v_avg = self._ema.average(v)
self._swapped_variable_name_map[v.op.name] = v_avg.op.name
self._swapped_variable_name_map[v_avg.op.name] = v.op.name
return control_flow_ops.group(train_op, ma_op, name='train_with_avg')
def _find_swapped_variable(self, v_name_to_tensor, v_name, tensor):
"""Returns name of swapped variable for given tensor.
Args:
v_name_to_tensor: Mapping from variable names to tensors.
v_name: name of the variable for which swapped variable should be returned
tensor: Tensor which correspond to variable for which swapped variable
should be returned.
Returns:
Tensor which correspond to swapped variable.
Raises:
ValueError: If swapped variable could not be found in v_name_to_tensor.
"""
swapped_v_name = self._swapped_variable_name_map.get(v_name, None)
if swapped_v_name is None:
return tensor
else:
if swapped_v_name in v_name_to_tensor:
return v_name_to_tensor[swapped_v_name]
else:
raise ValueError(
('Variable to swap %s is not part of variables to save. '
'This breaks MovingAverageOptimizer.') % swapped_v_name)
def swapping_saver(self, var_list=None, name='swapping_saver', **kwargs):
"""Create a saver swapping moving averages and variables.
You should use this saver during training. It will save the moving averages
of the trained parameters under the original parameter names. For
evaluations or inference you should use a regular saver and it will
automatically use the moving averages for the trained variables.
You must call this function after all variables have been created and after
you have called Optimizer.minimize().
Args:
var_list: List of variables to save, as per `Saver()`.
If set to None, will save all the variables that have been
created before this call.
name: The name of the saver.
**kwargs: Keyword arguments of `Saver()`.
Returns:
A `tf.compat.v1.train.Saver` object.
Raises:
RuntimeError: If apply_gradients or minimize has not been called before.
ValueError: If var_list is provided and contains some variables but not
their moving average counterpart.
"""
if self._swapped_variable_name_map is None:
raise RuntimeError('Must call apply_gradients or minimize before '
'creating the swapping_saver')
if var_list is None:
var_list = variables.global_variables()
if not isinstance(var_list, dict):
var_list = saveable_object_util.op_list_to_dict(var_list)
v_name_to_tensor = {}
for k, tensor_or_list in six.iteritems(var_list):
# For each partitioned variable, OpListToDict returns a list of constituent
# parts instead of a single tensor.
if (isinstance(tensor_or_list, list)
or isinstance(tensor_or_list, variables.PartitionedVariable)):
for tensor in tensor_or_list:
v_name = tensor.op.name
v_name_to_tensor[v_name] = tensor
else:
v_name_to_tensor[k] = tensor_or_list
# Now swap variables and moving averages
swapped_var_list = {}
for k, tensor_or_list in six.iteritems(var_list):
if isinstance(tensor_or_list, list):
tensor_list_to_save = []
for tensor in tensor_or_list:
v_name = tensor.op.name
swapped_variable = self._find_swapped_variable(v_name_to_tensor,
v_name,
tensor)
tensor_list_to_save.append(swapped_variable)
swapped_var_list[k] = tensor_list_to_save
else:
swapped_var_list[k] = self._find_swapped_variable(
v_name_to_tensor, k, tensor_or_list)
# Build the swapping saver.
return saver.Saver(swapped_var_list, name=name, **kwargs)
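# A minimal usage sketch of the swap-on-save flow described in the class
# docstring above. Hedged example: `_swapping_saver_example` is a hypothetical
# helper, `loss` is a user-provided TF 1.x loss tensor, and the base optimizer
# choice is illustrative.
def _swapping_saver_example(loss):
  import tensorflow.compat.v1 as tf  # local import, illustration only
  opt = MovingAverageOptimizer(tf.train.GradientDescentOptimizer(0.1))
  train_op = opt.minimize(loss)  # must run before swapping_saver()
  # Checkpoints written by this saver store the moving averages under the
  # original variable names, so downstream jobs restore the averaged weights.
  saver = opt.swapping_saver()
  return train_op, saver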
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/moving_average_optimizer.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultitaskOptimizerWrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.contrib.opt.python.training import multitask_optimizer_wrapper
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum
class MultitaskOptimizerWrapperTest(test.TestCase):
"""Tests for the multitask optimizer wrapper.
"""
def testWrapper(self):
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtypes.float32)
var1 = variables.Variable([3.0, 4.0], dtype=dtypes.float32)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtypes.float32)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtypes.float32)
grads_allzero = constant_op.constant([0.0, 0.0], dtype=dtypes.float32)
mom_opt_impl = momentum.MomentumOptimizer(learning_rate=2.0, momentum=0.9)
mom_opt = multitask_optimizer_wrapper.MultitaskOptimizerWrapper(
mom_opt_impl)
mom_update = mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
mom_update_partial = mom_opt.apply_gradients(
zip([grads_allzero, grads1], [var0, var1]))
mom_update_no_action = mom_opt.apply_gradients(
zip([grads_allzero, grads_allzero], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
self.assertEqual(slot0.get_shape(), var0.get_shape())
slot1 = mom_opt.get_slot(var1, "momentum")
self.assertEqual(slot1.get_shape(), var1.get_shape())
# Step 1: normal momentum update.
self.evaluate(mom_update)
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([0.1, 0.1]), self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([0.01, 0.01]), self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
self.evaluate(var1))
# Step 2: momentum update that changes only slot1 but not slot0.
self.evaluate(mom_update_partial)
# Check that only the relevant momentum accumulator has been updated.
self.assertAllCloseAccordingToType(
np.array([0.1, 0.1]), self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
self.evaluate(slot1))
# Step 3: momentum update that does not change anything.
self.evaluate(mom_update_no_action)
# Check that the momentum accumulators have *NOT* been updated.
self.assertAllCloseAccordingToType(
np.array([0.1, 0.1]), self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
self.evaluate(slot1))
def testGradientClipping(self):
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtypes.float32)
var1 = variables.Variable([3.0, 4.0], dtype=dtypes.float32)
var2 = variables.Variable([3.0, 4.0], dtype=dtypes.float32)
var3 = variables.Variable([3.0, 4.0], dtype=dtypes.float32)
grads0 = constant_op.constant([10.0, 15.0], dtype=dtypes.float32)
grads1 = constant_op.constant([0.0, 5.0], dtype=dtypes.float32)
grads2 = constant_op.constant([0.0, 0.0], dtype=dtypes.float32)
grads3 = None
varlist = [var0, var1, var2, var3]
gradients = [grads0, grads1, grads2, grads3]
clipped_gradvars, global_norm = (
multitask_optimizer_wrapper.clip_gradients_by_global_norm(
six.moves.zip(gradients, varlist), clip_norm=1.0))
clipped_grads = list(six.moves.zip(*clipped_gradvars))[0]
reference_global_norm = np.sqrt(np.sum(np.square([10.0, 15.0, 0.0, 5.0])))
self.assertAllCloseAccordingToType(
self.evaluate(global_norm), reference_global_norm)
self.assertAllCloseAccordingToType(
self.evaluate(clipped_grads[2]), np.array([0., 0.]))
self.assertEqual(clipped_grads[3], None)
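# A NumPy reference for the clipping rule exercised above (hedged sketch;
# `_clip_by_global_norm_numpy` is a hypothetical helper). Every gradient is
# scaled by clip_norm / max(global_norm, clip_norm), so nothing changes when
# the global norm is already within the budget, and None gradients pass
# through untouched.
def _clip_by_global_norm_numpy(grads, clip_norm):
  global_norm = np.sqrt(sum(np.sum(np.square(g)) for g in grads
                            if g is not None))
  scale = clip_norm / max(global_norm, clip_norm)
  return [None if g is None else g * scale for g in grads], global_norm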
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/multitask_optimizer_wrapper_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for AdamGS."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.opt.python.training import adam_gs_optimizer
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def adam_update_numpy(param,
g_t,
t,
m,
v,
alpha=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8):
alpha_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
param_t = param - alpha_t * m_t / (np.sqrt(v_t) + epsilon)
return param_t, m_t, v_t
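# Hedged sanity check of adam_update_numpy (illustrative values, not part of
# the assertions below): with default hyperparameters the very first Adam step
# moves the parameter by roughly the learning rate, since the bias-corrected
# step size alpha_1 * m_1 / sqrt(v_1) collapses to ~alpha for any gradient.
_p, _m, _v = adam_update_numpy(np.array([1.0]), np.array([0.1]), t=1, m=0.0,
                               v=0.0)
assert abs(_p[0] - 0.999) < 1e-4  # first step size ~= alpha = 0.001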
class AdamGSOptimizerTest(test.TestCase):
def doTestSparse(self, use_resource=False):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
global_step = resource_variable_ops.ResourceVariable(
array_ops.zeros([], dtypes.int64))
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
else:
global_step = variables.Variable(array_ops.zeros([], dtypes.int64))
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0_np_indices = np.array([0, 1], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np),
constant_op.constant(grads0_np_indices), constant_op.constant([2]))
grads1_np_indices = np.array([0, 1], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np),
constant_op.constant(grads1_np_indices), constant_op.constant([2]))
opt = adam_gs_optimizer.AdamGSOptimizer(global_step=global_step)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
global_step=global_step)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**t,
self.evaluate(beta2_power))
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testSparse(self):
self.doTestSparse(use_resource=False)
def testResourceSparse(self):
self.doTestSparse(use_resource=True)
def testSparseDevicePlacement(self):
for index_dtype in [dtypes.int32, dtypes.int64]:
with self.cached_session(force_gpu=test.is_gpu_available()):
# If a GPU is available, tests that all optimizer ops can be placed on
# it (i.e. they have GPU kernels).
var = variables.Variable([[1.0], [2.0]])
indices = constant_op.constant([0, 1], dtype=index_dtype)
gathered_sum = math_ops.reduce_sum(array_ops.gather(var, indices))
optimizer = adam_gs_optimizer.AdamGSOptimizer(3.0)
minimize_op = optimizer.minimize(gathered_sum)
variables.global_variables_initializer().run()
minimize_op.run()
def testSparseRepeatedIndices(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
repeated_index_global_step = variables.Variable(
array_ops.zeros([], dtypes.int64))
aggregated_global_step = variables.Variable(
array_ops.zeros([], dtypes.int64))
repeated_index_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update = adam_gs_optimizer.AdamGSOptimizer(
global_step=repeated_index_global_step).apply_gradients(
[(grad_repeated_index, repeated_index_update_var)],
global_step=repeated_index_global_step)
aggregated_update = adam_gs_optimizer.AdamGSOptimizer(
global_step=aggregated_global_step).apply_gradients(
[(grad_aggregated, aggregated_update_var)],
global_step=aggregated_global_step)
variables.global_variables_initializer().run()
self.assertAllClose(aggregated_update_var.eval(),
self.evaluate(repeated_index_update_var))
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var.eval(),
self.evaluate(repeated_index_update_var))
def doTestBasic(self, use_resource=False, use_callable_params=False):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.session(graph=ops.Graph()):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
global_step = resource_variable_ops.ResourceVariable(
array_ops.zeros([], dtypes.int64), name="global_step_%d" % i)
var0 = resource_variable_ops.ResourceVariable(
var0_np, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
var1_np, name="var1_%d" % i)
else:
global_step = variables.Variable(array_ops.zeros([], dtypes.int64))
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = lambda: 0.001
beta1 = lambda: 0.9
beta2 = lambda: 0.999
epsilon = lambda: 1e-8
if not use_callable_params:
learning_rate = learning_rate()
beta1 = beta1()
beta2 = beta2()
epsilon = epsilon()
opt = adam_gs_optimizer.AdamGSOptimizer(global_step=global_step,
learning_rate=learning_rate)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
global_step=global_step)
opt_variables = opt.variables()
beta1_power, beta2_power = opt._get_beta_accumulators()
self.assertTrue(beta1_power is not None)
self.assertTrue(beta2_power is not None)
self.assertNotIn(beta1_power, opt_variables)
self.assertNotIn(beta2_power, opt_variables)
if not context.executing_eagerly():
with ops.Graph().as_default():
# Shouldn't return non-slot variables from other graphs.
self.assertEqual(0, len(opt.variables()))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of Adam
for t in range(1, 4):
if not context.executing_eagerly():
self.evaluate(update)
self.assertAllCloseAccordingToType(
0.9**(t + 1), self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(
0.999**(t + 1), self.evaluate(beta2_power))
else:
if t > 1:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
global_step=global_step)
beta1_power, beta2_power = opt._get_beta_accumulators()
self.assertAllCloseAccordingToType(
0.9**t, self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(
0.999**t, self.evaluate(beta2_power))
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
if use_resource:
self.assertEqual("var0_%d/Adam:0" % (i,),
opt.get_slot(var=var0, name="m").name)
def testBasic(self):
with self.cached_session():
self.doTestBasic(use_resource=False)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testResourceBasic(self):
self.doTestBasic(use_resource=True)
def testBasicCallableParams(self):
with context.eager_mode():
self.doTestBasic(use_resource=True, use_callable_params=True)
def testTensorLearningRate(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
global_step = variables.Variable(array_ops.zeros([], dtypes.int64))
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam_gs_optimizer.AdamGSOptimizer(
global_step=global_step, learning_rate=constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
global_step=global_step)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**t,
self.evaluate(beta2_power))
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
global_step = variables.Variable(array_ops.zeros([], dtypes.int64))
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam_gs_optimizer.AdamGSOptimizer(global_step=global_step)
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
global_step=global_step)
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
global_step=global_step)
variables.global_variables_initializer().run()
beta1_power, beta2_power = opt._get_beta_accumulators()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of intertwined Adam1 and Adam2.
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**t,
self.evaluate(beta2_power))
if t % 2 == 0:
update1.run()
else:
update2.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testTwoSessions(self):
optimizer = adam_gs_optimizer.AdamGSOptimizer()
with context.eager_mode():
var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = constant_op.constant(np.array([0.1, 0.1]))
optimizer.apply_gradients([(grads0, var0)])
g = ops.Graph()
with g.as_default():
with session.Session():
var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = constant_op.constant(np.array([0.1, 0.1]))
optimizer.apply_gradients([(grads0, var0)])
gg = ops.Graph()
with gg.as_default():
with session.Session():
var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = constant_op.constant(np.array([0.1, 0.1]))
# If the optimizer saves any state not keyed by graph, the following line
# fails.
optimizer.apply_gradients([(grads0, var0)])
def testSlotsUniqueEager(self):
with context.eager_mode():
v1 = resource_variable_ops.ResourceVariable(1.)
v2 = resource_variable_ops.ResourceVariable(1.)
opt = adam_gs_optimizer.AdamGSOptimizer(1.)
opt.minimize(lambda: v1 + v2)
# There should be two unique slot variables (m and v) for each of v1 and v2.
self.assertEqual(4, len(set(opt.variables())))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/adam_gs_optimizer_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Matrix functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.opt.python.training import matrix_functions
from tensorflow.python.platform import test
TOLERANCE = 1e-3
def np_power(mat_g, alpha):
"""Computes mat_g^alpha for a square symmetric matrix mat_g."""
mat_u, diag_d, mat_v = np.linalg.svd(mat_g)
diag_d = np.power(diag_d, alpha)
return np.dot(np.dot(mat_u, np.diag(diag_d)), mat_v)
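# A quick hedged check of np_power: the 0.5 power of a symmetric PSD matrix
# should square back to the original. `_check_np_power_roundtrip` is an
# illustrative helper, not one of the original tests.
def _check_np_power_roundtrip(size=4, seed=0):
  rng = np.random.RandomState(seed)
  mat_a = rng.rand(size, size)
  mat = np.dot(mat_a, mat_a.T)  # symmetric positive semi-definite
  root = np_power(mat, 0.5)
  return np.allclose(np.dot(root, root), mat, atol=1e-6)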
class MatrixFunctionTests(test.TestCase):
def testMatrixSquareRootFunction(self):
"""Tests for matrix square roots."""
size = 20
mat_a = np.random.rand(size, size)
mat = np.dot(mat_a, mat_a.T)
expected_mat = np_power(mat, 0.5)
mat_root = matrix_functions.matrix_square_root(mat, size)
self.assertAllCloseAccordingToType(
expected_mat, mat_root, atol=TOLERANCE, rtol=TOLERANCE)
def testMatrixInversePthRootFunction(self):
"""Tests for matrix inverse pth roots."""
size = 20
mat_a = np.random.rand(size, size)
mat = np.dot(mat_a, mat_a.T)
expected_mat = np_power(mat, -0.125)
mat_root = matrix_functions.matrix_inverse_pth_root(mat, size, -0.125)
self.assertAllCloseAccordingToType(
expected_mat, mat_root, atol=TOLERANCE, rtol=TOLERANCE)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/matrix_functions_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Regreg_adagrad_optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.opt.python.training import reg_adagrad_optimizer
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class RegAdagradOptimizerTest(test.TestCase):
def doTestBasic(self, use_locking=False, use_resource=False):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
if use_resource:
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
else:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = reg_adagrad_optimizer.RegAdagradOptimizer(
3.0, initial_accumulator_value=0.1, use_locking=use_locking)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]), var1.eval())
def testBasic(self):
self.doTestBasic(use_locking=False)
def testBasicResource(self):
self.doTestBasic(use_locking=False, use_resource=True)
def testBasicLocked(self):
self.doTestBasic(use_locking=True)
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable(
[[1.0, 2.0], [3.0, 4.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = reg_adagrad_optimizer.RegAdagradOptimizer(1.0).minimize(loss)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0], [3.0, 4.0]],
var0.eval())
# Run 1 step of adagrad
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[[0, 1], [3, 4]], var0.eval(), atol=0.01)
def testTensorLearningRate(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = reg_adagrad_optimizer.RegAdagradOptimizer(
constant_op.constant(3.0), initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]), var1.eval())
def testSparseBasic(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
var1 = variables.Variable([[3.0], [4.0]], dtype=dtype)
grads0 = ops.IndexedSlices(
constant_op.constant([0.1], shape=[1, 1], dtype=dtype),
constant_op.constant([0]), constant_op.constant([2, 1]))
grads1 = ops.IndexedSlices(
constant_op.constant([0.01], shape=[1, 1], dtype=dtype),
constant_op.constant([1]), constant_op.constant([2, 1]))
ada_opt = reg_adagrad_optimizer.RegAdagradOptimizer(
3.0, initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([[1.0], [2.0]], var0.eval())
self.assertAllClose([[3.0], [4.0]], var1.eval())
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([[-1.6026098728179932], [2.0]]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([[3.0], [3.715679168701172]]), var1.eval())
def testSparseRepeatedIndices(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
repeated_index_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = variables.Variable([[1.0], [2.0]], dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant([0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]), constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant([0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]), constant_op.constant([2, 1]))
repeated_update = reg_adagrad_optimizer.RegAdagradOptimizer(
3.0).apply_gradients([(grad_repeated_index,
repeated_index_update_var)])
aggregated_update = reg_adagrad_optimizer.RegAdagradOptimizer(
3.0).apply_gradients([(grad_aggregated, aggregated_update_var)])
variables.global_variables_initializer().run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
def testSparseRepeatedIndicesResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var_repeated = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype)
loss_repeated = math_ops.reduce_sum(
embedding_ops.embedding_lookup(var_repeated, [0, 0]))
var_aggregated = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype)
loss_aggregated = 2 * math_ops.reduce_sum(
embedding_ops.embedding_lookup(var_aggregated, [0]))
update_op_repeated = reg_adagrad_optimizer.RegAdagradOptimizer(
2.0).minimize(loss_repeated)
update_op_aggregated = reg_adagrad_optimizer.RegAdagradOptimizer(
2.0).minimize(loss_aggregated)
variables.global_variables_initializer().run()
self.assertAllCloseAccordingToType(var_repeated.eval(),
var_aggregated.eval())
for _ in range(3):
update_op_repeated.run()
update_op_aggregated.run()
self.assertAllCloseAccordingToType(var_repeated.eval(),
var_aggregated.eval())
def testSparseStability(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
shape = [1, 6]
var0 = variables.Variable(
[[
0.00872496, -0.106952, 0.110467, 0.226505, -0.0147257,
-0.0105945
]],
dtype=dtype)
grads0 = ops.IndexedSlices(
constant_op.constant(
[[
-5.91278e-05, 5.31673e-05, -2.5779e-06, 4.29153e-05,
-8.4877e-05, -9.48906e-05
]],
shape=shape,
dtype=dtype), constant_op.constant([0]),
constant_op.constant(shape))
ada_opt = reg_adagrad_optimizer.RegAdagradOptimizer(
1.0, initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(zip([grads0], [var0]))
self.assertEqual(["accumulator"], ada_opt.get_slot_names())
slot0 = ada_opt.get_slot(var0, "accumulator")
init = variables.global_variables_initializer()
for _ in range(100):
init.run()
ada_update.run()
self.assertAllCloseAccordingToType(
np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]), slot0.eval())
self.assertAllCloseAccordingToType(
np.array([[
0.00891194, -0.10712013, 0.11047515, 0.22636929, -0.0144573,
-0.01029443
]]), var0.eval())
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = reg_adagrad_optimizer.RegAdagradOptimizer(3.0)
# Apply the optimizer twice. Both applications will use
# the same accums.
ada_update1 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
ada_update2 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.assertEqual(["accumulator"], ada_opt.get_slot_names())
slot0 = ada_opt.get_slot(var0, "accumulator")
self.assertEqual(slot0.get_shape(), var0.get_shape())
slot1 = ada_opt.get_slot(var1, "accumulator")
self.assertEqual(slot1.get_shape(), var1.get_shape())
variables.global_variables_initializer().run()
# Fetch params to validate initial values.
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Mix the first and the second adagrad for 3 steps.
ada_update1.run()
ada_update2.run()
ada_update1.run()
# Validate updated params (the same as with only 1 RegAdagrad).
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]), var1.eval())
def testDynamicShapeVariable_Ok(self):
with self.cached_session():
v = variable_scope.get_variable(
"v", initializer=constant_op.constant(1.), validate_shape=False)
self.assertFalse(v.shape.is_fully_defined())
# Creating optimizer should cause no exception.
reg_adagrad_optimizer.RegAdagradOptimizer(
3.0, initial_accumulator_value=0.1)
def testSkipUpdatingSlots(self):
iav = 0.130005 # A value that works with float16
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = reg_adagrad_optimizer.RegAdagradOptimizer(
3.0, initial_accumulator_value=iav)
# Apply the optimizer twice. Both applications will use
# the same accums.
with ada_opt.avoid_updating_slots():
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.assertEqual(["accumulator"], ada_opt.get_slot_names())
slot0 = ada_opt.get_slot(var0, "accumulator")
self.assertEqual(slot0.get_shape(), var0.get_shape())
slot1 = ada_opt.get_slot(var1, "accumulator")
self.assertEqual(slot1.get_shape(), var1.get_shape())
variables.global_variables_initializer().run()
# Fetch params to validate initial values.
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Mix the first and the second adagrad for 3 steps.
for _ in range(3):
ada_update.run()
# Validate that ada_opt's slots are not updated.
self.assertAllCloseAccordingToType(np.array([iav, iav]), slot0.eval())
self.assertAllCloseAccordingToType(np.array([iav, iav]), slot1.eval())
def testSparseSkipUpdatingSlots(self):
iav = 0.130005 # A value that works with float16
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
var1 = variables.Variable([[3.0], [4.0]], dtype=dtype)
grads0 = ops.IndexedSlices(
constant_op.constant([0.1], shape=[1, 1], dtype=dtype),
constant_op.constant([0]), constant_op.constant([2, 1]))
grads1 = ops.IndexedSlices(
constant_op.constant([0.01], shape=[1, 1], dtype=dtype),
constant_op.constant([1]), constant_op.constant([2, 1]))
ada_opt = reg_adagrad_optimizer.RegAdagradOptimizer(
3.0, initial_accumulator_value=iav)
with ada_opt.avoid_updating_slots():
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
slot0 = ada_opt.get_slot(var0, "accumulator")
self.assertEqual(slot0.get_shape(), var0.get_shape())
slot1 = ada_opt.get_slot(var1, "accumulator")
self.assertEqual(slot1.get_shape(), var1.get_shape())
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([[1.0], [2.0]], var0.eval())
self.assertAllClose([[3.0], [4.0]], var1.eval())
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
# Validate that ada_opt's slots are not updated.
self.assertAllCloseAccordingToType(
np.array([[iav], [iav]]), slot0.eval())
self.assertAllCloseAccordingToType(
np.array([[iav], [iav]]), slot1.eval())
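# A NumPy reference for the dense Adagrad updates validated above (hedged
# sketch; `_adagrad_steps_numpy` is a hypothetical helper). Three steps with
# lr=3.0, grad=0.1, and initial accumulator 0.1 starting from [1.0, 2.0] land
# on the [-1.6026..., -0.6026...] constants asserted in the tests.
def _adagrad_steps_numpy(var, grad, lr=3.0, accum=0.1, steps=3):
  var = np.array(var, dtype=np.float64)
  grad = np.array(grad, dtype=np.float64)
  for _ in range(steps):
    accum = accum + grad * grad          # accumulate squared gradients
    var = var - lr * grad / np.sqrt(accum)
  return var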
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/reg_adagrad_optimizer_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for AddSign."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.opt.python.training import addsign
from tensorflow.contrib.opt.python.training import sign_decay
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def py_linear_decay_fn(decay_steps):
def linear_decay(step):
step = min(step, decay_steps)
return float(decay_steps - step) / decay_steps
return linear_decay
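# Hedged illustration of py_linear_decay_fn: with decay_steps=10 the
# multiplier falls linearly from 1.0 to 0.0 and stays at 0.0 afterwards.
assert py_linear_decay_fn(10)(0) == 1.0
assert py_linear_decay_fn(10)(5) == 0.5
assert py_linear_decay_fn(10)(15) == 0.0  # clamped at decay_steps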
def addsign_update_numpy(params,
g_t,
m,
lr,
alpha=1.0,
beta=0.9,
py_sign_decay_fn=None,
t=None):
m_t = beta * m + (1 - beta) * g_t
if py_sign_decay_fn is None:
sign_decayed = 1.0
else:
sign_decayed = py_sign_decay_fn(t-1)
multiplier = alpha + sign_decayed * np.sign(g_t) * np.sign(m_t)
params_t = params - lr * multiplier * g_t
return params_t, m_t
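# Hedged numeric check of addsign_update_numpy (illustrative values only):
# when the gradient and the momentum agree in sign and no decay is applied,
# the multiplier is alpha + 1 = 2, so a step with lr=0.1 and g=0.1 moves the
# parameter by 0.02.
_p, _m = addsign_update_numpy(np.array([1.0]), np.array([0.1]), 0.0, 0.1)
assert abs(_p[0] - 0.98) < 1e-12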
class AddSignTest(test.TestCase):
def _testDense(self,
use_resource=False,
learning_rate=0.1,
sign_decay_fn=None,
py_sign_decay_fn=None,
alpha=1.0,
beta=0.9):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, m1 = 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
global_step = resource_variable_ops.ResourceVariable(
0, trainable=False)
else:
var0 = variables.VariableV1(var0_np)
var1 = variables.VariableV1(var1_np)
global_step = variables.VariableV1(
0, trainable=False)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = addsign.AddSignOptimizer(
learning_rate=learning_rate,
alpha=alpha,
beta=beta,
sign_decay_fn=sign_decay_fn,
)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
global_step=global_step)
neg_update = opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),
global_step=global_step)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 7 steps of AddSign
# first 4 steps with positive gradient
# last 3 steps with negative gradient (sign(gm) should be -1)
for t in range(1, 8):
if t < 5:
if not context.executing_eagerly():
self.evaluate(update)
elif t > 1:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
global_step=global_step)
else:
if not context.executing_eagerly():
self.evaluate(neg_update)
elif t > 1:
opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),
global_step=global_step)
var0_np, m0 = addsign_update_numpy(
var0_np,
grads0_np if t < 5 else -grads0_np,
m0,
learning_rate,
alpha=alpha,
beta=beta,
py_sign_decay_fn=py_sign_decay_fn,
t=t,
)
var1_np, m1 = addsign_update_numpy(
var1_np,
grads1_np if t < 5 else -grads1_np,
m1,
learning_rate,
alpha=alpha,
beta=beta,
py_sign_decay_fn=py_sign_decay_fn,
t=t,
)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testDense(self):
decay_steps = 10
sign_decay_fn = sign_decay.get_linear_decay_fn(decay_steps)
py_sign_decay_fn = py_linear_decay_fn(decay_steps)
self._testDense(use_resource=False)
self._testDense(use_resource=False, learning_rate=0.01, alpha=0.1, beta=0.8)
self._testDense(use_resource=False,
sign_decay_fn=sign_decay_fn,
py_sign_decay_fn=py_sign_decay_fn)
self._testDense(use_resource=True)
self._testDense(use_resource=True, learning_rate=0.01, alpha=0.1, beta=0.8)
self._testDense(use_resource=True,
sign_decay_fn=sign_decay_fn,
py_sign_decay_fn=py_sign_decay_fn)
def _testSparse(self,
use_resource=False,
learning_rate=0.1,
sign_decay_fn=None,
py_sign_decay_fn=None,
alpha=1.0,
beta=0.9):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, m1 = 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
global_step = resource_variable_ops.ResourceVariable(
0, trainable=False)
else:
var0 = variables.VariableV1(var0_np)
var1 = variables.VariableV1(var1_np)
global_step = variables.VariableV1(
0, trainable=False)
grads0_np_indices = np.array([0, 1], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np),
constant_op.constant(grads0_np_indices), constant_op.constant([2]))
grads1_np_indices = np.array([0, 1], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np),
constant_op.constant(grads1_np_indices), constant_op.constant([2]))
opt = addsign.AddSignOptimizer(
learning_rate=learning_rate,
alpha=alpha,
beta=beta,
sign_decay_fn=sign_decay_fn,
)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
global_step=global_step)
neg_update = opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),
global_step=global_step)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 7 steps of AddSign
# first 4 steps with positive gradient
# last 3 steps with negative gradient (sign(gm) should be -1)
for t in range(1, 8):
if t < 5:
update.run()
else:
neg_update.run()
var0_np, m0 = addsign_update_numpy(
var0_np,
grads0_np if t < 5 else -grads0_np,
m0,
learning_rate,
alpha=alpha,
beta=beta,
py_sign_decay_fn=py_sign_decay_fn,
t=t,
)
var1_np, m1 = addsign_update_numpy(
var1_np,
grads1_np if t < 5 else -grads1_np,
m1,
learning_rate,
alpha=alpha,
beta=beta,
py_sign_decay_fn=py_sign_decay_fn,
t=t,
)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testSparse(self):
decay_steps = 10
sign_decay_fn = sign_decay.get_linear_decay_fn(decay_steps)
py_sign_decay_fn = py_linear_decay_fn(decay_steps)
self._testSparse(use_resource=False)
self._testSparse(use_resource=False,
learning_rate=0.01,
alpha=0.1,
beta=0.8)
self._testSparse(use_resource=False,
sign_decay_fn=sign_decay_fn,
py_sign_decay_fn=py_sign_decay_fn)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/addsign_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GGT for Tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.contrib.optimizer_v2 import optimizer_v2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
class GGTOptimizer(optimizer_v2.OptimizerV2):
"""Optimizer that implements the GGT algorithm.
GGT has an advantage over SGD and Adam on large models with poor
conditioning, for example language models and CNNs; see
[ABCHSZZ 2018](https://arxiv.org/pdf/1806.02958.pdf).
"""
def __init__(self,
learning_rate=0.001,
beta1=0.9,
use_locking=False,
name="GGT",
window=10,
eps=1e-4,
svd_eps=1e-6,
sigma_eps=1e-2):
"""Construct a new GGT optimizer.
Initialization:
```
t <- 0 (Initialize timestep)
grad_buffer <- 0 (Initialize buffer for keeping past gradients)
flat_grad <- 0 (Initialize flattened gradient that contains gradients of all
variables)
m_0 <- 0 (Initialize 1st moment vector)
```
Suppose all variables and their gradients are concatenated into vectors
`flat_vars` and `flat_grad`. The update rule for `flat_vars`
uses an optimization described at the beginning of section 2 of the paper:
```
t <- t + 1
m_t <- beta1 * m_{t-1} + (1 - beta1) * flat_grad
grad_buffer[(t-1) % window, :] <- m_t
M <- grad_buffer^T / sqrt(min(t, window))
U, sigma, _ <- SVD(M^TM + I * svd_eps)
sigma_sqrt_inv <- (sqrt(sigma) + sigma_eps)^(-3)
sigma_sqrt_min <- min(sqrt(sigma))
if sigma_sqrt_min > eps:
new_step <- M U diag(sigma_sqrt_inv) U^T M^T m_t +
(m_t - M U diag(1/sigma) U^T M^T m_t) / sigma_sqrt_min
else:
new_step <- M U diag(sigma_sqrt_inv) U^T M^T m_t
flat_vars <- flat_vars - learning_rate * new_step
```
GGT provides the power of full-matrix adaptive regularization at a cost not
much larger than SGD. As a result it is suited for large models where the
gradient covariance matrix has a poor condition number that slows down first
order methods.
GGT uses the preconditioner from full-matrix AdaGrad, with gradient history
attenuated exponentially as in Adam, and truncated to a window parameter.
It has provable guarantees even for non-convex optimization: it is never
significantly worse than SGD and is in some cases better.
Args:
learning_rate: A float hyperparameter. The learning rate.
beta1: A float hyperparameter. The exponential decay rate for the 1st
moment estimates.
use_locking: If True use locks for update operations.
name: Optional name for the operations created when applying gradients.
Defaults to "GGT".
window: An integer hyperparameter. The number of first moments to keep in
computing the adaptive preconditioner.
eps: A float hyperparameter. Used to truncate small eigenvalues of the
gradient covariance matrix.
svd_eps: A float hyperparameter. Used to stabilize SVD.
sigma_eps: A float hyperparameter. Used to regularize matrix inversion.
"""
super(GGTOptimizer, self).__init__(use_locking, name)
self._set_hyper("lr", learning_rate)
self._set_hyper("beta1", beta1)
self._set_hyper("window", window)
self._set_hyper("eps", eps)
self._set_hyper("svd_eps", svd_eps)
self._set_hyper("sigma_eps", sigma_eps)
self.index_dict = {}
self.shape_dict = {}
def _create_vars(self, var_list, state):
# Construct ordered dictionary for variable dimensions, sorted by name.
shape_dict = {}
for v in var_list:
shape_dict[v.name] = tensor_shape.dimension_value(np.prod(v.get_shape()))
self.shape_dict = collections.OrderedDict(
sorted(shape_dict.items(), key=lambda t: t[0]))
# Assign each variable its location in flat_grad. The locations are based on
# the order of sorted names.
idx = 0
for v_name, v_dim in self.shape_dict.items():
self.index_dict[v_name] = idx
idx += v_dim
state.create_non_slot(
initial_value=math_ops.cast(0., dtype=var_list[0].dtype.base_dtype),
name="global_step")
# Buffer for keeping past gradients.
window = state.get_hyper("window")
grad_buffer_init = array_ops.zeros(
[window, idx], dtype=var_list[0].dtype.base_dtype)
state.create_non_slot(initial_value=grad_buffer_init, name="grad_buffer")
state.create_non_slot(
initial_value=array_ops.zeros(
(idx,), dtype=var_list[0].dtype.base_dtype),
name="moment1")
# Flattened gradient that contains gradients for all variables in the model.
state.create_non_slot(
initial_value=array_ops.zeros(
(idx,), dtype=var_list[0].dtype.base_dtype),
name="flat_grad")
def _get_global_step(self, state=None):
if state is None:
state = self._get_per_graph_state()
return state.get_non_slot("global_step")
def _get_moment1(self, state=None):
if state is None:
state = self._get_per_graph_state()
return state.get_non_slot("moment1")
def _get_grad_buffer(self, state=None):
if state is None:
state = self._get_per_graph_state()
return state.get_non_slot("grad_buffer")
def _get_flat_grad(self, state=None):
if state is None:
state = self._get_per_graph_state()
return state.get_non_slot("flat_grad")
def _apply_sparse(self, grad, var):
raise NotImplementedError("Sparse gradient updates are not supported.")
def _prepare(self, state):
self._variables = []
def _apply_dense(self, grad, var, state):
self._variables.append(var)
dim = self.shape_dict[var.name]
start_index = self.index_dict[var.name]
end_index = start_index + dim
# Update flat_gradient at the index associated with the variable.
flat_grad = self._get_flat_grad(state)
new_flat_grad = array_ops.reshape(grad, [-1])
flat_grad_updated = state_ops.scatter_update(
flat_grad, math_ops.range(start_index, end_index), new_flat_grad)
return flat_grad_updated
def _resource_apply_dense(self, grad, var, state):
self._variables.append(var)
dim = self.shape_dict[var.name]
start_index = self.index_dict[var.name]
end_index = start_index + dim
# Update flat_gradient at the index associated with the variable.
flat_grad = self._get_flat_grad(state)
new_flat_grad = array_ops.reshape(grad, [-1])
flat_grad_updated = state_ops.scatter_update(
flat_grad, math_ops.range(start_index, end_index), new_flat_grad)
return flat_grad_updated
def _finish(self, state):
var_dtype = self._variables[0].dtype.base_dtype
# Update global step.
global_step = self._get_global_step(state)
update_global_step = state_ops.assign_add(global_step, 1.)
# Update the first moment estimate.
beta1 = state.get_hyper("beta1", dtype=var_dtype)
moment1 = self._get_moment1(state)
flat_grad = self._get_flat_grad(state)
# moment1_t := beta1 * moment1_{t-1} + (1 - beta1) * flat_grad_t
update_moment1 = moment1.assign(beta1 * moment1 + (1. - beta1) * flat_grad)
# Update the gradient buffer.
window = state.get_hyper("window")
grad_buffer = self._get_grad_buffer(state)
next_grad_index = math_ops.floormod(
math_ops.cast(update_global_step - 1., dtypes.int32), window)
# grad_buffer[(t-1) % window] := moment1_t
update_grad_buffer = state_ops.scatter_update(grad_buffer, next_grad_index,
update_moment1)
# Compute the update step.
eps = state.get_hyper("eps", dtype=var_dtype)
svd_eps = state.get_hyper("svd_eps", dtype=var_dtype)
sigma_eps = state.get_hyper("sigma_eps", dtype=var_dtype)
lr = state.get_hyper("lr", dtype=var_dtype)
denom = math_ops.sqrt(
math_ops.minimum(
ops.convert_to_tensor(update_global_step),
ops.convert_to_tensor(math_ops.cast(window, dtype=var_dtype))))
moment1_2d = array_ops.expand_dims(update_moment1, -1)
# m = grad_buffer^T / sqrt(min(t, window))
# m has shape [model dimension, window], where model dimension is the sum
# of the dimensions of the flattened variables.
m = array_ops.transpose(math_ops.divide(update_grad_buffer, denom))
# sigma, u, _ = SVD(m^Tm + I * svd_eps)
mm = math_ops.matmul(m, m, transpose_a=True)
damping = math_ops.cast(linalg_ops.eye(window), dtype=var_dtype) * svd_eps
sigma, u, _ = linalg_ops.svd(mm + damping)
sigma_sqrt = math_ops.sqrt(sigma)
sigma_sqrt_min = math_ops.reduce_min(sigma_sqrt)
# sigma_sqrt_inv = 1 / (\sqrt{sigma} + sigma_eps) ^ 3
# We add sigma_eps to alleviate numerical instability.
# Note that (m^Tm)^(-3/2) = u diag(sigma_sqrt_inv) u^T.
sigma_sqrt_inv = math_ops.divide(
math_ops.cast(1.0, dtype=var_dtype),
math_ops.pow(sigma_sqrt + sigma_eps, 3))
# In full matrix AdaGrad, the update step computes (mm^T)^(-1/2)g, where the
# inversion of a model dimension by model dimension matrix is needed. To
# speed up this computation we calculate the following instead:
# m(m^Tm)^(-3/2)m^T moment1 = m u diag(sigma_sqrt_inv) u^T m^T moment1.
new_step = array_ops.expand_dims(
array_ops.zeros(flat_grad.get_shape(), dtype=var_dtype), -1)
head = math_ops.matmul(
m,
math_ops.matmul(
u,
math_ops.matmul(
array_ops.diag(sigma_sqrt_inv),
math_ops.matmul(
u,
math_ops.matmul(m, moment1_2d, transpose_a=True),
transpose_a=True))))
# When inverting (mm^T)^(1/2), we also add epsilon * I regularization for
# degenerate cases. We expand ((mm^T)^(1/2) + epsilon * I)^(-1) using
# Woodbury's identity.
# For full derivation please see paper at
# https://arxiv.org/pdf/1806.02958.pdf
tail = moment1_2d - math_ops.matmul(
m,
math_ops.matmul(
u,
math_ops.matmul(
array_ops.diag(
math_ops.divide(math_ops.cast(1.0, dtype=var_dtype),
sigma)),
math_ops.matmul(
u,
math_ops.matmul(m, moment1_2d, transpose_a=True),
transpose_a=True))))
scaled_tail = math_ops.divide(tail, sigma_sqrt_min)
update_new_step = control_flow_ops.cond(
sigma_sqrt_min > eps, lambda: math_ops.add(head, scaled_tail),
lambda: math_ops.add(new_step, head))
# Update each variable.
update_step = []
for var in self._variables:
dim = self.shape_dict[var.name]
start_index = self.index_dict[var.name]
end_index = start_index + dim
var_update_correct_shape = array_ops.reshape(
update_new_step[start_index:end_index], var.get_shape())
var_updated = state_ops.assign_sub(var, lr * var_update_correct_shape)
update_step.append(var_updated)
return control_flow_ops.group(update_step)
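# ----------------------------------------------------------------------------
# Hedged reference sketch (added for exposition; not part of the original
# module): the update step above, re-derived in NumPy. It shows how
# m (m^T m)^{-3/2} m^T g approximates the full-matrix AdaGrad direction on
# the span of the recent gradient window, with the Woodbury-identity tail
# handling the orthogonal complement. All names and defaults are illustrative.
def _ggt_step_numpy_sketch(grad_buffer, moment1, window, t,
                           eps=1e-4, svd_eps=1e-6, sigma_eps=1e-2):
  import numpy as np
  # m: [model_dim, window], scaled copy of the gradient window.
  m = grad_buffer.T / np.sqrt(min(t, window))
  # Eigendecomposition of the small window x window Gram matrix; for a
  # symmetric PSD matrix this matches the SVD used in _finish() above.
  sigma, u = np.linalg.eigh(m.T.dot(m) + svd_eps * np.eye(window))
  sigma_sqrt = np.sqrt(sigma)
  # proj = u^T m^T g: the first moment expressed in the window basis.
  proj = u.T.dot(m.T.dot(moment1))
  head = m.dot(u.dot(proj / (sigma_sqrt + sigma_eps) ** 3))
  # Component of moment1 outside span(m), rescaled by the smallest
  # singular value (the Woodbury tail).
  tail = moment1 - m.dot(u.dot(proj / sigma))
  if sigma_sqrt.min() > eps:
    return head + tail / sigma_sqrt.min()
  return head
# ----------------------------------------------------------------------------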
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/ggt.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adam rewrite to use global step for computing beta1 & beta2 accumulation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("train.AdamOptimizer")
class AdamGSOptimizer(optimizer.Optimizer):
"""Optimizer that implements the Adam algorithm.
See [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
([pdf](http://arxiv.org/pdf/1412.6980.pdf)).
"""
def __init__(self,
global_step=0,
learning_rate=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8,
use_locking=False,
name="Adam"):
r"""Construct a new Adam optimizer.
Branched from tf.train.AdamOptimizer. The only difference is to pass
global step for computing beta1 and beta2 accumulators, instead of having
optimizer keep its own independent beta1 and beta2 accumulators as non-slot
variables.
Initialization:
    $$m_0 := 0 \text{(Initialize 1st moment vector)}$$
    $$v_0 := 0 \text{(Initialize 2nd moment vector)}$$
    $$t := 0 \text{(Initialize timestep)}$$
    The update rule for `variable` with gradient `g` uses an optimization
    described at the end of section 2 of the paper:
$$t := t + 1$$
$$lr_t := \text{learning\_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$
$$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$
$$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$
$$variable := variable - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$
The default value of 1e-8 for epsilon might not be a good default in
general. For example, when training an Inception network on ImageNet a
current good choice is 1.0 or 0.1. Note that since AdamOptimizer uses the
formulation just before Section 2.1 of the Kingma and Ba paper rather than
the formulation in Algorithm 1, the "epsilon" referred to here is "epsilon
hat" in the paper.
The sparse implementation of this algorithm (used when the gradient is an
IndexedSlices object, typically because of `tf.gather` or an embedding
lookup in the forward pass) does apply momentum to variable slices even if
they were not used in the forward pass (meaning they have a gradient equal
to zero). Momentum decay (beta1) is also applied to the entire momentum
accumulator. This means that the sparse behavior is equivalent to the dense
behavior (in contrast to some momentum implementations which ignore momentum
unless a variable slice was actually used).
Args:
      global_step: A TensorFlow variable tracking the current training step.
learning_rate: A Tensor or a floating point value. The learning rate.
beta1: A float value or a constant float tensor. The exponential decay
rate for the 1st moment estimates.
beta2: A float value or a constant float tensor. The exponential decay
rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper.
use_locking: If True use locks for update operations.
      name: Optional name for the operations created when applying gradients.
        Defaults to "Adam".

    @compatibility(eager)
    When eager execution is enabled, `learning_rate`, `beta1`, `beta2`, and
    `epsilon` can each be a callable that takes no arguments and returns the
    actual value to use. This can be useful for changing these values across
    different invocations of optimizer functions.
    @end_compatibility
"""
super(AdamGSOptimizer, self).__init__(use_locking, name)
self._lr = learning_rate
self._beta1 = beta1
self._beta2 = beta2
self._epsilon = epsilon
self._global_step = global_step
self._global_step_on_worker = None
# Tensor versions of the constructor arguments, created in _prepare().
self._lr_t = None
self._beta1_t = None
self._beta2_t = None
self._epsilon_t = None
def _get_beta_accumulators(self):
return (math_ops.pow(self._beta1_t, self._global_step_on_worker),
math_ops.pow(self._beta2_t, self._global_step_on_worker))
def _create_slots(self, var_list):
# Create slots for the first and second moments.
for v in var_list:
self._zeros_slot(v, "m", self._name)
self._zeros_slot(v, "v", self._name)
def _prepare(self):
lr = self._call_if_callable(self._lr)
beta1 = self._call_if_callable(self._beta1)
beta2 = self._call_if_callable(self._beta2)
epsilon = self._call_if_callable(self._epsilon)
self._lr_t = ops.convert_to_tensor(lr, name="learning_rate")
self._beta1_t = ops.convert_to_tensor(beta1, name="beta1")
self._beta2_t = ops.convert_to_tensor(beta2, name="beta2")
self._epsilon_t = ops.convert_to_tensor(epsilon, name="epsilon")
    # Performance optimization: the worker reads a local copy of the global
    # step to avoid overloading the parameter server that holds it.
self._global_step_on_worker = math_ops.cast(
array_ops.identity(self._global_step) + 1, dtypes.float32)
def _apply_dense(self, grad, var):
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
beta1_power, beta2_power = self._get_beta_accumulators()
return training_ops.apply_adam(
var,
m,
v,
math_ops.cast(beta1_power, var.dtype.base_dtype),
math_ops.cast(beta2_power, var.dtype.base_dtype),
math_ops.cast(self._lr_t, var.dtype.base_dtype),
math_ops.cast(self._beta1_t, var.dtype.base_dtype),
math_ops.cast(self._beta2_t, var.dtype.base_dtype),
math_ops.cast(self._epsilon_t, var.dtype.base_dtype),
grad,
use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, var):
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
beta1_power, beta2_power = self._get_beta_accumulators()
return training_ops.resource_apply_adam(
var.handle,
m.handle,
v.handle,
math_ops.cast(beta1_power, grad.dtype.base_dtype),
math_ops.cast(beta2_power, grad.dtype.base_dtype),
math_ops.cast(self._lr_t, grad.dtype.base_dtype),
math_ops.cast(self._beta1_t, grad.dtype.base_dtype),
math_ops.cast(self._beta2_t, grad.dtype.base_dtype),
math_ops.cast(self._epsilon_t, grad.dtype.base_dtype),
grad,
use_locking=self._use_locking)
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
beta1_power, beta2_power = self._get_beta_accumulators()
beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
# m_t = beta1 * m + (1 - beta1) * g_t
m = self.get_slot(var, "m")
m_scaled_g_values = grad * (1 - beta1_t)
m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
with ops.control_dependencies([m_t]):
m_t = scatter_add(m, indices, m_scaled_g_values)
# v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
v = self.get_slot(var, "v")
v_scaled_g_values = (grad * grad) * (1 - beta2_t)
v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
with ops.control_dependencies([v_t]):
v_t = scatter_add(v, indices, v_scaled_g_values)
v_sqrt = math_ops.sqrt(v_t)
var_update = state_ops.assign_sub(
var, lr * m_t / (v_sqrt + epsilon_t), use_locking=self._use_locking)
return control_flow_ops.group(*[var_update, m_t, v_t])
def _apply_sparse(self, grad, var):
return self._apply_sparse_shared(
grad.values,
var,
grad.indices,
lambda x, i, v: state_ops.scatter_add( # pylint: disable=g-long-lambda
x,
i,
v,
use_locking=self._use_locking))
def _resource_scatter_add(self, x, i, v):
with ops.control_dependencies(
[resource_variable_ops.resource_scatter_add(x.handle, i, v)]):
return x.value()
def _resource_apply_sparse(self, grad, var, indices):
return self._apply_sparse_shared(grad, var, indices,
self._resource_scatter_add)
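# ----------------------------------------------------------------------------
# Hedged usage sketch (added for exposition; not part of the original
# module). It illustrates the one behavioral difference from the stock Adam
# optimizer: the beta1/beta2 accumulators are powers of the explicit global
# step, so the optimizer keeps no non-slot accumulator variables of its own.
# The toy quadratic loss and all names below are illustrative assumptions.
def _demo_adam_gs():
  from tensorflow.python.client import session
  from tensorflow.python.ops import variables
  from tensorflow.python.training import training_util

  global_step = training_util.create_global_step()
  x = variables.Variable(3.0, name="x")
  loss = (x - 1.0) * (x - 1.0)
  opt = AdamGSOptimizer(global_step=global_step, learning_rate=0.1)
  # Passing global_step here makes minimize() increment it, which in turn
  # advances the beta power accumulators read in _get_beta_accumulators().
  train_op = opt.minimize(loss, global_step=global_step)
  with session.Session() as sess:
    sess.run(variables.global_variables_initializer())
    for _ in range(200):
      sess.run(train_op)
    return sess.run(x)  # Converges toward 1.0.
# ----------------------------------------------------------------------------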
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/adam_gs_optimizer.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of PowerSign."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class PowerSignOptimizer(optimizer.Optimizer):
"""Optimizer that implements the PowerSign update.
See [Bello et al., ICML2017],
[Neural Optimizer Search with RL](https://arxiv.org/abs/1709.07417).
"""
def __init__(self,
learning_rate=0.1,
base=math.e,
beta=0.9,
sign_decay_fn=None,
use_locking=False,
name='PowerSignOptimizer'):
"""Constructs a new PowerSignOptimizer object.
Initialization:
```
m_0 <- 0 (Initialize initial 1st moment vector)
t <- 0 (Initialize timestep)
```
Update:
```
t <- t + 1
    m_t <- beta * m_{t-1} + (1 - beta) * g
sign_decay <- sign_decay_fn(t)
update <- base ** (sign_decay * sign(g) * sign(m)) * g
variable <- variable - lr_t * update
```
    Example usage for PowerSign-cd (PowerSign with cosine sign decay):
    ```
    decay_steps = 1000
    cosine_decay_fn = sign_decay.get_cosine_decay_fn(decay_steps)
    opt = PowerSignOptimizer(learning_rate=0.1, sign_decay_fn=cosine_decay_fn)
```
Args:
learning_rate: learning_rate used when taking a step.
base: base used in optimizer.
beta: decay used for computing the moving average m.
sign_decay_fn: decay function applied to the sign(g) sign(m) quantity.
Takes global_step as an argument. See sign_decay.py for some examples.
use_locking: If True, use locks for update operations.
      name: Optional name for the operations created when applying gradients.
Defaults to "PowerSignOptimizer".
"""
super(PowerSignOptimizer, self).__init__(use_locking, name)
self._lr = learning_rate
self._beta = beta
self._logbase = math.log(base)
self._sign_decay_fn = sign_decay_fn
# Tensor versions of the constructor arguments, created in _prepare().
self._lr_t = None
self._beta_t = None
self._logbase_t = None
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
if self._sign_decay_fn is not None:
self._sign_decay_t = ops.convert_to_tensor(
self._sign_decay_fn(global_step), name='sign_decay')
return super(PowerSignOptimizer, self).apply_gradients(
grads_and_vars, global_step=global_step, name=name)
def _create_slots(self, var_list):
# Create slots for the first moment.
for v in var_list:
self._zeros_slot(v, 'm', self._name)
def _prepare(self):
self._lr_t = ops.convert_to_tensor(self._lr, name='learning_rate')
self._beta_t = ops.convert_to_tensor(self._beta, name='beta')
self._logbase_t = ops.convert_to_tensor(self._logbase, name='logbase')
if self._sign_decay_fn is None:
self._sign_decay_t = ops.convert_to_tensor(1.0, name='sign_decay')
def _apply_dense(self, grad, var):
m = self.get_slot(var, 'm')
return training_ops.apply_power_sign(
var,
m,
math_ops.cast(self._lr_t, var.dtype.base_dtype),
math_ops.cast(self._logbase_t, var.dtype.base_dtype),
math_ops.cast(self._sign_decay_t, var.dtype.base_dtype),
math_ops.cast(self._beta_t, var.dtype.base_dtype),
grad,
use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, var):
m = self.get_slot(var, 'm')
return training_ops.resource_apply_power_sign(
var.handle,
m.handle,
math_ops.cast(self._lr_t, var.dtype.base_dtype),
math_ops.cast(self._logbase_t, var.dtype.base_dtype),
math_ops.cast(self._sign_decay_t, var.dtype.base_dtype),
math_ops.cast(self._beta_t, var.dtype.base_dtype),
grad,
use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta_t = math_ops.cast(self._beta_t, var.dtype.base_dtype)
logbase_t = math_ops.cast(self._logbase_t, var.dtype.base_dtype)
e_t = math_ops.cast(math.e, var.dtype.base_dtype)
m = self.get_slot(var, 'm')
m_t = state_ops.assign(
m, (m * beta_t) + (grad * (1 - beta_t)), use_locking=self._use_locking)
sign_g = ops.IndexedSlices(
math_ops.sign(grad.values), grad.indices, dense_shape=grad.dense_shape)
sign_gm = ops.IndexedSlices(
array_ops.gather(math_ops.sign(m_t), sign_g.indices) * sign_g.values,
sign_g.indices,
dense_shape=sign_g.dense_shape)
sign_decayed = math_ops.cast(
self._sign_decay_t, var.dtype.base_dtype)
multiplier_values = math_ops.pow(
e_t, logbase_t * sign_decayed * sign_gm.values)
multiplier = ops.IndexedSlices(
multiplier_values, sign_gm.indices, dense_shape=sign_gm.dense_shape)
final_update = ops.IndexedSlices(
lr_t * multiplier.values * grad.values,
multiplier.indices,
dense_shape=multiplier.dense_shape)
var_update = state_ops.scatter_sub(
var,
final_update.indices,
final_update.values,
use_locking=self._use_locking)
    return control_flow_ops.group(*[var_update, m_t])
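# ----------------------------------------------------------------------------
# Hedged reference sketch (added for exposition; not part of the original
# module): the dense update rule from the class docstring, written in NumPy.
# All names and default values are illustrative.
def _power_sign_step_numpy_sketch(var, m, grad, lr=0.1, base=math.e,
                                  sign_decay=1.0, beta=0.9):
  import numpy as np
  m = beta * m + (1.0 - beta) * grad  # moving average of the gradients
  update = base ** (sign_decay * np.sign(grad) * np.sign(m)) * grad
  return var - lr * update, m
# ----------------------------------------------------------------------------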
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/powersign.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Delegating optimizer to clip norm for specified variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer
__all__ = ["VariableClippingOptimizer"]
class VariableClippingOptimizer(optimizer.Optimizer):
"""Wrapper optimizer that clips the norm of specified variables after update.
This optimizer delegates all aspects of gradient calculation and application
to an underlying optimizer. After applying gradients, this optimizer then
clips the variable to have a maximum L2 norm along specified dimensions.
NB: this is quite different from clipping the norm of the gradients.
Multiple instances of `VariableClippingOptimizer` may be chained to specify
different max norms for different subsets of variables.
  This is more efficient at serving time than normalizing during embedding
  lookup, at the expense of more expensive training and weaker guarantees
  about the norms.
@@__init__
"""
def __init__(self,
opt,
vars_to_clip_dims,
max_norm,
use_locking=False,
colocate_clip_ops_with_vars=False,
name="VariableClipping"):
"""Construct a new clip-norm optimizer.
Args:
opt: The actual optimizer that will be used to compute and apply the
gradients. Must be one of the Optimizer classes.
vars_to_clip_dims: A dict with keys as Variables and values as lists
of dimensions along which to compute the L2-norm. See
`tf.clip_by_norm` for more details.
max_norm: The L2-norm to clip to, for all variables specified.
use_locking: If `True` use locks for clip update operations.
colocate_clip_ops_with_vars: If `True`, try colocating the clip norm
ops with the corresponding variable.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "VariableClipping".
"""
super(VariableClippingOptimizer, self).__init__(use_locking, name)
self._opt = opt
# Defensive copy of input dict
self._vars_to_clip_dims = {
var: clip_dims[:] for var, clip_dims in vars_to_clip_dims.items()}
self._max_norm = max_norm
self._colocate_clip_ops_with_vars = colocate_clip_ops_with_vars
def compute_gradients(self, *args, **kwargs):
return self._opt.compute_gradients(*args, **kwargs)
def get_slot(self, *args, **kwargs):
return self._opt.get_slot(*args, **kwargs)
def get_slot_names(self, *args, **kwargs):
return self._opt.get_slot_names(*args, **kwargs)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
with ops.name_scope(name, self._name) as name:
update_op = self._opt.apply_gradients(
grads_and_vars, global_step=global_step)
clip_update_ops = []
with ops.control_dependencies([update_op]):
for grad, var in grads_and_vars:
if grad is None or var not in self._vars_to_clip_dims:
continue
with ops.name_scope("clip_" + var.op.name):
if isinstance(grad, ops.Tensor):
clip_update_ops.append(self._clip_dense(var))
else:
clip_update_ops.append(self._clip_sparse(grad, var))
# In case no var was clipped, still need to run the update_op.
return control_flow_ops.group(*([update_op] + clip_update_ops), name=name)
def _clip_dense(self, var):
with self._maybe_colocate_with(var):
updated_var_value = var.read_value()
normalized_var = clip_ops.clip_by_norm(
updated_var_value, self._max_norm, self._vars_to_clip_dims[var])
delta = updated_var_value - normalized_var
with ops.colocate_with(var):
return var.assign_sub(delta, use_locking=self._use_locking)
def _clip_sparse(self, grad, var):
assert isinstance(grad, ops.IndexedSlices)
clip_dims = self._vars_to_clip_dims[var]
if 0 in clip_dims:
logging.warning("Clipping norm across dims %s for %s is inefficient "
"when including sparse dimension 0.", clip_dims,
var.op.name)
return self._clip_dense(var)
with ops.colocate_with(var):
var_subset = array_ops.gather(var, grad.indices)
with self._maybe_colocate_with(var):
normalized_var_subset = clip_ops.clip_by_norm(
var_subset, self._max_norm, clip_dims)
delta = ops.IndexedSlices(
var_subset - normalized_var_subset, grad.indices, grad.dense_shape)
with ops.colocate_with(var):
return var.scatter_sub(delta, use_locking=self._use_locking)
@contextlib.contextmanager
def _maybe_colocate_with(self, var):
"""Context to colocate with `var` if `colocate_clip_ops_with_vars`."""
if self._colocate_clip_ops_with_vars:
with ops.colocate_with(var):
yield
else:
yield
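# ----------------------------------------------------------------------------
# Hedged usage sketch (added for exposition; not part of the original
# module): wrapping a plain gradient-descent optimizer so that each row of
# an embedding matrix is renormalized to L2 norm <= 1.0 after every update.
# The toy variable, loss, and names below are illustrative assumptions.
def _variable_clipping_demo():
  from tensorflow.python.ops import math_ops
  from tensorflow.python.ops import variables
  from tensorflow.python.training import gradient_descent

  embeddings = variables.Variable([[3.0, 4.0], [0.3, 0.4]],
                                  name="embeddings")
  loss = math_ops.reduce_sum(embeddings * embeddings)
  base_opt = gradient_descent.GradientDescentOptimizer(0.1)
  # Clip along dimension 1, i.e. the L2 norm of each embedding row.
  opt = VariableClippingOptimizer(base_opt, {embeddings: [1]}, max_norm=1.0)
  return opt.minimize(loss)
# ----------------------------------------------------------------------------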
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/opt/python/training/variable_clipping_optimizer.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exposes the Python wrapper for StatSummarizer utility class.
The wrapper implementation is in tensorflow/python/util/stat_summarizer.i for
technical reasons, but it should be accessed via tf.contrib.stat_summarizer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import, line-too-long
from tensorflow.python.pywrap_tensorflow import DeleteStatSummarizer
from tensorflow.python.pywrap_tensorflow import NewStatSummarizer
from tensorflow.python.pywrap_tensorflow import StatSummarizer
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['DeleteStatSummarizer', 'NewStatSummarizer',
'StatSummarizer']
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/stat_summarizer/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for StatSummarizer Python wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class StatSummarizerTest(test.TestCase):
def testStatSummarizer(self):
with ops.Graph().as_default() as graph:
matrix1 = constant_op.constant([[3., 3.]], name=r"m1")
matrix2 = constant_op.constant([[2.], [2.]], name=r"m2")
product = math_ops.matmul(matrix1, matrix2, name=r"product")
graph_def = graph.as_graph_def()
ss = pywrap_tensorflow.NewStatSummarizer(graph_def.SerializeToString())
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
for _ in range(20):
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
sess.run(product, options=run_options, run_metadata=run_metadata)
ss.ProcessStepStatsStr(run_metadata.step_stats.SerializeToString())
output_string = ss.GetOutputString()
print(output_string)
# Test it recorded running the expected number of times.
self.assertRegexpMatches(output_string, r"count=20")
# Test that a header line got printed.
self.assertRegexpMatches(output_string, r"====== .* ======")
# Test that the nodes we added were analyzed.
# The line for the op should contain both the op type (MatMul)
# and the name of the node (product)
self.assertRegexpMatches(output_string, r"MatMul.*product")
self.assertRegexpMatches(output_string, r"Const.*m1")
self.assertRegexpMatches(output_string, r"Const.*m2")
# Test that a CDF summed to 100%
self.assertRegexpMatches(output_string, r"100\.")
pywrap_tensorflow.DeleteStatSummarizer(ss)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/stat_summarizer/python/stat_summarizer_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear-chain CRF layer.
@@crf_binary_score
@@crf_decode
@@crf_log_likelihood
@@crf_log_norm
@@crf_multitag_sequence_score
@@crf_sequence_score
@@crf_unary_score
@@CrfDecodeBackwardRnnCell
@@CrfDecodeForwardRnnCell
@@CrfForwardRnnCell
@@viterbi_decode
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.crf.python.ops.crf import crf_binary_score
from tensorflow.contrib.crf.python.ops.crf import crf_decode
from tensorflow.contrib.crf.python.ops.crf import crf_log_likelihood
from tensorflow.contrib.crf.python.ops.crf import crf_log_norm
from tensorflow.contrib.crf.python.ops.crf import crf_multitag_sequence_score
from tensorflow.contrib.crf.python.ops.crf import crf_sequence_score
from tensorflow.contrib.crf.python.ops.crf import crf_unary_score
from tensorflow.contrib.crf.python.ops.crf import CrfDecodeBackwardRnnCell
from tensorflow.contrib.crf.python.ops.crf import CrfDecodeForwardRnnCell
from tensorflow.contrib.crf.python.ops.crf import CrfForwardRnnCell
from tensorflow.contrib.crf.python.ops.crf import viterbi_decode
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
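# ----------------------------------------------------------------------------
# Hedged usage sketch (added for exposition; not part of the original
# module): the typical train/decode round trip through this API. Argument
# shapes and names are illustrative assumptions.
def _crf_round_trip_sketch(unary_scores, tag_indices, sequence_lengths):
  """unary_scores: [batch, max_seq_len, num_tags] float32 scores,
  tag_indices: [batch, max_seq_len] int32 gold tags,
  sequence_lengths: [batch] int32 true lengths."""
  from tensorflow.python.ops import math_ops
  log_likelihood, transition_params = crf_log_likelihood(
      unary_scores, tag_indices, sequence_lengths)
  loss = math_ops.reduce_mean(-log_likelihood)  # minimize this to train
  decode_tags, best_score = crf_decode(unary_scores, transition_params,
                                       sequence_lengths)
  return loss, decode_tags, best_score
# ----------------------------------------------------------------------------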
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/crf/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear-chain CRF."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/crf/python/__init__.py
|