python_code (string, length 0 to 1.02M) | repo_name (string, length 9 to 48) | file_path (string, length 5 to 114)
---|---|---|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy
from tensorflow.contrib.checkpoint.python import python_state
from tensorflow.python.client import session
from tensorflow.python.eager import test
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.training.tracking import util
class NumpyStateTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testSaveRestoreNumpyState(self):
directory = self.get_temp_dir()
prefix = os.path.join(directory, "ckpt")
save_state = python_state.NumpyState()
saver = util.Checkpoint(numpy=save_state)
save_state.a = numpy.ones([2, 2])
save_state.b = numpy.ones([2, 2])
save_state.b = numpy.zeros([2, 2])
save_state.c = numpy.int64(3)
self.assertAllEqual(numpy.ones([2, 2]), save_state.a)
self.assertAllEqual(numpy.zeros([2, 2]), save_state.b)
self.assertEqual(3, save_state.c)
first_save_path = saver.save(prefix)
save_state.a[1, 1] = 2.
save_state.c = numpy.int64(4)
second_save_path = saver.save(prefix)
load_state = python_state.NumpyState()
loader = util.Checkpoint(numpy=load_state)
loader.restore(first_save_path).initialize_or_restore()
self.assertAllEqual(numpy.ones([2, 2]), load_state.a)
self.assertAllEqual(numpy.zeros([2, 2]), load_state.b)
self.assertEqual(3, load_state.c)
load_state.a[0, 0] = 42.
self.assertAllEqual([[42., 1.], [1., 1.]], load_state.a)
loader.restore(first_save_path).run_restore_ops()
self.assertAllEqual(numpy.ones([2, 2]), load_state.a)
loader.restore(second_save_path).run_restore_ops()
self.assertAllEqual([[1., 1.], [1., 2.]], load_state.a)
self.assertAllEqual(numpy.zeros([2, 2]), load_state.b)
self.assertEqual(4, load_state.c)
def testNoGraphPollution(self):
graph = ops.Graph()
with graph.as_default(), session.Session():
directory = self.get_temp_dir()
prefix = os.path.join(directory, "ckpt")
save_state = python_state.NumpyState()
saver = util.Checkpoint(numpy=save_state)
save_state.a = numpy.ones([2, 2])
save_path = saver.save(prefix)
saver.restore(save_path)
graph.finalize()
saver.save(prefix)
save_state.a = numpy.zeros([2, 2])
saver.save(prefix)
saver.restore(save_path)
@test_util.run_in_graph_and_eager_modes
def testNoMixedNumpyStateTF(self):
save_state = python_state.NumpyState()
save_state.a = numpy.ones([2, 2])
with self.assertRaises(NotImplementedError):
save_state.v = variables.Variable(1.)
@test_util.run_in_graph_and_eager_modes
def testDocstringExample(self):
arrays = python_state.NumpyState()
checkpoint = util.Checkpoint(numpy_arrays=arrays)
arrays.x = numpy.zeros([3, 4])
save_path = checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
arrays.x[1, 1] = 4.
checkpoint.restore(save_path)
self.assertAllEqual(numpy.zeros([3, 4]), arrays.x)
second_checkpoint = util.Checkpoint(numpy_arrays=python_state.NumpyState())
second_checkpoint.restore(save_path)
self.assertAllEqual(numpy.zeros([3, 4]), second_checkpoint.numpy_arrays.x)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/checkpoint/python/python_state_test.py
|
"""Utilities for visualizing dependency graphs."""
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import util as trackable_utils
def dot_graph_from_checkpoint(save_path):
r"""Visualizes an object-based checkpoint (from `tf.train.Checkpoint`).
Useful for inspecting checkpoints and debugging loading issues.
Example usage from Python (requires pydot):
```python
import tensorflow as tf
import pydot
dot_string = tf.contrib.checkpoint.dot_graph_from_checkpoint('/path/to/ckpt')
parsed, = pydot.graph_from_dot_data(dot_string)
parsed.write_svg('/tmp/tensorflow/visualized_checkpoint.svg')
```
Example command line usage:
```sh
python -c "import tensorflow as tf;\
print(tf.contrib.checkpoint.dot_graph_from_checkpoint('/path/to/ckpt'))"\
| dot -Tsvg > /tmp/tensorflow/checkpoint_viz.svg
```
Args:
save_path: The checkpoint prefix, as returned by `tf.train.Checkpoint.save`
or `tf.train.latest_checkpoint`.
Returns:
A graph in DOT format as a string.
"""
reader = pywrap_tensorflow.NewCheckpointReader(save_path)
object_graph = trackable_utils.object_metadata(save_path)
shape_map = reader.get_variable_to_shape_map()
dtype_map = reader.get_variable_to_dtype_map()
graph = 'digraph {\n'
def _escape(name):
return name.replace('"', '\\"')
slot_ids = set()
for node in object_graph.nodes:
for slot_reference in node.slot_variables:
slot_ids.add(slot_reference.slot_variable_node_id)
for node_id, node in enumerate(object_graph.nodes):
if (len(node.attributes) == 1
and node.attributes[0].name == trackable.VARIABLE_VALUE_KEY):
if node_id in slot_ids:
color = 'orange'
tooltip_prefix = 'Slot variable'
else:
color = 'blue'
tooltip_prefix = 'Variable'
attribute = node.attributes[0]
graph += ('N_%d [shape=point label="" color=%s width=.25'
' tooltip="%s %s shape=%s %s"]\n') % (
node_id,
color,
tooltip_prefix,
_escape(attribute.full_name),
shape_map[attribute.checkpoint_key],
dtype_map[attribute.checkpoint_key].name)
elif node.slot_variables:
graph += ('N_%d [shape=point label="" width=.25 color=red,'
'tooltip="Optimizer"]\n') % node_id
else:
graph += 'N_%d [shape=point label="" width=.25]\n' % node_id
for reference in node.children:
graph += 'N_%d -> N_%d [label="%s"]\n' % (
node_id, reference.node_id, _escape(reference.local_name))
for slot_reference in node.slot_variables:
graph += 'N_%d -> N_%d [label="%s" style=dotted]\n' % (
node_id,
slot_reference.slot_variable_node_id,
_escape(slot_reference.slot_name))
graph += 'N_%d -> N_%d [style=dotted]\n' % (
slot_reference.original_variable_node_id,
slot_reference.slot_variable_node_id)
graph += '}\n'
return graph
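# Illustrative comment (not part of the original module): for a checkpoint whose
# root object has one child named "model" that in turn holds a single variable
# attribute, the string built above looks roughly like
#
#   digraph {
#   N_0 [shape=point label="" width=.25]
#   N_0 -> N_1 [label="model"]
#   N_1 [shape=point label="" width=.25]
#   N_1 -> N_2 [label="kernel"]
#   N_2 [shape=point label="" color=blue width=.25 tooltip="Variable ..."]
#   }
#
# Actual node ids, labels and tooltips depend on the object graph stored in the
# checkpoint; "model" and "kernel" here are hypothetical dependency names.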
|
tensorflow-master
|
tensorflow/contrib/checkpoint/python/visualize.py
|
"""Utilities for including Python state in TensorFlow checkpoints."""
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import python_state as core_python_state
# pylint: disable=g-import-not-at-top
try:
# In Python 2.x, use the faster string buffering option.
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
# pylint: enable=g-import-not-at-top
class NumpyState(base.Trackable):
"""A trackable object whose NumPy array attributes are saved/restored.
Example usage:
```python
arrays = tf.contrib.checkpoint.NumpyState()
checkpoint = tf.train.Checkpoint(numpy_arrays=arrays)
arrays.x = numpy.zeros([3, 4])
save_path = checkpoint.save("/tmp/ckpt")
arrays.x[1, 1] = 4.
checkpoint.restore(save_path)
assert (arrays.x == numpy.zeros([3, 4])).all()
second_checkpoint = tf.train.Checkpoint(
numpy_arrays=tf.contrib.checkpoint.NumpyState())
# Attributes of NumpyState objects are created automatically by restore()
second_checkpoint.restore(save_path)
assert (second_checkpoint.numpy_arrays.x == numpy.zeros([3, 4])).all()
```
Note that `NumpyState` objects re-create the attributes of the previously
saved object on `restore()`. This is in contrast to TensorFlow variables, for
which a `Variable` object must be created and assigned to an attribute.
This snippet works both when graph building and when executing eagerly. On
save, the NumPy array(s) are fed as strings to be saved in the checkpoint (via
a placeholder when graph building, or as a string constant when executing
eagerly). When restoring they skip the TensorFlow graph entirely, and so no
restore ops need be run. This means that restoration always happens eagerly,
rather than waiting for `checkpoint.restore(...).run_restore_ops()` like
TensorFlow variables when graph building.
"""
def _lookup_dependency(self, name):
"""Create placeholder NumPy arrays for to-be-restored attributes.
Typically `_lookup_dependency` is used to check by name whether a dependency
exists. We cheat slightly by creating a trackable object for `name` if
we don't already have one, giving us attribute re-creation behavior when
loading a checkpoint.
Args:
name: The name of the dependency being checked.
Returns:
An existing dependency if one exists, or a new `_NumpyWrapper` placeholder
dependency (which will generally be restored immediately).
"""
value = super(NumpyState, self)._lookup_dependency(name)
if value is None:
value = _NumpyWrapper(numpy.array([]))
new_reference = base.TrackableReference(name=name, ref=value)
self._unconditional_checkpoint_dependencies.append(new_reference)
self._unconditional_dependency_names[name] = value
super(NumpyState, self).__setattr__(name, value)
return value
def __getattribute__(self, name):
"""Un-wrap `_NumpyWrapper` objects when accessing attributes."""
value = super(NumpyState, self).__getattribute__(name)
if isinstance(value, _NumpyWrapper):
return value.array
return value
def __setattr__(self, name, value):
"""Automatically wrap NumPy arrays assigned to attributes."""
# TODO(allenl): Consider supporting lists/tuples, either ad-hoc or by making
# ndarrays trackable natively and using standard trackable list
# tracking.
if isinstance(value, (numpy.ndarray, numpy.generic)):
try:
existing = super(NumpyState, self).__getattribute__(name)
existing.array = value
return
except AttributeError:
value = _NumpyWrapper(value)
self._track_trackable(value, name=name, overwrite=True)
elif (name not in ("_self_setattr_tracking", "_self_update_uid")
and getattr(self, "_self_setattr_tracking", True)):
# Mixing restore()-created attributes with user-added trackable
# objects is tricky, since we can't use the `_lookup_dependency` trick to
# re-create attributes (we might accidentally steal the restoration for
# another trackable object). For now `NumpyState` objects must be
# leaf nodes. Theoretically we could add some extra arguments to
# `_lookup_dependency` to figure out whether we should create a NumPy
# array for the attribute or not.
raise NotImplementedError(
("Assigned %s to the %s property of %s, which is not a NumPy array. "
"Currently mixing NumPy arrays and other trackable objects is "
"not supported. File a feature request if this limitation bothers "
"you.")
% (value, name, self))
super(NumpyState, self).__setattr__(name, value)
class _NumpyWrapper(core_python_state.PythonState):
"""Wraps a NumPy array for storage in an object-based checkpoint."""
def __init__(self, array):
"""Specify a NumPy array to wrap.
Args:
array: The NumPy array to save and restore (may be overwritten).
"""
self.array = array
def serialize(self):
"""Callback to serialize the array."""
string_file = BytesIO()
try:
numpy.save(string_file, self.array, allow_pickle=False)
serialized = string_file.getvalue()
finally:
string_file.close()
return serialized
def deserialize(self, string_value):
"""Callback to deserialize the array."""
string_file = BytesIO(string_value)
try:
self.array = numpy.load(string_file, allow_pickle=False)
finally:
string_file.close()
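# Illustrative sketch (added for this write-up, not part of the original module;
# the helper name is hypothetical). It shows the numpy.save/numpy.load round
# trip that `_NumpyWrapper.serialize` and `deserialize` perform above.
def _example_numpy_wrapper_roundtrip():
  original = _NumpyWrapper(numpy.arange(6).reshape(2, 3))
  serialized = original.serialize()  # bytes in .npy format, via numpy.save
  # Empty placeholder, as created by NumpyState._lookup_dependency.
  restored = _NumpyWrapper(numpy.array([]))
  restored.deserialize(serialized)  # replaces .array via numpy.load
  assert (restored.array == original.array).all()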
|
tensorflow-master
|
tensorflow/contrib/checkpoint/python/python_state.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.checkpoint.python import split_dependency
from tensorflow.python.eager import test
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util
def _split_variable_closure(variable):
def _fill_save_buffer_fn(save_buffer):
save_buffer["first_half"] = variable[:2]
save_buffer["second_half"] = variable[2:]
return _fill_save_buffer_fn
def _combine_variable_closure(variable):
def _consume_restore_buffer_fn(restore_buffer):
return variable.assign(
array_ops.concat([restore_buffer["first_half"],
restore_buffer["second_half"]],
axis=0))
return _consume_restore_buffer_fn
class SaveTensorSlicesAsDeps(base.Trackable):
def __init__(self):
self.combined = resource_variable_ops.ResourceVariable([0., 0., 0., 0.])
split_dependencies = split_dependency.split_dependency(
component_names=("first_half", "second_half"),
component_dtypes=(self.combined.dtype,) * 2,
fill_save_buffer_fn=_split_variable_closure(
self.combined),
consume_restore_buffer_fn=_combine_variable_closure(
self.combined),
device=self.combined.device)
for name, dep in split_dependencies.items():
self._track_trackable(dep, name=name)
class HasRegularDeps(tracking.AutoTrackable):
def __init__(self):
self.first_half = resource_variable_ops.ResourceVariable([0., 0.])
self.second_half = resource_variable_ops.ResourceVariable([0., 0.])
class OnlyOneDep(tracking.AutoTrackable):
def __init__(self):
self.first_half = resource_variable_ops.ResourceVariable([0., 0.])
class SplitTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testSaveRestoreSplitDep(self):
save_checkpoint = util.Checkpoint(
dep=SaveTensorSlicesAsDeps())
self.evaluate(save_checkpoint.dep.combined.assign([1., 2., 3., 4.]))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = save_checkpoint.save(checkpoint_prefix)
regular_deps = HasRegularDeps()
regular_restore_checkpoint = util.Checkpoint(
dep=regular_deps)
regular_restore_checkpoint.restore(
save_path).assert_consumed().run_restore_ops()
self.assertAllEqual([1., 2.], self.evaluate(regular_deps.first_half))
self.assertAllEqual([3., 4.], self.evaluate(regular_deps.second_half))
one_dep = OnlyOneDep()
one_dep_restore_checkpoint = util.Checkpoint(dep=one_dep)
status = one_dep_restore_checkpoint.restore(save_path)
with self.assertRaises(AssertionError):
# Missing the second dependency.
status.assert_consumed()
status.run_restore_ops()
self.assertAllEqual([1., 2.], self.evaluate(one_dep.first_half))
restore_checkpoint = util.Checkpoint()
status = restore_checkpoint.restore(save_path)
restore_checkpoint.dep = SaveTensorSlicesAsDeps()
status.assert_consumed().run_restore_ops()
self.assertAllEqual(
[1., 2., 3., 4.],
self.evaluate(restore_checkpoint.dep.combined))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/checkpoint/python/split_dependency_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from tensorflow.contrib.checkpoint.python import visualize
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training import adam
from tensorflow.python.training.tracking import util as trackable_utils
try:
import pydot # pylint: disable=g-import-not-at-top
except ImportError:
pydot = None
class MyModel(training.Model):
"""A concrete Model for testing."""
def __init__(self):
super(MyModel, self).__init__()
self._named_dense = core.Dense(1, use_bias=True)
self._second = core.Dense(1, use_bias=False)
def call(self, values):
ret = self._second(self._named_dense(values))
return ret
class DotGraphTests(test.TestCase):
def testMakeDotGraph(self):
with context.eager_mode():
input_value = constant_op.constant([[3.]])
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
optimizer_step = resource_variable_ops.ResourceVariable(12)
save_checkpoint = trackable_utils.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=optimizer_step)
optimizer.minimize(functools.partial(model, input_value))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')
save_path = save_checkpoint.save(checkpoint_prefix)
prefix = save_checkpoint.save(save_path)
dot_graph_string = visualize.dot_graph_from_checkpoint(prefix)
# The remainder of this test is more-or-less optional since it's so
# dependent on pydot/platform/Python versions.
if pydot is None:
self.skipTest('pydot is required for the remainder of this test.')
try:
parsed, = pydot.graph_from_dot_data(dot_graph_string)
except NameError as e:
if "name 'dot_parser' is not defined" in str(e):
self.skipTest("pydot isn't working")
else:
raise
# Check that the graph isn't completely trivial
self.assertEqual(
'"model"',
parsed.obj_dict['edges'][('N_0', 'N_1')][0]['attributes']['label'])
image_path = os.path.join(self.get_temp_dir(), 'saved.svg')
try:
parsed.write_svg(image_path)
except Exception as e: # pylint: disable=broad-except
# For some reason PyDot's "dot not available" error is an Exception, not
# something more specific.
if '"dot" not found in path' in str(e):
self.skipTest("pydot won't save SVGs (dot not available)")
else:
raise
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/checkpoint/python/visualize_test.py
|
"""Utility for creating multiple dependencies with synchronized save/restore."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training.tracking import base as trackable
class _CallbackSaveable(saver_lib.BaseSaverBuilder.SaveableObject):
"""Wraps save and restore callbacks as a `SaveableObject`."""
def __init__(self, name, dtype, device, save_callback, restore_callback):
self._restore_callback = restore_callback
spec = saver_lib.BaseSaverBuilder.SaveSpec(
tensor=save_callback,
slice_spec="",
name=name,
dtype=dtype,
device=device)
super(_CallbackSaveable, self).__init__(
save_callback, [spec], name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into both variables."""
tensor, = restored_tensors
return self._restore_callback(tensor)
class _SplitDependency(trackable.Trackable):
"""Looks like a regular variable while synchronizing save/restores."""
def __init__(self, save_buffer, restore_buffer, name, dtype, device,
num_components, fill_save_buffer_fn, consume_restore_buffer_fn):
self._save_buffer = save_buffer
self._restore_buffer = restore_buffer
self._name = name
self._dtype = dtype
self._device = device
self._num_components = num_components
self._fill_save_buffer_fn = fill_save_buffer_fn
self._consume_restore_buffer_fn = consume_restore_buffer_fn
def _save(self):
"""Pull from the shared buffer, populating it if necessary."""
if self._name not in self._save_buffer:
if self._save_buffer:
raise AssertionError(
("Split dependency %s (%s) unsynchronized. Split dependencies must "
"be saved together.") % (self._name, self))
self._fill_save_buffer_fn(self._save_buffer)
return self._save_buffer.pop(self._name)
def _restore(self, tensor):
"""Push into the shared buffer, flushing it if necessary."""
if self._name in self._restore_buffer:
raise AssertionError(
("Split dependency %s (%s) unsynchronized. Split dependencies must "
"be restored together.") % (self._name, self))
self._restore_buffer[self._name] = tensor
if len(self._restore_buffer) == self._num_components:
op = self._consume_restore_buffer_fn(self._restore_buffer)
self._restore_buffer.clear()
return op
else:
return control_flow_ops.no_op()
def _gather_saveables_for_checkpoint(self):
"""Looks to Trackable like a regular variable."""
return {
trackable.VARIABLE_VALUE_KEY:
functools.partial(_CallbackSaveable,
dtype=self._dtype,
device=self._device,
save_callback=self._save,
restore_callback=self._restore)
}
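# How the shared buffers above stay synchronized (illustrative comment, not
# part of the original module): on save, the first component's _save() finds
# the save buffer empty, calls fill_save_buffer_fn to populate every component
# at once, then pops its own entry; each remaining component only pops. On
# restore, each component's _restore() pushes its tensor into the restore
# buffer, and the last one to arrive calls consume_restore_buffer_fn, clears
# the buffer and returns the combined restore op (the others return a no-op).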
def split_dependency(component_names, component_dtypes,
fill_save_buffer_fn, consume_restore_buffer_fn,
device):
"""Creates multiple dependencies with a synchronized save/restore.
Useful when a single op produces `Tensor`s which should each be saved under
different objects, or when `Tensor`s saved with many different objects need to
be restored together as inputs to a single op (i.e. an object which uses a
single fused op may be swapped out for a subgraph of objects, and these two
programs are checkpoint compatible).
Args:
component_names: A sequence of names for the split
dependencies. `fill_save_buffer_fn` must add these keys to the dictionary
it is passed, and `consume_restore_buffer_fn` will receive a dictionary
with these keys.
component_dtypes: Data types for the `Tensor`s being saved and restored, a
sequence corresponding to `component_names`.
fill_save_buffer_fn: A function which takes an empty dictionary as an
argument and adds `Tensor`s with `component_names` as keys. These
`Tensor`s will be saved as if they were individual variables.
consume_restore_buffer_fn: A function which takes a dictionary with
`component_names` as keys mapping to restored individual `Tensor`s and
returns a restore op (or if executing eagerly, runs the restoration and
may return `None`).
device: The device on which to run save and restore operations.
Returns:
A dictionary mapping from names to Trackable objects. If one is
reachable from an object as a dependency, the others should be too; adding
dependencies on some but not all of the objects will result in errors.
"""
save_buffer = {}
restore_buffer = {}
split_dependencies = {}
for name, dtype in zip(component_names, component_dtypes):
split_dependencies[name] = _SplitDependency(
save_buffer=save_buffer,
restore_buffer=restore_buffer,
name=name,
dtype=dtype,
device=device,
num_components=len(component_names),
fill_save_buffer_fn=fill_save_buffer_fn,
consume_restore_buffer_fn=consume_restore_buffer_fn)
return split_dependencies
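# Illustrative sketch (not part of the original module; the helper name is
# hypothetical). It wires split_dependency() the same way the closures in
# split_dependency_test.py do: a four-element variable is saved as two
# two-element halves under separate dependency names.
def _example_split_variable_dependencies(variable):
  """Returns dependencies saving/restoring the two halves of `variable`."""
  from tensorflow.python.ops import array_ops  # Not imported at module level.

  def _fill_save_buffer_fn(save_buffer):
    save_buffer["first_half"] = variable[:2]
    save_buffer["second_half"] = variable[2:]

  def _consume_restore_buffer_fn(restore_buffer):
    return variable.assign(
        array_ops.concat([restore_buffer["first_half"],
                          restore_buffer["second_half"]], axis=0))

  return split_dependency(
      component_names=("first_half", "second_half"),
      component_dtypes=(variable.dtype,) * 2,
      fill_save_buffer_fn=_fill_save_buffer_fn,
      consume_restore_buffer_fn=_consume_restore_buffer_fn,
      device=variable.device)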
|
tensorflow-master
|
tensorflow/contrib/checkpoint/python/split_dependency.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module that declares the functions in tf.contrib.receptive_field's API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.receptive_field.python.util.graph_compute_order import get_compute_order
from tensorflow.contrib.receptive_field.python.util.receptive_field import compute_receptive_field_from_graph_def
# pylint: enable=unused-import
del absolute_import
del division
del print_function
|
tensorflow-master
|
tensorflow/contrib/receptive_field/receptive_field_api.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module to compute receptive field parameters for CNN tensorflow models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/receptive_field/python/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for receptive_fields module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import slim
from tensorflow.contrib.receptive_field import receptive_field_api as receptive_field
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
# TODO(andrearaujo): Rename the create_test_network_* functions in order to have
# more descriptive names.
def create_test_network_1():
"""Aligned network for test.
The graph corresponds to the example from the second figure in
go/cnn-rf-computation#arbitrary-computation-graphs
Returns:
g: Tensorflow graph object (Graph proto).
"""
g = ops.Graph()
with g.as_default():
# An input test image with unknown spatial resolution.
x = array_ops.placeholder(
dtypes.float32, (None, None, None, 1), name='input_image')
# Left branch.
l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
# Right branch.
l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='VALID')
# Addition.
nn.relu(l1 + l3, name='output')
return g
def create_test_network_2():
"""Aligned network for test.
The graph corresponds to a variation of the example from the second figure in
go/cnn-rf-computation#arbitrary-computation-graphs. Layers 2 and 3 are changed
to max-pooling operations. Since the functionality is the same as convolution,
the network is aligned and the receptive field size is the same as for the
network created by create_test_network_1().
Returns:
g: Tensorflow graph object (Graph proto).
"""
g = ops.Graph()
with g.as_default():
# An input test image with unknown spatial resolution.
x = array_ops.placeholder(
dtypes.float32, (None, None, None, 1), name='input_image')
# Left branch.
l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
# Right branch.
l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
l2 = slim.max_pool2d(l2_pad, [3, 3], stride=2, scope='L2', padding='VALID')
l3 = slim.max_pool2d(l2, [1, 1], stride=2, scope='L3', padding='VALID')
# Addition.
nn.relu(l1 + l3, name='output')
return g
def create_test_network_3():
"""Misaligned network for test.
The graph corresponds to the example from the first figure in
go/cnn-rf-computation#arbitrary-computation-graphs
Returns:
g: Tensorflow graph object (Graph proto).
"""
g = ops.Graph()
with g.as_default():
# An input test image with unknown spatial resolution.
x = array_ops.placeholder(
dtypes.float32, (None, None, None, 1), name='input_image')
# Left branch.
l1_pad = array_ops.pad(x, [[0, 0], [2, 1], [2, 1], [0, 0]])
l1 = slim.conv2d(l1_pad, 1, [5, 5], stride=2, scope='L1', padding='VALID')
# Right branch.
l2 = slim.conv2d(x, 1, [3, 3], stride=1, scope='L2', padding='VALID')
l3 = slim.conv2d(l2, 1, [3, 3], stride=1, scope='L3', padding='VALID')
# Addition.
nn.relu(l1 + l3, name='output')
return g
def create_test_network_4():
"""Misaligned network for test.
The graph corresponds to a variation of the example from the second figure
in go/cnn-rf-computation#arbitrary-computation-graphs. Layer 2 uses 'SAME'
padding, which makes its padding dependent on the input image dimensionality.
In this case, the effective padding will be undetermined, and the utility is
not able to check the network alignment.
Returns:
g: Tensorflow graph object (Graph proto).
"""
g = ops.Graph()
with g.as_default():
# An input test image with unknown spatial resolution.
x = array_ops.placeholder(
dtypes.float32, (None, None, None, 1), name='input_image')
# Left branch.
l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
# Right branch.
l2 = slim.conv2d(x, 1, [3, 3], stride=2, scope='L2', padding='SAME')
l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='VALID')
# Addition.
nn.relu(l1 + l3, name='output')
return g
def create_test_network_5():
"""Single-path network for testing non-square kernels.
The graph is similar to the right branch of the graph from
create_test_network_1(), except that the kernel sizes are changed to be
non-square.
Returns:
g: Tensorflow graph object (Graph proto).
"""
g = ops.Graph()
with g.as_default():
# An input test image with unknown spatial resolution.
x = array_ops.placeholder(
dtypes.float32, (None, None, None, 1), name='input_image')
# Two convolutional layers, where the first one has non-square kernel.
l1 = slim.conv2d(x, 1, [3, 5], stride=2, scope='L1', padding='VALID')
l2 = slim.conv2d(l1, 1, [3, 1], stride=2, scope='L2', padding='VALID')
# ReLU.
nn.relu(l2, name='output')
return g
def create_test_network_6():
"""Aligned network with dropout for test.
The graph is similar to create_test_network_1(), except that the right branch
has a dropout layer.
Returns:
g: Tensorflow graph object (Graph proto).
"""
g = ops.Graph()
with g.as_default():
# An input test image with unknown spatial resolution.
x = array_ops.placeholder(
dtypes.float32, (None, None, None, 1), name='input_image')
# Left branch.
l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
# Right branch.
l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='VALID')
dropout = slim.dropout(l3)
# Addition.
nn.relu(l1 + dropout, name='output')
return g
def create_test_network_7():
"""Aligned network for test, with a control dependency.
The graph is similar to create_test_network_1(), except that it includes an
assert operation on the left branch.
Returns:
g: Tensorflow graph object (Graph proto).
"""
g = ops.Graph()
with g.as_default():
# An 8x8 test image.
x = array_ops.placeholder(dtypes.float32, (1, 8, 8, 1), name='input_image')
# Left branch.
l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
l1_shape = array_ops.shape(l1)
assert_op = control_flow_ops.Assert(
gen_math_ops.equal(l1_shape[1], 2), [l1_shape], summarize=4)
# Right branch.
l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='VALID')
# Addition.
with ops.control_dependencies([assert_op]):
nn.relu(l1 + l3, name='output')
return g
def create_test_network_8():
"""Aligned network for test, including an intermediate addition.
The graph is similar to create_test_network_1(), except that it includes a few
more layers on top. The added layers compose two different branches whose
receptive fields are different. This makes this test case more challenging; in
particular, this test fails if a naive DFS-like algorithm is used for RF
computation.
Returns:
g: Tensorflow graph object (Graph proto).
"""
g = ops.Graph()
with g.as_default():
# An input test image with unknown spatial resolution.
x = array_ops.placeholder(
dtypes.float32, (None, None, None, 1), name='input_image')
# Left branch before first addition.
l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
# Right branch before first addition.
l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='VALID')
# First addition.
l4 = nn.relu(l1 + l3)
# Left branch after first addition.
l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='VALID')
# Right branch after first addition.
l6_pad = array_ops.pad(l4, [[0, 0], [1, 0], [1, 0], [0, 0]])
l6 = slim.conv2d(l6_pad, 1, [3, 3], stride=2, scope='L6', padding='VALID')
# Final addition.
nn.relu(l5 + l6, name='output')
return g
def create_test_network_9():
"""Aligned network for test, including an intermediate addition.
The graph is the same as create_test_network_8(), except that VALID padding is
changed to SAME.
Returns:
g: Tensorflow graph object (Graph proto).
"""
g = ops.Graph()
with g.as_default():
# An input test image with unknown spatial resolution.
x = array_ops.placeholder(
dtypes.float32, (None, None, None, 1), name='input_image')
# Left branch before first addition.
l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='SAME')
# Right branch before first addition.
l2 = slim.conv2d(x, 1, [3, 3], stride=2, scope='L2', padding='SAME')
l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='SAME')
# First addition.
l4 = nn.relu(l1 + l3)
# Left branch after first addition.
l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
# Right branch after first addition.
l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
# Final addition.
nn.relu(l5 + l6, name='output')
return g
class ReceptiveFieldTest(test.TestCase):
def testComputeRFFromGraphDefAligned(self):
graph_def = create_test_network_1().as_graph_def()
input_node = 'input_image'
output_node = 'output'
(receptive_field_x, receptive_field_y, effective_stride_x,
effective_stride_y, effective_padding_x, effective_padding_y) = (
receptive_field.compute_receptive_field_from_graph_def(
graph_def, input_node, output_node))
self.assertEqual(receptive_field_x, 3)
self.assertEqual(receptive_field_y, 3)
self.assertEqual(effective_stride_x, 4)
self.assertEqual(effective_stride_y, 4)
self.assertEqual(effective_padding_x, 1)
self.assertEqual(effective_padding_y, 1)
def testComputeRFFromGraphDefAligned2(self):
graph_def = create_test_network_2().as_graph_def()
input_node = 'input_image'
output_node = 'output'
(receptive_field_x, receptive_field_y, effective_stride_x,
effective_stride_y, effective_padding_x, effective_padding_y) = (
receptive_field.compute_receptive_field_from_graph_def(
graph_def, input_node, output_node))
self.assertEqual(receptive_field_x, 3)
self.assertEqual(receptive_field_y, 3)
self.assertEqual(effective_stride_x, 4)
self.assertEqual(effective_stride_y, 4)
self.assertEqual(effective_padding_x, 1)
self.assertEqual(effective_padding_y, 1)
def testComputeRFFromGraphDefUnaligned(self):
graph_def = create_test_network_3().as_graph_def()
input_node = 'input_image'
output_node = 'output'
with self.assertRaises(ValueError):
receptive_field.compute_receptive_field_from_graph_def(
graph_def, input_node, output_node)
def testComputeRFFromGraphDefUndefinedPadding(self):
graph_def = create_test_network_4().as_graph_def()
input_node = 'input_image'
output_node = 'output'
(receptive_field_x, receptive_field_y, effective_stride_x,
effective_stride_y, effective_padding_x, effective_padding_y) = (
receptive_field.compute_receptive_field_from_graph_def(
graph_def, input_node, output_node))
self.assertEqual(receptive_field_x, 3)
self.assertEqual(receptive_field_y, 3)
self.assertEqual(effective_stride_x, 4)
self.assertEqual(effective_stride_y, 4)
self.assertEqual(effective_padding_x, None)
self.assertEqual(effective_padding_y, None)
def testComputeRFFromGraphDefFixedInputDim(self):
graph_def = create_test_network_4().as_graph_def()
input_node = 'input_image'
output_node = 'output'
(receptive_field_x, receptive_field_y, effective_stride_x,
effective_stride_y, effective_padding_x, effective_padding_y) = (
receptive_field.compute_receptive_field_from_graph_def(
graph_def, input_node, output_node, input_resolution=[9, 9]))
self.assertEqual(receptive_field_x, 3)
self.assertEqual(receptive_field_y, 3)
self.assertEqual(effective_stride_x, 4)
self.assertEqual(effective_stride_y, 4)
self.assertEqual(effective_padding_x, 1)
self.assertEqual(effective_padding_y, 1)
def testComputeRFFromGraphDefUnalignedFixedInputDim(self):
graph_def = create_test_network_4().as_graph_def()
input_node = 'input_image'
output_node = 'output'
with self.assertRaises(ValueError):
receptive_field.compute_receptive_field_from_graph_def(
graph_def, input_node, output_node, input_resolution=[8, 8])
def testComputeRFFromGraphDefNonSquareRF(self):
graph_def = create_test_network_5().as_graph_def()
input_node = 'input_image'
output_node = 'output'
(receptive_field_x, receptive_field_y, effective_stride_x,
effective_stride_y, effective_padding_x, effective_padding_y) = (
receptive_field.compute_receptive_field_from_graph_def(
graph_def, input_node, output_node))
self.assertEqual(receptive_field_x, 5)
self.assertEqual(receptive_field_y, 7)
self.assertEqual(effective_stride_x, 4)
self.assertEqual(effective_stride_y, 4)
self.assertEqual(effective_padding_x, 0)
self.assertEqual(effective_padding_y, 0)
def testComputeRFFromGraphDefStopPropagation(self):
graph_def = create_test_network_6().as_graph_def()
input_node = 'input_image'
output_node = 'output'
# Compute the receptive field but stop the propagation for the random
# uniform variable of the dropout.
(receptive_field_x, receptive_field_y, effective_stride_x,
effective_stride_y, effective_padding_x, effective_padding_y) = (
receptive_field.compute_receptive_field_from_graph_def(
graph_def, input_node, output_node,
['Dropout/dropout_1/random_uniform']))
self.assertEqual(receptive_field_x, 3)
self.assertEqual(receptive_field_y, 3)
self.assertEqual(effective_stride_x, 4)
self.assertEqual(effective_stride_y, 4)
self.assertEqual(effective_padding_x, 1)
self.assertEqual(effective_padding_y, 1)
def testComputeCoordinatesRoundtrip(self):
graph_def = create_test_network_1()
input_node = 'input_image'
output_node = 'output'
rf = receptive_field.compute_receptive_field_from_graph_def(
graph_def, input_node, output_node)
x = np.random.randint(0, 100, (50, 2))
y = rf.compute_feature_coordinates(x)
x2 = rf.compute_input_center_coordinates(y)
self.assertAllEqual(x, x2)
def testComputeRFFromGraphDefAlignedWithControlDependencies(self):
graph_def = create_test_network_7().as_graph_def()
input_node = 'input_image'
output_node = 'output'
(receptive_field_x, receptive_field_y, effective_stride_x,
effective_stride_y, effective_padding_x, effective_padding_y) = (
receptive_field.compute_receptive_field_from_graph_def(
graph_def, input_node, output_node))
self.assertEqual(receptive_field_x, 3)
self.assertEqual(receptive_field_y, 3)
self.assertEqual(effective_stride_x, 4)
self.assertEqual(effective_stride_y, 4)
self.assertEqual(effective_padding_x, 1)
self.assertEqual(effective_padding_y, 1)
def testComputeRFFromGraphDefWithIntermediateAddNode(self):
graph_def = create_test_network_8().as_graph_def()
input_node = 'input_image'
output_node = 'output'
(receptive_field_x, receptive_field_y, effective_stride_x,
effective_stride_y, effective_padding_x, effective_padding_y) = (
receptive_field.compute_receptive_field_from_graph_def(
graph_def, input_node, output_node))
self.assertEqual(receptive_field_x, 11)
self.assertEqual(receptive_field_y, 11)
self.assertEqual(effective_stride_x, 8)
self.assertEqual(effective_stride_y, 8)
self.assertEqual(effective_padding_x, 5)
self.assertEqual(effective_padding_y, 5)
def testComputeRFFromGraphDefWithIntermediateAddNodeSamePaddingFixedInputDim(
self):
graph_def = create_test_network_9().as_graph_def()
input_node = 'input_image'
output_node = 'output'
(receptive_field_x, receptive_field_y, effective_stride_x,
effective_stride_y, effective_padding_x, effective_padding_y) = (
receptive_field.compute_receptive_field_from_graph_def(
graph_def, input_node, output_node, input_resolution=[17, 17]))
self.assertEqual(receptive_field_x, 11)
self.assertEqual(receptive_field_y, 11)
self.assertEqual(effective_stride_x, 8)
self.assertEqual(effective_stride_y, 8)
self.assertEqual(effective_padding_x, 5)
self.assertEqual(effective_padding_y, 5)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/receptive_field/python/util/receptive_field_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to parse RF-related parameters from TF layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.util import make_ndarray
from tensorflow.python.platform import tf_logging as logging
# White-listed layer operations, which do not affect the receptive field
# computation.
_UNCHANGED_RF_LAYER_OPS = [
"Add", "AddV2", "BiasAdd", "Cast", "Ceil", "ConcatV2", "Const", "Floor",
"FusedBatchNorm", "Identity", "Log", "Mul", "Pow", "RealDiv", "Relu",
"Relu6", "Round", "Rsqrt", "Softplus", "Sub", "VariableV2", "LRN",
"GreaterEqual"
]
# Different ways in which padding modes may be spelled.
_VALID_PADDING = ["VALID", b"VALID"]
_SAME_PADDING = ["SAME", b"SAME"]
def _stride_size(node, name_to_node):
"""Computes stride size given a TF node.
Args:
node: Tensorflow node (NodeDef proto).
name_to_node: For MaxPoolV2, mapping from variable name to the Tensorflow node.
Returns:
stride_x: Stride size for horizontal direction (integer).
stride_y: Stride size for vertical direction (integer).
Raises:
ValueError: If stride input cannot be found in `name_to_node`.
"""
if node.op == "MaxPoolV2":
strides_input_name = node.input[2]
if not strides_input_name.endswith("/strides"):
raise ValueError("Strides name does not end with '/strides'")
strides_node = name_to_node[strides_input_name]
value = strides_node.attr["value"]
t = make_ndarray(value.tensor)
stride_y = t[1]
stride_x = t[2]
else:
strides_attr = node.attr["strides"]
logging.vlog(4, "strides_attr = %s", strides_attr)
stride_y = strides_attr.list.i[1]
stride_x = strides_attr.list.i[2]
return stride_x, stride_y
def _conv_kernel_size(node, name_to_node):
"""Computes kernel size given a TF convolution or pooling node.
Args:
node: Tensorflow node (NodeDef proto).
name_to_node: Dict keyed by node name, each entry containing the node's
NodeDef.
Returns:
kernel_size_x: Kernel size for horizontal direction (integer).
kernel_size_y: Kernel size for vertical direction (integer).
Raises:
ValueError: If the weight layer node is misconfigured.
"""
weights_layer_read_name = node.input[1]
if not weights_layer_read_name.endswith("/read"):
raise ValueError(
"Weight layer's name input to conv layer does not end with '/read'")
weights_layer_param_name = weights_layer_read_name[:-5]
weights_node = name_to_node[weights_layer_param_name]
if weights_node.op == "VariableV2":
shape_dim = weights_node.attr["shape"].shape.dim
elif weights_node.op == "Const":
shape_dim = weights_node.attr["value"].tensor.tensor_shape.dim
else:
raise ValueError(
"Weight layer {} is not of type VariableV2 or Const: {}".format(
weights_layer_param_name, weights_node.op))
if len(shape_dim) != 4:
raise ValueError(
"Weight layer {} does not have rank 4. Instead, it has: {}".format(
weights_layer_param_name, len(shape_dim)))
logging.vlog(4, "weight shape = %s", shape_dim)
kernel_size_y = shape_dim[0].size
kernel_size_x = shape_dim[1].size
return kernel_size_x, kernel_size_y
def _padding_size_conv_pool(node, kernel_size, stride, input_resolution=None):
"""Computes padding size given a TF convolution or pooling node.
Args:
node: Tensorflow node (NodeDef proto).
kernel_size: Kernel size of node (integer).
stride: Stride size of node (integer).
input_resolution: Input resolution to assume, if not None (integer).
Returns:
total_padding: Total padding size (integer).
padding: Padding size, applied to the left or top (integer).
Raises:
ValueError: If padding is invalid.
"""
# In this case, we need to carefully consider the different TF padding modes.
# The padding depends on kernel size, and may depend on input size. If it
# depends on input size and input_resolution is None, we raise an exception.
padding_attr = node.attr["padding"]
logging.vlog(4, "padding_attr = %s", padding_attr)
if padding_attr.s in _VALID_PADDING:
total_padding = 0
padding = 0
elif padding_attr.s in _SAME_PADDING:
if input_resolution is None:
# In this case, we do not know the input resolution, so we can only know
# the padding in some special cases.
if kernel_size == 1:
total_padding = 0
padding = 0
elif stride == 1:
total_padding = kernel_size - 1
padding = int(math.floor(float(total_padding) / 2))
elif stride == 2 and kernel_size % 2 == 0:
# In this case, we can be sure of the left/top padding, but not of the
# total padding.
total_padding = None
padding = int(math.floor((float(kernel_size) - 1) / 2))
else:
total_padding = None
padding = None
logging.warning(
"Padding depends on input size, which means that the effective "
"padding may be different depending on the input image "
"dimensionality. In this case, alignment check will be skipped. If"
" you know the input resolution, please set it.")
else:
# First, compute total_padding based on documentation.
if input_resolution % stride == 0:
total_padding = int(max(float(kernel_size - stride), 0.0))
else:
total_padding = int(
max(float(kernel_size - (input_resolution % stride)), 0.0))
# Then, compute left/top padding.
padding = int(math.floor(float(total_padding) / 2))
else:
raise ValueError("Invalid padding operation %s" % padding_attr.s)
return total_padding, padding
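# Worked example for the padding arithmetic above (illustrative comment, not
# part of the original module):
# - 'VALID' padding: total_padding = 0 and padding = 0, regardless of
#   kernel/stride.
# - 'SAME' padding with a known input: kernel_size=3, stride=2,
#   input_resolution=9 gives 9 % 2 = 1 != 0, so
#   total_padding = max(3 - 1, 0) = 2 and padding = floor(2 / 2) = 1 (the
#   configuration used for layer 'L2' in create_test_network_4 when
#   input_resolution=[9, 9]).
# - 'SAME' padding with an unknown input: kernel_size=3, stride=1 gives
#   total_padding = 3 - 1 = 2 and padding = 1; for kernel_size=3, stride=2 the
#   padding is undetermined and both values are returned as None.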
def _pool_kernel_size(node, name_to_node):
"""Computes kernel size given a TF pooling node.
Args:
node: Tensorflow node (NodeDef proto).
name_to_node: For MaxPoolV2, mapping from node name to NodeDef.
Returns:
kernel_size_x: Kernel size for horizontal direction (integer).
kernel_size_y: Kernel size for vertical direction (integer).
Raises:
ValueError: If pooling is invalid.
"""
if node.op == "MaxPoolV2":
ksize_input_name = node.input[1]
if not ksize_input_name.endswith("/ksize"):
raise ValueError("Kernel size name does not end with '/ksize'")
ksize_node = name_to_node[ksize_input_name]
value = ksize_node.attr["value"]
t = make_ndarray(value.tensor)
kernel_size_y = t[1]
kernel_size_x = t[2]
if t[0] != 1:
raise ValueError("pool ksize for first dim is not 1")
if t[3] != 1:
raise ValueError("pool ksize for last dim is not 1")
else:
ksize = node.attr["ksize"]
kernel_size_y = ksize.list.i[1]
kernel_size_x = ksize.list.i[2]
if ksize.list.i[0] != 1:
raise ValueError("pool ksize for first dim is not 1")
if ksize.list.i[3] != 1:
raise ValueError("pool ksize for last dim is not 1")
return kernel_size_x, kernel_size_y
def _padding_size_pad_layer(node, name_to_node):
"""Computes padding size given a TF padding node.
Args:
node: Tensorflow node (NodeDef proto).
name_to_node: Dict keyed by node name, each entry containing the node's
NodeDef.
Returns:
total_padding_x: Total padding size for horizontal direction (integer).
padding_x: Padding size for horizontal direction, left side (integer).
total_padding_y: Total padding size for vertical direction (integer).
padding_y: Padding size for vertical direction, top side (integer).
Raises:
ValueError: If padding layer is invalid.
"""
paddings_layer_name = node.input[1]
if not paddings_layer_name.endswith("/paddings"):
raise ValueError("Padding layer name does not end with '/paddings'")
paddings_node = name_to_node[paddings_layer_name]
if paddings_node.op != "Const":
raise ValueError("Padding op is not Const")
value = paddings_node.attr["value"]
t = make_ndarray(value.tensor)
padding_y = t[1][0]
padding_x = t[2][0]
total_padding_y = padding_y + t[1][1]
total_padding_x = padding_x + t[2][1]
if (t[0][0] != 0) or (t[0][1] != 0):
raise ValueError("padding is not zero for first tensor dim")
if (t[3][0] != 0) or (t[3][1] != 0):
raise ValueError("padding is not zero for last tensor dim")
return total_padding_x, padding_x, total_padding_y, padding_y
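# Worked example (illustrative comment, not part of the original module): the
# test networks earlier in this dump pad with [[0, 0], [1, 0], [1, 0], [0, 0]].
# For that Const tensor t, padding_y = t[1][0] = 1, padding_x = t[2][0] = 1,
# total_padding_y = 1 + t[1][1] = 1, total_padding_x = 1 + t[2][1] = 1, and the
# batch/channel checks pass because t[0] and t[3] are both [0, 0].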
def get_layer_params(node, name_to_node, input_resolution=None, force=False):
"""Gets layer parameters relevant for RF computation.
Currently, only these nodes are supported:
- Conv2D
- DepthwiseConv2dNative
- Pad
- MaxPool
- AvgPool
- all nodes listed in _UNCHANGED_RF_LAYER_OPS
Args:
node: Tensorflow node (NodeDef proto).
name_to_node: Dict keyed by node name, each entry containing the node's
NodeDef.
input_resolution: List with 2 dimensions, denoting the height/width of the
input feature map to this layer. If set to None, then the padding may be
undefined (in tensorflow, SAME padding depends on input spatial
resolution).
force: If True, the function does not raise a ValueError if the layer op is
unknown. Instead, in this case it sets each of the returned parameters to
None.
Returns:
kernel_size_x: Kernel size for horizontal direction (integer).
kernel_size_y: Kernel size for vertical direction (integer).
stride_x: Stride size for horizontal direction (integer).
stride_y: Stride size for vertical direction (integer).
padding_x: Padding size for horizontal direction, left side (integer).
padding_y: Padding size for vertical direction, top side (integer).
total_padding_x: Total padding size for horizontal direction (integer).
total_padding_y: Total padding size for vertical direction (integer).
Raises:
ValueError: If layer op is unknown and force is False.
"""
logging.vlog(3, "node.name = %s", node.name)
logging.vlog(3, "node.op = %s", node.op)
logging.vlog(4, "node = %s", node)
if node.op == "Conv2D" or node.op == "DepthwiseConv2dNative":
stride_x, stride_y = _stride_size(node, name_to_node)
kernel_size_x, kernel_size_y = _conv_kernel_size(node, name_to_node)
# Compute the padding for this node separately for each direction.
total_padding_x, padding_x = _padding_size_conv_pool(
node, kernel_size_x, stride_x,
input_resolution[1] if input_resolution is not None else None)
total_padding_y, padding_y = _padding_size_conv_pool(
node, kernel_size_y, stride_y,
input_resolution[0] if input_resolution is not None else None)
elif node.op == "Pad":
# Kernel and stride are simply 1 in this case.
kernel_size_x = 1
kernel_size_y = 1
stride_x = 1
stride_y = 1
total_padding_x, padding_x, total_padding_y, padding_y = (
_padding_size_pad_layer(node, name_to_node))
elif node.op == "MaxPool" or node.op == "MaxPoolV2" or node.op == "AvgPool":
stride_x, stride_y = _stride_size(node, name_to_node)
kernel_size_x, kernel_size_y = _pool_kernel_size(node, name_to_node)
# Compute the padding for this node separately for each direction.
total_padding_x, padding_x = _padding_size_conv_pool(
node, kernel_size_x, stride_x,
input_resolution[1] if input_resolution is not None else None)
total_padding_y, padding_y = _padding_size_conv_pool(
node, kernel_size_y, stride_y,
input_resolution[0] if input_resolution is not None else None)
elif node.op in _UNCHANGED_RF_LAYER_OPS:
# These nodes do not modify the RF parameters.
kernel_size_x = 1
kernel_size_y = 1
stride_x = 1
stride_y = 1
total_padding_x = 0
padding_x = 0
total_padding_y = 0
padding_y = 0
else:
if force:
kernel_size_x = None
kernel_size_y = None
stride_x = None
stride_y = None
total_padding_x = None
padding_x = None
total_padding_y = None
padding_y = None
else:
raise ValueError("Unknown layer for operation '%s': %s" %
(node.name, node.op))
return (kernel_size_x, kernel_size_y, stride_x, stride_y, padding_x,
padding_y, total_padding_x, total_padding_y)
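# Illustrative comment (not part of the original module): for a 3x3, stride-2
# convolution with 'VALID' padding (e.g. layer 'L2' in create_test_network_1
# from receptive_field_test.py), the tuple returned above is
# (kernel_size_x, kernel_size_y, stride_x, stride_y, padding_x, padding_y,
#  total_padding_x, total_padding_y) = (3, 3, 2, 2, 0, 0, 0, 0).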
|
tensorflow-master
|
tensorflow/contrib/receptive_field/python/util/parse_layer_parameters.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library to compute order of computations in a graph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.contrib.receptive_field.python.util import parse_layer_parameters
from tensorflow.python.platform import tf_logging as logging
def parse_graph_nodes(graph_def):
"""Helper function to parse GraphDef's nodes.
It returns a dict mapping from node name to NodeDef.
Args:
graph_def: A GraphDef object.
Returns:
name_to_node: Dict keyed by node name, each entry containing the node's
NodeDef.
"""
name_to_node = {}
for node_def in graph_def.node:
name_to_node[node_def.name] = node_def
return name_to_node
# Named tuple used to collect information from each node in a computation graph.
_node_info = collections.namedtuple(
'NodeInfo', field_names=['order', 'node', 'input_size', 'output_size'])
def _compute_output_resolution(input_spatial_resolution, kernel_size, stride,
total_padding):
"""Computes output resolution, given input resolution and layer parameters.
Note that this computation is done only over one dimension (e.g., x or y).
If any of the inputs is None, returns None.
Args:
input_spatial_resolution: Input spatial resolution (int).
kernel_size: Kernel size (int).
stride: Stride (int).
total_padding: Total padding to be applied (int).
Returns:
output_resolution: Output dimension (int) or None.
"""
if (input_spatial_resolution is None) or (kernel_size is None) or (
stride is None) or (total_padding is None):
return None
return int(
math.ceil((
input_spatial_resolution + total_padding - kernel_size + 1) / stride))
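# Worked example (illustrative): a 3x3 kernel with stride 2 and no padding
# applied to a 225-wide input yields
#   ceil((225 + 0 - 3 + 1) / 2) = ceil(111.5) = 112
# output positions, matching the L2/Conv2D layer in the unit tests.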
def _get_computed_nodes(name_to_node,
current,
node_info,
input_node_name='',
input_node_size=None):
"""Traverses the graph recursively to compute its topological order.
Optionally, the function may also compute the input and output feature map
resolutions at each node. In this case, input_node_name and input_node_size
must be set. Note that if a node's op type is unknown, the input and output
resolutions are ignored and set to None.
Args:
name_to_node: Dict keyed by node name, each entry containing the node's
NodeDef.
current: Current node name.
node_info: Map of nodes we've already traversed, containing their _node_info
information.
input_node_name: Name of node with fixed input resolution (optional).
input_node_size: Fixed input resolution to use (optional).
Returns:
order: Order in topological sort for 'current'.
input_size: Tensor spatial resolution at input of current node.
output_size: Tensor spatial resolution at output of current node.
"""
if current in node_info:
return (node_info[current].order, node_info[current].input_size,
node_info[current].output_size)
node_def = name_to_node[current]
if current == input_node_name:
order = 0
input_size = None
output_size = input_node_size
node_info[current] = _node_info(order, node_def, input_size, output_size)
return (order, input_size, output_size)
input_size = None
output_size = None
order = 0
number_inputs = 0
for each in node_def.input:
# Parses name of input node.
if each.startswith('^'):
# The character '^' denotes a control dependency, so this input node can
# be safely ignored.
continue
each = each.split(':')[0]
# Recursively computes ordering.
(parent_order, _, parent_output_size) = _get_computed_nodes(
name_to_node, each, node_info, input_node_name, input_node_size)
order = max(order, parent_order + 1)
if number_inputs == 0:
# For all the types of nodes we consider, the first input corresponds to
# the feature map.
input_size = parent_output_size
number_inputs += 1
# Figure out output size for this layer.
logging.vlog(3, 'input_size = %s', input_size)
if input_size is None:
output_size = None
else:
(kernel_size_x, kernel_size_y, stride_x, stride_y, _, _, total_padding_x,
total_padding_y) = (
parse_layer_parameters.get_layer_params(
node_def, name_to_node, input_size, force=True))
logging.vlog(3, 'kernel_size_x = %s, kernel_size_y = %s, '
'stride_x = %s, stride_y = %s, '
'total_padding_x = %s, total_padding_y = %s' %
(kernel_size_x, kernel_size_y, stride_x, stride_y,
total_padding_x, total_padding_y))
output_size = [None] * 2
output_size[0] = _compute_output_resolution(input_size[0], kernel_size_x,
stride_x, total_padding_x)
output_size[1] = _compute_output_resolution(input_size[1], kernel_size_y,
stride_y, total_padding_y)
logging.vlog(3, 'output_size = %s', output_size)
node_info[current] = _node_info(order, node_def, input_size, output_size)
return order, input_size, output_size
def get_compute_order(graph_def, input_node_name='', input_node_size=None):
"""Computes order of computation for a given CNN graph.
Optionally, the function may also compute the input and output feature map
resolutions at each node. In this case, input_node_name and input_node_size
must be set. Note that if a node's op type is unknown, the input and output
resolutions are ignored and set to None.
Args:
graph_def: GraphDef object.
input_node_name: Name of node with fixed input resolution (optional). This
is usually the node name for the input image in a CNN.
input_node_size: 2D list of integers, fixed input resolution to use
(optional). This is usually the input resolution used for the input image
in a CNN (common examples are: [224, 224], [299, 299], [321, 321]).
Returns:
node_info: Default dict keyed by node name, mapping to a named tuple with
the following fields:
- order: Integer denoting topological order;
- node: NodeDef for the given node;
- input_size: 2D list of integers, denoting the input spatial resolution
to the node;
- output_size: 2D list of integers, denoting the output spatial resolution
of the node.
name_to_node: Dict keyed by node name, each entry containing the node's
NodeDef.
"""
name_to_node = parse_graph_nodes(graph_def)
node_info = collections.defaultdict(_node_info)
for each in graph_def.node:
_get_computed_nodes(name_to_node, each.name, node_info, input_node_name,
input_node_size)
return node_info, name_to_node
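# Illustrative usage sketch (not part of the original module): listing node
# names in topological order for a graph whose input placeholder is assumed to
# be named 'input_image' with a fixed 224x224 resolution.
def _example_topological_order(graph_def):
  """Returns node names of `graph_def` sorted by topological order."""
  node_info, _ = get_compute_order(
      graph_def, input_node_name='input_image', input_node_size=[224, 224])
  return [name for name, _ in sorted(
      node_info.items(), key=lambda item: item[1].order)]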
|
tensorflow-master
|
tensorflow/contrib/receptive_field/python/util/graph_compute_order.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for parse_layer_parameters module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.contrib import slim
from tensorflow.contrib.receptive_field.python.util import graph_compute_order
from tensorflow.contrib.receptive_field.python.util import parse_layer_parameters
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def create_test_network(placeholder_resolution, convert_variables_to_constants):
"""Convolutional neural network for test.
Args:
placeholder_resolution: Resolution to use for input placeholder. Used for
height and width dimensions.
convert_variables_to_constants: Whether to convert variables to constants.
Returns:
name_to_node: Dict keyed by node name, each entry containing the node's
NodeDef.
"""
g = ops.Graph()
sess = session.Session(graph=g)
with g.as_default():
# An input test image, with spatial resolution given by
# placeholder_resolution (None means the resolution is unknown).
x = array_ops.placeholder(
dtypes.float32, (1, placeholder_resolution, placeholder_resolution, 1),
name='input_image')
# Left branch before first addition.
l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
# Right branch before first addition.
l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]], name='L2_pad')
l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
l3 = slim.max_pool2d(l2, [3, 3], stride=2, scope='L3', padding='SAME')
# First addition.
l4 = nn.relu(l1 + l3, name='L4_relu')
# Left branch after first addition.
l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
# Right branch after first addition.
l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
# Final addition.
gen_math_ops.add(l5, l6, name='L7_add')
if convert_variables_to_constants:
sess.run(variables.global_variables_initializer())
graph_def = graph_util.convert_variables_to_constants(
sess, g.as_graph_def(), ['L7_add'])
else:
graph_def = g.as_graph_def()
name_to_node = graph_compute_order.parse_graph_nodes(graph_def)
return name_to_node
class ParseLayerParametersTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('NonePlaceholder', None, False),
('224Placeholder', 224, False),
('NonePlaceholderVarAsConst', None, True),
('224PlaceholderVarAsConst', 224, True))
def testParametersAreParsedCorrectly(self, placeholder_resolution,
convert_variables_to_constants):
"""Checks parameters from create_test_network() are parsed correctly."""
name_to_node = create_test_network(placeholder_resolution,
convert_variables_to_constants)
# L1.
l1_node_name = 'L1/Conv2D'
l1_params = parse_layer_parameters.get_layer_params(
name_to_node[l1_node_name], name_to_node)
expected_l1_params = (1, 1, 4, 4, 0, 0, 0, 0)
self.assertEqual(l1_params, expected_l1_params)
# L2 padding.
l2_pad_name = 'L2_pad'
l2_pad_params = parse_layer_parameters.get_layer_params(
name_to_node[l2_pad_name], name_to_node)
expected_l2_pad_params = (1, 1, 1, 1, 1, 1, 1, 1)
self.assertEqual(l2_pad_params, expected_l2_pad_params)
# L2.
l2_node_name = 'L2/Conv2D'
l2_params = parse_layer_parameters.get_layer_params(
name_to_node[l2_node_name], name_to_node)
expected_l2_params = (3, 3, 2, 2, 0, 0, 0, 0)
self.assertEqual(l2_params, expected_l2_params)
# L3.
l3_node_name = 'L3/MaxPool'
# - Without knowing input size.
l3_params = parse_layer_parameters.get_layer_params(
name_to_node[l3_node_name], name_to_node)
expected_l3_params = (3, 3, 2, 2, None, None, None, None)
self.assertEqual(l3_params, expected_l3_params)
# - Input size is even.
l3_even_params = parse_layer_parameters.get_layer_params(
name_to_node[l3_node_name], name_to_node, input_resolution=[4, 4])
expected_l3_even_params = (3, 3, 2, 2, 0, 0, 1, 1)
self.assertEqual(l3_even_params, expected_l3_even_params)
# - Input size is odd.
l3_odd_params = parse_layer_parameters.get_layer_params(
name_to_node[l3_node_name], name_to_node, input_resolution=[5, 5])
expected_l3_odd_params = (3, 3, 2, 2, 1, 1, 2, 2)
self.assertEqual(l3_odd_params, expected_l3_odd_params)
# L4.
l4_node_name = 'L4_relu'
l4_params = parse_layer_parameters.get_layer_params(
name_to_node[l4_node_name], name_to_node)
expected_l4_params = (1, 1, 1, 1, 0, 0, 0, 0)
self.assertEqual(l4_params, expected_l4_params)
# L5.
l5_node_name = 'L5/Conv2D'
l5_params = parse_layer_parameters.get_layer_params(
name_to_node[l5_node_name], name_to_node)
expected_l5_params = (1, 1, 2, 2, 0, 0, 0, 0)
self.assertEqual(l5_params, expected_l5_params)
# L6.
l6_node_name = 'L6/Conv2D'
# - Without knowing input size.
l6_params = parse_layer_parameters.get_layer_params(
name_to_node[l6_node_name], name_to_node)
expected_l6_params = (3, 3, 2, 2, None, None, None, None)
self.assertEqual(l6_params, expected_l6_params)
# - Input size is even.
l6_even_params = parse_layer_parameters.get_layer_params(
name_to_node[l6_node_name], name_to_node, input_resolution=[4, 4])
expected_l6_even_params = (3, 3, 2, 2, 0, 0, 1, 1)
self.assertEqual(l6_even_params, expected_l6_even_params)
# - Input size is odd.
l6_odd_params = parse_layer_parameters.get_layer_params(
name_to_node[l6_node_name], name_to_node, input_resolution=[5, 5])
expected_l6_odd_params = (3, 3, 2, 2, 1, 1, 2, 2)
self.assertEqual(l6_odd_params, expected_l6_odd_params)
# L7.
l7_node_name = 'L7_add'
l7_params = parse_layer_parameters.get_layer_params(
name_to_node[l7_node_name], name_to_node)
expected_l7_params = (1, 1, 1, 1, 0, 0, 0, 0)
self.assertEqual(l7_params, expected_l7_params)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/receptive_field/python/util/parse_layer_parameters_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for graph_compute_order module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import slim
from tensorflow.contrib.receptive_field import receptive_field_api as receptive_field
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
def create_test_network():
"""Convolutional neural network for test.
Returns:
g: Tensorflow Graph object.
"""
g = ops.Graph()
with g.as_default():
# An input test image with unknown spatial resolution.
x = array_ops.placeholder(
dtypes.float32, (None, None, None, 1), name='input_image')
# Left branch before first addition.
l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
# Right branch before first addition.
l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]], name='L2_pad')
l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
l3 = slim.max_pool2d(l2, [3, 3], stride=2, scope='L3', padding='SAME')
# First addition.
l4 = nn.relu(l1 + l3, name='L4_relu')
# Left branch after first addition.
l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
# Right branch after first addition.
l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
# Final addition.
gen_math_ops.add(l5, l6, name='L7_add')
return g
class GraphComputeOrderTest(test.TestCase):
def check_topological_sort_and_sizes(self,
node_info,
expected_input_sizes=None,
expected_output_sizes=None):
"""Helper function to check topological sorting and sizes are correct.
The arguments expected_input_sizes and expected_output_sizes are used to
check that the sizes are correct, if they are given.
Args:
node_info: Default dict keyed by node name, mapping to a named tuple with
the following keys: {order, node, input_size, output_size}.
expected_input_sizes: Dict mapping node names to expected input sizes
(optional).
expected_output_sizes: Dict mapping node names to expected output sizes
(optional).
"""
# Loop over nodes in sorted order, collecting those that were already seen.
# These will be used to make sure that the graph is topologically sorted.
# At the same time, we construct dicts from node name to input/output size,
# which will be used to check those.
already_seen_nodes = []
input_sizes = {}
output_sizes = {}
for _, (_, node, input_size, output_size) in sorted(
node_info.items(), key=lambda x: x[1].order):
for inp_name in node.input:
# Since the graph is topologically sorted, the inputs to the current
# node must have been seen beforehand.
self.assertIn(inp_name, already_seen_nodes)
input_sizes[node.name] = input_size
output_sizes[node.name] = output_size
already_seen_nodes.append(node.name)
# Check input sizes, if desired.
if expected_input_sizes is not None:
for k, v in expected_input_sizes.items():
self.assertIn(k, input_sizes)
self.assertEqual(input_sizes[k], v)
# Check output sizes, if desired.
if expected_output_sizes is not None:
for k, v in expected_output_sizes.items():
self.assertIn(k, output_sizes)
self.assertEqual(output_sizes[k], v)
def testGraphOrderIsCorrect(self):
"""Tests that the order and sizes of create_test_network() are correct."""
graph_def = create_test_network().as_graph_def()
# Case 1: Input node name/size are not given.
node_info, _ = receptive_field.get_compute_order(graph_def)
self.check_topological_sort_and_sizes(node_info)
# Case 2: Input node name is given, but not size.
node_info, _ = receptive_field.get_compute_order(
graph_def, input_node_name='input_image')
self.check_topological_sort_and_sizes(node_info)
# Case 3: Input node name and size (224) are given.
node_info, _ = receptive_field.get_compute_order(
graph_def, input_node_name='input_image', input_node_size=[224, 224])
expected_input_sizes = {
'input_image': None,
'L1/Conv2D': [224, 224],
'L2_pad': [224, 224],
'L2/Conv2D': [225, 225],
'L3/MaxPool': [112, 112],
'L4_relu': [56, 56],
'L5/Conv2D': [56, 56],
'L6/Conv2D': [56, 56],
'L7_add': [28, 28],
}
expected_output_sizes = {
'input_image': [224, 224],
'L1/Conv2D': [56, 56],
'L2_pad': [225, 225],
'L2/Conv2D': [112, 112],
'L3/MaxPool': [56, 56],
'L4_relu': [56, 56],
'L5/Conv2D': [28, 28],
'L6/Conv2D': [28, 28],
'L7_add': [28, 28],
}
self.check_topological_sort_and_sizes(node_info, expected_input_sizes,
expected_output_sizes)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/receptive_field/python/util/graph_compute_order_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to compute receptive field of a fully-convolutional network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.receptive_field.python.util import graph_compute_order
from tensorflow.contrib.receptive_field.python.util import parse_layer_parameters
from tensorflow.python.framework import ops as framework_ops
from tensorflow.python.platform import tf_logging as logging
def _get_rf_size_node_input(stride, kernel_size, rf_size_output):
"""Computes RF size at the input of a given layer.
Args:
stride: Stride of given layer (integer).
kernel_size: Kernel size of given layer (integer).
rf_size_output: RF size at output of given layer (integer).
Returns:
rf_size_input: RF size at input of given layer (integer).
"""
return stride * rf_size_output + kernel_size - stride
def _get_effective_stride_node_input(stride, effective_stride_output):
"""Computes effective stride at the input of a given layer.
Args:
stride: Stride of given layer (integer).
effective_stride_output: Effective stride at output of given layer
(integer).
Returns:
effective_stride_input: Effective stride at input of given layer
(integer).
"""
return stride * effective_stride_output
def _get_effective_padding_node_input(stride, padding,
effective_padding_output):
"""Computes effective padding at the input of a given layer.
Args:
stride: Stride of given layer (integer).
padding: Padding of given layer (integer).
effective_padding_output: Effective padding at output of given layer
(integer).
Returns:
effective_padding_input: Effective padding at input of given layer
(integer).
"""
return stride * effective_padding_output + padding
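# Worked example (illustrative): propagating backwards through a single 3x3
# convolution with stride 2 and left/top padding 1, starting from the output
# values rf_size_output = 1, effective_stride_output = 1 and
# effective_padding_output = 0, the formulas above give
#   rf_size_input           = 2 * 1 + 3 - 2 = 3
#   effective_stride_input  = 2 * 1 = 2
#   effective_padding_input = 2 * 0 + 1 = 1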
class ReceptiveField(object):
"""Receptive field of a convolutional neural network.
Args:
size: Receptive field size.
stride: Effective stride.
padding: Effective padding.
"""
def __init__(self, size, stride, padding):
self.size = np.asarray(size)
self.stride = np.asarray(stride)
self.padding = np.asarray(padding)
def compute_input_center_coordinates(self, y, axis=None):
"""Computes the center of the receptive field that generated a feature.
Args:
y: An array of feature coordinates with shape `(..., d)`, where `d` is the
number of dimensions of the coordinates.
axis: The dimensions for which to compute the input center coordinates. If
`None` (the default), compute the input center coordinates for all
dimensions.
Returns:
x: Center of the receptive field that generated the features, at the input
of the network.
Raises:
ValueError: If the number of dimensions of the feature coordinates does
not match the number of elements in `axis`.
"""
# Use all dimensions.
if axis is None:
axis = range(self.size.size)
# Ensure axis is a list because tuples have different indexing behavior.
axis = list(axis)
y = np.asarray(y)
if y.shape[-1] != len(axis):
raise ValueError("Dimensionality of the feature coordinates `y` (%d) "
"does not match dimensionality of `axis` (%d)" %
(y.shape[-1], len(axis)))
return -self.padding[axis] + y * self.stride[axis] + (
self.size[axis] - 1) / 2
def compute_feature_coordinates(self, x, axis=None):
"""Computes the position of a feature given the center of a receptive field.
Args:
x: An array of input center coordinates with shape `(..., d)`, where `d`
is the number of dimensions of the coordinates.
axis: The dimensions for which to compute the feature coordinates. If
`None` (the default), compute the feature coordinates for all
dimensions.
Returns:
y: Coordinates of the features.
Raises:
ValueError: If the number of dimensions of the input center coordinates
does not match the number of elements in `axis`.
"""
# Use all dimensions.
if axis is None:
axis = range(self.size.size)
# Ensure axis is a list because tuples have different indexing behavior.
axis = list(axis)
x = np.asarray(x)
if x.shape[-1] != len(axis):
raise ValueError("Dimensionality of the input center coordinates `x` "
"(%d) does not match dimensionality of `axis` (%d)" %
(x.shape[-1], len(axis)))
return (x + self.padding[axis] +
(1 - self.size[axis]) / 2) / self.stride[axis]
def __iter__(self):
return iter(np.concatenate([self.size, self.stride, self.padding]))
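# Illustrative usage sketch (not part of the original module): for a receptive
# field of size 3, effective stride 2 and effective padding 1 in both
# directions, the coordinate mappings defined above behave as follows.
#
#   rf = ReceptiveField(size=[3, 3], stride=[2, 2], padding=[1, 1])
#   rf.compute_input_center_coordinates([0, 0])  # -> [0., 0.]
#   rf.compute_input_center_coordinates([1, 1])  # -> [2., 2.]
#   rf.compute_feature_coordinates([2, 2])       # -> [1., 1.]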
def compute_receptive_field_from_graph_def(graph_def,
input_node,
output_node,
stop_propagation=None,
input_resolution=None):
"""Computes receptive field (RF) parameters from a Graph or GraphDef object.
The algorithm stops the calculation of the receptive field whenever it
encounters an operation in the list `stop_propagation`. Stopping the
calculation early can be useful to calculate the receptive field of a
subgraph such as a single branch of the
[inception network](https://arxiv.org/abs/1512.00567).
Args:
graph_def: Graph or GraphDef object.
input_node: Name of the input node or Tensor object from graph.
output_node: Name of the output node or Tensor object from graph.
stop_propagation: List of operations or scope names for which to stop the
propagation of the receptive field.
input_resolution: 2D list. If the input resolution to the model is fixed and
known, this may be set. This is helpful for cases where the RF parameters
vary depending on the input resolution (this happens since SAME padding in
tensorflow depends on input resolution in general). If this is None, it is
assumed that the input resolution is unknown, so some RF parameters may be
unknown (depending on the model architecture).
Returns:
rf_size_x: Receptive field size of network in the horizontal direction, with
respect to specified input and output.
rf_size_y: Receptive field size of network in the vertical direction, with
respect to specified input and output.
effective_stride_x: Effective stride of network in the horizontal direction,
with respect to specified input and output.
effective_stride_y: Effective stride of network in the vertical direction,
with respect to specified input and output.
effective_padding_x: Effective padding of network in the horizontal
direction, with respect to specified input and output.
effective_padding_y: Effective padding of network in the vertical
direction, with respect to specified input and output.
Raises:
ValueError: If the network is not aligned, or if either the input or output
node cannot be found. For the network alignment criterion, see
photos/vision/features/delf/g3doc/rf_computation.md
"""
# Convert a graph to graph_def if necessary.
if isinstance(graph_def, framework_ops.Graph):
graph_def = graph_def.as_graph_def()
# Convert tensors to names.
if isinstance(input_node, framework_ops.Tensor):
input_node = input_node.op.name
if isinstance(output_node, framework_ops.Tensor):
output_node = output_node.op.name
stop_propagation = stop_propagation or []
# Computes order of computation for a given graph.
node_info, name_to_node = graph_compute_order.get_compute_order(
graph_def=graph_def,
input_node_name=input_node,
input_node_size=input_resolution)
# Sort in reverse topological order.
ordered_node_info = sorted(node_info.items(), key=lambda x: -x[1].order)
# Dictionaries to keep track of receptive field, effective stride and
# effective padding of different nodes.
rf_sizes_x = {}
rf_sizes_y = {}
effective_strides_x = {}
effective_strides_y = {}
effective_paddings_x = {}
effective_paddings_y = {}
# Initialize dicts for output_node.
rf_sizes_x[output_node] = 1
rf_sizes_y[output_node] = 1
effective_strides_x[output_node] = 1
effective_strides_y[output_node] = 1
effective_paddings_x[output_node] = 0
effective_paddings_y[output_node] = 0
# Flag to denote if we found output node yet. If we have not, we skip nodes
# until the output node is found.
found_output_node = False
# Flag to denote if padding is undefined. This happens when SAME padding mode
# is used with stride and kernel sizes such that the padding to be applied
# depends on the input size. In this case,
# alignment checks are skipped, and the effective padding is None.
undefined_padding = False
for _, (o, node, _, _) in ordered_node_info:
if node:
logging.vlog(3, "%10d %-100s %-20s" % (o, node.name[:90], node.op))
else:
continue
# When we find input node, we can stop.
if node.name == input_node:
break
# Loop until we find the output node. All nodes before finding the output
# one are irrelevant, so they can be skipped.
if not found_output_node:
if node.name == output_node:
found_output_node = True
if found_output_node:
if node.name not in rf_sizes_x:
assert node.name not in rf_sizes_y, ("Node %s is in rf_sizes_y, but "
"not in rf_sizes_x" % node.name)
# In this case, node is not relevant since it's not part of the
# computation we're interested in.
logging.vlog(3, "Irrelevant node %s, skipping it...", node.name)
continue
# Get params for this layer.
(kernel_size_x, kernel_size_y, stride_x, stride_y, padding_x, padding_y,
_, _) = parse_layer_parameters.get_layer_params(
node, name_to_node, node_info[node.name].input_size)
logging.vlog(
3, "kernel_size_x = %s, kernel_size_y = %s, "
"stride_x = %s, stride_y = %s, "
"padding_x = %s, padding_y = %s, input size = %s" %
(kernel_size_x, kernel_size_y, stride_x, stride_y, padding_x,
padding_y, node_info[node.name].input_size))
if padding_x is None or padding_y is None:
undefined_padding = True
# Get parameters at input of this layer which may or may not be propagated
# to the input layers.
rf_size_input_x = _get_rf_size_node_input(stride_x, kernel_size_x,
rf_sizes_x[node.name])
rf_size_input_y = _get_rf_size_node_input(stride_y, kernel_size_y,
rf_sizes_y[node.name])
effective_stride_input_x = _get_effective_stride_node_input(
stride_x, effective_strides_x[node.name])
effective_stride_input_y = _get_effective_stride_node_input(
stride_y, effective_strides_y[node.name])
if not undefined_padding:
effective_padding_input_x = _get_effective_padding_node_input(
stride_x, padding_x, effective_paddings_x[node.name])
effective_padding_input_y = _get_effective_padding_node_input(
stride_y, padding_y, effective_paddings_y[node.name])
else:
effective_padding_input_x = None
effective_padding_input_y = None
logging.vlog(
4, "rf_size_input_x = %s, rf_size_input_y = %s, "
"effective_stride_input_x = %s, effective_stride_input_y = %s, "
"effective_padding_input_x = %s, effective_padding_input_y = %s" %
(rf_size_input_x, rf_size_input_y, effective_stride_input_x,
effective_stride_input_y, effective_padding_input_x,
effective_padding_input_y))
# Loop over this node's inputs and potentially propagate information down.
for inp_name in node.input:
# Stop the propagation of the receptive field.
if any(inp_name.startswith(stop) for stop in stop_propagation):
logging.vlog(3, "Skipping explicitly ignored node %s.", inp_name)
continue
logging.vlog(4, "inp_name = %s", inp_name)
if inp_name.startswith("^"):
# The character "^" denotes a control dependency, so this input node
# can be safely ignored.
continue
inp_node = name_to_node[inp_name]
logging.vlog(4, "inp_node = \n%s", inp_node)
if inp_name in rf_sizes_x:
assert inp_name in rf_sizes_y, ("Node %s is in rf_sizes_x, but "
"not in rf_sizes_y" % inp_name)
logging.vlog(
4, "rf_sizes_x[inp_name] = %s,"
" rf_sizes_y[inp_name] = %s, "
"effective_strides_x[inp_name] = %s,"
" effective_strides_y[inp_name] = %s, "
"effective_paddings_x[inp_name] = %s,"
" effective_paddings_y[inp_name] = %s" %
(rf_sizes_x[inp_name], rf_sizes_y[inp_name],
effective_strides_x[inp_name], effective_strides_y[inp_name],
effective_paddings_x[inp_name], effective_paddings_y[inp_name]))
# This node was already discovered through a previous path, so we need
# to make sure that graph is aligned. This alignment check is skipped
# if the padding is not defined, since in this case alignment cannot
# be checked.
if not undefined_padding:
if effective_strides_x[inp_name] != effective_stride_input_x:
raise ValueError(
"Graph is not aligned since effective stride from different "
"paths is different in horizontal direction")
if effective_strides_y[inp_name] != effective_stride_input_y:
raise ValueError(
"Graph is not aligned since effective stride from different "
"paths is different in vertical direction")
if (rf_sizes_x[inp_name] -
1) / 2 - effective_paddings_x[inp_name] != (
rf_size_input_x - 1) / 2 - effective_padding_input_x:
raise ValueError(
"Graph is not aligned since center shift from different "
"paths is different in horizontal direction")
if (rf_sizes_y[inp_name] -
1) / 2 - effective_paddings_y[inp_name] != (
rf_size_input_y - 1) / 2 - effective_padding_input_y:
raise ValueError(
"Graph is not aligned since center shift from different "
"paths is different in vertical direction")
# Keep track of path with largest RF, for both directions.
if rf_sizes_x[inp_name] < rf_size_input_x:
rf_sizes_x[inp_name] = rf_size_input_x
effective_strides_x[inp_name] = effective_stride_input_x
effective_paddings_x[inp_name] = effective_padding_input_x
if rf_sizes_y[inp_name] < rf_size_input_y:
rf_sizes_y[inp_name] = rf_size_input_y
effective_strides_y[inp_name] = effective_stride_input_y
effective_paddings_y[inp_name] = effective_padding_input_y
else:
assert inp_name not in rf_sizes_y, ("Node %s is in rf_sizes_y, but "
"not in rf_sizes_x" % inp_name)
# In this case, it is the first time we encounter this node. So we
# propagate the RF parameters.
rf_sizes_x[inp_name] = rf_size_input_x
rf_sizes_y[inp_name] = rf_size_input_y
effective_strides_x[inp_name] = effective_stride_input_x
effective_strides_y[inp_name] = effective_stride_input_y
effective_paddings_x[inp_name] = effective_padding_input_x
effective_paddings_y[inp_name] = effective_padding_input_y
if not found_output_node:
raise ValueError("Output node was not found")
if input_node not in rf_sizes_x:
raise ValueError("Input node was not found")
return ReceptiveField(
(rf_sizes_x[input_node], rf_sizes_y[input_node]),
(effective_strides_x[input_node], effective_strides_y[input_node]),
(effective_paddings_x[input_node], effective_paddings_y[input_node]))
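# Illustrative usage sketch (not part of the original module). The node names
# 'input_image' and 'my_output_node' are assumptions made only for this
# example; since ReceptiveField is iterable, the six parameters can be
# unpacked directly.
#
#   rf = compute_receptive_field_from_graph_def(
#       graph_def, 'input_image', 'my_output_node', input_resolution=[224, 224])
#   (rf_size_x, rf_size_y, effective_stride_x, effective_stride_y,
#    effective_padding_x, effective_padding_y) = rf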
|
tensorflow-master
|
tensorflow/contrib/receptive_field/python/util/receptive_field.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple script to convert CSV output from rf_benchmark to Markdown format.
The input CSV should have the following fields:
- CNN
- input resolution
- end_point
- FLOPs (Billion)
- RF size hor
- RF size ver
- effective stride hor
- effective stride ver
- effective padding hor
- effective padding ver
Since the parameters in the horizontal and vertical directions are usually the
same, this script assumes they are equal and prints only one of them to the
Markdown file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import csv
import sys
from tensorflow.python.platform import app
cmd_args = None
def main(unused_argv):
with open(cmd_args.markdown_path, 'w') as f:
# Write table header and field size.
f.write('CNN | resolution | end-point | FLOPs (Billion) | RF | '
'effective stride | effective padding\n')
f.write(':--------------------: | :----------: | :---------------: | '
':---------------: | :-----: | :----: | :----:\n')
with open(cmd_args.csv_path) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
# Make sure horizontal and vertical parameters are the same.
assert row['RF size hor'] == row['RF size ver']
assert row['effective stride hor'] == row['effective stride ver']
assert row['effective padding hor'] == row['effective padding ver']
f.write('%s|%s|%s|%s|%s|%s|%s\n' %
(row['CNN'], row['input resolution'], row['end_point'],
row['FLOPs (Billion)'], row['RF size hor'],
row['effective stride hor'], row['effective padding hor']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--csv_path',
type=str,
default='/tmp/rf.csv',
help='Path where CSV output of rf_benchmark was saved.')
parser.add_argument(
'--markdown_path',
type=str,
default='/tmp/rf.md',
help='Path where Markdown output will be saved.')
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
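# Example invocation (illustrative; the paths shown are the script defaults):
#   python csv_to_markdown_table.py --csv_path=/tmp/rf.csv --markdown_path=/tmp/rf.md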
|
tensorflow-master
|
tensorflow/contrib/receptive_field/python/util/examples/csv_to_markdown_table.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Computes Receptive Field (RF) information for different models.
The receptive field (and related parameters) for the different models are
printed to stdout, and may also optionally be written to a CSV file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import csv
import sys
from tensorflow.contrib import framework
from tensorflow.contrib import slim
from tensorflow.contrib.receptive_field import receptive_field_api as receptive_field
from tensorflow.python.client import session
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import app
from tensorflow.python.profiler import profiler
from nets import alexnet
from nets import inception
from nets import mobilenet_v1
from nets import resnet_v1
from nets import resnet_v2
from nets import vgg
cmd_args = None
# Input node name for all architectures.
_INPUT_NODE = 'input_image'
# Variants of different network architectures.
# - resnet: different versions and sizes.
_SUPPORTED_RESNET_VARIANTS = [
'resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152', 'resnet_v1_200',
'resnet_v2_50', 'resnet_v2_101', 'resnet_v2_152', 'resnet_v2_200'
]
# - inception_resnet_v2: default, and version with SAME padding.
_SUPPORTED_INCEPTIONRESNETV2_VARIANTS = [
'inception_resnet_v2', 'inception_resnet_v2-same'
]
# - inception_v2: default, and version with no separable conv.
_SUPPORTED_INCEPTIONV2_VARIANTS = [
'inception_v2', 'inception_v2-no-separable-conv'
]
# - inception_v3: default version.
_SUPPORTED_INCEPTIONV3_VARIANTS = ['inception_v3']
# - inception_v4: default version.
_SUPPORTED_INCEPTIONV4_VARIANTS = ['inception_v4']
# - alexnet_v2: default version.
_SUPPORTED_ALEXNETV2_VARIANTS = ['alexnet_v2']
# - vgg: vgg_a (with 11 layers) and vgg_16 (version D).
_SUPPORTED_VGG_VARIANTS = ['vgg_a', 'vgg_16']
# - mobilenet_v1: 100% and 75%.
_SUPPORTED_MOBILENETV1_VARIANTS = ['mobilenet_v1', 'mobilenet_v1_075']
def _construct_model(model_type='resnet_v1_50', placeholder_resolution=None):
"""Constructs model for the desired type of CNN.
Args:
model_type: Type of model to be used.
placeholder_resolution: Placeholder image resolution to use.
Returns:
end_points: A dictionary from components of the network to the corresponding
activations.
Raises:
ValueError: If the model_type is not supported.
"""
# Placeholder input.
images = array_ops.placeholder(
dtypes.float32,
shape=(1, placeholder_resolution, placeholder_resolution, 3),
name=_INPUT_NODE)
# Construct model.
if model_type == 'inception_resnet_v2':
_, end_points = inception.inception_resnet_v2_base(images)
elif model_type == 'inception_resnet_v2-same':
_, end_points = inception.inception_resnet_v2_base(
images, align_feature_maps=True)
elif model_type == 'inception_v2':
_, end_points = inception.inception_v2_base(images)
elif model_type == 'inception_v2-no-separable-conv':
_, end_points = inception.inception_v2_base(
images, use_separable_conv=False)
elif model_type == 'inception_v3':
_, end_points = inception.inception_v3_base(images)
elif model_type == 'inception_v4':
_, end_points = inception.inception_v4_base(images)
elif model_type == 'alexnet_v2':
_, end_points = alexnet.alexnet_v2(
images, num_classes=None, is_training=False, global_pool=False)
elif model_type == 'vgg_a':
_, end_points = vgg.vgg_a(
images, num_classes=None, is_training=False, global_pool=False)
elif model_type == 'vgg_16':
_, end_points = vgg.vgg_16(
images, num_classes=None, is_training=False, global_pool=False)
elif model_type == 'mobilenet_v1':
_, end_points = mobilenet_v1.mobilenet_v1_base(images)
elif model_type == 'mobilenet_v1_075':
_, end_points = mobilenet_v1.mobilenet_v1_base(
images, depth_multiplier=0.75)
elif model_type == 'resnet_v1_50':
_, end_points = resnet_v1.resnet_v1_50(
images, num_classes=None, is_training=False, global_pool=False)
elif model_type == 'resnet_v1_101':
_, end_points = resnet_v1.resnet_v1_101(
images, num_classes=None, is_training=False, global_pool=False)
elif model_type == 'resnet_v1_152':
_, end_points = resnet_v1.resnet_v1_152(
images, num_classes=None, is_training=False, global_pool=False)
elif model_type == 'resnet_v1_200':
_, end_points = resnet_v1.resnet_v1_200(
images, num_classes=None, is_training=False, global_pool=False)
elif model_type == 'resnet_v2_50':
_, end_points = resnet_v2.resnet_v2_50(
images, num_classes=None, is_training=False, global_pool=False)
elif model_type == 'resnet_v2_101':
_, end_points = resnet_v2.resnet_v2_101(
images, num_classes=None, is_training=False, global_pool=False)
elif model_type == 'resnet_v2_152':
_, end_points = resnet_v2.resnet_v2_152(
images, num_classes=None, is_training=False, global_pool=False)
elif model_type == 'resnet_v2_200':
_, end_points = resnet_v2.resnet_v2_200(
images, num_classes=None, is_training=False, global_pool=False)
else:
raise ValueError('Unsupported model_type %s.' % model_type)
return end_points
def _get_desired_end_point_keys(model_type='resnet_v1_50'):
"""Gets list of desired end point keys for a type of CNN.
Args:
model_type: Type of model to be used.
Returns:
desired_end_point_types: A list containing the desired end-points.
Raises:
ValueError: If the model_type is not supported.
"""
if model_type in _SUPPORTED_RESNET_VARIANTS:
blocks = ['block1', 'block2', 'block3', 'block4']
desired_end_point_keys = ['%s/%s' % (model_type, i) for i in blocks]
elif model_type in _SUPPORTED_INCEPTIONRESNETV2_VARIANTS:
desired_end_point_keys = [
'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'MaxPool_3a_3x3',
'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3', 'Mixed_5b',
'Mixed_6a', 'PreAuxLogits', 'Mixed_7a', 'Conv2d_7b_1x1'
]
elif model_type in _SUPPORTED_INCEPTIONV2_VARIANTS:
desired_end_point_keys = [
'Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3',
'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'Mixed_4a', 'Mixed_4b',
'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c'
]
elif model_type in _SUPPORTED_INCEPTIONV3_VARIANTS:
desired_end_point_keys = [
'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'MaxPool_3a_3x3',
'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3', 'Mixed_5b',
'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'
]
elif model_type in _SUPPORTED_INCEPTIONV4_VARIANTS:
desired_end_point_keys = [
'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'Mixed_3a',
'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d', 'Mixed_5e',
'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d', 'Mixed_6e', 'Mixed_6f',
'Mixed_6g', 'Mixed_6h', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c', 'Mixed_7d'
]
elif model_type in _SUPPORTED_ALEXNETV2_VARIANTS:
ep = ['conv1', 'pool1', 'conv2', 'conv3', 'conv4', 'conv5', 'pool5']
desired_end_point_keys = ['%s/%s' % (model_type, i) for i in ep]
elif model_type in _SUPPORTED_VGG_VARIANTS:
ep = [
'conv1/conv1_1', 'pool1', 'conv2/conv2_1', 'pool2', 'conv3/conv3_1',
'conv3/conv3_2', 'pool3', 'conv4/conv4_1', 'conv4/conv4_2', 'pool4',
'conv5/conv5_1', 'conv5/conv5_2', 'pool5'
]
desired_end_point_keys = ['%s/%s' % (model_type, i) for i in ep]
elif model_type in _SUPPORTED_MOBILENETV1_VARIANTS:
desired_end_point_keys = [
'Conv2d_0', 'Conv2d_1_pointwise', 'Conv2d_2_pointwise',
'Conv2d_3_pointwise', 'Conv2d_4_pointwise', 'Conv2d_5_pointwise',
'Conv2d_6_pointwise', 'Conv2d_7_pointwise', 'Conv2d_8_pointwise',
'Conv2d_9_pointwise', 'Conv2d_10_pointwise', 'Conv2d_11_pointwise',
'Conv2d_12_pointwise', 'Conv2d_13_pointwise'
]
else:
raise ValueError('Unsupported model_type %s.' % model_type)
return desired_end_point_keys
def _model_graph_def(model_type,
desired_end_point_keys,
placeholder_resolution=None,
arg_sc=None):
"""Constructs a model graph, returning GraphDef's and end-points.
Args:
model_type: Type of model to be used.
desired_end_point_keys: List of desired end points for which receptive field
information will be computed.
placeholder_resolution: Placeholder resolution to use when constructing the
graph.
arg_sc: Optional arg scope to use in constructing the graph.
Returns:
graphdefs: List of GraphDef's, one per desired end point.
end_points: A dictionary from components of the network to the corresponding
activations.
"""
if arg_sc is None:
arg_sc = {}
g = ops.Graph()
sess = session.Session(graph=g)
with g.as_default():
with framework.arg_scope(arg_sc):
end_points = _construct_model(model_type, placeholder_resolution)
sess.run(variables.global_variables_initializer())
# Produce a graphdef for each desired end point. While this is not required
# for receptive field computation, it helps to provide a better estimate of
# the number of floating point operations, since it removes initialization
# layers.
graphdefs = []
for desired_end_point_key in desired_end_point_keys:
end_point_node_name = end_points[desired_end_point_key].name.split(':')[0]
graphdefs.append(
graph_util.convert_variables_to_constants(sess, g.as_graph_def(),
[end_point_node_name]))
return graphdefs, end_points
def _model_rf_and_flops(graphdefs,
end_points,
desired_end_point_keys,
model_type='resnet_v1_50',
csv_writer=None,
input_resolution=None):
"""Computes receptive field and FLOPs for a given CNN model.
The information will be printed to stdout. If the RF parameters are the same
for the horizontal and vertical directions, it will be printed only once.
Otherwise, they are printed once for the horizontal and once for the vertical
directions.
Args:
graphdefs: List of GraphDef's, one per desired end point.
end_points: A dictionary from components of the model to the corresponding
activations.
desired_end_point_keys: List of desired end points for which receptive field
information will be computed.
model_type: Type of model, used only for printing purposes.
csv_writer: A CSV writer for RF parameters, which is used if it is not None.
input_resolution: Input resolution to use when computing RF parameters. This
is important for the case where padding can only be defined if the input
resolution is known, which may happen if using SAME padding. This is
assumed to be the resolution for both height and width. If None, the
resolution is considered unknown.
"""
# Configuration of profiler. Avoid verbose output.
profiler_options = profiler.ProfileOptionBuilder.float_operation()
profiler_options['output'] = 'file:outfile=/dev/null'
for i, desired_end_point_key in enumerate(desired_end_point_keys):
print('- %s:' % desired_end_point_key)
output_node_with_colon = end_points[desired_end_point_key].name
pos = output_node_with_colon.rfind(':')
output_node = output_node_with_colon[:pos]
try:
# Compute receptive field parameters.
(receptive_field_x, receptive_field_y, effective_stride_x,
effective_stride_y, effective_padding_x, effective_padding_y
) = receptive_field.compute_receptive_field_from_graph_def(
graphdefs[i],
_INPUT_NODE,
output_node,
input_resolution=input_resolution)
# Compute FLOPs. Can only be done if input resolution is known.
if input_resolution is None:
billion_flops_str = 'None'
else:
g = ops.Graph()
with g.as_default():
importer.import_graph_def(graphdefs[i], name='')
flops = profiler.profile(g, options=profiler_options)
billion_flops = flops.total_float_ops / 1e9
billion_flops_str = '%.3f' % billion_flops
# If values are the same in horizontal/vertical directions, just report
# one of them. Otherwise, report both.
if (receptive_field_x == receptive_field_y) and (
effective_stride_x == effective_stride_y) and (
effective_padding_x == effective_padding_y):
print('Receptive field size = %5s, effective stride = %5s, effective '
'padding = %5s, FLOPs (Billion) = %7s' %
(str(receptive_field_x), str(effective_stride_x),
str(effective_padding_x), billion_flops_str))
else:
print('Receptive field size: horizontal = %5s, vertical = %5s. '
'Effective stride: horizontal = %5s, vertical = %5s. Effective '
'padding: horizontal = %5s, vertical = %5s, '
'FLOPs (Billion) = %7s' %
(str(receptive_field_x), str(receptive_field_y),
str(effective_stride_x), str(effective_stride_y),
str(effective_padding_x), str(effective_padding_y),
billion_flops_str))
if csv_writer is not None:
csv_writer.writerow({
'CNN':
model_type,
'input resolution':
str(input_resolution[0])
if input_resolution is not None else 'None',
'end_point':
desired_end_point_key,
'FLOPs (Billion)':
billion_flops_str,
'RF size hor':
str(receptive_field_x),
'RF size ver':
str(receptive_field_y),
'effective stride hor':
str(effective_stride_x),
'effective stride ver':
str(effective_stride_y),
'effective padding hor':
str(effective_padding_x),
'effective padding ver':
str(effective_padding_y)
})
except ValueError as e:
print('---->ERROR: Computing RF parameters for model %s with final end '
'point %s and input resolution %s did not work' %
(model_type, desired_end_point_key, input_resolution))
print('---->The returned error is: %s' % e)
if csv_writer is not None:
csv_writer.writerow({
'CNN':
model_type,
'input resolution':
str(input_resolution[0])
if input_resolution is not None else 'None',
'end_point':
desired_end_point_key,
'FLOPs (Billion)':
'None',
'RF size hor':
'None',
'RF size ver':
'None',
'effective stride hor':
'None',
'effective stride ver':
'None',
'effective padding hor':
'None',
'effective padding ver':
'None'
})
def _process_model_rf_and_flops(model_type='resnet_v1_50',
csv_writer=None,
arg_sc=None,
input_resolutions=None):
"""Contructs model graph and desired end-points, and compute RF.
The computed RF parameters are printed to stdout by the _model_rf_and_flops
function.
Args:
model_type: Type of model to be used.
csv_writer: A CSV writer for RF parameters, which is used if it is not None.
arg_sc: Optional arg scope to use in constructing the graph.
input_resolutions: List of 1D input resolutions to use when computing RF
parameters. This is important for the case where padding can only be
defined if the input resolution is known, which may happen if using SAME
padding. Each entry in the list is assumed to be the resolution for both
height and width. If one of the elements in the list is None, we consider
it to mean that the resolution is unknown. If the list itself is None, we
use the default list [None, 224, 321].
"""
# Process default value for this list.
if input_resolutions is None:
input_resolutions = [None, 224, 321]
desired_end_point_keys = _get_desired_end_point_keys(model_type)
for n in input_resolutions:
print('********************%s, input resolution = %s' % (model_type, n))
graphdefs, end_points = _model_graph_def(model_type, desired_end_point_keys,
n, arg_sc)
_model_rf_and_flops(
graphdefs,
end_points,
desired_end_point_keys,
model_type,
csv_writer,
input_resolution=[n, n] if n is not None else None)
def _resnet_rf(csv_writer=None):
"""Computes RF and associated parameters for resnet models.
The computed values are written to stdout.
Args:
csv_writer: A CSV writer for RF parameters, which is used if it is not None.
"""
for model_type in _SUPPORTED_RESNET_VARIANTS:
arg_sc = resnet_v1.resnet_arg_scope()
_process_model_rf_and_flops(model_type, csv_writer, arg_sc)
def _inception_resnet_v2_rf(csv_writer=None):
"""Computes RF and associated parameters for the inception_resnet_v2 model.
The computed values are written to stdout.
Args:
csv_writer: A CSV writer for RF parameters, which is used if it is not None.
"""
for model_type in _SUPPORTED_INCEPTIONRESNETV2_VARIANTS:
_process_model_rf_and_flops(model_type, csv_writer)
def _inception_v2_rf(csv_writer=None):
"""Computes RF and associated parameters for the inception_v2 model.
The computed values are written to stdout.
Args:
csv_writer: A CSV writer for RF parameters, which is used if it is not None.
"""
for model_type in _SUPPORTED_INCEPTIONV2_VARIANTS:
_process_model_rf_and_flops(model_type, csv_writer)
def _inception_v3_rf(csv_writer=None):
"""Computes RF and associated parameters for the inception_v3 model.
The computed values are written to stdout.
Args:
csv_writer: A CSV writer for RF parameters, which is used if it is not None.
"""
for model_type in _SUPPORTED_INCEPTIONV3_VARIANTS:
_process_model_rf_and_flops(model_type, csv_writer)
def _inception_v4_rf(csv_writer=None):
"""Computes RF and associated parameters for the inception_v4 model.
The computed values are written to stdout.
Args:
csv_writer: A CSV writer for RF parameters, which is used if it is not None.
"""
for model_type in _SUPPORTED_INCEPTIONV4_VARIANTS:
_process_model_rf_and_flops(model_type, csv_writer)
def _alexnet_v2_rf(csv_writer=None):
"""Computes RF and associated parameters for the alexnet_v2 model.
The computed values are written to stdout.
Args:
csv_writer: A CSV writer for RF parameters, which is used if it is not None.
"""
for model_type in _SUPPORTED_ALEXNETV2_VARIANTS:
_process_model_rf_and_flops(model_type, csv_writer)
def _vgg_rf(csv_writer=None):
"""Computes RF and associated parameters for the vgg model.
The computed values are written to stdout.
Args:
csv_writer: A CSV writer for RF parameters, which is used if it is not None.
"""
for model_type in _SUPPORTED_VGG_VARIANTS:
_process_model_rf_and_flops(model_type, csv_writer)
def _mobilenet_v1_rf(csv_writer=None):
"""Computes RF and associated parameters for the mobilenet_v1 model.
The computed values are written to stdout.
Args:
csv_writer: A CSV writer for RF parameters, which is used if it is not None.
"""
for model_type in _SUPPORTED_MOBILENETV1_VARIANTS:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=False) as arg_sc:
_process_model_rf_and_flops(model_type, csv_writer, arg_sc)
def main(unused_argv):
# Configure CSV file which will be written, if desired.
if cmd_args.csv_path:
csv_file = open(cmd_args.csv_path, 'w')
field_names = [
'CNN', 'input resolution', 'end_point', 'FLOPs (Billion)',
'RF size hor', 'RF size ver', 'effective stride hor',
'effective stride ver', 'effective padding hor', 'effective padding ver'
]
rf_writer = csv.DictWriter(csv_file, fieldnames=field_names)
rf_writer.writeheader()
else:
rf_writer = None
# Compute RF parameters for each network architecture.
_alexnet_v2_rf(rf_writer)
_vgg_rf(rf_writer)
_inception_v2_rf(rf_writer)
_inception_v3_rf(rf_writer)
_inception_v4_rf(rf_writer)
_inception_resnet_v2_rf(rf_writer)
_mobilenet_v1_rf(rf_writer)
_resnet_rf(rf_writer)
# Close CSV file, if it was opened.
if cmd_args.csv_path:
csv_file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--csv_path',
type=str,
default='',
help="""\
Path to CSV file that will be written with RF parameters. If empty, no
file will be written.\
""")
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
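# Example invocation (illustrative; the CSV path is an arbitrary choice and
# matches the default input of csv_to_markdown_table.py):
#   python rf_benchmark.py --csv_path=/tmp/rf.csv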
|
tensorflow-master
|
tensorflow/contrib/receptive_field/python/util/examples/rf_benchmark.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple script to write Inception-ResNet-v2 model to graph file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import app
from nets import inception
cmd_args = None
def main(unused_argv):
# Model definition.
g = ops.Graph()
with g.as_default():
images = array_ops.placeholder(
dtypes.float32, shape=(1, None, None, 3), name='input_image')
inception.inception_resnet_v2_base(images)
graph_io.write_graph(g.as_graph_def(), cmd_args.graph_dir,
cmd_args.graph_filename)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--graph_dir',
type=str,
default='/tmp',
help='Directory where graph will be saved.')
parser.add_argument(
'--graph_filename',
type=str,
default='graph.pbtxt',
help='Filename of graph that will be saved.')
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
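# Example invocation (the flags and default values shown here are the ones
# defined above, so this call writes /tmp/graph.pbtxt):
#
#   python write_inception_resnet_v2_graph.py \
#     --graph_dir=/tmp --graph_filename=graph.pbtxt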
|
tensorflow-master
|
tensorflow/contrib/receptive_field/python/util/examples/write_inception_resnet_v2_graph.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Computes Receptive Field (RF) information given a graph protobuf."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from google.protobuf import text_format
from tensorflow.contrib.receptive_field import receptive_field_api as receptive_field
from tensorflow.core.framework import graph_pb2
from tensorflow.python.platform import app
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
cmd_args = None
def _load_graphdef(path):
"""Helper function to load GraphDef from file.
Args:
path: Path to pbtxt file.
Returns:
graph_def: A GraphDef object.
"""
graph_def = graph_pb2.GraphDef()
pbstr = gfile.Open(path).read()
text_format.Parse(pbstr, graph_def)
return graph_def
def main(unused_argv):
graph_def = _load_graphdef(cmd_args.graph_path)
(receptive_field_x, receptive_field_y, effective_stride_x, effective_stride_y,
effective_padding_x, effective_padding_y
) = receptive_field.compute_receptive_field_from_graph_def(
graph_def, cmd_args.input_node, cmd_args.output_node)
logging.info('Receptive field size (horizontal) = %s', receptive_field_x)
logging.info('Receptive field size (vertical) = %s', receptive_field_y)
logging.info('Effective stride (horizontal) = %s', effective_stride_x)
logging.info('Effective stride (vertical) = %s', effective_stride_y)
logging.info('Effective padding (horizontal) = %s', effective_padding_x)
logging.info('Effective padding (vertical) = %s', effective_padding_y)
f = gfile.GFile('%s' % cmd_args.output_path, 'w')
f.write('Receptive field size (horizontal) = %s\n' % receptive_field_x)
f.write('Receptive field size (vertical) = %s\n' % receptive_field_y)
f.write('Effective stride (horizontal) = %s\n' % effective_stride_x)
f.write('Effective stride (vertical) = %s\n' % effective_stride_y)
f.write('Effective padding (horizontal) = %s\n' % effective_padding_x)
f.write('Effective padding (vertical) = %s\n' % effective_padding_y)
f.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--graph_path', type=str, default='', help='Graph path (pbtxt format).')
parser.add_argument(
'--output_path',
type=str,
default='',
help='Path to output text file where RF information will be written to.')
parser.add_argument(
'--input_node', type=str, default='', help='Name of input node.')
parser.add_argument(
'--output_node', type=str, default='', help='Name of output node.')
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
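# Example invocation (the graph written by write_inception_resnet_v2_graph.py
# names its input placeholder 'input_image'; the output node is graph-specific,
# so the value below is only a placeholder):
#
#   python compute_rf.py \
#     --graph_path=/tmp/graph.pbtxt \
#     --input_node=input_image \
#     --output_node=<some-node-in-your-graph> \
#     --output_path=/tmp/rf_info.txt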
|
tensorflow-master
|
tensorflow/contrib/receptive_field/python/util/examples/compute_rf.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A time series library in TensorFlow (TFTS).
@@StructuralEnsembleRegressor
@@ARRegressor
@@ARModel
@@CSVReader
@@NumpyReader
@@RandomWindowInputFn
@@WholeDatasetInputFn
@@predict_continuation_input_fn
@@TrainEvalFeatures
@@FilteringResults
@@TimeSeriesRegressor
@@OneShotPredictionHead
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.timeseries.python.timeseries import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(module_name=__name__,
allowed_exception_list=['saved_model_utils'])
|
tensorflow-master
|
tensorflow/contrib/timeseries/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A time series library in TensorFlow (TFTS)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.timeseries.python.timeseries import *
# pylint: enable=wildcard-import
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for training and constructing time series Models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
# TODO(agarwal): Remove and replace with functionality from tf.slim
def fully_connected(inp,
inp_size,
layer_size,
name,
activation=nn_ops.relu,
dtype=dtypes.float32):
"""Helper method to create a fully connected hidden layer."""
wt = variable_scope.get_variable(
name="{}_weight".format(name), shape=[inp_size, layer_size], dtype=dtype)
bias = variable_scope.get_variable(
name="{}_bias".format(name),
shape=[layer_size],
initializer=init_ops.zeros_initializer())
output = nn_ops.xw_plus_b(inp, wt, bias)
if activation is not None:
assert callable(activation)
output = activation(output)
return output
def parameter_switch(parameter_overrides):
"""Create a function which chooses between overridden and model parameters.
Args:
parameter_overrides: A dictionary with explicit overrides of model
parameters, mapping from Tensors to their overridden values.
Returns:
A function which takes a Tensor and returns the override if it is specified,
or otherwise the evaluated value (given current Variable values).
"""
def get_passed_or_trained_value(parameter):
return ops.convert_to_tensor(
parameter_overrides.get(parameter, parameter)).eval()
return get_passed_or_trained_value
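# A minimal sketch (plain Python, illustrative only) of the override lookup
# above: it falls back to the parameter itself when no override is supplied.
# The real helper additionally converts the result to a Tensor and evaluates
# it; the parameter name used here is hypothetical.
def _parameter_switch_sketch():
  overrides = {"observation_noise": 0.5}
  def get_value(param):
    return overrides.get(param, param)
  assert get_value("observation_noise") == 0.5  # Overridden value is returned.
  assert get_value(1.25) == 1.25  # Non-overridden input falls through.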
def canonicalize_times_or_steps_from_output(times, steps,
previous_model_output):
"""Canonicalizes either relative or absolute times, with error checking."""
if steps is not None and times is not None:
raise ValueError("Only one of `steps` and `times` may be specified.")
if steps is None and times is None:
raise ValueError("One of `steps` and `times` must be specified.")
if times is not None:
times = numpy.array(times)
if len(times.shape) != 2:
times = times[None, ...]
if (previous_model_output[feature_keys.FilteringResults.TIMES].shape[0] !=
times.shape[0]):
raise ValueError(
("`times` must have a batch dimension matching"
" the previous model output (got a batch dimension of {} for `times`"
" and {} for the previous model output).").format(
times.shape[0], previous_model_output[
feature_keys.FilteringResults.TIMES].shape[0]))
if not (previous_model_output[feature_keys.FilteringResults.TIMES][:, -1] <
times[:, 0]).all():
raise ValueError("Prediction times must be after the corresponding "
"previous model output.")
if steps is not None:
predict_times = (
previous_model_output[feature_keys.FilteringResults.TIMES][:, -1:] + 1 +
numpy.arange(steps)[None, ...])
else:
predict_times = times
return predict_times
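# A minimal NumPy sketch (illustrative only, not used elsewhere) of the `steps`
# branch above: relative steps become absolute times continuing on from the
# last filtered time of each batch element.
def _canonicalize_steps_sketch():
  import numpy as np
  previous_times = np.array([[3, 4, 5],
                             [10, 11, 12]])  # FilteringResults.TIMES
  steps = 2
  predict_times = previous_times[:, -1:] + 1 + np.arange(steps)[None, ...]
  assert (predict_times == np.array([[6, 7], [13, 14]])).all()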
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/model_utils.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for testing time series models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.python.timeseries import estimators
from tensorflow.contrib.timeseries.python.timeseries import input_pipeline
from tensorflow.contrib.timeseries.python.timeseries import state_management
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import TrainEvalFeatures
from tensorflow.python.client import session
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import adam
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import coordinator as coordinator_lib
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.util import nest
class AllWindowInputFn(input_pipeline.TimeSeriesInputFn):
"""Returns all contiguous windows of data from a full dataset.
In contrast to WholeDatasetInputFn, which does basic shape checking but
maintains the flat sequencing of data, this `TimeSeriesInputFn` creates
batches of windows. However, unlike `RandomWindowInputFn` these windows are
deterministic, starting at every possible offset (i.e. batches of size
series_length - window_size + 1 are produced).
"""
def __init__(self, time_series_reader, window_size):
"""Initialize the input_pipeline.
Args:
time_series_reader: A `input_pipeline.TimeSeriesReader` object.
window_size: The size of contiguous windows of data to produce.
"""
self._window_size = window_size
self._reader = time_series_reader
super(AllWindowInputFn, self).__init__()
def create_batch(self):
features = self._reader.read_full()
times = features[TrainEvalFeatures.TIMES]
num_windows = array_ops.shape(times)[0] - self._window_size + 1
indices = array_ops.reshape(math_ops.range(num_windows), [num_windows, 1])
# indices contains the starting point for each window. We now extend these
# indices to include the elements inside the windows as well by doing a
# broadcast addition.
increments = array_ops.reshape(math_ops.range(self._window_size), [1, -1])
all_indices = array_ops.reshape(indices + increments, [-1])
# Select the appropriate elements in the batch and reshape the output to 3D.
features = {
key: array_ops.reshape(
array_ops.gather(value, all_indices),
array_ops.concat(
[[num_windows, self._window_size], array_ops.shape(value)[1:]],
axis=0))
for key, value in features.items()
}
return (features, None)
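# A minimal NumPy sketch (illustrative only, not used elsewhere) of the
# broadcast-addition indexing in `create_batch` above: window start indices
# plus within-window offsets yield every contiguous window of the series.
def _all_window_indices_sketch():
  import numpy as np
  series_length, window_size = 5, 3
  num_windows = series_length - window_size + 1
  indices = np.arange(num_windows).reshape([num_windows, 1])
  increments = np.arange(window_size).reshape([1, -1])
  all_indices = (indices + increments).reshape([-1])
  assert (all_indices == np.array([0, 1, 2, 1, 2, 3, 2, 3, 4])).all()
  values = np.arange(10, 15)  # A toy series of length 5.
  windows = values[all_indices].reshape([num_windows, window_size])
  assert (windows == np.array([[10, 11, 12],
                               [11, 12, 13],
                               [12, 13, 14]])).all()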
class _SavingTensorHook(basic_session_run_hooks.LoggingTensorHook):
"""A hook to save Tensors during training."""
def __init__(self, tensors, every_n_iter=None, every_n_secs=None):
self.tensor_values = {}
super(_SavingTensorHook, self).__init__(
tensors=tensors, every_n_iter=every_n_iter,
every_n_secs=every_n_secs)
def after_run(self, run_context, run_values):
del run_context
if self._should_trigger:
for tag in self._current_tensors.keys():
self.tensor_values[tag] = run_values.results[tag]
self._timer.update_last_triggered_step(self._iter_count)
self._iter_count += 1
def _train_on_generated_data(
generate_fn, generative_model, train_iterations, seed,
learning_rate=0.1, ignore_params_fn=lambda _: (),
derived_param_test_fn=lambda _: (),
train_input_fn_type=input_pipeline.WholeDatasetInputFn,
train_state_manager=state_management.PassthroughStateManager()):
"""The training portion of parameter recovery tests."""
random_seed.set_random_seed(seed)
generate_graph = ops.Graph()
with generate_graph.as_default():
with session.Session(graph=generate_graph):
generative_model.initialize_graph()
time_series_reader, true_parameters = generate_fn(generative_model)
true_parameters = {
tensor.name: value for tensor, value in true_parameters.items()}
eval_input_fn = input_pipeline.WholeDatasetInputFn(time_series_reader)
eval_state_manager = state_management.PassthroughStateManager()
true_parameter_eval_graph = ops.Graph()
with true_parameter_eval_graph.as_default():
generative_model.initialize_graph()
ignore_params = ignore_params_fn(generative_model)
feature_dict, _ = eval_input_fn()
eval_state_manager.initialize_graph(generative_model)
feature_dict[TrainEvalFeatures.VALUES] = math_ops.cast(
feature_dict[TrainEvalFeatures.VALUES], generative_model.dtype)
model_outputs = eval_state_manager.define_loss(
model=generative_model,
features=feature_dict,
mode=estimator_lib.ModeKeys.EVAL)
with session.Session(graph=true_parameter_eval_graph) as sess:
variables.global_variables_initializer().run()
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(sess, coord=coordinator)
true_param_loss = model_outputs.loss.eval(feed_dict=true_parameters)
true_transformed_params = {
param: param.eval(feed_dict=true_parameters)
for param in derived_param_test_fn(generative_model)}
coordinator.request_stop()
coordinator.join()
saving_hook = _SavingTensorHook(
tensors=true_parameters.keys(),
every_n_iter=train_iterations - 1)
class _RunConfig(estimator_lib.RunConfig):
@property
def tf_random_seed(self):
return seed
estimator = estimators.TimeSeriesRegressor(
model=generative_model,
config=_RunConfig(),
state_manager=train_state_manager,
optimizer=adam.AdamOptimizer(learning_rate))
train_input_fn = train_input_fn_type(time_series_reader=time_series_reader)
trained_loss = (estimator.train(
input_fn=train_input_fn,
max_steps=train_iterations,
hooks=[saving_hook]).evaluate(
input_fn=eval_input_fn, steps=1))["loss"]
logging.info("Final trained loss: %f", trained_loss)
logging.info("True parameter loss: %f", true_param_loss)
return (ignore_params, true_parameters, true_transformed_params,
trained_loss, true_param_loss, saving_hook,
true_parameter_eval_graph)
def test_parameter_recovery(
generate_fn, generative_model, train_iterations, test_case, seed,
learning_rate=0.1, rtol=0.2, atol=0.1, train_loss_tolerance_coeff=0.99,
ignore_params_fn=lambda _: (),
derived_param_test_fn=lambda _: (),
train_input_fn_type=input_pipeline.WholeDatasetInputFn,
train_state_manager=state_management.PassthroughStateManager()):
"""Test that a generative model fits generated data.
Args:
generate_fn: A function taking a model and returning a `TimeSeriesReader`
object and dictionary mapping parameters to their
values. model.initialize_graph() will have been called on the model
before it is passed to this function.
generative_model: A timeseries.model.TimeSeriesModel instance to test.
train_iterations: Number of training steps.
test_case: A tf.test.TestCase to run assertions on.
seed: Same as for TimeSeriesModel.unconditional_generate().
learning_rate: Step size for optimization.
rtol: Relative tolerance for tests.
atol: Absolute tolerance for tests.
train_loss_tolerance_coeff: Trained loss times this value must be less
than the loss evaluated using the generated parameters.
ignore_params_fn: Function mapping from a Model to a list of parameters
which are not tested for accurate recovery.
derived_param_test_fn: Function returning a list of derived parameters
(Tensors) which are checked for accurate recovery (comparing the value
evaluated with trained parameters to the value under the true
parameters).
As an example, for VARMA, in addition to checking AR and MA parameters,
this function can be used to also check lagged covariance. See
varma_ssm.py for details.
train_input_fn_type: The `TimeSeriesInputFn` type to use when training
(likely `WholeDatasetInputFn` or `RandomWindowInputFn`). If None, use
`WholeDatasetInputFn`.
train_state_manager: The state manager to use when training (likely
`PassthroughStateManager` or `ChainingStateManager`). If None, use
`PassthroughStateManager`.
"""
(ignore_params, true_parameters, true_transformed_params,
trained_loss, true_param_loss, saving_hook, true_parameter_eval_graph
) = _train_on_generated_data(
generate_fn=generate_fn, generative_model=generative_model,
train_iterations=train_iterations, seed=seed, learning_rate=learning_rate,
ignore_params_fn=ignore_params_fn,
derived_param_test_fn=derived_param_test_fn,
train_input_fn_type=train_input_fn_type,
train_state_manager=train_state_manager)
trained_parameter_substitutions = {}
for param in true_parameters.keys():
evaled_value = saving_hook.tensor_values[param]
trained_parameter_substitutions[param] = evaled_value
true_value = true_parameters[param]
logging.info("True %s: %s, learned: %s",
param, true_value, evaled_value)
with session.Session(graph=true_parameter_eval_graph):
for transformed_param, true_value in true_transformed_params.items():
trained_value = transformed_param.eval(
feed_dict=trained_parameter_substitutions)
logging.info("True %s [transformed parameter]: %s, learned: %s",
transformed_param, true_value, trained_value)
test_case.assertAllClose(true_value, trained_value,
rtol=rtol, atol=atol)
if ignore_params is None:
ignore_params = []
else:
ignore_params = nest.flatten(ignore_params)
ignore_params = [tensor.name for tensor in ignore_params]
if trained_loss > 0:
test_case.assertLess(trained_loss * train_loss_tolerance_coeff,
true_param_loss)
else:
test_case.assertLess(trained_loss / train_loss_tolerance_coeff,
true_param_loss)
for param in true_parameters.keys():
if param in ignore_params:
continue
evaled_value = saving_hook.tensor_values[param]
true_value = true_parameters[param]
test_case.assertAllClose(true_value, evaled_value,
rtol=rtol, atol=atol)
def parameter_recovery_dry_run(
generate_fn, generative_model, seed,
learning_rate=0.1,
train_input_fn_type=input_pipeline.WholeDatasetInputFn,
train_state_manager=state_management.PassthroughStateManager()):
"""Test that a generative model can train on generated data.
Args:
generate_fn: A function taking a model and returning a
`input_pipeline.TimeSeriesReader` object and a dictionary mapping
parameters to their values. model.initialize_graph() will have been
called on the model before it is passed to this function.
generative_model: A timeseries.model.TimeSeriesModel instance to test.
seed: Same as for TimeSeriesModel.unconditional_generate().
learning_rate: Step size for optimization.
train_input_fn_type: The type of `TimeSeriesInputFn` to use when training
(likely `WholeDatasetInputFn` or `RandomWindowInputFn`). If None, use
`WholeDatasetInputFn`.
train_state_manager: The state manager to use when training (likely
`PassthroughStateManager` or `ChainingStateManager`). If None, use
`PassthroughStateManager`.
"""
_train_on_generated_data(
generate_fn=generate_fn, generative_model=generative_model,
seed=seed, learning_rate=learning_rate,
train_input_fn_type=train_input_fn_type,
train_state_manager=train_state_manager,
train_iterations=2)
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/test_utils.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Miscellaneous utilities used by time series models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
from tensorflow.contrib import lookup
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import TrainEvalFeatures
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
def normal_log_prob(loc, scale, x):
"""Computes the Normal log pdf."""
z = (x - loc) / scale
return -0.5 * (math_ops.square(z)
+ np.log(2. * np.pi) + math_ops.log(scale))
def cauchy_log_prob(loc, scale, x):
"""Computes the Cauchy log pdf."""
z = (x - loc) / scale
return (-np.log(np.pi) - math_ops.log(scale) -
math_ops.log1p(math_ops.square(z)))
def mvn_tril_log_prob(loc, scale_tril, x):
"""Computes the MVN log pdf under tril scale. Doesn't handle batches."""
x0 = x - loc
z = linalg_ops.matrix_triangular_solve(
scale_tril, x0[..., array_ops.newaxis])[..., 0]
log_det_cov = 2. * math_ops.reduce_sum(math_ops.log(
array_ops.matrix_diag_part(scale_tril)), axis=-1)
d = math_ops.cast(array_ops.shape(scale_tril)[-1], log_det_cov.dtype)
return -0.5 * (math_ops.reduce_sum(math_ops.square(z), axis=-1)
+ d * np.log(2. * np.pi) + log_det_cov)
def clip_covariance(
covariance_matrix, maximum_variance_ratio, minimum_variance):
"""Enforce constraints on a covariance matrix to improve numerical stability.
Args:
covariance_matrix: A [..., N, N] batch of covariance matrices.
maximum_variance_ratio: The maximum allowed ratio of two diagonal
entries. Any entries lower than the maximum entry divided by this ratio
will be set to that value.
minimum_variance: A floor for diagonal entries in the returned matrix.
Returns:
A new covariance matrix with the requested constraints enforced. If the
input was positive definite, the output will be too.
"""
# TODO(allenl): Smarter scaling here so that correlations are preserved when
# fiddling with diagonal elements.
diagonal = array_ops.matrix_diag_part(covariance_matrix)
maximum = math_ops.reduce_max(diagonal, axis=-1, keepdims=True)
new_diagonal = gen_math_ops.maximum(
diagonal, maximum / maximum_variance_ratio)
return array_ops.matrix_set_diag(
covariance_matrix, math_ops.maximum(new_diagonal, minimum_variance))
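# A small NumPy sketch (illustrative only, not used elsewhere) of the diagonal
# clipping above: each variance is raised to at least
# max(diagonal) / maximum_variance_ratio and to at least minimum_variance.
def _clip_covariance_diagonal_sketch():
  import numpy as np
  diagonal = np.array([1e-6, 4., 0.3])
  maximum_variance_ratio, minimum_variance = 10., 0.5
  ratio_floor = np.maximum(diagonal, diagonal.max() / maximum_variance_ratio)
  clipped = np.maximum(ratio_floor, minimum_variance)
  assert np.allclose(clipped, [0.5, 4., 0.5])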
def block_diagonal(matrices, dtype=dtypes.float32, name="block_diagonal"):
r"""Constructs block-diagonal matrices from a list of batched 2D tensors.
Args:
matrices: A list of Tensors with shape [..., N_i, M_i] (i.e. a list of
matrices with the same batch dimension).
dtype: Data type to use. The Tensors in `matrices` must match this dtype.
name: A name for the returned op.
Returns:
A matrix with the input matrices stacked along its main diagonal, having
shape [..., \sum_i N_i, \sum_i M_i].
"""
matrices = [ops.convert_to_tensor(matrix, dtype=dtype) for matrix in matrices]
blocked_rows = tensor_shape.Dimension(0)
blocked_cols = tensor_shape.Dimension(0)
batch_shape = tensor_shape.TensorShape(None)
for matrix in matrices:
full_matrix_shape = matrix.get_shape().with_rank_at_least(2)
batch_shape = batch_shape.merge_with(full_matrix_shape[:-2])
blocked_rows += full_matrix_shape[-2]
blocked_cols += full_matrix_shape[-1]
ret_columns_list = []
for matrix in matrices:
matrix_shape = array_ops.shape(matrix)
ret_columns_list.append(matrix_shape[-1])
ret_columns = math_ops.add_n(ret_columns_list)
row_blocks = []
current_column = 0
for matrix in matrices:
matrix_shape = array_ops.shape(matrix)
row_before_length = current_column
current_column += matrix_shape[-1]
row_after_length = ret_columns - current_column
row_blocks.append(
array_ops.pad(
tensor=matrix,
paddings=array_ops.concat(
[
array_ops.zeros(
[array_ops.rank(matrix) - 1, 2], dtype=dtypes.int32), [(
row_before_length, row_after_length)]
],
axis=0)))
blocked = array_ops.concat(row_blocks, -2, name=name)
blocked.set_shape(batch_shape.concatenate((blocked_rows, blocked_cols)))
return blocked
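# A small NumPy sketch (illustrative only, non-batched) of the block-diagonal
# layout produced above: each [N_i, M_i] block is zero-padded to the full
# output width and the padded rows are concatenated.
def _block_diagonal_sketch():
  import numpy as np
  a = np.array([[1., 2.]])    # Shape [1, 2].
  b = np.array([[3.], [4.]])  # Shape [2, 1].
  rows = [np.pad(a, [(0, 0), (0, 1)], mode="constant"),
          np.pad(b, [(0, 0), (2, 0)], mode="constant")]
  blocked = np.concatenate(rows, axis=0)  # Shape [1 + 2, 2 + 1].
  assert (blocked == np.array([[1., 2., 0.],
                               [0., 0., 3.],
                               [0., 0., 4.]])).all()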
def power_sums_tensor(array_size, power_matrix, multiplier):
r"""Computes \sum_{i=0}^{N-1} A^i B (A^i)^T for N=0..(array_size + 1).
Args:
array_size: The number of non-trivial sums to pre-compute.
power_matrix: The "A" matrix above.
multiplier: The "B" matrix above
Returns:
A Tensor with S[N] = \sum_{i=0}^{N-1} A^i B (A^i)^T
S[0] is the zero matrix
S[1] is B
S[2] is A B A^T + B
...and so on
"""
array_size = math_ops.cast(array_size, dtypes.int32)
power_matrix = ops.convert_to_tensor(power_matrix)
identity_like_power_matrix = linalg_ops.eye(
array_ops.shape(power_matrix)[0], dtype=power_matrix.dtype)
identity_like_power_matrix.set_shape(
ops.convert_to_tensor(power_matrix).get_shape())
transition_powers = functional_ops.scan(
lambda previous_power, _: math_ops.matmul(previous_power, power_matrix),
math_ops.range(array_size - 1),
initializer=identity_like_power_matrix)
summed = math_ops.cumsum(
array_ops.concat([
array_ops.expand_dims(multiplier, 0), math_ops.matmul(
batch_times_matrix(transition_powers, multiplier),
transition_powers,
adjoint_b=True)
], 0))
return array_ops.concat(
[array_ops.expand_dims(array_ops.zeros_like(multiplier), 0), summed], 0)
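# A small NumPy reference (illustrative only, not used elsewhere) for the sums
# returned above: S[N] = \sum_{i=0}^{N-1} A^i B (A^i)^T, so S[0] = 0, S[1] = B,
# S[2] = B + A B A^T, and so on.
def _power_sums_reference_sketch():
  import numpy as np
  a = np.array([[1., 1.], [0., 1.]])  # The "A" (power) matrix.
  b = np.eye(2)                       # The "B" (multiplier) matrix.
  def power_sum(n):
    total = np.zeros_like(b)
    for i in range(n):
      a_i = np.linalg.matrix_power(a, i)
      total += a_i.dot(b).dot(a_i.T)
    return total
  assert np.allclose(power_sum(0), np.zeros([2, 2]))
  assert np.allclose(power_sum(1), b)
  assert np.allclose(power_sum(2), b + a.dot(b).dot(a.T))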
def matrix_to_powers(matrix, powers):
"""Raise a single matrix to multiple powers."""
matrix_tiled = array_ops.tile(
array_ops.expand_dims(matrix, 0), [array_ops.size(powers), 1, 1])
return batch_matrix_pow(matrix_tiled, powers)
def batch_matrix_pow(matrices, powers):
"""Compute powers of matrices, e.g. A^3 = matmul(matmul(A, A), A).
Uses exponentiation by squaring, with O(log(p)) matrix multiplications to
compute A^p.
Args:
matrices: [batch size x N x N]
powers: Which integer power to raise each matrix to [batch size]
Returns:
The matrices raised to their respective powers, same dimensions as the
"matrices" argument.
"""
def terminate_when_all_zero(current_argument, residual_powers, accumulator):
del current_argument, accumulator # not used for condition
do_exit = math_ops.reduce_any(
math_ops.greater(residual_powers, array_ops.ones_like(residual_powers)))
return do_exit
def do_iteration(current_argument, residual_powers, accumulator):
"""Compute one step of iterative exponentiation by squaring.
The recursive form is:
power(A, p) = { power(matmul(A, A), p / 2) for even p
{ matmul(A, power(matmul(A, A), (p - 1) / 2)) for odd p
power(A, 0) = I
The power(A, 0) = I case is handled by starting with accumulator set to the
identity matrix; matrices with zero residual powers are passed through
unchanged.
Args:
current_argument: On this step, what is the first argument (A^2..^2) to
the (unrolled) recursive function? [batch size x N x N]
residual_powers: On this step, what is the second argument (residual p)?
[batch_size]
accumulator: Accumulates the exterior multiplications from the odd
powers (initially the identity matrix). [batch_size x N x N]
Returns:
Updated versions of each argument for one step of the unrolled
computation. Does not change parts of the batch which have a residual
power of zero.
"""
is_even = math_ops.equal(residual_powers % 2,
array_ops.zeros(
array_ops.shape(residual_powers),
dtype=dtypes.int32))
new_accumulator = array_ops.where(is_even, accumulator,
math_ops.matmul(accumulator,
current_argument))
new_argument = math_ops.matmul(current_argument, current_argument)
do_update = math_ops.greater(residual_powers, 1)
new_residual_powers = residual_powers - residual_powers % 2
new_residual_powers //= 2
# Stop updating if we've reached our base case; some batch elements may
# finish sooner than others
accumulator = array_ops.where(do_update, new_accumulator, accumulator)
current_argument = array_ops.where(do_update, new_argument,
current_argument)
residual_powers = array_ops.where(do_update, new_residual_powers,
residual_powers)
return (current_argument, residual_powers, accumulator)
matrices = ops.convert_to_tensor(matrices)
powers = math_ops.cast(powers, dtype=dtypes.int32)
ident = array_ops.expand_dims(
array_ops.diag(
array_ops.ones([array_ops.shape(matrices)[1]], dtype=matrices.dtype)),
0)
ident_tiled = array_ops.tile(ident, [array_ops.shape(matrices)[0], 1, 1])
(final_argument,
final_residual_power, final_accumulator) = control_flow_ops.while_loop(
terminate_when_all_zero, do_iteration, [matrices, powers, ident_tiled])
return array_ops.where(
math_ops.equal(final_residual_power,
array_ops.zeros_like(
final_residual_power, dtype=dtypes.int32)),
ident_tiled, math_ops.matmul(final_argument, final_accumulator))
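# A scalar sketch (illustrative only, not used elsewhere) of the
# exponentiation-by-squaring recursion above; the real op applies the same idea
# to batches of matrices inside a while_loop.
def _exponentiation_by_squaring_sketch():
  def power(base, exponent):
    accumulator = 1  # Plays the role of the identity matrix.
    current = base
    while exponent > 0:
      if exponent % 2 == 1:
        accumulator *= current  # Fold odd residual powers into the accumulator.
      current *= current        # Square the argument.
      exponent //= 2
    return accumulator
  assert power(3, 0) == 1
  assert power(3, 5) == 243
  assert power(2, 10) == 1024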
# TODO(allenl): would be useful if this was built into batch_matmul
def batch_times_matrix(batch, matrix, adj_x=False, adj_y=False):
"""Multiply a batch of matrices by a single matrix.
Functionally equivalent to:
  tf.matmul(batch, array_ops.tile(array_ops.expand_dims(matrix, 0),
                                  [array_ops.shape(batch)[0], 1, 1]),
            adjoint_a=adj_x, adjoint_b=adj_y)
Args:
batch: [batch_size x N x M] after optional transpose
matrix: [M x P] after optional transpose
adj_x: If true, transpose the second two dimensions of "batch" before
multiplying.
adj_y: If true, transpose "matrix" before multiplying.
Returns:
[batch_size x N x P]
"""
batch = ops.convert_to_tensor(batch)
matrix = ops.convert_to_tensor(matrix)
assert batch.get_shape().ndims == 3
assert matrix.get_shape().ndims == 2
if adj_x:
batch = array_ops.transpose(batch, [0, 2, 1])
batch_dimension = batch.get_shape().dims[0].value
first_dimension = batch.get_shape().dims[1].value
tensor_batch_shape = array_ops.shape(batch)
if batch_dimension is None:
batch_dimension = tensor_batch_shape[0]
if first_dimension is None:
first_dimension = tensor_batch_shape[1]
matrix_first_dimension, matrix_second_dimension = matrix.get_shape().as_list()
batch_reshaped = array_ops.reshape(batch, [-1, tensor_batch_shape[2]])
if adj_y:
if matrix_first_dimension is None:
matrix_first_dimension = array_ops.shape(matrix)[0]
result_shape = [batch_dimension, first_dimension, matrix_first_dimension]
else:
if matrix_second_dimension is None:
matrix_second_dimension = array_ops.shape(matrix)[1]
result_shape = [batch_dimension, first_dimension, matrix_second_dimension]
return array_ops.reshape(
math_ops.matmul(batch_reshaped, matrix, adjoint_b=adj_y), result_shape)
def matrix_times_batch(matrix, batch, adj_x=False, adj_y=False):
"""Like batch_times_matrix, but with the multiplication order swapped."""
return array_ops.transpose(
batch_times_matrix(
batch=batch, matrix=matrix, adj_x=not adj_y, adj_y=not adj_x),
[0, 2, 1])
def make_toeplitz_matrix(inputs, name=None):
"""Make a symmetric Toeplitz matrix from input array of values.
Args:
inputs: a 3-D tensor of shape [num_blocks, block_size, block_size].
name: the name of the operation.
Returns:
a symmetric Toeplitz matrix of shape
[num_blocks*block_size, num_blocks*block_size].
"""
num_blocks = array_ops.shape(inputs)[0]
block_size = array_ops.shape(inputs)[1]
output_size = block_size * num_blocks
lags = array_ops.reshape(math_ops.range(num_blocks), shape=[1, -1])
indices = math_ops.abs(lags - array_ops.transpose(lags))
output = array_ops.gather(inputs, indices)
output = array_ops.reshape(
array_ops.transpose(output, [0, 2, 1, 3]), [output_size, output_size])
return array_ops.identity(output, name=name)
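# A small NumPy sketch (illustrative only, not used elsewhere) of the index
# pattern above: block (i, j) of the output is inputs[abs(i - j)], which is
# what produces the Toeplitz structure.
def _toeplitz_index_sketch():
  import numpy as np
  num_blocks = 3
  lags = np.arange(num_blocks).reshape([1, -1])
  indices = np.abs(lags - lags.T)
  assert (indices == np.array([[0, 1, 2],
                               [1, 0, 1],
                               [2, 1, 0]])).all()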
# TODO(allenl): Investigate alternative parameterizations.
def sign_magnitude_positive_definite(
raw, off_diagonal_scale=0., overall_scale=0.):
"""Constructs a positive definite matrix from an unconstrained input matrix.
We want to keep the whole matrix on a log scale, but also allow off-diagonal
elements to be negative, so the sign of off-diagonal elements is modeled
separately from their magnitude (using the lower and upper triangles
respectively). Specifically:
for i < j, we have:
output_cholesky[i, j] = raw[j, i] / (abs(raw[j, i]) + 1) *
exp((off_diagonal_scale + overall_scale + raw[i, j]) / 2)
output_cholesky[i, i] = exp((raw[i, i] + overall_scale) / 2)
output = output_cholesky^T * output_cholesky
where raw, off_diagonal_scale, and overall_scale are
un-constrained real-valued variables. The resulting values are stable
around zero due to the exponential (and the softsign keeps the function
smooth).
Args:
raw: A [..., M, M] Tensor.
off_diagonal_scale: A scalar or [...] shaped Tensor controlling the relative
scale of off-diagonal values in the output matrix.
overall_scale: A scalar or [...] shaped Tensor controlling the overall scale
of the output matrix.
Returns:
The `output` matrix described above, a [..., M, M] positive definite matrix.
"""
raw = ops.convert_to_tensor(raw)
diagonal = array_ops.matrix_diag_part(raw)
def _right_pad_with_ones(tensor, target_rank):
# Allow broadcasting even if overall_scale and off_diagonal_scale have batch
# dimensions
tensor = ops.convert_to_tensor(tensor, dtype=raw.dtype.base_dtype)
return array_ops.reshape(tensor,
array_ops.concat(
[
array_ops.shape(tensor), array_ops.ones(
[target_rank - array_ops.rank(tensor)],
dtype=target_rank.dtype)
],
axis=0))
# We divide the log values by 2 to compensate for the squaring that happens
# when transforming Cholesky factors into positive definite matrices.
sign_magnitude = (gen_math_ops.exp(
(raw + _right_pad_with_ones(off_diagonal_scale, array_ops.rank(raw)) +
_right_pad_with_ones(overall_scale, array_ops.rank(raw))) / 2.) *
nn.softsign(array_ops.matrix_transpose(raw)))
sign_magnitude.set_shape(raw.get_shape())
cholesky_factor = array_ops.matrix_set_diag(
input=array_ops.matrix_band_part(sign_magnitude, 0, -1),
diagonal=gen_math_ops.exp((diagonal + _right_pad_with_ones(
overall_scale, array_ops.rank(diagonal))) / 2.))
return math_ops.matmul(cholesky_factor, cholesky_factor, transpose_a=True)
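# A small NumPy sketch (illustrative only) of the parameterization above with
# off_diagonal_scale = overall_scale = 0: the upper triangle of `raw` supplies
# log-magnitudes, the lower triangle supplies signs through softsign, and
# squaring the resulting Cholesky factor yields a positive definite matrix.
def _sign_magnitude_sketch():
  import numpy as np
  raw = np.array([[0.2, -1.0],
                  [3.0, 0.5]])
  def softsign(x):
    return x / (np.abs(x) + 1.)
  cholesky = np.zeros([2, 2])
  cholesky[0, 0] = np.exp(raw[0, 0] / 2.)
  cholesky[1, 1] = np.exp(raw[1, 1] / 2.)
  cholesky[0, 1] = softsign(raw[1, 0]) * np.exp(raw[0, 1] / 2.)
  output = cholesky.T.dot(cholesky)
  assert np.all(np.linalg.eigvalsh(output) > 0.)  # Positive definite.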
def transform_to_covariance_matrices(input_vectors, matrix_size):
"""Construct covariance matrices via transformations from input_vectors.
Args:
input_vectors: A [batch size x input size] batch of vectors to transform.
matrix_size: An integer indicating one dimension of the (square) output
matrix.
Returns:
A [batch size x matrix_size x matrix_size] batch of covariance matrices.
"""
combined_values = layers.fully_connected(
input_vectors, matrix_size**2 + 2, activation_fn=None)
return sign_magnitude_positive_definite(
raw=array_ops.reshape(combined_values[..., :-2],
array_ops.concat([
array_ops.shape(combined_values)[:-1],
[matrix_size, matrix_size]
], 0)),
off_diagonal_scale=combined_values[..., -2],
overall_scale=combined_values[..., -1])
def variable_covariance_matrix(
size, name, dtype, initial_diagonal_values=None,
initial_overall_scale_log=0.):
"""Construct a Variable-parameterized positive definite matrix.
Useful for parameterizing covariance matrices.
Args:
size: The size of the main diagonal, the returned matrix having shape [size
x size].
name: The name to use when defining variables and ops.
dtype: The floating point data type to use.
initial_diagonal_values: A Tensor with shape [size] with initial values for
the diagonal values of the returned matrix. Must be positive.
initial_overall_scale_log: Initial value of the bias term for every element
of the matrix in log space.
Returns:
A Variable-parameterized covariance matrix with shape [size x size].
"""
raw_values = variable_scope.get_variable(
name + "_pre_transform",
dtype=dtype,
shape=[size, size],
initializer=init_ops.zeros_initializer())
if initial_diagonal_values is not None:
raw_values += array_ops.matrix_diag(math_ops.log(initial_diagonal_values))
return array_ops.identity(
sign_magnitude_positive_definite(
raw=raw_values,
off_diagonal_scale=variable_scope.get_variable(
name + "_off_diagonal_scale",
dtype=dtype,
initializer=constant_op.constant(-5., dtype=dtype)),
overall_scale=ops.convert_to_tensor(
initial_overall_scale_log, dtype=dtype) +
variable_scope.get_variable(
name + "_overall_scale",
dtype=dtype,
shape=[],
initializer=init_ops.zeros_initializer())),
name=name)
def batch_start_time(times):
return times[:, 0]
def batch_end_time(times):
return times[:, -1]
def log_noninformative_covariance_prior(covariance):
"""Compute a relatively uninformative prior for noise parameters.
Helpful for avoiding noise over-estimation, where noise otherwise decreases
very slowly during optimization.
See:
Villegas, C. On the A Priori Distribution of the Covariance Matrix.
Ann. Math. Statist. 40 (1969), no. 3, 1098--1099.
Args:
covariance: A covariance matrix.
Returns:
For a [p x p] matrix:
log(det(covariance)^(-(p + 1) / 2))
"""
# Avoid zero/negative determinants due to numerical errors
covariance += array_ops.diag(1e-8 * array_ops.ones(
shape=[array_ops.shape(covariance)[0]], dtype=covariance.dtype))
power = -(math_ops.cast(array_ops.shape(covariance)[0] + 1,
covariance.dtype) / 2.)
return power * math_ops.log(linalg_ops.matrix_determinant(covariance))
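# A small NumPy sketch (illustrative only; the diagonal jitter above is
# omitted) of the prior: log(det(covariance) ** (-(p + 1) / 2)) for a p x p
# covariance.
def _noninformative_prior_sketch():
  import numpy as np
  covariance = np.diag([2., 4.])  # p = 2, determinant = 8.
  p = covariance.shape[0]
  log_prior = -(p + 1) / 2. * np.log(np.linalg.det(covariance))
  assert np.isclose(log_prior, -1.5 * np.log(8.))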
def entropy_matched_cauchy_scale(covariance):
"""Approximates a similar Cauchy distribution given a covariance matrix.
Since Cauchy distributions do not have moments, entropy matching provides one
way to set a Cauchy's scale parameter in a way that provides a similar
distribution. The effect is dividing the standard deviation of an independent
Gaussian by a constant very near 3.
To set the scale of the Cauchy distribution, we first select the diagonals of
`covariance`. Since this ignores cross terms, it overestimates the entropy of
the Gaussian. For each of these variances, we solve for the Cauchy scale
parameter which gives the same entropy as the Gaussian with that
variance. This means setting the (univariate) Gaussian entropy
0.5 * ln(2 * variance * pi * e)
equal to the Cauchy entropy
ln(4 * pi * scale)
Solving, we get scale = sqrt(variance * (e / (8 pi))).
Args:
covariance: A [batch size x N x N] batch of covariance matrices to produce
Cauchy scales for.
Returns:
A [batch size x N] set of Cauchy scale parameters for each part of the batch
and each dimension of the input Gaussians.
"""
return math_ops.sqrt(math.e / (8. * math.pi) *
array_ops.matrix_diag_part(covariance))
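# A numeric check (illustrative only, not used elsewhere) of the
# entropy-matching identity above: with scale = sqrt(variance * e / (8 * pi)),
# the Cauchy entropy ln(4 * pi * scale) equals the Gaussian entropy
# 0.5 * ln(2 * pi * e * variance).
def _entropy_matching_check_sketch():
  import math
  variance = 2.5
  scale = math.sqrt(variance * math.e / (8. * math.pi))
  gaussian_entropy = 0.5 * math.log(2. * math.pi * math.e * variance)
  cauchy_entropy = math.log(4. * math.pi * scale)
  assert abs(gaussian_entropy - cauchy_entropy) < 1e-12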
class TensorValuedMutableDenseHashTable(lookup.MutableDenseHashTable):
"""A version of MutableDenseHashTable which stores arbitrary Tensor shapes.
  Since MutableDenseHashTable currently only supports vector values, this class
  simply adds reshape ops on both ends.
"""
def __init__(self, key_dtype, value_dtype, default_value, *args, **kwargs):
self._non_vector_value_shape = array_ops.shape(default_value)
super(TensorValuedMutableDenseHashTable, self).__init__(
key_dtype=key_dtype,
value_dtype=value_dtype,
default_value=array_ops.reshape(default_value, [-1]),
*args,
**kwargs)
def insert(self, keys, values, name=None):
keys = ops.convert_to_tensor(keys, dtype=self._key_dtype)
keys_flat = array_ops.reshape(keys, [-1])
return super(TensorValuedMutableDenseHashTable, self).insert(
keys=keys_flat,
# Each key has one corresponding value, so the shape of the tensor of
# values for every key is key_shape + value_shape
values=array_ops.reshape(values, [array_ops.shape(keys_flat)[0], -1]),
name=name)
def lookup(self, keys, name=None):
keys_flat = array_ops.reshape(
ops.convert_to_tensor(keys, dtype=self._key_dtype), [-1])
return array_ops.reshape(
super(TensorValuedMutableDenseHashTable, self).lookup(
keys=keys_flat, name=name),
array_ops.concat([array_ops.shape(keys), self._non_vector_value_shape],
0))
class TupleOfTensorsLookup(lookup.LookupInterface):
"""A LookupInterface with nested tuples of Tensors as values.
Creates one MutableDenseHashTable per value Tensor, which has some unnecessary
overhead.
"""
def __init__(self,
key_dtype,
default_values,
empty_key,
deleted_key,
name,
checkpoint=True):
default_values_flat = nest.flatten(default_values)
self._hash_tables = nest.pack_sequence_as(default_values, [
TensorValuedMutableDenseHashTable(
key_dtype=key_dtype,
value_dtype=default_value.dtype.base_dtype,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
name=name + "_{}".format(table_number),
checkpoint=checkpoint)
for table_number, default_value in enumerate(default_values_flat)
])
self._name = name
def lookup(self, keys):
return nest.pack_sequence_as(
self._hash_tables,
[hash_table.lookup(keys)
for hash_table in nest.flatten(self._hash_tables)])
def insert(self, keys, values):
nest.assert_same_structure(self._hash_tables, values)
# Avoid race conditions by requiring that all inputs are computed before any
# inserts happen (an issue if one key's update relies on another's value).
values_flat = [array_ops.identity(value) for value in nest.flatten(values)]
with ops.control_dependencies(values_flat):
insert_ops = [hash_table.insert(keys, value)
for hash_table, value
in zip(nest.flatten(self._hash_tables),
values_flat)]
return control_flow_ops.group(*insert_ops)
def check_table_dtypes(self, key_dtype, value_dtype):
# dtype checking is done in the objects in self._hash_tables
pass
def replicate_state(start_state, batch_size):
"""Create batch versions of state.
Takes a list of Tensors, adds a batch dimension, and replicates
batch_size times across that batch dimension. Used to replicate the
non-batch state returned by get_start_state in define_loss.
Args:
start_state: Model-defined state to replicate.
batch_size: Batch dimension for data.
Returns:
Replicated versions of the state.
"""
flattened_state = nest.flatten(start_state)
replicated_state = [
array_ops.tile(
array_ops.expand_dims(state_nonbatch, 0),
array_ops.concat([[batch_size], array_ops.ones(
[array_ops.rank(state_nonbatch)], dtype=dtypes.int32)], 0))
for state_nonbatch in flattened_state
]
return nest.pack_sequence_as(start_state, replicated_state)
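# A small NumPy sketch (illustrative only, not used elsewhere) of the batching
# above: a non-batch state vector gains a leading batch dimension and is
# repeated batch_size times.
def _replicate_state_sketch():
  import numpy as np
  state_nonbatch = np.array([1., 2., 3.])  # Shape [3].
  batch_size = 2
  replicated = np.tile(state_nonbatch[None, ...], [batch_size, 1])
  assert replicated.shape == (2, 3)
  assert (replicated == np.array([[1., 2., 3.],
                                  [1., 2., 3.]])).all()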
Moments = collections.namedtuple("Moments", ["mean", "variance"])
# Currently all of these statistics are computed incrementally (i.e. are updated
# every time a new mini-batch of training data is presented) when this object is
# created in InputStatisticsFromMiniBatch.
InputStatistics = collections.namedtuple(
"InputStatistics",
["series_start_moments", # The mean and variance of each feature in a chunk
# (with a size configured in the statistics
# object) at the start of the series. A tuple of
# (mean, variance), each with shape [number of
# features], floating point. One use is in state
# space models, to keep priors calibrated even as
# earlier parts of the series are presented. If
# this object was created by
# InputStatisticsFromMiniBatch, these moments are
# computed based on the earliest chunk of data
# presented so far. However, there is a race
# condition in the update, so these may reflect
# statistics later in the series, but should
# eventually reflect statistics in a chunk at the
# series start.
"overall_feature_moments", # The mean and variance of each feature over
# the entire series. A tuple of (mean,
# variance), each with shape [number of
# features]. If this object was created by
# InputStatisticsFromMiniBatch, these moments
# are estimates based on the data seen so far.
"start_time", # The first (lowest) time in the series, a scalar
# integer. If this object was created by
# InputStatisticsFromMiniBatch, this is the lowest time seen
# so far rather than the lowest time that will ever be seen
# (guaranteed to be at least as low as the lowest time
# presented in the current minibatch).
"total_observation_count", # Count of data points, a scalar integer. If
# this object was created by
# InputStatisticsFromMiniBatch, this is an
# estimate of the total number of observations
# in the whole dataset computed based on the
# density of the series and the minimum and
# maximum times seen.
])
# TODO(allenl): It would be nice to do something with full series statistics
# when the user provides that.
class InputStatisticsFromMiniBatch(object):
"""Generate statistics from mini-batch input."""
def __init__(self, num_features, dtype, starting_variance_window_size=16):
"""Configure the input statistics object.
Args:
num_features: Number of features for the time series
dtype: The floating point data type to use.
starting_variance_window_size: The number of datapoints to use when
computing the mean and variance at the start of the series.
"""
self._starting_variance_window_size = starting_variance_window_size
self._num_features = num_features
self._dtype = dtype
def initialize_graph(self, features, update_statistics=True):
"""Create any ops needed to provide input statistics.
Should be called before statistics are requested.
Args:
features: A dictionary, the output of a `TimeSeriesInputFn` (with keys
TrainEvalFeatures.TIMES and TrainEvalFeatures.VALUES).
update_statistics: Whether `features` should be used to update adaptive
statistics. Typically True for training and false for evaluation.
Returns:
An InputStatistics object composed of Variables, which will be updated
based on mini-batches of data if requested.
"""
if (TrainEvalFeatures.TIMES in features
and TrainEvalFeatures.VALUES in features):
times = features[TrainEvalFeatures.TIMES]
values = features[TrainEvalFeatures.VALUES]
else:
# times and values may not be available, for example during prediction. We
# still need to retrieve our variables so that they can be read from, even
# if we're not going to update them.
times = None
values = None
# Create/retrieve variables representing input statistics, initialized
# without data to avoid deadlocking if variables are initialized before
# queue runners are started.
with variable_scope.variable_scope("input_statistics", use_resource=True):
statistics = self._create_variable_statistics_object()
with variable_scope.variable_scope(
"input_statistics_auxiliary", use_resource=True):
# Secondary statistics, necessary for the incremental computation of the
# primary statistics (e.g. counts and sums for computing a mean
# incrementally).
auxiliary_variables = self._AdaptiveInputAuxiliaryStatistics(
num_features=self._num_features, dtype=self._dtype)
if update_statistics and times is not None and values is not None:
# If we have times and values from mini-batch input, create update ops to
# take the new data into account.
assign_op = self._update_statistics_from_mini_batch(
statistics, auxiliary_variables, times, values)
with ops.control_dependencies([assign_op]):
stat_variables = nest.pack_sequence_as(statistics, [
array_ops.identity(tensor) for tensor in nest.flatten(statistics)
])
# Since start time updates have a race condition, ensure that the
# reported start time is at least as low as the lowest time in this
# mini-batch. The start time should converge on the correct value
# eventually even with the race condition, but for example state space
# models have an assertion which could fail without this
# post-processing.
return stat_variables._replace(start_time=gen_math_ops.minimum(
stat_variables.start_time, math_ops.reduce_min(times)))
else:
return statistics
class _AdaptiveInputAuxiliaryStatistics(collections.namedtuple(
"_AdaptiveInputAuxiliaryStatistics",
["max_time_seen", # The maximum time seen (best effort if updated from
# multiple workers; see notes about race condition
# below).
"chunk_count", # The number of chunks seen.
"inter_observation_duration_sum", # The sum across chunks of their "time
# density" (number of times per
# example).
"example_count", # The number of examples seen (each example has a
# single time associated with it and one or more
# real-valued features).
"overall_feature_sum", # The sum of values for each feature. Shape
# [number of features].
"overall_feature_sum_of_squares", # The sum of squared values for each
# feature. Shape [number of features]
])):
"""Extra statistics used to incrementally update InputStatistics."""
def __new__(cls, num_features, dtype):
return super(
InputStatisticsFromMiniBatch # pylint: disable=protected-access
._AdaptiveInputAuxiliaryStatistics,
cls).__new__(
cls,
max_time_seen=variable_scope.get_variable(
name="max_time_seen",
initializer=dtypes.int64.min,
dtype=dtypes.int64,
trainable=False),
chunk_count=variable_scope.get_variable(
name="chunk_count",
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtypes.int64,
trainable=False),
inter_observation_duration_sum=variable_scope.get_variable(
name="inter_observation_duration_sum",
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtype,
trainable=False),
example_count=variable_scope.get_variable(
name="example_count",
shape=[],
dtype=dtypes.int64,
trainable=False),
overall_feature_sum=variable_scope.get_variable(
name="overall_feature_sum",
shape=[num_features],
dtype=dtype,
initializer=init_ops.zeros_initializer(),
trainable=False),
overall_feature_sum_of_squares=variable_scope.get_variable(
name="overall_feature_sum_of_squares",
shape=[num_features],
dtype=dtype,
initializer=init_ops.zeros_initializer(),
trainable=False))
def _update_statistics_from_mini_batch(
self, statistics, auxiliary_variables, times, values):
"""Given mini-batch input, update `statistics` and `auxiliary_variables`."""
values = math_ops.cast(values, self._dtype)
# The density (measured in times per observation) that we see in each part
# of the mini-batch.
batch_inter_observation_duration = (math_ops.cast(
math_ops.reduce_max(times, axis=1) - math_ops.reduce_min(times, axis=1),
self._dtype) / math_ops.cast(
array_ops.shape(times)[1] - 1, self._dtype))
# Co-locate updates with their variables to minimize race conditions when
# updating statistics.
with ops.device(auxiliary_variables.max_time_seen.device):
# There is a race condition if this value is being updated from multiple
# workers. However, it should eventually reach the correct value if the
# last chunk is presented enough times.
max_time_seen_assign = state_ops.assign(
auxiliary_variables.max_time_seen,
gen_math_ops.maximum(auxiliary_variables.max_time_seen,
math_ops.reduce_max(times)))
with ops.device(auxiliary_variables.chunk_count.device):
chunk_count_assign = state_ops.assign_add(auxiliary_variables.chunk_count,
array_ops.shape(
times,
out_type=dtypes.int64)[0])
with ops.device(auxiliary_variables.inter_observation_duration_sum.device):
inter_observation_duration_assign = state_ops.assign_add(
auxiliary_variables.inter_observation_duration_sum,
math_ops.reduce_sum(batch_inter_observation_duration))
with ops.device(auxiliary_variables.example_count.device):
example_count_assign = state_ops.assign_add(
auxiliary_variables.example_count,
array_ops.size(times, out_type=dtypes.int64))
# Note: These mean/variance updates assume that all points are equally
# likely, which is not true if _chunks_ are sampled uniformly from the space
# of all possible contiguous chunks, since points at the start and end of
# the series are then members of fewer chunks. For series which are much
# longer than the chunk size (the usual/expected case), this effect becomes
# irrelevant.
with ops.device(auxiliary_variables.overall_feature_sum.device):
overall_feature_sum_assign = state_ops.assign_add(
auxiliary_variables.overall_feature_sum,
math_ops.reduce_sum(values, axis=[0, 1]))
with ops.device(auxiliary_variables.overall_feature_sum_of_squares.device):
overall_feature_sum_of_squares_assign = state_ops.assign_add(
auxiliary_variables.overall_feature_sum_of_squares,
math_ops.reduce_sum(values**2, axis=[0, 1]))
per_chunk_aux_updates = control_flow_ops.group(
max_time_seen_assign, chunk_count_assign,
inter_observation_duration_assign, example_count_assign,
overall_feature_sum_assign, overall_feature_sum_of_squares_assign)
with ops.control_dependencies([per_chunk_aux_updates]):
example_count_float = math_ops.cast(auxiliary_variables.example_count,
self._dtype)
new_feature_mean = (auxiliary_variables.overall_feature_sum /
example_count_float)
overall_feature_mean_update = state_ops.assign(
statistics.overall_feature_moments.mean, new_feature_mean)
overall_feature_var_update = state_ops.assign(
statistics.overall_feature_moments.variance,
# De-biased n / (n - 1) variance correction
example_count_float / (example_count_float - 1.) *
(auxiliary_variables.overall_feature_sum_of_squares /
example_count_float - new_feature_mean**2))
# TODO(b/35675805): Remove this cast
min_time_batch = math_ops.cast(math_ops.argmin(times[:, 0]), dtypes.int32)
def series_start_updates():
# If this is the lowest-time chunk that we have seen so far, update
# series start moments to reflect that. Note that these statistics are
# "best effort", as there are race conditions in the update (however,
# they should eventually converge if the start of the series is
# presented enough times).
mean, variance = nn.moments(
values[min_time_batch, :self._starting_variance_window_size],
axes=[0])
return control_flow_ops.group(
state_ops.assign(statistics.series_start_moments.mean, mean),
state_ops.assign(statistics.series_start_moments.variance,
variance))
with ops.device(statistics.start_time.device):
series_start_update = control_flow_ops.cond(
# Update moments whenever we even match the lowest time seen so far,
# to ensure that series start statistics are eventually updated to
# their correct values, despite race conditions (i.e. eventually
# statistics.start_time will reflect the global lowest time, and
# given that we will eventually update the series start moments to
# their correct values).
math_ops.less_equal(times[min_time_batch, 0],
statistics.start_time),
series_start_updates,
control_flow_ops.no_op)
with ops.control_dependencies([series_start_update]):
# There is a race condition if this update is performed in parallel on
# multiple workers. Since models may be sensitive to being presented
# with times before the putative start time, the value of this
# variable is post-processed above to guarantee that each worker is
# presented with a start time which is at least as low as the lowest
# time in its current mini-batch.
start_time_update = state_ops.assign(statistics.start_time,
gen_math_ops.minimum(
statistics.start_time,
math_ops.reduce_min(times)))
inter_observation_duration_estimate = (
auxiliary_variables.inter_observation_duration_sum / math_ops.cast(
auxiliary_variables.chunk_count, self._dtype))
      # Estimate the total number of observations as:
      #   (end time - start time + 1) / average inter-observation duration
total_observation_count_update = state_ops.assign(
statistics.total_observation_count,
math_ops.cast(
gen_math_ops.round(
math_ops.cast(max_time_seen_assign -
start_time_update + 1, self._dtype) /
inter_observation_duration_estimate), dtypes.int64))
per_chunk_stat_updates = control_flow_ops.group(
overall_feature_mean_update, overall_feature_var_update,
series_start_update, start_time_update,
total_observation_count_update)
return per_chunk_stat_updates
def _create_variable_statistics_object(self):
"""Creates non-trainable variables representing input statistics."""
series_start_moments = Moments(
mean=variable_scope.get_variable(
name="series_start_mean",
shape=[self._num_features],
dtype=self._dtype,
initializer=init_ops.zeros_initializer(),
trainable=False),
variance=variable_scope.get_variable(
name="series_start_variance",
shape=[self._num_features],
dtype=self._dtype,
initializer=init_ops.ones_initializer(),
trainable=False))
overall_feature_moments = Moments(
mean=variable_scope.get_variable(
name="overall_feature_mean",
shape=[self._num_features],
dtype=self._dtype,
initializer=init_ops.zeros_initializer(),
trainable=False),
variance=variable_scope.get_variable(
name="overall_feature_var",
shape=[self._num_features],
dtype=self._dtype,
initializer=init_ops.ones_initializer(),
trainable=False))
start_time = variable_scope.get_variable(
name="start_time",
dtype=dtypes.int64,
initializer=dtypes.int64.max,
trainable=False)
total_observation_count = variable_scope.get_variable(
name="total_observation_count",
shape=[],
dtype=dtypes.int64,
initializer=init_ops.ones_initializer(),
trainable=False)
return InputStatistics(
series_start_moments=series_start_moments,
overall_feature_moments=overall_feature_moments,
start_time=start_time,
total_observation_count=total_observation_count)
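# A minimal illustrative sketch (hypothetical helper, not part of the
# TensorFlow API): the same running-sum mean and de-biased n / (n - 1)
# variance correction used in the statistics update above, recomputed with
# plain numpy so the arithmetic is easy to check.
def _example_debiased_variance_from_sums():
  """Recompute the streaming mean/variance update with plain numpy."""
  import numpy as np  # Local import; this sketch is standalone.
  values = np.array([[1.0], [2.0], [4.0], [7.0]])  # [example count, features]
  example_count = float(values.shape[0])
  overall_feature_sum = values.sum(axis=0)
  overall_feature_sum_of_squares = (values ** 2).sum(axis=0)
  mean = overall_feature_sum / example_count
  # Population (biased) variance from the running sums, then the n / (n - 1)
  # correction applied above to de-bias it.
  biased_variance = overall_feature_sum_of_squares / example_count - mean ** 2
  variance = example_count / (example_count - 1.) * biased_variance
  assert np.allclose(variance, values.var(axis=0, ddof=1))  # Sample variance.
  return mean, variance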
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/math_utils.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines ways of splicing and re-arranging time series.
This file provides methods for reading, parsing, and re-arranging a time
series. The main departure from standard TensorFlow input pipelines is a focus
on "chunking" a time series, i.e. slicing it into small contiguous windows which
are then batched together for training, a form of truncated
backpropagation. This typically provides a significant speedup compared to
looping over the whole series sequentially, by exploiting data parallelism and
by reducing redundant contributions to gradients (due to redundant information
in the series itself).
A series, consisting of times (an increasing vector of integers) and values (one
or more floating point values for each time) along with any exogenous features,
is stored either in memory or on disk in various formats (e.g. "one record per
timestep" on disk, or as a dictionary of Numpy arrays in memory). The location
and format is specified by configuring a `TimeSeriesReader` object
(e.g. `NumpyReader`, `CSVReader`), which reads the data into the TensorFlow
graph. A `TimeSeriesInputFn` object (typically `RandomWindowInputFn`) then
performs windowing and batching.
Time series are passed through this pipeline as dictionaries mapping feature
names to their values. For training and evaluation, these require at minimum
`TrainEvalFeatures.TIMES` (scalar integers, one per timestep) and
`TrainEvalFeatures.VALUES` (may be either univariate or multivariate). Exogenous
features may have any shape, but are likewise associated with a timestep. Times
themselves need not be contiguous or regular (although smaller/fewer gaps are
generally better), but each timestep must have all `VALUES` and any exogenous
features (i.e. times may be missing, but given that a time is specified, every
other feature must also be specified for that step; some models may support
making exogenous updates conditional).
The expected use case of a `TimeSeriesInputFn` is that it is first configured
(for example setting a batch or window size) and passed a reader (a
`TimeSeriesReader` object). The `TimeSeriesInputFn` can then be passed as the
input_fn of an Estimator.
For example, `RandomWindowInputFn` is useful for creating batches of random
chunks of a series for training:
```
# Read data in the default "time,value" CSV format with no header
reader = input_pipeline.CSVReader(csv_file_name)
# Set up windowing and batching for training
train_input_fn = input_pipeline.RandomWindowInputFn(
reader, batch_size=16, window_size=16)
# Fit model parameters to data
estimator.train(input_fn=train_input_fn, steps=150)
```
`RandomWindowInputFn` is the primary tool for training and quantitative
evaluation of time series. `WholeDatasetInputFn`, which reads a whole series
into memory, is useful for qualitative evaluation and preparing to make
predictions with `predict_continuation_input_fn`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.contrib.timeseries.python.timeseries import model_utils
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import training
from tensorflow.python.util import nest
def predict_continuation_input_fn(evaluation,
steps=None,
times=None,
exogenous_features=None):
"""An Estimator input_fn for running predict() after evaluate().
  If the evaluate() call that these predictions are based on used a batch_size
  greater than one, predictions will start after each of those windows
  (i.e. the predictions will have the same batch dimension).
Args:
evaluation: The dictionary returned by `Estimator.evaluate`, with keys
FilteringResults.STATE_TUPLE and FilteringResults.TIMES.
steps: The number of steps to predict (scalar), starting after the
evaluation. If `times` is specified, `steps` must not be; one is required.
times: A [batch_size x window_size] array of integers (not a Tensor)
indicating times to make predictions for. These times must be after the
corresponding evaluation. If `steps` is specified, `times` must not be;
one is required. If the batch dimension is omitted, it is assumed to be 1.
exogenous_features: Optional dictionary. If specified, indicates exogenous
features for the model to use while making the predictions. Values must
have shape [batch_size x window_size x ...], where `batch_size` matches
the batch dimension used when creating `evaluation`, and `window_size` is
either the `steps` argument or the `window_size` of the `times` argument
(depending on which was specified).
Returns:
An `input_fn` suitable for passing to the `predict` function of a time
series `Estimator`.
Raises:
ValueError: If `times` or `steps` are misspecified.
"""
if exogenous_features is None:
exogenous_features = {}
predict_times = model_utils.canonicalize_times_or_steps_from_output(
times=times, steps=steps, previous_model_output=evaluation)
features = {
feature_keys.PredictionFeatures.STATE_TUPLE:
evaluation[feature_keys.FilteringResults.STATE_TUPLE],
feature_keys.PredictionFeatures.TIMES:
predict_times
}
features.update(exogenous_features)
def _predict_input_fn():
"""An input_fn for predict()."""
# Prevents infinite iteration with a constant output in an Estimator's
# predict().
limited_features = {}
for key, values in features.items():
limited_values = nest.map_structure(
lambda value: training.limit_epochs(value, num_epochs=1), values)
limited_features[key] = limited_values
return (limited_features, None)
return _predict_input_fn
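# A minimal illustrative sketch (hypothetical helper, not part of the
# TensorFlow API): chaining `Estimator.evaluate` output into
# `predict_continuation_input_fn`. The `estimator` and `evaluation_input_fn`
# arguments and the choice of 10 steps are assumptions for illustration.
def _example_predict_after_evaluate(estimator, evaluation_input_fn):
  """Evaluate on a series, then predict 10 steps past the evaluated window."""
  evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
  predict_input_fn = predict_continuation_input_fn(evaluation, steps=10)
  # `predict` yields one dictionary per batch element of the evaluation.
  return list(estimator.predict(input_fn=predict_input_fn))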
class TimeSeriesReader(object):
"""Reads from and parses a data source for a `TimeSeriesInputFn`.
This class provides methods that read a few records (`read`) or the full data
set at once (`read_full`), and returns them as dictionaries mapping feature
names to feature Tensors. Please see note at the top of the file for the
structure of these dictionaries. The output is generally chunked by a
`TimeSeriesInputFn` before being passed to the model.
"""
def check_dataset_size(self, minimum_dataset_size):
"""When possible, raises an error if the dataset is too small.
This method allows TimeSeriesReaders to raise informative error messages if
the user has selected a window size in their TimeSeriesInputFn which is
larger than the dataset size. However, many TimeSeriesReaders will not have
access to a dataset size, in which case they do not need to override this
method.
Args:
minimum_dataset_size: The minimum number of records which should be
contained in the dataset. Readers should attempt to raise an error when
possible if an epoch of data contains fewer records.
"""
pass
@abc.abstractmethod
def read(self):
"""Parses one or more records into a feature dictionary.
This method is expected to be called by a `TimeSeriesInputFn` object, and is
not for use with models directly.
    A `TimeSeriesReader` object reads multiple records at a time for
efficiency; the size of these batches is an implementation detail internal
to the input pipeline. These records should generally be sequential,
although some out-of-order records due to file wraparounds are expected and
must be handled by callers.
Returns:
A dictionary mapping feature names to `Tensor` values, each with an
arbitrary batch dimension (for efficiency) as their first dimension.
"""
pass
@abc.abstractmethod
def read_full(self):
"""Return the full dataset.
Largely for interactive use/plotting (or evaluation on small
datasets). Generally not very efficient. Not recommended for training.
Returns:
Same return type as `read`, but with the full dataset rather than an
arbitrary chunk of it. A dictionary mapping feature names to `Tensor`
values, where the size of the first dimension of each `Tensor` is the
number of samples in the entire dataset. These `Tensor`s should be
constant across graph invocations, assuming that the underlying data
remains constant. Current implementations re-read data on each graph
invocation, although this may change in the future.
"""
pass
class NumpyReader(TimeSeriesReader):
"""A time series parser for feeding Numpy arrays to a `TimeSeriesInputFn`.
Avoids embedding data in the graph as constants.
"""
def __init__(self, data, read_num_records_hint=4096):
"""Numpy array input for a `TimeSeriesInputFn`.
Args:
data: A dictionary mapping feature names to Numpy arrays, with two
possible shapes (requires keys `TrainEvalFeatures.TIMES` and
`TrainEvalFeatures.VALUES`): Univariate; `TIMES` and `VALUES` are both
        vectors of shape [series length]. Multivariate; `TIMES` is a vector of
shape [series length], `VALUES` has shape [series length x number of
features]. In any case, `VALUES` and any exogenous features must have
their shapes prefixed by the shape of the value corresponding to the
`TIMES` key.
read_num_records_hint: The maximum number of samples to read at one time,
for efficiency.
"""
self._features = _canonicalize_numpy_data(data, require_single_batch=True)
self._read_num_records_hint = read_num_records_hint
def check_dataset_size(self, minimum_dataset_size):
"""Raise an error if the dataset is too small."""
dataset_size = self._features[feature_keys.TrainEvalFeatures.TIMES].shape[1]
if dataset_size < minimum_dataset_size:
raise ValueError(
("A TimeSeriesInputFn is configured to create windows of size {}, "
"but only {} records were available in the dataset. Either decrease "
"the window size or provide more records.").format(
minimum_dataset_size, dataset_size))
def read(self):
"""Returns a large chunk of the Numpy arrays for later re-chunking."""
# Remove the batch dimension from all features
features = {
key: numpy.squeeze(value, axis=0)
for key, value in self._features.items()
}
return estimator_lib.inputs.numpy_input_fn(
x=features,
# The first dimensions of features are the series length, since we have
# removed the batch dimension above. We now pull out
# self._read_num_records_hint steps of this single time series to pass
# to the TimeSeriesInputFn.
batch_size=self._read_num_records_hint,
num_epochs=None,
shuffle=False)()
def read_full(self):
"""Returns `Tensor` versions of the full Numpy arrays."""
features = estimator_lib.inputs.numpy_input_fn(
x=self._features,
batch_size=1,
num_epochs=None,
queue_capacity=2, # Each queue element is a full copy of the dataset
shuffle=False)()
    # A TimeSeriesInputFn adds its own batch dimension, so remove the size-1
    # batch dimension added by numpy_input_fn above.
return {
feature_name: array_ops.squeeze(feature_value, axis=0)
for feature_name, feature_value in features.items()
}
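# A minimal illustrative sketch (hypothetical helper, not part of the
# TensorFlow API): constructing a `NumpyReader` from a univariate in-memory
# series, following the shape conventions documented above. The array
# contents are made up.
def _example_numpy_reader():
  times = numpy.arange(100, dtype=numpy.int64)
  values = numpy.sin(times.astype(numpy.float32))
  return NumpyReader({
      feature_keys.TrainEvalFeatures.TIMES: times,
      feature_keys.TrainEvalFeatures.VALUES: values,
  })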
class ReaderBaseTimeSeriesParser(TimeSeriesReader):
"""Base for time series readers which wrap a `tf.compat.v1.ReaderBase`."""
def __init__(self, filenames, read_num_records_hint=4096):
"""Configure the time series reader.
Args:
filenames: A string or list of strings indicating files to read records
from.
read_num_records_hint: When not reading a full dataset, indicates the
number of records to transfer in a single chunk (for efficiency). The
actual number transferred at one time may vary.
"""
self._filenames = filenames
self._read_num_records_hint = read_num_records_hint
@abc.abstractmethod
def _get_reader(self):
"""Get an instance of the tf.compat.v1.ReaderBase associated with this class."""
pass
@abc.abstractmethod
def _process_records(self, lines):
"""Given string items, return a processed dictionary of Tensors.
Args:
lines: A 1-dimensional string Tensor, each representing a record to parse
(source dependent, e.g. a line of a file, or a serialized protocol
buffer).
Returns:
A dictionary mapping feature names to their values. The batch dimensions
should match the length of `lines`.
"""
pass
def _get_filename_queue(self, epoch_limit):
"""Constructs a filename queue with an epoch limit.
`epoch_limit` is intended as an error checking fallback to prevent a reader
from infinitely looping in its requests for more work items if none are
available in any file. It should be set high enough that it is never reached
assuming at least one record exists in some file.
Args:
epoch_limit: The maximum number of times to read through the complete list
of files before throwing an OutOfRangeError.
Returns:
A tuple of (filename_queue, epoch_limiter):
filename_queue: A FIFOQueue with filename work items.
epoch_limiter: The local variable used for epoch limitation. This should
be set to zero before a reader is passed `filename_queue` in order to
reset the epoch limiter's state.
"""
epoch_limiter = variable_scope.variable(
initial_value=constant_op.constant(0, dtype=dtypes.int64),
name="epoch_limiter",
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
filenames_tensor = array_ops.reshape(
ops.convert_to_tensor(self._filenames), [-1])
# We can't rely on epoch_limiter being initialized, since queue runners are
# started before local variables are initialized. Instead, we ignore epoch
# limits before variable initialization. This means that prior to variable
# initialization, a QueueRunner may cause a reader to enter an un-checked
# infinite loop. However, as soon as local variables are initialized, we
# will start incrementing and checking epoch_limiter, which will interrupt
# any in-progress loops.
conditional_count_up_to = control_flow_ops.cond(
state_ops.is_variable_initialized(
epoch_limiter), lambda: epoch_limiter.count_up_to(epoch_limit),
lambda: constant_op.constant(0, dtype=dtypes.int64))
with ops.control_dependencies([conditional_count_up_to]):
filenames_tensor = array_ops.identity(filenames_tensor)
filename_queue = input_lib.string_input_producer(
filenames_tensor, shuffle=False, capacity=1)
return filename_queue, epoch_limiter
def read(self):
"""Reads a chunk of data from the `tf.compat.v1.ReaderBase` for later re-chunking."""
# Assuming there is at least one item to be read among all of the files in
# self._filenames, we will not need to go through more than
# self._read_num_records_hint epochs to get a batch of
# self._read_num_records_hint records. Setting this limit and resetting it
# before each reader.read_up_to call prevents infinite looping when there
# are no records available in any of the files.
filename_queue, epoch_limiter = self._get_filename_queue(
epoch_limit=self._read_num_records_hint)
reader = self._get_reader()
epoch_reset_op = state_ops.assign(epoch_limiter, 0)
with ops.control_dependencies([epoch_reset_op]):
_, records = reader.read_up_to(filename_queue,
self._read_num_records_hint)
return self._process_records(records)
def read_full(self):
"""Reads a full epoch of data into memory."""
reader = self._get_reader()
# Set a hard limit of 2 epochs through self._filenames. If there are any
# records available, we should only end up reading the first record in the
# second epoch before exiting the while loop and subsequently resetting the
# epoch limit. If there are no records available in any of the files, this
# hard limit prevents the reader.read_up_to call from looping infinitely.
filename_queue, epoch_limiter = self._get_filename_queue(epoch_limit=2)
epoch_reset_op = state_ops.assign(epoch_limiter, 0)
with ops.control_dependencies([epoch_reset_op]):
first_key, first_value = reader.read_up_to(filename_queue, 1)
# Read until we get a duplicate key (one epoch)
def _while_condition(current_key, current_value, current_index,
collected_records):
del current_value, current_index, collected_records # unused
return math_ops.not_equal(
array_ops.squeeze(current_key, axis=0),
array_ops.squeeze(first_key, axis=0))
def _while_body(current_key, current_value, current_index,
collected_records):
del current_key # unused
new_key, new_value = reader.read_up_to(filename_queue, 1)
new_key.set_shape([1])
new_value.set_shape([1])
return (new_key, new_value, current_index + 1,
collected_records.write(current_index, current_value))
_, _, _, records_ta = control_flow_ops.while_loop(
_while_condition,
_while_body,
[
constant_op.constant([""]),
first_value,
0, # current_index starting value
tensor_array_ops.TensorArray( # collected_records
dtype=dtypes.string,
size=0,
dynamic_size=True)
])
records = records_ta.concat()
# Reset the reader when we're done so that subsequent requests for data get
# the dataset in the proper order.
with ops.control_dependencies([records]):
reader_reset_op = reader.reset()
with ops.control_dependencies([reader_reset_op]):
records = array_ops.identity(records)
return self._process_records(records)
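# A minimal illustrative sketch (hypothetical helper, not part of the
# TensorFlow API): the while_loop in `read_full` above stops once the reader
# returns the record key it produced first, which marks one complete pass over
# the files. The same idea in plain Python over a list of (key, value) pairs:
def _example_collect_one_epoch(records):
  first_key, first_value = records[0]
  collected = [first_value]
  for key, value in records[1:]:
    if key == first_key:  # Wrapped around to the first record: epoch is done.
      break
    collected.append(value)
  return collected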
class CSVReader(ReaderBaseTimeSeriesParser):
"""Reads from a collection of CSV-formatted files."""
def __init__(self,
filenames,
column_names=(feature_keys.TrainEvalFeatures.TIMES,
feature_keys.TrainEvalFeatures.VALUES),
column_dtypes=None,
skip_header_lines=None,
read_num_records_hint=4096):
"""CSV-parsing reader for a `TimeSeriesInputFn`.
Args:
filenames: A filename or list of filenames to read the time series from.
Each line must have columns corresponding to `column_names`.
column_names: A list indicating names for each feature.
`TrainEvalFeatures.TIMES` and `TrainEvalFeatures.VALUES` are required;
`VALUES` may be repeated to indicate a multivariate series.
column_dtypes: If provided, must be a list with the same length as
`column_names`, indicating dtypes for each column. Defaults to
`tf.int64` for `TrainEvalFeatures.TIMES` and `tf.float32` for everything
else.
skip_header_lines: Passed on to `tf.compat.v1.TextLineReader`; skips this
number of lines at the beginning of each file.
read_num_records_hint: When not reading a full dataset, indicates the
number of records to parse/transfer in a single chunk (for efficiency).
The actual number transferred at one time may be more or less.
Raises:
ValueError: If required column names are not specified, or if lengths do
not match.
"""
if feature_keys.TrainEvalFeatures.TIMES not in column_names:
raise ValueError("'{}' is a required column.".format(
feature_keys.TrainEvalFeatures.TIMES))
if feature_keys.TrainEvalFeatures.VALUES not in column_names:
raise ValueError("'{}' is a required column.".format(
feature_keys.TrainEvalFeatures.VALUES))
if column_dtypes is not None and len(column_dtypes) != len(column_names):
raise ValueError(
("If specified, the length of column_dtypes must match the length of "
"column_names (got column_dtypes={} and column_names={}).").format(
column_dtypes, column_names))
if sum(1 for column_name in column_names
if column_name == feature_keys.TrainEvalFeatures.TIMES) != 1:
raise ValueError("Got more than one times column ('{}'), but exactly "
"one is required.".format(
feature_keys.TrainEvalFeatures.TIMES))
self._column_names = column_names
self._column_dtypes = column_dtypes
self._skip_header_lines = skip_header_lines
super(CSVReader, self).__init__(
filenames=filenames, read_num_records_hint=read_num_records_hint)
def _get_reader(self):
return io_ops.TextLineReader(skip_header_lines=self._skip_header_lines)
def _process_records(self, lines):
"""Parse `lines` as CSV records."""
if self._column_dtypes is None:
default_values = [(array_ops.zeros([], dtypes.int64),) if
column_name == feature_keys.TrainEvalFeatures.TIMES else
() for column_name in self._column_names]
else:
default_values = [
(array_ops.zeros([], dtype),) for dtype in self._column_dtypes
]
columns = parsing_ops.decode_csv(lines, default_values)
features_lists = {}
for column_name, value in zip(self._column_names, columns):
features_lists.setdefault(column_name, []).append(value)
features = {}
for column_name, values in features_lists.items():
if column_name == feature_keys.TrainEvalFeatures.TIMES:
features[column_name] = values[0]
else:
features[column_name] = array_ops.stack(values, axis=1)
return features
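# A minimal illustrative sketch (hypothetical helper, not part of the
# TensorFlow API): a `CSVReader` for a file with one time column and two value
# columns, repeating `TrainEvalFeatures.VALUES` to indicate a multivariate
# series. The file name is made up.
def _example_multivariate_csv_reader():
  return CSVReader(
      filenames=["/tmp/multivariate_series.csv"],
      column_names=(feature_keys.TrainEvalFeatures.TIMES,
                    feature_keys.TrainEvalFeatures.VALUES,
                    feature_keys.TrainEvalFeatures.VALUES))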
class TFExampleReader(ReaderBaseTimeSeriesParser):
"""Reads and parses `tf.Example`s from a TFRecords file."""
def __init__(self, filenames, features):
"""Configure `tf.Example` parsing.
Args:
filenames: A filename or list of filenames to read the time series from.
Each line must have columns corresponding to `column_names`.
features: A dictionary mapping from feature keys to
`tf.io.FixedLenFeature` objects. Must include `TrainEvalFeatures.TIMES`
(scalar integer) and `TrainEvalFeatures.VALUES` (floating point vector)
features.
Raises:
ValueError: If required times/values features are not present.
"""
if feature_keys.TrainEvalFeatures.TIMES not in features:
raise ValueError("'{}' is a required column.".format(
feature_keys.TrainEvalFeatures.TIMES))
if feature_keys.TrainEvalFeatures.VALUES not in features:
raise ValueError("'{}' is a required column.".format(
feature_keys.TrainEvalFeatures.VALUES))
self._features = features
super(TFExampleReader, self).__init__(filenames=filenames)
def _get_reader(self):
return io_ops.TFRecordReader()
def _process_records(self, examples):
"""Parse `tf.Example`s into `Tensors`."""
return parsing_ops.parse_example(
serialized=examples, features=self._features)
class TimeSeriesInputFn(object):
"""Base for classes which create batches of windows from a time series."""
@abc.abstractmethod
def create_batch(self):
"""Creates chunked Tensors from times, values, and other features.
Suitable for use as the input_fn argument of a tf.estimator.Estimator's
fit() or evaluate() method.
Returns:
A tuple of (features, targets):
features: A dictionary with `TrainEvalFeatures.TIMES` and
`TrainEvalFeatures.VALUES` as keys, `TIMES` having an associated value
with shape [batch size x window length], `VALUES` with shape [batch
size x window length x number of features]. Any other features will
also have shapes prefixed with [batch size x window length].
targets: Not used, but must have a value for compatibility with the
Estimator API. That value should be None.
"""
pass
def __call__(self):
# Allow a TimeSeriesInputFn to be used as an input function directly
return self.create_batch()
class WholeDatasetInputFn(TimeSeriesInputFn):
"""Supports passing a full time series to a model for evaluation/inference.
Note that this `TimeSeriesInputFn` is not designed for high throughput, and
should not be used for training. It allows for sequential evaluation on a full
dataset (with sequential in-sample predictions), which then feeds naturally
into `predict_continuation_input_fn` for making out-of-sample
predictions. While this is useful for plotting and interactive use,
`RandomWindowInputFn` is better suited to training and quantitative
evaluation.
"""
# TODO(allenl): A SequentialWindowInputFn for getting model end state without
# loading the whole dataset into memory (or for quantitative evaluation of
# sequential models). Note that an Estimator using such a TimeSeriesInputFn
# won't return in-sample predictions for the whole dataset, which means it
# won't be terribly useful for interactive use/plotting (unless the user
# passes in concat metrics). Also need to be careful about state saving for
# sequential models, particularly the gaps between chunks.
def __init__(self, time_series_reader):
"""Initialize the `TimeSeriesInputFn`.
Args:
time_series_reader: A TimeSeriesReader object.
"""
self._reader = time_series_reader
super(WholeDatasetInputFn, self).__init__()
def create_batch(self):
"""A suitable `input_fn` for an `Estimator`'s `evaluate()`.
Returns:
A dictionary mapping feature names to `Tensors`, each shape
prefixed by [1, data set size] (i.e. a batch size of 1).
"""
features = self._reader.read_full()
# Add a batch dimension of one to each feature.
return ({
feature_name: feature_value[None, ...]
for feature_name, feature_value in features.items()
}, None)
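# A minimal illustrative sketch (hypothetical helper, not part of the
# TensorFlow API): the features returned by `WholeDatasetInputFn` carry a
# leading batch dimension of 1, so a univariate series of length N comes back
# with `TIMES` shaped [1, N] and `VALUES` shaped [1, N, 1]. The `reader`
# argument is an assumed `TimeSeriesReader`.
def _example_whole_dataset_shapes(reader):
  features, targets = WholeDatasetInputFn(reader)()
  assert targets is None  # Targets are unused; see `create_batch` above.
  return {key: value.shape for key, value in features.items()}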
class RandomWindowInputFn(TimeSeriesInputFn):
"""Wraps a `TimeSeriesReader` to create random batches of windows.
Tensors are first collected into sequential windows (in a windowing queue
created by `tf.compat.v1.train.batch`, based on the order returned from
`time_series_reader`), then these windows are randomly batched (in a
`RandomShuffleQueue`), the Tensors returned by `create_batch` having shapes
prefixed by [`batch_size`, `window_size`].
This `TimeSeriesInputFn` is useful for both training and quantitative
evaluation (but be sure to run several epochs for sequential models such as
`StructuralEnsembleRegressor` to completely flush stale state left over from
training). For qualitative evaluation or when preparing for predictions, use
`WholeDatasetInputFn`.
"""
def __init__(self,
time_series_reader,
window_size,
batch_size,
queue_capacity_multiplier=1000,
shuffle_min_after_dequeue_multiplier=2,
discard_out_of_order=True,
discard_consecutive_batches_limit=1000,
jitter=True,
num_threads=2,
shuffle_seed=None):
"""Configure the RandomWindowInputFn.
Args:
time_series_reader: A TimeSeriesReader object.
window_size: The number of examples to keep together sequentially. This
controls the length of truncated backpropagation: smaller values mean
less sequential computation, which can lead to faster training, but
create a coarser approximation to the gradient (which would ideally be
computed by a forward pass over the entire sequence in order).
batch_size: The number of windows to place together in a batch. Larger
values will lead to more stable gradients during training.
queue_capacity_multiplier: The capacity for the queues used to create
batches, specified as a multiple of `batch_size` (for
RandomShuffleQueue) and `batch_size * window_size` (for the FIFOQueue).
Controls the maximum number of windows stored. Should be greater than
`shuffle_min_after_dequeue_multiplier`.
shuffle_min_after_dequeue_multiplier: The minimum number of windows in the
RandomShuffleQueue after a dequeue, which controls the amount of entropy
introduced during batching. Specified as a multiple of `batch_size`.
discard_out_of_order: If True, windows of data which have times which
decrease (a higher time followed by a lower time) are discarded. If
False, the window and associated features are instead sorted so that
times are non-decreasing. Discarding is typically faster, as models do
not have to deal with artificial gaps in the data. However, discarding
does create a bias where the beginnings and endings of files are
under-sampled.
discard_consecutive_batches_limit: Raise an OutOfRangeError if more than
this number of batches are discarded without a single non-discarded
window (prevents infinite looping when the dataset is too small).
jitter: If True, randomly discards examples between some windows in order
to avoid deterministic chunking patterns. This is important for models
like AR which may otherwise overfit a fixed chunking.
num_threads: Use this number of threads for queues. Setting a value of 1
removes one source of non-determinism (and in combination with
shuffle_seed should provide deterministic windowing).
shuffle_seed: A seed for window shuffling. The default value of None
provides random behavior. With `shuffle_seed` set and `num_threads=1`,
provides deterministic behavior.
"""
self._reader = time_series_reader
self._window_size = window_size
self._reader.check_dataset_size(minimum_dataset_size=self._window_size)
self._batch_size = batch_size
self._queue_capacity_multiplier = queue_capacity_multiplier
self._shuffle_min_after_dequeue_multiplier = (
shuffle_min_after_dequeue_multiplier)
self._discard_out_of_order = discard_out_of_order
self._discard_limit = discard_consecutive_batches_limit
self._jitter = jitter
if num_threads is None:
self._num_threads = self._batch_size
else:
self._num_threads = num_threads
self._shuffle_seed = shuffle_seed
super(RandomWindowInputFn, self).__init__()
def create_batch(self):
"""Create queues to window and batch time series data.
Returns:
A dictionary of Tensors corresponding to the output of `self._reader`
(from the `time_series_reader` constructor argument), each with shapes
prefixed by [`batch_size`, `window_size`].
"""
features = self._reader.read()
if self._jitter:
# TODO(agarwal, allenl): Figure out if more jitter is needed here.
jitter = random_ops.random_uniform(shape=[], maxval=2, dtype=dtypes.int32)
else:
jitter = 0
# To keep things efficient, we pass from the windowing batcher to the
# batch-of-windows batcher in batches. This avoids the need for huge numbers
# of threads, but does mean that jitter is only applied occasionally.
# TODO(allenl): Experiment with different internal passing sizes.
internal_passing_size = self._batch_size
features_windowed = input_lib.batch(
features,
batch_size=self._window_size * internal_passing_size + jitter,
enqueue_many=True,
capacity=(self._queue_capacity_multiplier * internal_passing_size *
self._window_size),
num_threads=self._num_threads)
raw_features_windowed = features_windowed
if self._jitter:
features_windowed = {
key: value[jitter:] for key, value in features_windowed.items()
}
features_windowed = {
key: array_ops.reshape(
value,
array_ops.concat([[internal_passing_size, self._window_size],
array_ops.shape(value)[1:]],
axis=0))
for key, value in features_windowed.items()
}
batch_and_window_shape = tensor_shape.TensorShape(
[internal_passing_size, self._window_size])
for key in features_windowed.keys():
features_windowed[key].set_shape(
batch_and_window_shape.concatenate(
raw_features_windowed[key].get_shape()[1:]))
    # When switching files (or looping back around to the beginning of the
    # first file), we may end up with windows whose times are not
    # non-decreasing (a higher time followed by a lower time), even if times
    # within each file are sorted. This is hard for models to deal with, so we
    # either discard such windows, creating a bias where the beginning and end
    # of the series are under-sampled, or we sort the window, creating large
    # gaps.
times = features_windowed[feature_keys.TrainEvalFeatures.TIMES]
if self._discard_out_of_order:
non_decreasing = math_ops.reduce_all(
times[:, 1:] >= times[:, :-1], axis=1)
# Ensure that no more than self._discard_limit complete batches are
# discarded contiguously (resetting the count when we find a single clean
# window). This prevents infinite looping when the dataset is smaller than
# the window size.
# TODO(allenl): Figure out a way to return informative errors from
# count_up_to.
discarded_windows_limiter = variable_scope.variable(
initial_value=constant_op.constant(0, dtype=dtypes.int64),
name="discarded_windows_limiter",
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
def _initialized_limit_check():
return control_flow_ops.cond(
math_ops.reduce_any(non_decreasing),
lambda: state_ops.assign(discarded_windows_limiter, 0),
lambda: discarded_windows_limiter.count_up_to(self._discard_limit))
discard_limit_op = control_flow_ops.cond(
state_ops.is_variable_initialized(discarded_windows_limiter),
_initialized_limit_check,
lambda: constant_op.constant(0, dtype=dtypes.int64))
with ops.control_dependencies([discard_limit_op]):
non_decreasing = array_ops.identity(non_decreasing)
else:
_, indices_descending = nn.top_k(
times, k=array_ops.shape(times)[-1], sorted=True)
indices = array_ops.reverse(indices_descending, axis=[0])
features_windowed = {
key: array_ops.gather(params=value, indices=indices)
for key, value in features_windowed.items()
}
non_decreasing = True
features_batched = input_lib.maybe_shuffle_batch(
features_windowed,
num_threads=self._num_threads,
seed=self._shuffle_seed,
batch_size=self._batch_size,
capacity=self._queue_capacity_multiplier * self._batch_size,
min_after_dequeue=(self._shuffle_min_after_dequeue_multiplier *
self._batch_size),
keep_input=non_decreasing,
enqueue_many=True)
return (features_batched, None)
def _canonicalize_numpy_data(data, require_single_batch):
"""Do basic checking and reshaping for Numpy data.
Args:
data: A dictionary mapping keys to Numpy arrays, with several possible
shapes (requires keys `TrainEvalFeatures.TIMES` and
`TrainEvalFeatures.VALUES`): Single example; `TIMES` is a scalar and
`VALUES` is either a scalar or a vector of length [number of features].
Sequence; `TIMES` is a vector of shape [series length], `VALUES` either
has shape [series length] (univariate) or [series length x number of
features] (multivariate). Batch of sequences; `TIMES` is a vector of
shape [batch size x series length], `VALUES` has shape [batch size x
series length] or [batch size x series length x number of features]. In
any case, `VALUES` and any exogenous features must have their shapes
prefixed by the shape of the value corresponding to the `TIMES` key.
require_single_batch: If True, raises an error if the provided data has a
batch dimension > 1.
Returns:
A dictionary with features normalized to have shapes prefixed with [batch
size x series length]. The sizes of dimensions which were omitted in the
inputs are 1.
Raises:
ValueError: If dimensions are incorrect or do not match, or required
features are missing.
"""
features = {key: numpy.array(value) for key, value in data.items()}
if (feature_keys.TrainEvalFeatures.TIMES not in features or
feature_keys.TrainEvalFeatures.VALUES not in features):
raise ValueError("{} and {} are required features.".format(
feature_keys.TrainEvalFeatures.TIMES,
feature_keys.TrainEvalFeatures.VALUES))
times = features[feature_keys.TrainEvalFeatures.TIMES]
for key, value in features.items():
if value.shape[:len(times.shape)] != times.shape:
raise ValueError(
("All features must have their shapes prefixed by the shape of the"
" times feature. Got shape {} for feature '{}', but shape {} for"
" '{}'").format(value.shape, key, times.shape,
feature_keys.TrainEvalFeatures.TIMES))
if not times.shape: # a single example
if not features[feature_keys.TrainEvalFeatures.VALUES].shape: # univariate
# Add a feature dimension (with one feature)
features[feature_keys.TrainEvalFeatures.VALUES] = features[
feature_keys.TrainEvalFeatures.VALUES][..., None]
elif len(features[feature_keys.TrainEvalFeatures.VALUES].shape) > 1:
raise ValueError(
("Got an unexpected number of dimensions for the '{}' feature."
" Was expecting at most 1 dimension"
" ([number of features]) since '{}' does not "
"have a batch or time dimension, but got shape {}").format(
feature_keys.TrainEvalFeatures.VALUES,
feature_keys.TrainEvalFeatures.TIMES,
features[feature_keys.TrainEvalFeatures.VALUES].shape))
# Add trivial batch and time dimensions for every feature
features = {key: value[None, None, ...] for key, value in features.items()}
if len(times.shape) == 1: # shape [series length]
if len(features[feature_keys.TrainEvalFeatures.VALUES].shape
) == 1: # shape [series length]
# Add a feature dimension (with one feature)
features[feature_keys.TrainEvalFeatures.VALUES] = features[
feature_keys.TrainEvalFeatures.VALUES][..., None]
elif len(features[feature_keys.TrainEvalFeatures.VALUES].shape) > 2:
raise ValueError(
("Got an unexpected number of dimensions for the '{}' feature."
" Was expecting at most 2 dimensions"
" ([series length, number of features]) since '{}' does not "
"have a batch dimension, but got shape {}").format(
feature_keys.TrainEvalFeatures.VALUES,
feature_keys.TrainEvalFeatures.TIMES,
features[feature_keys.TrainEvalFeatures.VALUES].shape))
# Add trivial batch dimensions for every feature
features = {key: value[None, ...] for key, value in features.items()}
elif len(features[feature_keys.TrainEvalFeatures.TIMES].shape
) != 2: # shape [batch size, series length]
raise ValueError(
("Got an unexpected number of dimensions for times. Was expecting at "
"most two ([batch size, series length]), but got shape {}.").format(
times.shape))
if require_single_batch:
# We don't expect input to be already batched; batching is done later
if features[feature_keys.TrainEvalFeatures.TIMES].shape[0] != 1:
raise ValueError("Got batch input, was expecting unbatched input.")
return features
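# A minimal illustrative sketch (hypothetical helper, not part of the
# TensorFlow API): how `_canonicalize_numpy_data` normalizes a univariate
# vector series into the [batch size, series length, number of features]
# layout described above. The array contents are made up.
def _example_canonicalize_shapes():
  data = {
      feature_keys.TrainEvalFeatures.TIMES:
          numpy.arange(5, dtype=numpy.int64),
      feature_keys.TrainEvalFeatures.VALUES:
          numpy.zeros([5], dtype=numpy.float32),
  }
  features = _canonicalize_numpy_data(data, require_single_batch=True)
  # TIMES gains a batch dimension and VALUES additionally gains a feature
  # dimension: [1, 5] and [1, 5, 1] respectively.
  assert features[feature_keys.TrainEvalFeatures.TIMES].shape == (1, 5)
  assert features[feature_keys.TrainEvalFeatures.VALUES].shape == (1, 5, 1)
  return features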
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/input_pipeline.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convenience functions for working with time series saved_models.
@@predict_continuation
@@cold_start_filter
@@filter_continuation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.python.timeseries import feature_keys as _feature_keys
from tensorflow.contrib.timeseries.python.timeseries import head as _head
from tensorflow.contrib.timeseries.python.timeseries import input_pipeline as _input_pipeline
from tensorflow.contrib.timeseries.python.timeseries import model_utils as _model_utils
from tensorflow.python.util.all_util import remove_undocumented
def _colate_features_to_feeds_and_fetches(signature,
features,
graph,
continue_from=None):
"""Uses a saved model signature to construct feed and fetch dictionaries."""
if continue_from is None:
state_values = {}
elif _feature_keys.FilteringResults.STATE_TUPLE in continue_from:
# We're continuing from an evaluation, so we need to unpack/flatten state.
state_values = _head.state_to_dictionary(
continue_from[_feature_keys.FilteringResults.STATE_TUPLE])
else:
state_values = continue_from
input_feed_tensors_by_name = {
input_key: graph.as_graph_element(input_value.name)
for input_key, input_value in signature.inputs.items()
}
output_tensors_by_name = {
output_key: graph.as_graph_element(output_value.name)
for output_key, output_value in signature.outputs.items()
}
feed_dict = {}
for state_key, state_value in state_values.items():
feed_dict[input_feed_tensors_by_name[state_key]] = state_value
for feature_key, feature_value in features.items():
feed_dict[input_feed_tensors_by_name[feature_key]] = feature_value
return output_tensors_by_name, feed_dict
def predict_continuation(continue_from,
signatures,
session,
steps=None,
times=None,
exogenous_features=None):
"""Perform prediction using an exported saved model.
Analogous to _input_pipeline.predict_continuation_input_fn, but operates on a
saved model rather than feeding into Estimator's predict method.
Args:
continue_from: A dictionary containing the results of either an Estimator's
evaluate method or filter_continuation. Used to determine the model state
to make predictions starting from.
signatures: The `MetaGraphDef` protocol buffer returned from
`tf.compat.v1.saved_model.loader.load`. Used to determine the names of
Tensors to feed and fetch. Must be from the same model as `continue_from`.
session: The session to use. The session's graph must be the one into which
`tf.compat.v1.saved_model.loader.load` loaded the model.
steps: The number of steps to predict (scalar), starting after the
evaluation or filtering. If `times` is specified, `steps` must not be; one
is required.
times: A [batch_size x window_size] array of integers (not a Tensor)
indicating times to make predictions for. These times must be after the
corresponding evaluation or filtering. If `steps` is specified, `times`
must not be; one is required. If the batch dimension is omitted, it is
assumed to be 1.
exogenous_features: Optional dictionary. If specified, indicates exogenous
features for the model to use while making the predictions. Values must
have shape [batch_size x window_size x ...], where `batch_size` matches
the batch dimension used when creating `continue_from`, and `window_size`
is either the `steps` argument or the `window_size` of the `times`
argument (depending on which was specified).
Returns:
A dictionary with model-specific predictions (typically having keys "mean"
and "covariance") and a feature_keys.PredictionResults.TIMES key indicating
the times for which the predictions were computed.
Raises:
ValueError: If `times` or `steps` are misspecified.
"""
if exogenous_features is None:
exogenous_features = {}
predict_times = _model_utils.canonicalize_times_or_steps_from_output(
times=times, steps=steps, previous_model_output=continue_from)
features = {_feature_keys.PredictionFeatures.TIMES: predict_times}
features.update(exogenous_features)
predict_signature = signatures.signature_def[
_feature_keys.SavedModelLabels.PREDICT]
output_tensors_by_name, feed_dict = _colate_features_to_feeds_and_fetches(
continue_from=continue_from,
signature=predict_signature,
features=features,
graph=session.graph)
output = session.run(output_tensors_by_name, feed_dict=feed_dict)
output[_feature_keys.PredictionResults.TIMES] = features[
_feature_keys.PredictionFeatures.TIMES]
return output
def cold_start_filter(signatures, session, features):
"""Perform filtering using an exported saved model.
Filtering refers to updating model state based on new observations.
Predictions based on the returned model state will be conditioned on these
observations.
Starts from the model's default/uninformed state.
Args:
    signatures: The `MetaGraphDef` protocol buffer returned from
      `tf.compat.v1.saved_model.loader.load`. Used to determine the names of
      Tensors to feed and fetch. Must be from the model being filtered.
session: The session to use. The session's graph must be the one into which
`tf.compat.v1.saved_model.loader.load` loaded the model.
features: A dictionary mapping keys to Numpy arrays, with several possible
shapes (requires keys `FilteringFeatures.TIMES` and
`FilteringFeatures.VALUES`): Single example; `TIMES` is a scalar and
`VALUES` is either a scalar or a vector of length [number of features].
Sequence; `TIMES` is a vector of shape [series length], `VALUES` either
has shape [series length] (univariate) or [series length x number of
features] (multivariate). Batch of sequences; `TIMES` is a vector of
shape [batch size x series length], `VALUES` has shape [batch size x
series length] or [batch size x series length x number of features]. In
any case, `VALUES` and any exogenous features must have their shapes
prefixed by the shape of the value corresponding to the `TIMES` key.
Returns:
A dictionary containing model state updated to account for the observations
in `features`.
"""
filter_signature = signatures.signature_def[
_feature_keys.SavedModelLabels.COLD_START_FILTER]
features = _input_pipeline._canonicalize_numpy_data( # pylint: disable=protected-access
data=features,
require_single_batch=False)
output_tensors_by_name, feed_dict = _colate_features_to_feeds_and_fetches(
signature=filter_signature, features=features, graph=session.graph)
output = session.run(output_tensors_by_name, feed_dict=feed_dict)
# Make it easier to chain filter -> predict by keeping track of the current
# time.
output[_feature_keys.FilteringResults.TIMES] = features[
_feature_keys.FilteringFeatures.TIMES]
return output
def filter_continuation(continue_from, signatures, session, features):
"""Perform filtering using an exported saved model.
Filtering refers to updating model state based on new observations.
Predictions based on the returned model state will be conditioned on these
observations.
Args:
continue_from: A dictionary containing the results of either an Estimator's
evaluate method or a previous filter step (cold start or continuation).
Used to determine the model state to start filtering from.
signatures: The `MetaGraphDef` protocol buffer returned from
`tf.compat.v1.saved_model.loader.load`. Used to determine the names of
Tensors to feed and fetch. Must be from the same model as `continue_from`.
session: The session to use. The session's graph must be the one into which
`tf.compat.v1.saved_model.loader.load` loaded the model.
features: A dictionary mapping keys to Numpy arrays, with several possible
shapes (requires keys `FilteringFeatures.TIMES` and
`FilteringFeatures.VALUES`): Single example; `TIMES` is a scalar and
`VALUES` is either a scalar or a vector of length [number of features].
Sequence; `TIMES` is a vector of shape [series length], `VALUES` either
has shape [series length] (univariate) or [series length x number of
features] (multivariate). Batch of sequences; `TIMES` is a vector of
shape [batch size x series length], `VALUES` has shape [batch size x
series length] or [batch size x series length x number of features]. In
any case, `VALUES` and any exogenous features must have their shapes
prefixed by the shape of the value corresponding to the `TIMES` key.
Returns:
A dictionary containing model state updated to account for the observations
in `features`.
"""
filter_signature = signatures.signature_def[
_feature_keys.SavedModelLabels.FILTER]
features = _input_pipeline._canonicalize_numpy_data( # pylint: disable=protected-access
data=features,
require_single_batch=False)
output_tensors_by_name, feed_dict = _colate_features_to_feeds_and_fetches(
continue_from=continue_from,
signature=filter_signature,
features=features,
graph=session.graph)
output = session.run(output_tensors_by_name, feed_dict=feed_dict)
# Make it easier to chain filter -> predict by keeping track of the current
# time.
output[_feature_keys.FilteringResults.TIMES] = features[
_feature_keys.FilteringFeatures.TIMES]
return output
remove_undocumented(module_name=__name__)
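# A minimal illustrative sketch (hypothetical helper, not part of the
# TensorFlow API): chaining `cold_start_filter` into `predict_continuation`
# against an already-loaded saved model. `session` and `signatures` are
# assumed to come from `tf.compat.v1.saved_model.loader.load`; the observation
# arrays and the five-step horizon are made up.
def _example_filter_then_predict(session, signatures):
  observations = {
      _feature_keys.FilteringFeatures.TIMES: [0, 1, 2, 3],
      _feature_keys.FilteringFeatures.VALUES: [1., 2., 3., 4.],
  }
  state = cold_start_filter(
      signatures=signatures, session=session, features=observations)
  # Predictions are conditioned on the filtered observations above.
  return predict_continuation(
      continue_from=state, signatures=signatures, session=session, steps=5)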
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/saved_model_utils.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Commonly used special feature names for time series models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.saved_model import signature_constants
class State(object):
"""Key formats for accepting/returning state."""
# The model-dependent state to start from, as a single tuple.
STATE_TUPLE = "start_tuple"
# Same meaning as STATE_TUPLE, but prefixes keys representing flattened model
# state rather than mapping to a nested tuple containing model state,
# primarily for use with export_savedmodel.
STATE_PREFIX = "model_state"
class Times(object):
"""Key formats for accepting/returning times."""
# An increasing vector of integers.
TIMES = "times"
class Values(object):
"""Key formats for accepting/returning values."""
# Floating point, with one or more values corresponding to each time in TIMES.
VALUES = "values"
class TrainEvalFeatures(Times, Values):
"""Feature names used during training and evaluation."""
pass
class PredictionFeatures(Times, State):
"""Feature names used during prediction."""
pass
class FilteringFeatures(Times, Values, State):
"""Special feature names for filtering."""
pass
class PredictionResults(Times):
"""Keys returned when predicting (not comprehensive)."""
pass
class FilteringResults(Times, State):
"""Keys returned from evaluation/filtering."""
pass
class SavedModelLabels(object):
"""Names of signatures exported with export_savedmodel."""
PREDICT = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
FILTER = "filter"
COLD_START_FILTER = "cold_start_filter"
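# A minimal illustrative sketch (hypothetical constant, not part of the
# TensorFlow API): the key classes above are used to build feature
# dictionaries, e.g. a training/evaluation dictionary for a short univariate
# series (array contents are made up).
_EXAMPLE_TRAIN_EVAL_FEATURES = {
    TrainEvalFeatures.TIMES: [0, 1, 2, 3],
    TrainEvalFeatures.VALUES: [1., 2., 3., 4.],
}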
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/feature_keys.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A time series library in TensorFlow (TFTS)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.python.timeseries import saved_model_utils
# pylint: disable=wildcard-import
from tensorflow.contrib.timeseries.python.timeseries.ar_model import *
from tensorflow.contrib.timeseries.python.timeseries.estimators import *
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import *
from tensorflow.contrib.timeseries.python.timeseries.head import *
from tensorflow.contrib.timeseries.python.timeseries.input_pipeline import *
# pylint: enable=wildcard-import
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.python.timeseries import model_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ModelUtilsTest(test.TestCase):
def test_parameter_switching(self):
parameter = array_ops.constant(5)
overridden_parameter = array_ops.constant(3)
with self.cached_session():
getter = model_utils.parameter_switch({overridden_parameter: 4})
self.assertEqual(5, getter(parameter))
self.assertEqual(4, getter(overridden_parameter))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/model_utils_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for state management."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.contrib.timeseries.python.timeseries import input_pipeline
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.contrib.timeseries.python.timeseries import model
from tensorflow.contrib.timeseries.python.timeseries import state_management
from tensorflow.contrib.timeseries.python.timeseries import test_utils
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator as coordinator_lib
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import training as train
from tensorflow.python.util import nest
class StubTimeSeriesModel(model.TimeSeriesModel):
def __init__(self, correct_offset=False):
self._correct_offset = correct_offset
super(StubTimeSeriesModel, self).__init__(1)
def initialize_graph(self, input_statistics=None):
super(StubTimeSeriesModel, self).initialize_graph(
input_statistics=input_statistics)
self.prior_var = variable_scope.get_variable(
"prior", [], initializer=init_ops.constant_initializer(0.))
def generate(self, *args):
pass
def predict(self, *args):
pass
def get_start_state(self):
return (array_ops.zeros([], dtype=dtypes.int64), self.prior_var)
def get_batch_loss(self, features, mode, state):
raise NotImplementedError("This stub only supports managed state.")
def per_step_batch_loss(self, features, mode, state):
times = features[feature_keys.TrainEvalFeatures.TIMES]
values = features[feature_keys.TrainEvalFeatures.VALUES]
(priors_from_time, prior) = state
time_corrected_priors = prior + math_ops.cast(
math_utils.batch_start_time(times) - priors_from_time, dtypes.float32)
posterior = time_corrected_priors[:, None] + math_ops.cast(
times - math_utils.batch_start_time(times)[:, None], dtypes.float32)
batch_end_values = array_ops.squeeze(
array_ops.slice(values, [0, array_ops.shape(times)[1] - 1, 0],
[-1, 1, -1]),
axis=[1, 2])
    # An unusual but easy-to-reason-about loss: L1 loss on the batch-end
    # values.
loss = math_ops.reduce_sum(
math_ops.abs(
array_ops.reshape(posterior[:, -1], [-1]) - batch_end_values))
if self._correct_offset:
posterior += batch_end_values[0] - posterior[0, -1]
posteriors = (times, posterior)
return loss, posteriors, {"dummy_predictions": array_ops.zeros_like(values)}
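# Worked example (comments added for clarity): with zero state (prior=0,
# priors_from_time=0) and a chunk whose times are [0..9] and whose values are
# `offset + times`, the posterior at the final step equals the final time (9)
# while the batch-end value is offset + 9, so each chunk contributes an L1
# loss of exactly `offset`. The expectations in ChainingStateManagerTest below
# count chunks times offset accordingly.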
class ChainingStateManagerTest(test.TestCase):
def _make_test_data(self, length, cut_start, cut_end, offset, step=1):
times_full = step * numpy.arange(length, dtype=numpy.int64)
values_full = offset + step * numpy.arange(length, dtype=numpy.float32)
if cut_start is not None:
times = numpy.concatenate((times_full[:cut_start],
times_full[cut_end:]))
values = numpy.concatenate((values_full[:cut_start],
values_full[cut_end:]))
else:
times = times_full
values = values_full
return {
feature_keys.TrainEvalFeatures.TIMES: times,
feature_keys.TrainEvalFeatures.VALUES: values
}
def _test_initialization(self, warmup_iterations, batch_size):
stub_model = StubTimeSeriesModel()
data = self._make_test_data(length=20, cut_start=None, cut_end=None,
offset=0.)
if batch_size == -1:
input_fn = test_utils.AllWindowInputFn(
input_pipeline.NumpyReader(data), window_size=10)
else:
input_fn = input_pipeline.RandomWindowInputFn(
input_pipeline.NumpyReader(data),
window_size=10,
batch_size=batch_size)
chainer = state_management.ChainingStateManager(
state_saving_interval=1)
features, _ = input_fn()
stub_model.initialize_graph()
chainer.initialize_graph(model=stub_model)
model_outputs = chainer.define_loss(
model=stub_model, features=features, mode=estimator_lib.ModeKeys.TRAIN)
with self.cached_session() as session:
variables.global_variables_initializer().run()
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(session, coord=coordinator)
for _ in range(warmup_iterations):
# Warm up saved state
model_outputs.loss.eval()
outputs = model_outputs.loss.eval()
coordinator.request_stop()
coordinator.join()
return outputs
def test_zero_initializations(self):
# Even with no initialization, we are imputing values up to each chunk,
# which in this case gives exact values.
self.assertEqual(0., self._test_initialization(
warmup_iterations=0, batch_size=-1))
def test_one_initializations(self):
# Further initialization should still be correct, if redundant
self.assertEqual(0., self._test_initialization(
warmup_iterations=1, batch_size=-1))
def test_stochastic_batch(self):
# It shouldn't matter whether we're using a full deterministic batch or a
# smaller stochastic batch.
self.assertEqual(0., self._test_initialization(
warmup_iterations=1, batch_size=5))
def _test_pass_to_next(self, read_offset, step, correct_offset):
stub_model = StubTimeSeriesModel(correct_offset=correct_offset)
data = self._make_test_data(
length=100 + read_offset, cut_start=None, cut_end=None, offset=100.,
step=step)
init_input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader(
{k: v[:-read_offset] for k, v in data.items()}))
result_input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader(
{k: v[read_offset:] for k, v in data.items()}))
chainer = state_management.ChainingStateManager(
state_saving_interval=1)
stub_model.initialize_graph()
chainer.initialize_graph(model=stub_model)
init_model_outputs = chainer.define_loss(
model=stub_model, features=init_input_fn()[0],
mode=estimator_lib.ModeKeys.TRAIN)
result_model_outputs = chainer.define_loss(
model=stub_model, features=result_input_fn()[0],
mode=estimator_lib.ModeKeys.TRAIN)
with self.cached_session() as session:
variables.global_variables_initializer().run()
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(session, coord=coordinator)
init_model_outputs.loss.eval()
returned_loss = result_model_outputs.loss.eval()
coordinator.request_stop()
coordinator.join()
return returned_loss
def test_pass_to_next_step_one_no_correction(self):
self.assertEqual(100., self._test_pass_to_next(
read_offset=1, step=1, correct_offset=False))
def test_pass_to_next_step_one_with_correction(self):
self.assertEqual(0., self._test_pass_to_next(
read_offset=1, step=1, correct_offset=True))
def test_pass_to_next_step_three_with_correction(self):
self.assertEqual(0., self._test_pass_to_next(
read_offset=1, step=3, correct_offset=True))
def test_large_read_offset(self):
self.assertEqual(0., self._test_pass_to_next(
read_offset=50, step=20, correct_offset=True))
def test_past_init_offset(self):
self.assertEqual(100., self._test_pass_to_next(
read_offset=100, step=20, correct_offset=True))
def _test_missing_values(self, cut_start, cut_end, offset):
stub_model = StubTimeSeriesModel()
data = self._make_test_data(
length=100, cut_start=cut_start, cut_end=cut_end, offset=offset)
input_fn = test_utils.AllWindowInputFn(
input_pipeline.NumpyReader(data), window_size=10)
chainer = state_management.ChainingStateManager(
state_saving_interval=1)
features, _ = input_fn()
stub_model.initialize_graph()
chainer.initialize_graph(model=stub_model)
model_outputs = chainer.define_loss(
model=stub_model, features=features, mode=estimator_lib.ModeKeys.TRAIN)
with self.cached_session() as session:
variables.global_variables_initializer().run()
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(session, coord=coordinator)
for _ in range(10):
model_outputs.loss.eval()
returned_loss = model_outputs.loss.eval()
coordinator.request_stop()
coordinator.join()
return returned_loss
def test_missing_values_ten(self):
# Each posterior should be off by 10 from the offset in the values. 90
# values with a chunk size of 10 means 90 - 10 + 1 possible chunks.
self.assertEqual((90 - 10 + 1) * 10, self._test_missing_values(
cut_start=20, cut_end=30, offset=10.))
def test_missing_values_five(self):
self.assertEqual((95 - 10 + 1) * 10, self._test_missing_values(
cut_start=15, cut_end=20, offset=10.))
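# A minimal, self-contained sketch (added for illustration; not one of the
# original tests) of the ChainingStateManager workflow exercised above:
# initialize the model and the state manager, then let define_loss wire the
# saved state into the per-window loss. The helper name is hypothetical.
def _example_chaining_state_manager_usage():
  """Returns the training loss tensor for a StubTimeSeriesModel."""
  data = {
      feature_keys.TrainEvalFeatures.TIMES:
          numpy.arange(20, dtype=numpy.int64),
      feature_keys.TrainEvalFeatures.VALUES:
          numpy.arange(20, dtype=numpy.float32),
  }
  stub_model = StubTimeSeriesModel()
  input_fn = test_utils.AllWindowInputFn(
      input_pipeline.NumpyReader(data), window_size=10)
  features, _ = input_fn()
  chainer = state_management.ChainingStateManager(state_saving_interval=1)
  stub_model.initialize_graph()
  chainer.initialize_graph(model=stub_model)
  model_outputs = chainer.define_loss(
      model=stub_model, features=features, mode=estimator_lib.ModeKeys.TRAIN)
  return model_outputs.loss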
class _StateOverrideModel(model.TimeSeriesModel):
def __init__(self):
super(_StateOverrideModel, self).__init__(num_features=1)
def generate(self, *args):
pass
def predict(self, *args):
pass
def get_start_state(self):
return (constant_op.constant([20, 30, 40], dtype=dtypes.int64),
(constant_op.constant(-10, dtype=dtypes.int64),
constant_op.constant([30., 50.], dtype=dtypes.float64)))
def get_batch_loss(self, features, mode, state):
per_observation_loss, state, outputs = self.per_step_batch_loss(
features, mode, state)
state = nest.map_structure(lambda element: element[:, -1], state)
outputs["observed"] = features[feature_keys.TrainEvalFeatures.VALUES]
return model.ModelOutputs(
loss=per_observation_loss,
end_state=state,
predictions=outputs,
prediction_times=features[feature_keys.TrainEvalFeatures.TIMES])
def per_step_batch_loss(self, features, mode, state):
return (
constant_op.constant(1.),
# Assumes only one step: this is the per-step loss.
nest.map_structure(
lambda element: ops.convert_to_tensor(element)[:, None], state),
{
"dummy_predictions":
array_ops.zeros_like(
features[feature_keys.TrainEvalFeatures.VALUES])
})
class _StateOverrideTest(test.TestCase):
def test_state_override(self):
test_start_state = (numpy.array([[2, 3, 4]]), (numpy.array([2]),
numpy.array([[3., 5.]])))
data = {
feature_keys.FilteringFeatures.TIMES: numpy.arange(5),
feature_keys.FilteringFeatures.VALUES: numpy.zeros(shape=[5, 3])
}
features, _ = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader(data))()
features[feature_keys.FilteringFeatures.STATE_TUPLE] = test_start_state
stub_model = _StateOverrideModel()
chainer = state_management.ChainingStateManager()
stub_model.initialize_graph()
chainer.initialize_graph(model=stub_model)
model_outputs = chainer.define_loss(
model=stub_model, features=features, mode=estimator_lib.ModeKeys.EVAL)
with train.MonitoredSession() as session:
end_state = session.run(model_outputs.end_state)
nest.assert_same_structure(test_start_state, end_state)
for expected, received in zip(
nest.flatten(test_start_state), nest.flatten(end_state)):
self.assertAllEqual(expected, received)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/state_management_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the time series input pipeline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import tempfile
import numpy
from tensorflow.contrib.timeseries.python.timeseries import input_pipeline
from tensorflow.contrib.timeseries.python.timeseries import test_utils
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import TrainEvalFeatures
from tensorflow.core.example import example_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator as coordinator_lib
from tensorflow.python.training import queue_runner_impl
def _make_csv_temp_file(to_write, test_tmpdir):
_, data_file = tempfile.mkstemp(dir=test_tmpdir)
with open(data_file, "w") as f:
csvwriter = csv.writer(f)
for record in to_write:
csvwriter.writerow(record)
return data_file
def _make_csv_time_series(num_features, num_samples, test_tmpdir):
filename = _make_csv_temp_file(
[[i] + [float(i) * 2. + feature_number
for feature_number in range(num_features)]
for i in range(num_samples)],
test_tmpdir=test_tmpdir)
return filename
def _make_tfexample_series(num_features, num_samples, test_tmpdir):
_, data_file = tempfile.mkstemp(dir=test_tmpdir)
with tf_record.TFRecordWriter(data_file) as writer:
for i in range(num_samples):
example = example_pb2.Example()
times = example.features.feature[TrainEvalFeatures.TIMES]
times.int64_list.value.append(i)
values = example.features.feature[TrainEvalFeatures.VALUES]
values.float_list.value.extend(
[float(i) * 2. + feature_number
for feature_number in range(num_features)])
writer.write(example.SerializeToString())
return data_file
def _make_numpy_time_series(num_features, num_samples):
times = numpy.arange(num_samples)
values = times[:, None] * 2. + numpy.arange(num_features)[None, :]
return {TrainEvalFeatures.TIMES: times,
TrainEvalFeatures.VALUES: values}
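# For example, _make_numpy_time_series(num_features=2, num_samples=3) returns
# {TIMES: [0, 1, 2], VALUES: [[0., 1.], [2., 3.], [4., 5.]]}: each feature is
# twice the time plus the feature index.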
class RandomWindowInputFnTests(test.TestCase):
def _random_window_input_fn_test_template(
self, time_series_reader, window_size, batch_size, num_features,
discard_out_of_order=False):
input_fn = input_pipeline.RandomWindowInputFn(
time_series_reader=time_series_reader,
window_size=window_size, batch_size=batch_size)
result, _ = input_fn()
init_op = variables.local_variables_initializer()
with self.cached_session() as session:
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(session, coord=coordinator)
session.run(init_op)
features = session.run(result)
coordinator.request_stop()
coordinator.join()
self.assertAllEqual([batch_size, window_size],
features[TrainEvalFeatures.TIMES].shape)
for window_position in range(window_size - 1):
for batch_position in range(batch_size):
# Checks that all times are contiguous
self.assertEqual(
features[TrainEvalFeatures.TIMES][batch_position,
window_position + 1],
features[TrainEvalFeatures.TIMES][batch_position,
window_position] + 1)
self.assertAllEqual([batch_size, window_size, num_features],
features[TrainEvalFeatures.VALUES].shape)
self.assertEqual("int64", features[TrainEvalFeatures.TIMES].dtype)
for feature_number in range(num_features):
self.assertAllEqual(
features[TrainEvalFeatures.TIMES] * 2. + feature_number,
features[TrainEvalFeatures.VALUES][:, :, feature_number])
return features
def _test_out_of_order(self, time_series_reader, discard_out_of_order):
self._random_window_input_fn_test_template(
time_series_reader=time_series_reader,
num_features=1, window_size=2, batch_size=5,
discard_out_of_order=discard_out_of_order)
def test_csv_sort_out_of_order(self):
filename = _make_csv_time_series(num_features=1, num_samples=50,
test_tmpdir=self.get_temp_dir())
time_series_reader = input_pipeline.CSVReader([filename])
self._test_out_of_order(time_series_reader, discard_out_of_order=False)
def test_tfexample_sort_out_of_order(self):
filename = _make_tfexample_series(
num_features=1, num_samples=50,
test_tmpdir=self.get_temp_dir())
time_series_reader = input_pipeline.TFExampleReader(
[filename],
features={
TrainEvalFeatures.TIMES: parsing_ops.FixedLenFeature(
shape=[], dtype=dtypes.int64),
TrainEvalFeatures.VALUES: parsing_ops.FixedLenFeature(
shape=[1], dtype=dtypes.float32)})
self._test_out_of_order(time_series_reader, discard_out_of_order=False)
def test_numpy_sort_out_of_order(self):
data = _make_numpy_time_series(num_features=1, num_samples=50)
time_series_reader = input_pipeline.NumpyReader(data)
self._test_out_of_order(time_series_reader, discard_out_of_order=False)
def test_csv_discard_out_of_order(self):
filename = _make_csv_time_series(num_features=1, num_samples=50,
test_tmpdir=self.get_temp_dir())
time_series_reader = input_pipeline.CSVReader([filename])
self._test_out_of_order(time_series_reader, discard_out_of_order=True)
def test_csv_discard_out_of_order_window_equal(self):
filename = _make_csv_time_series(num_features=1, num_samples=3,
test_tmpdir=self.get_temp_dir())
time_series_reader = input_pipeline.CSVReader([filename])
self._random_window_input_fn_test_template(
time_series_reader=time_series_reader,
num_features=1, window_size=3, batch_size=5,
discard_out_of_order=True)
def test_csv_discard_out_of_order_window_too_large(self):
filename = _make_csv_time_series(num_features=1, num_samples=2,
test_tmpdir=self.get_temp_dir())
time_series_reader = input_pipeline.CSVReader([filename])
with self.assertRaises(errors.OutOfRangeError):
self._random_window_input_fn_test_template(
time_series_reader=time_series_reader,
num_features=1, window_size=3, batch_size=5,
discard_out_of_order=True)
def test_csv_no_data(self):
filename = _make_csv_time_series(num_features=1, num_samples=0,
test_tmpdir=self.get_temp_dir())
time_series_reader = input_pipeline.CSVReader([filename])
with self.assertRaises(errors.OutOfRangeError):
self._test_out_of_order(time_series_reader, discard_out_of_order=True)
def test_numpy_discard_out_of_order(self):
data = _make_numpy_time_series(num_features=1, num_samples=50)
time_series_reader = input_pipeline.NumpyReader(data)
self._test_out_of_order(time_series_reader, discard_out_of_order=True)
def test_numpy_discard_out_of_order_window_equal(self):
data = _make_numpy_time_series(num_features=1, num_samples=3)
time_series_reader = input_pipeline.NumpyReader(data)
self._random_window_input_fn_test_template(
time_series_reader=time_series_reader,
num_features=1, window_size=3, batch_size=5,
discard_out_of_order=True)
def test_numpy_discard_out_of_order_window_too_large(self):
data = _make_numpy_time_series(num_features=1, num_samples=2)
time_series_reader = input_pipeline.NumpyReader(data)
with self.assertRaisesRegexp(ValueError, "only 2 records were available"):
self._random_window_input_fn_test_template(
time_series_reader=time_series_reader,
num_features=1, window_size=3, batch_size=5,
discard_out_of_order=True)
def _test_multivariate(self, time_series_reader, num_features):
self._random_window_input_fn_test_template(
time_series_reader=time_series_reader,
num_features=num_features,
window_size=2,
batch_size=5)
def test_csv_multivariate(self):
filename = _make_csv_time_series(num_features=2, num_samples=50,
test_tmpdir=self.get_temp_dir())
time_series_reader = input_pipeline.CSVReader(
[filename],
column_names=(TrainEvalFeatures.TIMES, TrainEvalFeatures.VALUES,
TrainEvalFeatures.VALUES))
self._test_multivariate(time_series_reader=time_series_reader,
num_features=2)
def test_tfexample_multivariate(self):
filename = _make_tfexample_series(
num_features=2, num_samples=50,
test_tmpdir=self.get_temp_dir())
time_series_reader = input_pipeline.TFExampleReader(
[filename],
features={
TrainEvalFeatures.TIMES: parsing_ops.FixedLenFeature(
shape=[], dtype=dtypes.int64),
TrainEvalFeatures.VALUES: parsing_ops.FixedLenFeature(
shape=[2], dtype=dtypes.float32)})
self._test_multivariate(time_series_reader=time_series_reader,
num_features=2)
def test_numpy_multivariate(self):
data = _make_numpy_time_series(num_features=3, num_samples=50)
time_series_reader = input_pipeline.NumpyReader(data)
self._test_multivariate(time_series_reader, num_features=3)
def test_numpy_withbatch(self):
data_nobatch = _make_numpy_time_series(num_features=4, num_samples=100)
data = {feature_name: feature_value[None]
for feature_name, feature_value in data_nobatch.items()}
time_series_reader = input_pipeline.NumpyReader(data)
self._random_window_input_fn_test_template(
time_series_reader=time_series_reader,
num_features=4,
window_size=3,
batch_size=5)
def test_numpy_nobatch_nofeatures(self):
data = _make_numpy_time_series(num_features=1, num_samples=100)
data[TrainEvalFeatures.VALUES] = data[TrainEvalFeatures.VALUES][:, 0]
time_series_reader = input_pipeline.NumpyReader(data)
self._random_window_input_fn_test_template(
time_series_reader=time_series_reader,
num_features=1,
window_size=16,
batch_size=16)
class WholeDatasetInputFnTests(test.TestCase):
def _whole_dataset_input_fn_test_template(
self, time_series_reader, num_features, num_samples):
result, _ = input_pipeline.WholeDatasetInputFn(time_series_reader)()
with self.cached_session() as session:
session.run(variables.local_variables_initializer())
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(session, coord=coordinator)
features = session.run(result)
coordinator.request_stop()
coordinator.join()
self.assertEqual("int64", features[TrainEvalFeatures.TIMES].dtype)
self.assertAllEqual(numpy.arange(num_samples, dtype=numpy.int64)[None, :],
features[TrainEvalFeatures.TIMES])
for feature_number in range(num_features):
self.assertAllEqual(
features[TrainEvalFeatures.TIMES] * 2. + feature_number,
features[TrainEvalFeatures.VALUES][:, :, feature_number])
def test_csv(self):
filename = _make_csv_time_series(num_features=3, num_samples=50,
test_tmpdir=self.get_temp_dir())
time_series_reader = input_pipeline.CSVReader(
[filename],
column_names=(TrainEvalFeatures.TIMES, TrainEvalFeatures.VALUES,
TrainEvalFeatures.VALUES, TrainEvalFeatures.VALUES))
self._whole_dataset_input_fn_test_template(
time_series_reader=time_series_reader, num_features=3, num_samples=50)
def test_csv_no_data(self):
filename = _make_csv_time_series(num_features=1, num_samples=0,
test_tmpdir=self.get_temp_dir())
time_series_reader = input_pipeline.CSVReader([filename])
with self.assertRaises(errors.OutOfRangeError):
self._whole_dataset_input_fn_test_template(
time_series_reader=time_series_reader, num_features=1, num_samples=50)
def test_tfexample(self):
filename = _make_tfexample_series(
num_features=4, num_samples=100,
test_tmpdir=self.get_temp_dir())
time_series_reader = input_pipeline.TFExampleReader(
[filename],
features={
TrainEvalFeatures.TIMES: parsing_ops.FixedLenFeature(
shape=[], dtype=dtypes.int64),
TrainEvalFeatures.VALUES: parsing_ops.FixedLenFeature(
shape=[4], dtype=dtypes.float32)})
self._whole_dataset_input_fn_test_template(
time_series_reader=time_series_reader, num_features=4, num_samples=100)
def test_numpy(self):
data = _make_numpy_time_series(num_features=4, num_samples=100)
time_series_reader = input_pipeline.NumpyReader(data)
self._whole_dataset_input_fn_test_template(
time_series_reader=time_series_reader, num_features=4, num_samples=100)
def test_numpy_withbatch(self):
data_nobatch = _make_numpy_time_series(num_features=4, num_samples=100)
data = {feature_name: feature_value[None]
for feature_name, feature_value in data_nobatch.items()}
time_series_reader = input_pipeline.NumpyReader(data)
self._whole_dataset_input_fn_test_template(
time_series_reader=time_series_reader, num_features=4, num_samples=100)
def test_numpy_nobatch_nofeatures(self):
data = _make_numpy_time_series(num_features=1, num_samples=100)
data[TrainEvalFeatures.VALUES] = data[TrainEvalFeatures.VALUES][:, 0]
time_series_reader = input_pipeline.NumpyReader(data)
self._whole_dataset_input_fn_test_template(
time_series_reader=time_series_reader, num_features=1, num_samples=100)
class AllWindowInputFnTests(test.TestCase):
def _all_window_input_fn_test_template(
self, time_series_reader, num_samples, window_size,
original_numpy_features=None):
input_fn = test_utils.AllWindowInputFn(
time_series_reader=time_series_reader,
window_size=window_size)
features, _ = input_fn()
init_op = variables.local_variables_initializer()
with self.cached_session() as session:
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(session, coord=coordinator)
session.run(init_op)
chunked_times, chunked_values = session.run(
[features[TrainEvalFeatures.TIMES],
features[TrainEvalFeatures.VALUES]])
coordinator.request_stop()
coordinator.join()
self.assertAllEqual([num_samples - window_size + 1, window_size],
chunked_times.shape)
if original_numpy_features is not None:
original_times = original_numpy_features[TrainEvalFeatures.TIMES]
original_values = original_numpy_features[TrainEvalFeatures.VALUES]
self.assertAllEqual(original_times, numpy.unique(chunked_times))
self.assertAllEqual(original_values[chunked_times],
chunked_values)
def test_csv(self):
filename = _make_csv_time_series(num_features=1, num_samples=50,
test_tmpdir=self.get_temp_dir())
time_series_reader = input_pipeline.CSVReader(
[filename],
column_names=(TrainEvalFeatures.TIMES, TrainEvalFeatures.VALUES))
self._all_window_input_fn_test_template(
time_series_reader=time_series_reader, num_samples=50, window_size=10)
def test_numpy(self):
data = _make_numpy_time_series(num_features=2, num_samples=31)
time_series_reader = input_pipeline.NumpyReader(data)
self._all_window_input_fn_test_template(
time_series_reader=time_series_reader, original_numpy_features=data,
num_samples=31, window_size=5)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/input_pipeline_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for time series models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import six
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import PredictionFeatures
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import TrainEvalFeatures
from tensorflow.python.feature_column import feature_column_lib as feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
ModelOutputs = collections.namedtuple( # pylint: disable=invalid-name
typename="ModelOutputs",
field_names=[
"loss", # The scalar value to be minimized during training.
"end_state", # A nested tuple specifying the model's state after
                      # running on the specified data.
"predictions", # A dictionary of predictions, each with shape prefixed
# by the shape of `prediction_times`.
"prediction_times" # A [batch size x window size] integer Tensor
# indicating times for which values in `predictions`
# were computed.
])
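# A minimal sketch (added for illustration; not part of the original module) of
# a ModelOutputs value following the shape conventions described in the field
# comments above. The sizes and the "mean" prediction key are placeholders.
def _example_model_outputs(batch_size=2, window_size=3, num_features=1):
  """Builds a ModelOutputs with a scalar loss and batch-major predictions."""
  times = array_ops.zeros([batch_size, window_size], dtype=dtypes.int64)
  return ModelOutputs(
      loss=array_ops.zeros([]),  # scalar value minimized during training
      end_state=(array_ops.zeros([batch_size]),),  # nested tuple of state
      predictions={
          "mean": array_ops.zeros([batch_size, window_size, num_features])},
      prediction_times=times)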
@six.add_metaclass(abc.ABCMeta)
class TimeSeriesModel(object):
"""Base class for creating generative time series models."""
def __init__(self,
num_features,
exogenous_feature_columns=None,
dtype=dtypes.float32):
"""Constructor for generative models.
Args:
num_features: Number of features for the time series
exogenous_feature_columns: A list of `tf.feature_column`s (for example
`tf.feature_column.embedding_column`) corresponding to exogenous
features which provide extra information to the model but are not
part of the series to be predicted. Passed to
`tf.compat.v1.feature_column.input_layer`.
dtype: The floating point datatype to use.
"""
if exogenous_feature_columns:
self._exogenous_feature_columns = exogenous_feature_columns
else:
self._exogenous_feature_columns = []
self.num_features = num_features
self.dtype = dtype
self._input_statistics = None
self._graph_initialized = False
self._stats_means = None
self._stats_sigmas = None
@property
def exogenous_feature_columns(self):
"""`tf.feature_colum`s for features which are not predicted."""
return self._exogenous_feature_columns
# TODO(allenl): Move more of the generic machinery for generating and
# predicting into TimeSeriesModel, and possibly share it between generate()
# and predict()
def generate(self, number_of_series, series_length,
model_parameters=None, seed=None):
"""Sample synthetic data from model parameters, with optional substitutions.
Returns `number_of_series` possible sequences of future values, sampled from
the generative model with each conditioned on the previous. Samples are
based on trained parameters, except for those parameters explicitly
overridden in `model_parameters`.
For distributions over future observations, see predict().
Args:
number_of_series: Number of time series to create.
series_length: Length of each time series.
model_parameters: A dictionary mapping model parameters to values, which
replace trained parameters when generating data.
seed: If specified, return deterministic time series according to this
value.
Returns:
A dictionary with keys TrainEvalFeatures.TIMES (mapping to an array with
shape [number_of_series, series_length]) and TrainEvalFeatures.VALUES
(mapping to an array with shape [number_of_series, series_length,
num_features]).
"""
raise NotImplementedError("This model does not support generation.")
def initialize_graph(self, input_statistics=None):
"""Define ops for the model, not depending on any previously defined ops.
Args:
input_statistics: A math_utils.InputStatistics object containing input
statistics. If None, data-independent defaults are used, which may
        result in longer training times or unstable training.
"""
self._graph_initialized = True
self._input_statistics = input_statistics
if self._input_statistics:
self._stats_means, variances = (
self._input_statistics.overall_feature_moments)
self._stats_sigmas = math_ops.sqrt(variances)
def _scale_data(self, data):
"""Scale data according to stats (input scale -> model scale)."""
if self._input_statistics is not None:
return (data - self._stats_means) / self._stats_sigmas
else:
return data
def _scale_variance(self, variance):
"""Scale variances according to stats (input scale -> model scale)."""
if self._input_statistics is not None:
return variance / self._input_statistics.overall_feature_moments.variance
else:
return variance
def _scale_back_data(self, data):
"""Scale back data according to stats (model scale -> input scale)."""
if self._input_statistics is not None:
return (data * self._stats_sigmas) + self._stats_means
else:
return data
def _scale_back_variance(self, variance):
"""Scale back variances according to stats (model scale -> input scale)."""
if self._input_statistics is not None:
return variance * self._input_statistics.overall_feature_moments.variance
else:
return variance
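  # Worked example (comments added for clarity): with a per-feature mean of 10.
  # and variance of 4. (sigma 2.), _scale_data(14.) == 2. and
  # _scale_back_data(2.) == 14., while variances scale by the variance factor:
  # _scale_variance(8.) == 2. and _scale_back_variance(2.) == 8.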
def _check_graph_initialized(self):
if not self._graph_initialized:
raise ValueError(
"TimeSeriesModels require initialize_graph() to be called before "
"use. This defines variables and ops in the default graph, and "
"allows Tensor-valued input statistics to be specified.")
def define_loss(self, features, mode):
"""Default loss definition with state replicated across a batch.
Time series passed to this model have a batch dimension, and each series in
a batch can be operated on in parallel. This loss definition assumes that
each element of the batch represents an independent sample conditioned on
the same initial state (i.e. it is simply replicated across the batch). A
batch size of one provides sequential operations on a single time series.
More complex processing may operate instead on get_start_state() and
get_batch_loss() directly.
Args:
features: A dictionary (such as is produced by a chunker) with at minimum
the following key/value pairs (others corresponding to the
`exogenous_feature_columns` argument to `__init__` may be included
representing exogenous regressors):
TrainEvalFeatures.TIMES: A [batch size x window size] integer Tensor
with times for each observation. If there is no artificial chunking,
the window size is simply the length of the time series.
TrainEvalFeatures.VALUES: A [batch size x window size x num features]
Tensor with values for each observation.
mode: The tf.estimator.ModeKeys mode to use (TRAIN, EVAL). For INFER,
see predict().
Returns:
A ModelOutputs object.
"""
self._check_graph_initialized()
start_state = math_utils.replicate_state(
start_state=self.get_start_state(),
batch_size=array_ops.shape(features[TrainEvalFeatures.TIMES])[0])
return self.get_batch_loss(features=features, mode=mode, state=start_state)
# TODO(vitalyk,allenl): Better documentation surrounding options for chunking,
# references to papers, etc.
@abc.abstractmethod
def get_start_state(self):
"""Returns a tuple of state for the start of the time series.
For example, a mean and covariance. State should not have a batch
dimension, and will often be TensorFlow Variables to be learned along with
the rest of the model parameters.
"""
pass
@abc.abstractmethod
def get_batch_loss(self, features, mode, state):
"""Return predictions, losses, and end state for a time series.
Args:
features: A dictionary with times, values, and (optionally) exogenous
regressors. See `define_loss`.
mode: The tf.estimator.ModeKeys mode to use (TRAIN, EVAL, INFER).
state: Model-dependent state, each with size [batch size x ...]. The
number and type will typically be fixed by the model (for example a
mean and variance).
Returns:
A ModelOutputs object.
"""
pass
@abc.abstractmethod
def predict(self, features):
"""Returns predictions of future observations given an initial state.
Computes distributions for future observations. For sampled draws from the
model where each is conditioned on the previous, see generate().
Args:
features: A dictionary with at minimum the following key/value pairs
(others corresponding to the `exogenous_feature_columns` argument to
`__init__` may be included representing exogenous regressors):
PredictionFeatures.TIMES: A [batch size x window size] Tensor with
times to make predictions for. Times must be increasing within each
part of the batch, and must be greater than the last time `state` was
updated.
PredictionFeatures.STATE_TUPLE: Model-dependent state, each with size
[batch size x ...]. The number and type will typically be fixed by the
model (for example a mean and variance). Typically these will be the
end state returned by get_batch_loss, predicting beyond that data.
Returns:
A dictionary with model-dependent predictions corresponding to the
requested times. Keys indicate the type of prediction, and values have
shape [batch size x window size x ...]. For example state space models
return a "predicted_mean" and "predicted_covariance".
"""
pass
def _get_exogenous_embedding_shape(self):
"""Computes the shape of the vector returned by _process_exogenous_features.
Returns:
The shape as a list. Does not include a batch dimension.
"""
if not self._exogenous_feature_columns:
return (0,)
with ops.Graph().as_default():
parsed_features = (
feature_column.make_parse_example_spec(
self._exogenous_feature_columns))
placeholder_features = parsing_ops.parse_example(
serialized=array_ops.placeholder(shape=[None], dtype=dtypes.string),
features=parsed_features)
embedded = feature_column.input_layer(
features=placeholder_features,
feature_columns=self._exogenous_feature_columns)
return embedded.get_shape().as_list()[1:]
def _process_exogenous_features(self, times, features):
"""Create a single vector from exogenous features.
Args:
times: A [batch size, window size] vector of times for this batch,
primarily used to check the shape information of exogenous features.
features: A dictionary of exogenous features corresponding to the columns
in self._exogenous_feature_columns. Each value should have a shape
prefixed by [batch size, window size].
Returns:
A Tensor with shape [batch size, window size, exogenous dimension], where
the size of the exogenous dimension depends on the exogenous feature
columns passed to the model's constructor.
Raises:
ValueError: If an exogenous feature has an unknown rank.
"""
if self._exogenous_feature_columns:
exogenous_features_single_batch_dimension = {}
for name, tensor in features.items():
if tensor.get_shape().ndims is None:
          # feature_column.input_layer does not support completely unknown
          # feature shapes, so we save on a bit of logic and provide a better
          # error message by checking that here.
raise ValueError(
("Features with unknown rank are not supported. Got shape {} for "
"feature {}.").format(tensor.get_shape(), name))
tensor_shape_dynamic = array_ops.shape(tensor)
tensor = array_ops.reshape(
tensor,
array_ops.concat([[tensor_shape_dynamic[0]
* tensor_shape_dynamic[1]],
tensor_shape_dynamic[2:]], axis=0))
        # Avoid shape warnings when embedding "scalar" exogenous features (those
        # with only batch and window dimensions); feature_column.input_layer
        # expects input ranks to match the embedded rank.
if tensor.get_shape().ndims == 1 and tensor.dtype != dtypes.string:
exogenous_features_single_batch_dimension[name] = tensor[:, None]
else:
exogenous_features_single_batch_dimension[name] = tensor
embedded_exogenous_features_single_batch_dimension = (
feature_column.input_layer(
features=exogenous_features_single_batch_dimension,
feature_columns=self._exogenous_feature_columns,
trainable=True))
exogenous_regressors = array_ops.reshape(
embedded_exogenous_features_single_batch_dimension,
array_ops.concat(
[
array_ops.shape(times), array_ops.shape(
embedded_exogenous_features_single_batch_dimension)[1:]
],
axis=0))
exogenous_regressors.set_shape(times.get_shape().concatenate(
embedded_exogenous_features_single_batch_dimension.get_shape()[1:]))
exogenous_regressors = math_ops.cast(
exogenous_regressors, dtype=self.dtype)
else:
# Not having any exogenous features is a special case so that models can
# avoid superfluous updates, which may not be free of side effects due to
# bias terms in transformations.
exogenous_regressors = None
return exogenous_regressors
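  # Shape example (comments added for clarity): with `times` of shape
  # [batch, window], each exogenous feature of shape [batch, window, ...] is
  # flattened to a single [batch * window, ...] batch dimension, embedded with
  # feature_column.input_layer, and reshaped back, yielding a Tensor of shape
  # [batch, window, exogenous embedding dimension].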
# TODO(allenl): Add a superclass of SequentialTimeSeriesModel which fuses
# filtering/prediction/exogenous into one step, and move looping constructs to
# that class.
class SequentialTimeSeriesModel(TimeSeriesModel):
"""Base class for recurrent generative models.
  Models implementing this interface have four main functions, corresponding to
  abstract methods:
_filtering_step: Updates state based on observations and computes a loss.
_prediction_step: Predicts a batch of observations and new model state.
_imputation_step: Updates model state across a gap.
_exogenous_input_step: Updates state to account for exogenous regressors.
Models may also specify a _window_initializer to prepare for a window of data.
See StateSpaceModel for a concrete example of a model implementing this
interface.
"""
def __init__(self,
train_output_names,
predict_output_names,
num_features,
normalize_features=False,
dtype=dtypes.float32,
exogenous_feature_columns=None,
exogenous_update_condition=None,
static_unrolling_window_size_threshold=None):
"""Initialize a SequentialTimeSeriesModel.
Args:
train_output_names: A list of products/predictions returned from
_filtering_step.
predict_output_names: A list of products/predictions returned from
_prediction_step.
num_features: Number of features for the time series
normalize_features: Boolean. If True, `values` are passed normalized to
the model (via self._scale_data). Scaling is done for the whole window
as a batch, which is slightly more efficient than scaling inside the
window loop. The model must then define _scale_back_predictions, which
may use _scale_back_data or _scale_back_variance to return predictions
to the input scale.
dtype: The floating point datatype to use.
exogenous_feature_columns: A list of `tf.feature_column`s objects. See
`TimeSeriesModel`.
exogenous_update_condition: A function taking two Tensor arguments `times`
(shape [batch size]) and `features` (a dictionary mapping exogenous
feature keys to Tensors with shapes [batch size, ...]) and returning a
boolean Tensor with shape [batch size] indicating whether state should
be updated using exogenous features for each part of the batch. Where
it is False, no exogenous update is performed. If None (default),
exogenous updates are always performed. Useful for avoiding "leaky"
frequent exogenous updates when sparse updates are desired. Called
only during graph construction.
static_unrolling_window_size_threshold: Controls whether a `tf.while_loop`
is used when looping over a window of data. If
`static_unrolling_window_size_threshold` is None, a `tf.while_loop` is
always used. Otherwise it must be an integer, and the graph is
replicated for each step taken whenever the window size is less than
or equal to this value (if the window size is available in the static
shape information of the TrainEvalFeatures.TIMES feature). Static
unrolling generally decreases the per-step time for small window/batch
sizes, but increases graph construction time.
"""
super(SequentialTimeSeriesModel, self).__init__(
num_features=num_features, dtype=dtype,
exogenous_feature_columns=exogenous_feature_columns)
self._exogenous_update_condition = exogenous_update_condition
self._train_output_names = train_output_names
self._predict_output_names = predict_output_names
self._normalize_features = normalize_features
self._static_unrolling_window_size_threshold = (
static_unrolling_window_size_threshold)
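    # Example (a sketch, not part of the original code): an
    # `exogenous_update_condition` restricting state updates to rows of the
    # batch where a hypothetical "special_day" feature of shape [batch size, 1]
    # is nonzero could be written as
    #   lambda times, features: math_ops.greater(
    #       features["special_day"][:, 0], 0.)
    # which returns a boolean Tensor with shape [batch size] as documented.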
def _scale_back_predictions(self, predictions):
"""Return a window of predictions to input scale.
Args:
predictions: A dictionary mapping from prediction names to Tensors.
Returns:
A dictionary with values corrected for input normalization (e.g. with
      self._scale_back_data and possibly self._scale_back_variance). May be a
mutated version of the argument.
"""
raise NotImplementedError(
"SequentialTimeSeriesModel normalized input data"
" (normalize_features=True), but no method was provided to transform "
"the predictions back to the input scale.")
@abc.abstractmethod
def _filtering_step(self, current_times, current_values, state, predictions):
"""Compute a single-step loss for a batch of data.
Args:
current_times: A [batch size] Tensor of times for each observation.
current_values: A [batch size] Tensor of values for each observation.
state: Model state, updated to current_times.
predictions: The outputs of _prediction_step
Returns:
A tuple of (updated state, outputs):
updated state: Model state taking current_values into account.
outputs: A dictionary of Tensors with keys corresponding to
self._train_output_names, plus a special "loss" key. The value
corresponding to "loss" is minimized during training. Other outputs
may include one-step-ahead predictions, for example a predicted
location and scale.
"""
pass
@abc.abstractmethod
def _prediction_step(self, current_times, state):
"""Compute a batch of single-step predictions.
Args:
current_times: A [batch size] Tensor of times for each observation.
state: Model state, imputed to one step before current_times.
Returns:
A tuple of (updated state, outputs):
updated state: Model state updated to current_times.
outputs: A dictionary of Tensors with keys corresponding to
self._predict_output_names.
"""
pass
@abc.abstractmethod
def _imputation_step(self, current_times, state):
"""Update model state across missing values.
Called to prepare model state for _filtering_step and _prediction_step.
Args:
current_times: A [batch size] Tensor; state will be imputed up to, but not
including, these timesteps.
state: The pre-imputation model state, Tensors with shape [batch size x
...].
Returns:
Updated/imputed model state, corresponding to `state`.
"""
pass
@abc.abstractmethod
def _exogenous_input_step(
self, current_times, current_exogenous_regressors, state):
"""Update state to account for exogenous regressors.
Args:
current_times: A [batch size] Tensor of times for the exogenous values
being input.
current_exogenous_regressors: A [batch size x exogenous input dimension]
Tensor of exogenous values for each part of the batch.
state: Model state, a possibly nested list of Tensors, each with shape
[batch size x ...].
Returns:
Updated model state, structure and shapes matching the `state` argument.
"""
pass
# TODO(allenl): Move regularization to a separate object (optional and
# configurable)
def _loss_additions(self, times, values, mode):
"""Additions to per-observation normalized loss, e.g. regularization.
Args:
times: A [batch size x window size] Tensor with times for each
observation.
values: A [batch size x window size x num features] Tensor with values for
each observation.
mode: The tf.estimator.ModeKeys mode to use (TRAIN, EVAL, INFER).
Returns:
A scalar value to add to the per-observation normalized loss.
"""
del times, values, mode
return 0.
def _window_initializer(self, times, state):
"""Prepare for training or prediction on a window of data.
Args:
times: A [batch size x window size] Tensor with times for each
observation.
state: Model-dependent state, each with size [batch size x ...]. The
number and type will typically be fixed by the model (for example a
mean and variance).
Returns:
Nothing
"""
pass
def get_batch_loss(self, features, mode, state):
"""Calls self._filtering_step. See TimeSeriesModel.get_batch_loss."""
per_observation_loss, state, outputs = self.per_step_batch_loss(
features, mode, state)
# per_step_batch_loss returns [batch size, window size, ...] state, whereas
# get_batch_loss is expected to return [batch size, ...] state for the last
# element of a window
state = nest.pack_sequence_as(
state,
[state_element[:, -1] for state_element in nest.flatten(state)])
outputs["observed"] = features[TrainEvalFeatures.VALUES]
return ModelOutputs(
loss=per_observation_loss,
end_state=state,
predictions=outputs,
prediction_times=features[TrainEvalFeatures.TIMES])
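  # Shape example (comments added for clarity): a state element returned by
  # per_step_batch_loss with shape [batch size, window size, 2] is sliced above
  # to [batch size, 2], i.e. the filtered state after the last step of each
  # window.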
def _apply_exogenous_update(
self, current_times, step_number, state, raw_features,
embedded_exogenous_regressors):
"""Performs a conditional state update based on exogenous features."""
if embedded_exogenous_regressors is None:
return state
else:
current_exogenous_regressors = embedded_exogenous_regressors[
:, step_number, :]
exogenous_updated_state = self._exogenous_input_step(
current_times=current_times,
current_exogenous_regressors=current_exogenous_regressors,
state=state)
if self._exogenous_update_condition is not None:
current_raw_exogenous_features = {
key: value[:, step_number] for key, value in raw_features.items()
if key not in [PredictionFeatures.STATE_TUPLE,
TrainEvalFeatures.TIMES,
TrainEvalFeatures.VALUES]}
conditionally_updated_state_flat = []
for updated_state_element, original_state_element in zip(
nest.flatten(exogenous_updated_state),
nest.flatten(state)):
conditionally_updated_state_flat.append(
array_ops.where(
self._exogenous_update_condition(
times=current_times,
features=current_raw_exogenous_features),
updated_state_element,
original_state_element))
return nest.pack_sequence_as(state, conditionally_updated_state_flat)
else:
return exogenous_updated_state
def per_step_batch_loss(self, features, mode, state):
"""Computes predictions, losses, and intermediate model states.
Args:
features: A dictionary with times, values, and (optionally) exogenous
regressors. See `define_loss`.
mode: The tf.estimator.ModeKeys mode to use (TRAIN, EVAL, INFER).
state: Model-dependent state, each with size [batch size x ...]. The
number and type will typically be fixed by the model (for example a
mean and variance).
Returns:
A tuple of (loss, filtered_states, predictions)
loss: Average loss values across the batch.
filtered_states: For each Tensor in `state` with shape [batch size x
...], `filtered_states` has a Tensor with shape [batch size x window
size x ...] with filtered state for each part of the batch and
window.
predictions: A dictionary with model-dependent one-step-ahead (or
at-least-one-step-ahead with missing values) predictions, with keys
indicating the type of prediction and values having shape [batch
size x window size x ...]. For example state space models provide
"mean", "covariance", and "log_likelihood".
"""
self._check_graph_initialized()
times = math_ops.cast(features[TrainEvalFeatures.TIMES], dtype=dtypes.int64)
values = math_ops.cast(features[TrainEvalFeatures.VALUES], dtype=self.dtype)
if self._normalize_features:
values = self._scale_data(values)
exogenous_regressors = self._process_exogenous_features(
times=times,
features={key: value for key, value in features.items()
if key not in [TrainEvalFeatures.TIMES,
TrainEvalFeatures.VALUES]})
def _batch_loss_filtering_step(step_number, current_times, state):
"""Make a prediction and update it based on data."""
current_values = values[:, step_number, :]
state = self._apply_exogenous_update(
step_number=step_number, current_times=current_times, state=state,
raw_features=features,
embedded_exogenous_regressors=exogenous_regressors)
predicted_state, predictions = self._prediction_step(
current_times=current_times,
state=state)
filtered_state, outputs = self._filtering_step(
current_times=current_times,
current_values=current_values,
state=predicted_state,
predictions=predictions)
return filtered_state, outputs
state, outputs = self._state_update_loop(
times=times, state=state, state_update_fn=_batch_loss_filtering_step,
outputs=["loss"] + self._train_output_names)
outputs["loss"].set_shape(times.get_shape())
loss_sum = math_ops.reduce_sum(outputs["loss"])
per_observation_loss = (loss_sum / math_ops.cast(
math_ops.reduce_prod(array_ops.shape(times)), dtype=self.dtype))
per_observation_loss += self._loss_additions(times, values, mode)
# Since we have window-level additions to the loss, its per-step value is
# misleading, so we avoid returning it.
del outputs["loss"]
if self._normalize_features:
outputs = self._scale_back_predictions(outputs)
return per_observation_loss, state, outputs
def predict(self, features):
"""Calls self._prediction_step in a loop. See TimeSeriesModel.predict."""
predict_times = ops.convert_to_tensor(features[PredictionFeatures.TIMES],
dtypes.int64)
start_state = features[PredictionFeatures.STATE_TUPLE]
exogenous_regressors = self._process_exogenous_features(
times=predict_times,
features={
key: value
for key, value in features.items()
if key not in
[PredictionFeatures.TIMES, PredictionFeatures.STATE_TUPLE]
})
def _call_prediction_step(step_number, current_times, state):
state = self._apply_exogenous_update(
step_number=step_number, current_times=current_times, state=state,
raw_features=features,
embedded_exogenous_regressors=exogenous_regressors)
state, outputs = self._prediction_step(
current_times=current_times, state=state)
return state, outputs
_, predictions = self._state_update_loop(
times=predict_times, state=start_state,
state_update_fn=_call_prediction_step,
outputs=self._predict_output_names)
if self._normalize_features:
predictions = self._scale_back_predictions(predictions)
return predictions
class _FakeTensorArray(object):
"""An interface for Python lists that is similar to TensorArray.
Used for easy switching between static and dynamic looping.
"""
def __init__(self):
self.values = []
def write(self, unused_position, value):
del unused_position
self.values.append(value)
return self
def _state_update_loop(self, times, state, state_update_fn, outputs):
"""Iterates over `times`, calling `state_update_fn` to collect outputs.
Args:
times: A [batch size x window size] Tensor of integers to iterate over.
state: A list of model-specific state Tensors, each with shape [batch size
x ...].
state_update_fn: A callback taking the following arguments
step_number; A scalar integer Tensor indicating the current position
in the window.
          current_times; A [batch size] vector of integers indicating times
for each part of the batch.
state; Current model state.
It returns a tuple of (updated state, output_values), output_values
being a dictionary of Tensors with keys corresponding to `outputs`.
outputs: A list of strings indicating values which will be saved while
iterating. Must match the keys of the dictionary returned by
state_update_fn.
Returns:
A tuple of (state, output_dict)
state: The final model state.
output_dict: A dictionary of outputs corresponding to those specified in
`outputs` and computed in state_update_fn.
"""
times = ops.convert_to_tensor(times, dtype=dtypes.int64)
window_static_shape = tensor_shape.dimension_value(times.shape[1])
if self._static_unrolling_window_size_threshold is None:
static_unroll = False
else:
# The user has specified a threshold for static loop unrolling.
if window_static_shape is None:
# We don't have static shape information for the window size, so dynamic
# looping is our only option.
static_unroll = False
elif window_static_shape <= self._static_unrolling_window_size_threshold:
# The threshold is satisfied; unroll statically
static_unroll = True
else:
# A threshold was set but not satisfied
static_unroll = False
self._window_initializer(times, state)
def _run_condition(step_number, *unused):
del unused # not part of while loop run condition
return math_ops.less(step_number, window_size)
def _state_update_step(
step_number, state, state_accumulators, output_accumulators,
reuse=False):
"""Impute, then take one state_update_fn step, accumulating outputs."""
with variable_scope.variable_scope("state_update_step", reuse=reuse):
current_times = times[:, step_number]
state = self._imputation_step(current_times=current_times, state=state)
output_accumulators_dict = {
accumulator_key: accumulator
for accumulator_key, accumulator
in zip(outputs, output_accumulators)}
step_state, output_values = state_update_fn(
step_number=step_number,
current_times=current_times,
state=state)
assert set(output_values.keys()) == set(outputs)
new_output_accumulators = []
for output_key in outputs:
accumulator = output_accumulators_dict[output_key]
output_value = output_values[output_key]
new_output_accumulators.append(
accumulator.write(step_number, output_value))
flat_step_state = nest.flatten(step_state)
assert len(state_accumulators) == len(flat_step_state)
new_state_accumulators = []
new_state_flat = []
for step_state_value, state_accumulator, original_state in zip(
flat_step_state, state_accumulators, nest.flatten(state)):
# Make sure the static shape information is complete so while_loop
# does not complain about shape information changing.
step_state_value.set_shape(original_state.get_shape())
new_state_flat.append(step_state_value)
new_state_accumulators.append(state_accumulator.write(
step_number, step_state_value))
step_state = nest.pack_sequence_as(state, new_state_flat)
return (step_number + 1, step_state,
new_state_accumulators, new_output_accumulators)
window_size = array_ops.shape(times)[1]
def _window_size_tensor_array(dtype):
if static_unroll:
return self._FakeTensorArray()
else:
return tensor_array_ops.TensorArray(
dtype=dtype, size=window_size, dynamic_size=False)
initial_loop_arguments = [
array_ops.zeros([], dtypes.int32),
state,
[_window_size_tensor_array(element.dtype)
for element in nest.flatten(state)],
[_window_size_tensor_array(self.dtype) for _ in outputs]]
if static_unroll:
arguments = initial_loop_arguments
for step_number in range(tensor_shape.dimension_value(times.shape[1])):
arguments = _state_update_step(
array_ops.constant(step_number, dtypes.int32), *arguments[1:],
reuse=(step_number > 0)) # Variable sharing between steps
else:
arguments = control_flow_ops.while_loop(
cond=_run_condition,
body=_state_update_step,
loop_vars=initial_loop_arguments)
(_, _, state_loop_result, outputs_loop_result) = arguments
def _stack_and_transpose(tensor_array):
"""Stack and re-order the dimensions of a TensorArray."""
if static_unroll:
return array_ops.stack(tensor_array.values, axis=1)
else:
# TensorArrays from while_loop stack with window size as the first
# dimension, so this function swaps it and the batch dimension to
# maintain the [batch x window size x ...] convention used elsewhere.
stacked = tensor_array.stack()
return array_ops.transpose(
stacked,
perm=array_ops.concat([[1, 0], math_ops.range(
2, array_ops.rank(stacked))], 0))
outputs_dict = {output_key: _stack_and_transpose(output)
for output_key, output
in zip(outputs, outputs_loop_result)}
full_state = nest.pack_sequence_as(
state,
[_stack_and_transpose(state_element)
for state_element in state_loop_result])
return full_state, outputs_dict
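# A minimal sketch (added for illustration; not part of the original module) of
# a `state_update_fn` callback with the signature documented in
# `_state_update_loop`: it receives the step index, a [batch size] vector of
# current times, and the model state, and returns the updated state plus a
# dictionary of per-step outputs keyed by the names passed in `outputs`. The
# function name is hypothetical.
def _example_state_update_fn(step_number, current_times, state):
  """Identity state update that reports the current times as a float output."""
  del step_number  # unused in this sketch
  return state, {"loss": math_ops.cast(current_times, dtypes.float32)}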
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/model.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy
import six
from tensorflow.contrib.timeseries.python.timeseries import ar_model
from tensorflow.contrib.timeseries.python.timeseries import estimators
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.contrib.timeseries.python.timeseries import input_pipeline
from tensorflow.contrib.timeseries.python.timeseries import saved_model_utils
from tensorflow.python.client import session
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.feature_column import feature_column_lib as feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
class _SeedRunConfig(estimator_lib.RunConfig):
@property
def tf_random_seed(self):
return 3
class TimeSeriesRegressorTest(test.TestCase):
def _fit_restore_fit_test_template(self, estimator_fn, dtype):
"""Tests restoring previously fit models."""
model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
exogenous_feature_columns = (
feature_column.numeric_column("exogenous"),
)
first_estimator = estimator_fn(model_dir, exogenous_feature_columns)
times = numpy.arange(20, dtype=numpy.int64)
values = numpy.arange(20, dtype=dtype.as_numpy_dtype)
exogenous = numpy.arange(20, dtype=dtype.as_numpy_dtype)
features = {
feature_keys.TrainEvalFeatures.TIMES: times,
feature_keys.TrainEvalFeatures.VALUES: values,
"exogenous": exogenous
}
train_input_fn = input_pipeline.RandomWindowInputFn(
input_pipeline.NumpyReader(features), shuffle_seed=2, num_threads=1,
batch_size=16, window_size=16)
eval_input_fn = input_pipeline.RandomWindowInputFn(
input_pipeline.NumpyReader(features), shuffle_seed=3, num_threads=1,
batch_size=16, window_size=16)
first_estimator.train(input_fn=train_input_fn, steps=1)
first_evaluation = first_estimator.evaluate(
input_fn=eval_input_fn, steps=1)
first_loss_before_fit = first_evaluation["loss"]
self.assertAllEqual(first_loss_before_fit, first_evaluation["average_loss"])
self.assertAllEqual([], first_loss_before_fit.shape)
first_estimator.train(input_fn=train_input_fn, steps=1)
first_loss_after_fit = first_estimator.evaluate(
input_fn=eval_input_fn, steps=1)["loss"]
self.assertAllEqual([], first_loss_after_fit.shape)
second_estimator = estimator_fn(model_dir, exogenous_feature_columns)
second_estimator.train(input_fn=train_input_fn, steps=1)
whole_dataset_input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader(features))
whole_dataset_evaluation = second_estimator.evaluate(
input_fn=whole_dataset_input_fn, steps=1)
exogenous_values_ten_steps = {
"exogenous": numpy.arange(
10, dtype=dtype.as_numpy_dtype)[None, :, None]
}
predict_input_fn = input_pipeline.predict_continuation_input_fn(
evaluation=whole_dataset_evaluation,
exogenous_features=exogenous_values_ten_steps,
steps=10)
# Also tests that limit_epochs in predict_continuation_input_fn prevents
# infinite iteration
(estimator_predictions,
) = list(second_estimator.predict(input_fn=predict_input_fn))
self.assertAllEqual([10, 1], estimator_predictions["mean"].shape)
input_receiver_fn = first_estimator.build_raw_serving_input_receiver_fn()
export_location = first_estimator.export_saved_model(
self.get_temp_dir(), input_receiver_fn)
with ops.Graph().as_default():
with session.Session() as sess:
signatures = loader.load(sess, [tag_constants.SERVING], export_location)
# Test that prediction and filtering can continue from evaluation output
saved_prediction = saved_model_utils.predict_continuation(
continue_from=whole_dataset_evaluation,
steps=10,
exogenous_features=exogenous_values_ten_steps,
signatures=signatures,
session=sess)
# Saved model predictions should be the same as Estimator predictions
# starting from the same evaluation.
for prediction_key, prediction_value in estimator_predictions.items():
self.assertAllClose(prediction_value,
numpy.squeeze(
saved_prediction[prediction_key], axis=0))
first_filtering = saved_model_utils.filter_continuation(
continue_from=whole_dataset_evaluation,
features={
feature_keys.FilteringFeatures.TIMES: times[None, -1] + 2,
feature_keys.FilteringFeatures.VALUES: values[None, -1] + 2.,
"exogenous": values[None, -1, None] + 12.
},
signatures=signatures,
session=sess)
# Test that prediction and filtering can continue from filtering output
second_saved_prediction = saved_model_utils.predict_continuation(
continue_from=first_filtering,
steps=1,
exogenous_features={
"exogenous": numpy.arange(
1, dtype=dtype.as_numpy_dtype)[None, :, None]
},
signatures=signatures,
session=sess)
self.assertEqual(
times[-1] + 3,
numpy.squeeze(
second_saved_prediction[feature_keys.PredictionResults.TIMES]))
saved_model_utils.filter_continuation(
continue_from=first_filtering,
features={
feature_keys.FilteringFeatures.TIMES: times[-1] + 3,
feature_keys.FilteringFeatures.VALUES: values[-1] + 3.,
"exogenous": values[-1, None] + 13.
},
signatures=signatures,
session=sess)
# Test cold starting
six.assertCountEqual(
self,
[feature_keys.FilteringFeatures.TIMES,
feature_keys.FilteringFeatures.VALUES,
"exogenous"],
signatures.signature_def[
feature_keys.SavedModelLabels.COLD_START_FILTER].inputs.keys())
batch_numpy_times = numpy.tile(
numpy.arange(30, dtype=numpy.int64)[None, :], (10, 1))
batch_numpy_values = numpy.ones([10, 30, 1])
state = saved_model_utils.cold_start_filter(
signatures=signatures,
session=sess,
features={
feature_keys.FilteringFeatures.TIMES: batch_numpy_times,
feature_keys.FilteringFeatures.VALUES: batch_numpy_values,
"exogenous": 10. + batch_numpy_values
}
)
predict_times = numpy.tile(
numpy.arange(30, 45, dtype=numpy.int64)[None, :], (10, 1))
predictions = saved_model_utils.predict_continuation(
continue_from=state,
times=predict_times,
exogenous_features={
"exogenous": numpy.tile(numpy.arange(
15, dtype=dtype.as_numpy_dtype), (10,))[None, :, None]
},
signatures=signatures,
session=sess)
self.assertAllEqual([10, 15, 1], predictions["mean"].shape)
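  # The template above exercises the full workflow: training and evaluating
  # two estimator instances sharing a model_dir, continuing predictions from
  # an evaluation, exporting a SavedModel, and then running warm-start
  # filtering/prediction and cold-start filtering against its signatures.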
def test_fit_restore_fit_ar_flat(self):
def _estimator_fn(model_dir, exogenous_feature_columns):
return estimators.ARRegressor(
periodicities=10, input_window_size=10, output_window_size=6,
num_features=1, model_dir=model_dir, config=_SeedRunConfig(),
# This test is flaky with normal likelihood loss (could add more
# training iterations instead).
loss=ar_model.ARModel.SQUARED_LOSS,
exogenous_feature_columns=exogenous_feature_columns)
self._fit_restore_fit_test_template(_estimator_fn, dtype=dtypes.float32)
def test_fit_restore_fit_ar_lstm(self):
def _estimator_fn(model_dir, exogenous_feature_columns):
return estimators.TimeSeriesRegressor(
model=ar_model.ARModel(
periodicities=10, input_window_size=10, output_window_size=6,
num_features=1,
exogenous_feature_columns=exogenous_feature_columns,
prediction_model_factory=functools.partial(
ar_model.LSTMPredictionModel,
num_units=10)),
config=_SeedRunConfig(),
model_dir=model_dir)
self._fit_restore_fit_test_template(_estimator_fn, dtype=dtypes.float32)
def test_fit_restore_fit_structural_ensemble_regressor(self):
dtype = dtypes.float32
def _estimator_fn(model_dir, exogenous_feature_columns):
return estimators.StructuralEnsembleRegressor(
num_features=1, periodicities=10, model_dir=model_dir, dtype=dtype,
config=_SeedRunConfig(),
exogenous_feature_columns=exogenous_feature_columns)
self._fit_restore_fit_test_template(_estimator_fn, dtype=dtype)
def test_structural_ensemble_numpy_input(self):
numpy_data = {"times": numpy.arange(50),
"values": numpy.random.normal(size=[50])}
estimators.StructuralEnsembleRegressor(
num_features=1, periodicities=[], model_dir=self.get_temp_dir(),
config=_SeedRunConfig()).train(
input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader(numpy_data)),
steps=1)
def test_ar_lstm_regressor(self):
dtype = dtypes.float32
model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
exogenous_feature_columns = (
feature_column.numeric_column("exogenous"),
)
estimator = estimators.LSTMAutoRegressor(
periodicities=10,
input_window_size=10,
output_window_size=6,
model_dir=model_dir,
num_features=1,
extra_feature_columns=exogenous_feature_columns,
num_units=10,
config=_SeedRunConfig())
times = numpy.arange(20, dtype=numpy.int64)
values = numpy.arange(20, dtype=dtype.as_numpy_dtype)
exogenous = numpy.arange(20, dtype=dtype.as_numpy_dtype)
features = {
feature_keys.TrainEvalFeatures.TIMES: times,
feature_keys.TrainEvalFeatures.VALUES: values,
"exogenous": exogenous
}
train_input_fn = input_pipeline.RandomWindowInputFn(
input_pipeline.NumpyReader(features), shuffle_seed=2, num_threads=1,
batch_size=16, window_size=16)
eval_input_fn = input_pipeline.RandomWindowInputFn(
input_pipeline.NumpyReader(features), shuffle_seed=3, num_threads=1,
batch_size=16, window_size=16)
estimator.train(input_fn=train_input_fn, steps=1)
evaluation = estimator.evaluate(
input_fn=eval_input_fn, steps=1)
self.assertAllEqual(evaluation["loss"], evaluation["average_loss"])
self.assertAllEqual([], evaluation["loss"].shape)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/estimators_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for math_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.contrib.timeseries.python.timeseries import input_pipeline
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import TrainEvalFeatures
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator as coordinator_lib
from tensorflow.python.training import queue_runner_impl
class MathUtilsTest(test.TestCase):
def setUp(self):
numpy.random.seed(10)
def test_power_sums_tensor(self):
transition = numpy.random.normal(size=[4, 4]).astype(numpy.float32)
addition = numpy.random.normal(size=[4, 4]).astype(numpy.float32)
array_size = 2
result = []
transition_power = numpy.identity(4)
running_sum = numpy.zeros([4, 4], dtype=numpy.float32)
for _ in range(array_size + 1):
result.append(running_sum)
current_contribution = numpy.dot(numpy.dot(transition_power, addition),
transition_power.T)
# pylint: disable=g-no-augmented-assignment
# += has different semantics here; want to make a copy
running_sum = running_sum + current_contribution
# pylint: enable=g-no-augmented-assignment
transition_power = numpy.dot(transition, transition_power)
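    # At this point result[i] == sum_{k=0}^{i-1} A^k Q (A^k)^T, where A is
    # `transition` and Q is `addition`; math_utils.power_sums_tensor is
    # expected to produce the same sequence of partial sums.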
with self.cached_session():
self.assertAllClose(result,
math_utils.power_sums_tensor(
array_size, transition, addition).eval())
def test_matrix_to_powers(self):
matrix = numpy.random.normal(size=[4, 4]).astype(numpy.float32)
powers = numpy.random.randint(low=0, high=10, size=20)
result = []
for i in range(powers.shape[0]):
result.append(numpy.linalg.matrix_power(matrix, powers[i]))
with self.cached_session():
self.assertAllClose(result,
math_utils.matrix_to_powers(matrix, powers).eval(),
rtol=1e-5,
atol=1e-5)
def test_batch_matrix_pow(self):
batch = numpy.random.normal(size=[15, 4, 4]).astype(numpy.float32)
powers = numpy.random.randint(low=0, high=10, size=batch.shape[0])
result = []
for i in range(batch.shape[0]):
result.append(numpy.linalg.matrix_power(batch[i], powers[i]))
with self.cached_session():
# TODO(allenl): Numerical errors seem to be creeping in. Maybe it can be
# made slightly more stable?
self.assertAllClose(result,
math_utils.batch_matrix_pow(batch, powers).eval(),
rtol=1e-5,
atol=1e-5)
def test_batch_times_matrix(self):
left = numpy.random.normal(size=[5, 3, 2]).astype(numpy.float32)
left_transpose = numpy.transpose(left, [0, 2, 1])
right = numpy.random.normal(size=[2, 3]).astype(numpy.float32)
expected_result = numpy.dot(left, right)
with self.cached_session():
self.assertAllClose(expected_result,
math_utils.batch_times_matrix(
left, right).eval())
self.assertAllClose(expected_result,
math_utils.batch_times_matrix(
left_transpose, right,
adj_x=True).eval())
self.assertAllClose(expected_result,
math_utils.batch_times_matrix(
left, right.T,
adj_y=True).eval())
self.assertAllClose(expected_result,
math_utils.batch_times_matrix(
left_transpose, right.T,
adj_x=True, adj_y=True).eval())
def test_matrix_times_batch(self):
left = numpy.random.normal(size=[5, 7]).astype(numpy.float32)
right = numpy.random.normal(size=[3, 7, 9]).astype(numpy.float32)
right_transpose = numpy.transpose(right, [0, 2, 1])
expected_result = numpy.transpose(numpy.dot(right_transpose, left.T),
[0, 2, 1])
with self.cached_session():
self.assertAllClose(expected_result,
math_utils.matrix_times_batch(
left, right).eval())
self.assertAllClose(expected_result,
math_utils.matrix_times_batch(
left.T, right,
adj_x=True).eval())
self.assertAllClose(expected_result,
math_utils.matrix_times_batch(
left, right_transpose,
adj_y=True).eval())
self.assertAllClose(expected_result,
math_utils.matrix_times_batch(
left.T, right_transpose,
adj_x=True, adj_y=True).eval())
def test_make_diagonal_undefined_shapes(self):
with self.cached_session():
completely_undefined = array_ops.placeholder(dtype=dtypes.float32)
partly_undefined = array_ops.placeholder(
shape=[None, None], dtype=dtypes.float32)
blocked = math_utils.block_diagonal([completely_undefined,
[[2.]],
partly_undefined])
self.assertEqual([None, None],
blocked.get_shape().as_list())
self.assertAllEqual(
[[1., 0., 0., 0.],
[0., 2., 0., 0.],
[0., 0., 3., 4.],
[0., 0., 5., 6.]],
blocked.eval(feed_dict={
completely_undefined: [[1.]],
partly_undefined: [[3., 4.],
[5., 6.]]}))
def test_make_diagonal_mostly_defined_shapes(self):
with self.cached_session():
mostly_defined = array_ops.placeholder(
shape=[None, 2], dtype=dtypes.float32)
blocked = math_utils.block_diagonal([[[2.]],
mostly_defined,
[[7.]]])
self.assertEqual([None, 4],
blocked.get_shape().as_list())
self.assertAllEqual(
[[2., 0., 0., 0.],
[0., 3., 4., 0.],
[0., 5., 6., 0.],
[0., 0., 0., 7.]],
blocked.eval(feed_dict={
mostly_defined: [[3., 4.],
[5., 6.]]}))
class TestMakeToeplitzMatrix(test.TestCase):
def test_make_toeplitz_matrix_1(self):
inputs = numpy.array([[[1.]], [[2.]], [[3.]]])
output_expected = numpy.array([[1., 2, 3], [2, 1, 2], [3, 2, 1]])
self._test_make_toeplitz_matrix(inputs, output_expected)
def test_make_toeplitz_matrix_2(self):
inputs = numpy.array(
[[[1, 2.], [3, 4]], [[5, 6], [7, 8]], [[8, 9], [10, 11]]])
output_expected = numpy.array(
[[1., 2., 5., 6, 8, 9],
[3, 4, 7, 8, 10, 11],
[5, 6, 1, 2, 5, 6],
[7, 8, 3, 4, 7, 8],
[8, 9, 5, 6, 1, 2],
[10, 11, 7, 8, 3, 4]])
self._test_make_toeplitz_matrix(inputs, output_expected)
def _test_make_toeplitz_matrix(self, inputs, output_expected):
output_tf = math_utils.make_toeplitz_matrix(inputs)
with self.cached_session() as sess:
output_tf_np = sess.run(output_tf)
self.assertAllClose(output_tf_np, output_expected)
class TestMakeCovarianceMatrix(test.TestCase):
def test_zero_size_matrix(self):
raw = numpy.zeros([0, 0])
with self.cached_session():
constructed = math_utils.sign_magnitude_positive_definite(raw=raw).eval()
self.assertEqual((0, 0), constructed.shape)
def test_sign_magnitude_positive_definite(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
matrix_tensor = math_utils.sign_magnitude_positive_definite(
raw=constant_op.constant([[-1., -2.], [3., 4.]], dtype=dtype),
off_diagonal_scale=constant_op.constant(-1., dtype=dtype),
overall_scale=constant_op.constant(1., dtype=dtype))
matrix_evaled = matrix_tensor.eval()
self.assertAllClose(matrix_evaled, matrix_evaled.T)
self.assertTrue(numpy.all(numpy.linalg.eigvals(matrix_evaled) > 0))
class TestLookupTable(test.TestCase):
def test_tuple_of_tensors_lookup(self):
hash_table = math_utils.TupleOfTensorsLookup(
key_dtype=dtypes.int64,
default_values=[[
array_ops.ones([3, 2], dtype=dtypes.float32),
array_ops.zeros([5], dtype=dtypes.float64)
],
array_ops.ones([7, 7], dtype=dtypes.int64)],
empty_key=-1,
deleted_key=-2,
name="test_lookup")
def stack_tensor(base_tensor):
return array_ops.stack([base_tensor + 1, base_tensor + 2])
with self.cached_session() as session:
((float_output, double_output), int_output) = session.run(
hash_table.lookup([2, 1, 0]))
def expected_output_before_insert(base_tensor):
return [base_tensor,
base_tensor,
base_tensor]
self.assertAllClose(
expected_output_before_insert(numpy.ones([3, 2])),
float_output)
self.assertAllClose(
expected_output_before_insert(numpy.zeros([5])),
double_output)
self.assertAllEqual(
expected_output_before_insert(numpy.ones([7, 7], dtype=numpy.int64)),
int_output)
hash_table.insert(
keys=[1, 2],
values=[[
stack_tensor(array_ops.ones([3, 2], dtype=dtypes.float32)),
stack_tensor(array_ops.zeros([5], dtype=dtypes.float64))
], stack_tensor(array_ops.ones([7, 7], dtype=dtypes.int64))]).run()
((float_output, double_output), int_output) = session.run(
hash_table.lookup([2, 1, 0]))
def expected_output_after_insert(base_tensor):
return [base_tensor + 2,
base_tensor + 1,
base_tensor]
self.assertAllClose(
expected_output_after_insert(numpy.ones([3, 2])),
float_output)
self.assertAllClose(
expected_output_after_insert(numpy.zeros([5])),
double_output)
self.assertAllEqual(
expected_output_after_insert(numpy.ones([7, 7], dtype=numpy.int64)),
int_output)
class InputStatisticsTests(test.TestCase):
def _input_statistics_test_template(
self, stat_object, num_features, dtype, give_full_data,
warmup_iterations=0, rtol=1e-6, data_length=500, chunk_size=4):
graph = ops.Graph()
with graph.as_default():
numpy_dtype = dtype.as_numpy_dtype
values = (
(numpy.arange(data_length, dtype=numpy_dtype)[..., None]
+ numpy.arange(num_features, dtype=numpy_dtype)[None, ...])[None])
times = 2 * (numpy.arange(data_length)[None]) - 3
if give_full_data:
stat_object.set_data((times, values))
features = {TrainEvalFeatures.TIMES: times,
TrainEvalFeatures.VALUES: values}
input_fn = input_pipeline.RandomWindowInputFn(
batch_size=16, window_size=chunk_size,
time_series_reader=input_pipeline.NumpyReader(features))
statistics = stat_object.initialize_graph(
features=input_fn()[0])
with self.session(graph=graph) as session:
variables.global_variables_initializer().run()
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(session, coord=coordinator)
for _ in range(warmup_iterations):
# A control dependency should ensure that, for queue-based statistics,
# a use of any statistic is preceded by an update of all adaptive
# statistics.
statistics.total_observation_count.eval()
self.assertAllClose(
range(num_features) + numpy.mean(numpy.arange(chunk_size))[None],
statistics.series_start_moments.mean.eval(),
rtol=rtol)
self.assertAllClose(
numpy.tile(numpy.var(numpy.arange(chunk_size))[None],
[num_features]),
statistics.series_start_moments.variance.eval(),
rtol=rtol)
self.assertAllClose(
numpy.mean(values[0], axis=0),
statistics.overall_feature_moments.mean.eval(),
rtol=rtol)
self.assertAllClose(
numpy.var(values[0], axis=0),
statistics.overall_feature_moments.variance.eval(),
rtol=rtol)
self.assertAllClose(
-3,
statistics.start_time.eval(),
rtol=rtol)
self.assertAllClose(
data_length,
statistics.total_observation_count.eval(),
rtol=rtol)
coordinator.request_stop()
coordinator.join()
def test_queue(self):
for dtype in [dtypes.float32, dtypes.float64]:
for num_features in [1, 2, 3]:
self._input_statistics_test_template(
math_utils.InputStatisticsFromMiniBatch(
num_features=num_features, dtype=dtype),
num_features=num_features,
dtype=dtype,
give_full_data=False,
warmup_iterations=1000,
rtol=0.1)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/math_utils_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Timeseries head."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.export import export_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
from tensorflow.python.training import training_util
from tensorflow.python.util import nest
class _NoStatePredictOutput(export_lib.PredictOutput):
def as_signature_def(self, receiver_tensors):
no_state_receiver_tensors = {
key: value for key, value in receiver_tensors.items()
if not key.startswith(feature_keys.State.STATE_PREFIX)}
return super(_NoStatePredictOutput, self).as_signature_def(
receiver_tensors=no_state_receiver_tensors)
class TimeSeriesRegressionHead(head_lib._Head): # pylint:disable=protected-access
"""Determines input and output signatures for a time series model."""
def __init__(self,
model,
state_manager,
optimizer,
input_statistics_generator=None,
name=None):
"""Creates a `_Head` for time series regression.
Args:
model: A model for time series regression.
state_manager: A state manager.
optimizer: An optimizer.
      input_statistics_generator: An input statistics generator (e.g.
        math_utils.InputStatisticsFromMiniBatch), used to compute statistics
        over the input features.
name: An optional name for the model.
"""
self.model = model
self.state_manager = state_manager
self.optimizer = optimizer
self.input_statistics_generator = input_statistics_generator
self._name = name
@property
def name(self):
return self._name
# TODO(terrytangyuan): consolidate `model_outputs` and `_Head.LossSpec`
# once `_Head.create_loss` becomes extendable
def create_loss(self, features, mode, logits=None, labels=None):
"""See `_Head`."""
model_outputs = self.state_manager.define_loss(
self.model, features, mode)
summary.scalar(
head_lib._summary_key(self._name, metric_keys.MetricKeys.LOSS),
model_outputs.loss)
return model_outputs
@property
def logits_dimension(self):
"""See `_Head`."""
return 1
def _train_ops(self, features):
"""Add training ops to the graph."""
mode = estimator_lib.ModeKeys.TRAIN
with variable_scope.variable_scope(
"model",
# Use ResourceVariables to avoid race conditions.
use_resource=True):
model_outputs = self.create_loss(features, mode)
train_op = self.optimizer.minimize(
model_outputs.loss,
global_step=training_util.get_global_step())
return estimator_lib.EstimatorSpec(
loss=model_outputs.loss,
mode=mode,
train_op=train_op)
def _evaluate_ops(self, features):
"""Add ops for evaluation (aka filtering) to the graph."""
mode = estimator_lib.ModeKeys.EVAL
with variable_scope.variable_scope("model", use_resource=True):
model_outputs = self.create_loss(features, mode)
metrics = {}
# Just output in-sample predictions for the last chunk seen
for prediction_key, prediction_value in model_outputs.predictions.items():
metrics[prediction_key] = _identity_metric_single(prediction_key,
prediction_value)
metrics[feature_keys.FilteringResults.TIMES] = _identity_metric_single(
feature_keys.FilteringResults.TIMES, model_outputs.prediction_times)
metrics[feature_keys.FilteringResults.STATE_TUPLE] = (
_identity_metric_nested(feature_keys.FilteringResults.STATE_TUPLE,
model_outputs.end_state))
metrics[metric_keys.MetricKeys.LOSS_MEAN] = metrics_impl.mean(
model_outputs.loss, name="average_loss")
return estimator_lib.EstimatorSpec(
loss=model_outputs.loss,
mode=mode,
eval_metric_ops=metrics,
# needed for custom metrics.
predictions=model_outputs.predictions)
def _predict_ops(self, features):
"""Add ops for prediction to the graph."""
with variable_scope.variable_scope("model", use_resource=True):
prediction = self.model.predict(features=features)
prediction[feature_keys.PredictionResults.TIMES] = features[
feature_keys.PredictionFeatures.TIMES]
return estimator_lib.EstimatorSpec(
predictions=prediction, mode=estimator_lib.ModeKeys.PREDICT)
def _serving_ops(self, features):
"""Add ops for serving to the graph."""
with variable_scope.variable_scope("model", use_resource=True):
prediction_outputs = self.model.predict(features=features)
with variable_scope.variable_scope("model", reuse=True):
filtering_outputs = self.create_loss(
features, estimator_lib.ModeKeys.EVAL)
with variable_scope.variable_scope("model", reuse=True):
no_state_features = {
k: v for k, v in features.items()
if not k.startswith(feature_keys.State.STATE_PREFIX)}
# Ignore any state management when cold-starting. The model's default
# start state is replicated across the batch.
cold_filtering_outputs = self.model.define_loss(
features=no_state_features, mode=estimator_lib.ModeKeys.EVAL)
return estimator_lib.EstimatorSpec(
mode=estimator_lib.ModeKeys.PREDICT,
export_outputs={
feature_keys.SavedModelLabels.PREDICT:
export_lib.PredictOutput(prediction_outputs),
feature_keys.SavedModelLabels.FILTER:
export_lib.PredictOutput(
state_to_dictionary(filtering_outputs.end_state)),
feature_keys.SavedModelLabels.COLD_START_FILTER:
_NoStatePredictOutput(
state_to_dictionary(cold_filtering_outputs.end_state))
},
# Likely unused, but it is necessary to return `predictions` to satisfy
# the Estimator's error checking.
predictions={})
def _convert_feature_to_tensor(self, name, value):
"""Casts features to the correct dtype based on their name."""
if name in [
feature_keys.TrainEvalFeatures.TIMES,
feature_keys.PredictionFeatures.TIMES
]:
return math_ops.cast(value, dtypes.int64)
if name == feature_keys.TrainEvalFeatures.VALUES:
return math_ops.cast(value, self.model.dtype)
if name == feature_keys.PredictionFeatures.STATE_TUPLE:
return value # Correct dtypes are model-dependent
return sparse_tensor.convert_to_tensor_or_sparse_tensor(value)
def _gather_state(self, features):
"""Returns `features` with state packed, indicates if packing was done."""
prefixed_state_re = re.compile(r"^" + feature_keys.State.STATE_PREFIX +
r"_(\d+)$")
numbered_state = []
for key, tensor in features.items():
search_result = prefixed_state_re.search(key)
if search_result:
numbered_state.append((int(search_result.group(1)), key, tensor))
if not numbered_state:
return features, False
features = features.copy()
for _, key, _ in numbered_state:
del features[key]
    numbered_state.sort(key=lambda element: element[0])  # Sort by state number.
features[feature_keys.State.STATE_TUPLE] = nest.pack_sequence_as(
structure=self.model.get_start_state(),
flat_sequence=[tensor for _, _, tensor in numbered_state])
return features, True
def _check_predict_features(self, features):
"""Raises errors if features are not suitable for prediction."""
if feature_keys.PredictionFeatures.TIMES not in features:
raise ValueError("Expected a '{}' feature for prediction.".format(
feature_keys.PredictionFeatures.TIMES))
if feature_keys.PredictionFeatures.STATE_TUPLE not in features:
raise ValueError("Expected a '{}' feature for prediction.".format(
feature_keys.PredictionFeatures.STATE_TUPLE))
times_feature = features[feature_keys.PredictionFeatures.TIMES]
if not times_feature.get_shape().is_compatible_with([None, None]):
raise ValueError(
("Expected shape (batch dimension, window size) for feature '{}' "
"(got shape {})").format(feature_keys.PredictionFeatures.TIMES,
times_feature.get_shape()))
_check_feature_shapes_compatible_with(
features=features,
compatible_with_name=feature_keys.PredictionFeatures.TIMES,
compatible_with_value=times_feature,
ignore=set([
# Model-dependent shapes
feature_keys.PredictionFeatures.STATE_TUPLE
]))
def create_estimator_spec(self, features, mode, labels=None):
"""Performs basic error checking and returns an EstimatorSpec."""
with ops.name_scope(self._name, "head"):
if labels is not None and labels != {}: # for better error messages.
raise ValueError(
"The model received a `labels`, which is not supported. "
"Pass '{}' and '{}' as features.".format(
feature_keys.TrainEvalFeatures.TIMES,
feature_keys.TrainEvalFeatures.VALUES))
del labels
features = {
name: self._convert_feature_to_tensor(name=name, value=value)
for name, value in features.items()
}
if self.input_statistics_generator is not None:
input_statistics = self.input_statistics_generator.initialize_graph(
features, update_statistics=(mode == estimator_lib.ModeKeys.TRAIN))
else:
input_statistics = None
self.model.initialize_graph(input_statistics=input_statistics)
# _gather_state requires the model to have its graph initialized (so it
# has access to the structure of the model's state)
features, passed_flat_state = self._gather_state(features)
if (mode == estimator_lib.ModeKeys.TRAIN or
mode == estimator_lib.ModeKeys.EVAL):
_check_train_eval_features(features, self.model)
elif mode == estimator_lib.ModeKeys.PREDICT:
self._check_predict_features(features)
else:
raise ValueError("Unknown mode '{}' passed to model_fn.".format(mode))
self.state_manager.initialize_graph(
model=self.model, input_statistics=input_statistics)
if mode == estimator_lib.ModeKeys.TRAIN:
return self._train_ops(features)
elif mode == estimator_lib.ModeKeys.EVAL:
return self._evaluate_ops(features)
elif mode == estimator_lib.ModeKeys.PREDICT and not passed_flat_state:
return self._predict_ops(features)
elif mode == estimator_lib.ModeKeys.PREDICT and passed_flat_state:
# The mode is PREDICT, but we're actually in export_savedmodel for
# serving. We want to return two graphs: one for filtering (state + data
# -> state) and one for predicting (state -> prediction).
return self._serving_ops(features)
class OneShotPredictionHead(TimeSeriesRegressionHead):
"""A time series head which exports a single stateless serving signature.
The serving default signature exported by this head expects `times`, `values`,
and any exogenous features, but no state. `values` has shape `[batch_size,
filter_length, num_features]` and `times` has shape `[batch_size,
total_length]`, where `total_length > filter_length`. Any exogenous features
must have their shapes prefixed by the shape of the `times` feature.
  When serving, this head first performs filtering on the series up to
  `filter_length`, starting from the model's default start state, and then
  computes and returns predictions for the remainder of the series.
Model state is neither accepted nor returned, so filtering must be performed
each time predictions are requested when using this head.
"""
def _check_predict_features(self, features):
"""Raises errors if features are not suitable for one-shot prediction."""
if feature_keys.PredictionFeatures.TIMES not in features:
raise ValueError("Expected a '{}' feature for prediction.".format(
feature_keys.PredictionFeatures.TIMES))
if feature_keys.TrainEvalFeatures.VALUES not in features:
raise ValueError("Expected a '{}' feature for prediction.".format(
feature_keys.TrainEvalFeatures.VALUES))
if feature_keys.PredictionFeatures.STATE_TUPLE not in features:
raise ValueError("Expected a '{}' feature for prediction.".format(
feature_keys.PredictionFeatures.STATE_TUPLE))
times_feature = features[feature_keys.PredictionFeatures.TIMES]
if not times_feature.get_shape().is_compatible_with([None, None]):
raise ValueError(
("Expected shape (batch dimension, window size) for feature '{}' "
"(got shape {})").format(feature_keys.PredictionFeatures.TIMES,
times_feature.get_shape()))
_check_feature_shapes_compatible_with(
features=features,
compatible_with_name=feature_keys.PredictionFeatures.TIMES,
compatible_with_value=times_feature,
ignore=set([
# Model-dependent shapes
feature_keys.PredictionFeatures.STATE_TUPLE,
# One shot prediction head relies on values being shorter than
# times. Even though we're predicting eventually, we need values for
# the filtering phase.
feature_keys.TrainEvalFeatures.VALUES,
]))
def _evaluate_ops(self, features):
"""Add ops for evaluation (aka filtering) to the graph."""
spec = super(OneShotPredictionHead, self)._evaluate_ops(features)
# No state is fed to OneShotPredictionHead, so we don't return it; it being
# a tuple can cause issues for downstream infrastructure.
del spec.eval_metric_ops[feature_keys.State.STATE_TUPLE]
return spec
def _serving_ops(self, features):
"""Add ops for serving to the graph."""
with variable_scope.variable_scope("model", use_resource=True):
filtering_features = {}
prediction_features = {}
values_length = array_ops.shape(
features[feature_keys.FilteringFeatures.VALUES])[1]
for key, value in features.items():
if key == feature_keys.State.STATE_TUPLE:
# Ignore state input. The model's default start state is replicated
# across the batch.
continue
if key == feature_keys.FilteringFeatures.VALUES:
filtering_features[key] = value
else:
filtering_features[key] = value[:, :values_length]
prediction_features[key] = value[:, values_length:]
cold_filtering_outputs = self.model.define_loss(
features=filtering_features, mode=estimator_lib.ModeKeys.EVAL)
prediction_features[feature_keys.State.STATE_TUPLE] = (
cold_filtering_outputs.end_state)
with variable_scope.variable_scope("model", reuse=True):
prediction_outputs = self.model.predict(
features=prediction_features)
return estimator_lib.EstimatorSpec(
mode=estimator_lib.ModeKeys.PREDICT,
export_outputs={
feature_keys.SavedModelLabels.PREDICT:
_NoStatePredictOutput(prediction_outputs),
},
# Likely unused, but it is necessary to return `predictions` to satisfy
# the Estimator's error checking.
predictions={})
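# A minimal sketch (editorial, not used by this module) of the stateless
# feature layout OneShotPredictionHead's serving signature expects, with
# numpy arrays standing in for the serving request. The "exogenous" key and
# the sizes are illustrative assumptions; only the shape relationships matter:
# `values` covers just the filtering prefix, while `times` and any exogenous
# features span the full series.
def _example_one_shot_serving_features(
    batch_size=2, filter_length=5, predict_length=3, num_features=1):
  """Illustrative only; shows expected shapes, not a real serving request."""
  import numpy  # Local import; the head itself consumes Tensors.
  total_length = filter_length + predict_length
  return {
      feature_keys.FilteringFeatures.TIMES:
          numpy.tile(numpy.arange(total_length)[None, :], (batch_size, 1)),
      feature_keys.FilteringFeatures.VALUES:
          numpy.zeros([batch_size, filter_length, num_features]),
      # Exogenous features are shaped [batch_size, total_length, ...].
      "exogenous": numpy.zeros([batch_size, total_length, 1]),
  }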
def _check_feature_shapes_compatible_with(features,
compatible_with_name,
compatible_with_value,
ignore=None):
"""Checks all features are compatible with the given time-like feature."""
if ignore is None:
ignore = set()
for name, value in features.items():
if name in ignore:
continue
feature_shape = value.get_shape()
if feature_shape.ndims is None:
continue
if feature_shape.ndims < 2:
raise ValueError(
("Features must have shape (batch dimension, window size, ...) "
"(got rank {} for feature '{}')").format(feature_shape.ndims, name))
if not feature_shape[:2].is_compatible_with(
compatible_with_value.get_shape()):
raise ValueError(
("Features must have shape (batch dimension, window size, ...) "
"where batch dimension and window size match the "
"'{times_feature}' feature (got shape {feature_shape} for "
"feature '{feature_name}' but shape {times_shape} for feature "
"'{times_feature}')").format(
times_feature=compatible_with_name,
feature_shape=feature_shape,
feature_name=name,
times_shape=compatible_with_value.get_shape()))
def _check_train_eval_features(features, model):
"""Raise errors if features are not suitable for training/evaluation."""
if feature_keys.TrainEvalFeatures.TIMES not in features:
raise ValueError("Expected a '{}' feature for training/evaluation.".format(
feature_keys.TrainEvalFeatures.TIMES))
if feature_keys.TrainEvalFeatures.VALUES not in features:
raise ValueError("Expected a '{}' feature for training/evaluation.".format(
feature_keys.TrainEvalFeatures.VALUES))
times_feature = features[feature_keys.TrainEvalFeatures.TIMES]
if not times_feature.get_shape().is_compatible_with([None, None]):
raise ValueError(
("Expected shape (batch dimension, window size) for feature '{}' "
"(got shape {})").format(feature_keys.TrainEvalFeatures.TIMES,
times_feature.get_shape()))
values_feature = features[feature_keys.TrainEvalFeatures.VALUES]
if not values_feature.get_shape().is_compatible_with(
[None, None, model.num_features]):
raise ValueError(
("Expected shape (batch dimension, window size, {num_features}) "
"for feature '{feature_name}', since the model was configured "
"with num_features={num_features} (got shape {got_shape})").format(
num_features=model.num_features,
feature_name=feature_keys.TrainEvalFeatures.VALUES,
             got_shape=values_feature.get_shape()))
_check_feature_shapes_compatible_with(
features=features,
compatible_with_name=feature_keys.TrainEvalFeatures.TIMES,
compatible_with_value=times_feature,
ignore=set([
feature_keys.State.STATE_TUPLE # Model-dependent shapes
]))
def _identity_metric_single(name, input_tensor):
"""A metric which takes on its last updated value.
This keeps evaluation metrics in sync with one another, since update ops are
run separately from their result Tensors. Simply returning (input_tensor,
no_op) as a metric with a value but no update means that a metric will come
from a different batch of data than metrics which cache values in a Variable
(e.g. the default loss metric).
Args:
name: A name for the metric.
input_tensor: Any Tensor.
Returns:
A tuple of (value, update_op).
"""
metric_variable = variable_scope.variable(
name="{}_identity_metric".format(name),
initial_value=array_ops.zeros([], dtype=input_tensor.dtype),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
validate_shape=False)
update_op = state_ops.assign(
metric_variable, input_tensor, validate_shape=False)
# This shape will be correct once the first update runs (but may be
# incomplete, so is not helpful for initializing the variable).
metric_variable.set_shape(input_tensor.get_shape())
return (metric_variable.value(), update_op)
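# A minimal usage sketch (editorial, not called by this module): the
# (value, update_op) pair returned by _identity_metric_single plugs directly
# into an EstimatorSpec's eval_metric_ops dictionary, which is how
# _evaluate_ops above reports in-sample predictions.
def _example_identity_metric_ops(prediction_tensor):
  """Illustrative only: build an eval_metric_ops-style dictionary entry."""
  return {
      "last_in_sample_prediction": _identity_metric_single(
          name="last_in_sample_prediction", input_tensor=prediction_tensor),
  }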
def _identity_metric_nested(name, input_tensors):
"""Create identity metrics for a nested tuple of Tensors."""
update_ops = []
value_tensors = []
for tensor_number, tensor in enumerate(nest.flatten(input_tensors)):
value_tensor, update_op = _identity_metric_single(
name="{}_{}".format(name, tensor_number), input_tensor=tensor)
update_ops.append(update_op)
value_tensors.append(value_tensor)
return (nest.pack_sequence_as(input_tensors, value_tensors),
control_flow_ops.group(*update_ops))
def state_to_dictionary(state_tuple):
"""Flatten model state into a dictionary with string keys."""
flattened = {}
for state_number, state_value in enumerate(nest.flatten(state_tuple)):
prefixed_state_name = "{}_{:02d}".format(feature_keys.State.STATE_PREFIX,
state_number)
flattened[prefixed_state_name] = state_value
return flattened
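# A minimal round-trip sketch (editorial, not used by this module):
# state_to_dictionary flattens nested state into "{STATE_PREFIX}_NN" keys,
# which is the naming TimeSeriesRegressionHead._gather_state parses back into
# the model's state structure. Plain Python floats stand in for Tensors here.
def _example_state_round_trip():
  """Illustrative only; demonstrates the flatten/repack naming convention."""
  state_tuple = (1.0, (2.0, 3.0))
  flattened = state_to_dictionary(state_tuple)  # Keys end in "_00", "_01", ...
  repacked = nest.pack_sequence_as(
      structure=state_tuple,
      flat_sequence=[value for _, value in sorted(flattened.items())])
  assert repacked == state_tuple
  return flattened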
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/head.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ar_model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.contrib.timeseries.python.timeseries import ar_model
from tensorflow.contrib.timeseries.python.timeseries import input_pipeline
from tensorflow.contrib.timeseries.python.timeseries import test_utils
from tensorflow.contrib.timeseries.python.timeseries.estimators import ARRegressor
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import PredictionFeatures
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import TrainEvalFeatures
from tensorflow.python.client import session
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator as coordinator_lib
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import training
class ARModelTest(test.TestCase):
def create_data(self,
noise_stddev,
anomaly_prob,
multiple_periods=False,
anomaly_stddev_scale=20):
self.period = 25
num_samples = 200
time = 1 + 3 * np.arange(num_samples).astype(np.int64)
    time_offset = (2 * np.pi * (time % self.period).astype(np.float64) /
                   self.period).reshape([-1, 1])
if multiple_periods:
period2 = 55
self.period = [self.period, period2]
      time_offset2 = ((time % period2).astype(np.float64) / period2).reshape(
          [-1, 1])
data1 = np.sin(time_offset / 2.0) ** 2 * (1 + time_offset2)
else:
data1 = np.sin(2 * time_offset) + np.cos(3 * time_offset)
data1 += noise_stddev / 4. * np.random.randn(num_samples, 1)
data2 = (np.sin(3 * time_offset) + np.cos(5 * time_offset) +
noise_stddev / 3. * np.random.randn(num_samples, 1))
# Add some anomalies to data1
if anomaly_prob > 0.:
num_anomalies = int(anomaly_prob * num_samples)
anomaly_values = (anomaly_stddev_scale * noise_stddev / 4 *
np.random.randn(num_anomalies))
indices = np.random.randint(0, num_samples, num_anomalies)
for index, val in zip(indices, anomaly_values):
data1[index] += val
data = np.concatenate((4 * data1, 3 * data2), axis=1)
split = int(num_samples * 0.8)
train_data = {TrainEvalFeatures.TIMES: time[0:split],
TrainEvalFeatures.VALUES: data[0:split]}
test_data = {TrainEvalFeatures.TIMES: time[split:],
TrainEvalFeatures.VALUES: data[split:]}
return (train_data, test_data)
# Note that most models will require many more steps to fully converge. We
# have used a small number of steps here to keep the running time small.
def train_helper(self, input_window_size, loss,
max_loss=None, train_steps=200,
anomaly_prob=0.01,
anomaly_distribution=None,
multiple_periods=False):
np.random.seed(3)
data_noise_stddev = 0.2
if max_loss is None:
if loss == ar_model.ARModel.NORMAL_LIKELIHOOD_LOSS:
max_loss = 1.0
else:
max_loss = 0.05 / (data_noise_stddev ** 2)
train_data, test_data = self.create_data(
noise_stddev=data_noise_stddev,
anomaly_prob=anomaly_prob,
multiple_periods=multiple_periods)
output_window_size = 10
window_size = input_window_size + output_window_size
class _RunConfig(estimator_lib.RunConfig):
@property
def tf_random_seed(self):
return 3
estimator = ARRegressor(
periodicities=self.period,
anomaly_prior_probability=0.01 if anomaly_distribution else None,
anomaly_distribution=anomaly_distribution,
num_features=2,
output_window_size=output_window_size,
num_time_buckets=20,
input_window_size=input_window_size,
hidden_layer_sizes=[16],
loss=loss,
config=_RunConfig())
train_input_fn = input_pipeline.RandomWindowInputFn(
time_series_reader=input_pipeline.NumpyReader(train_data),
window_size=window_size,
batch_size=64,
num_threads=1,
shuffle_seed=2)
test_input_fn = test_utils.AllWindowInputFn(
time_series_reader=input_pipeline.NumpyReader(test_data),
window_size=window_size)
# Test training
estimator.train(
input_fn=train_input_fn,
steps=train_steps)
test_evaluation = estimator.evaluate(input_fn=test_input_fn, steps=1)
test_loss = test_evaluation["loss"]
logging.info("Final test loss: %f", test_loss)
self.assertLess(test_loss, max_loss)
if loss == ar_model.ARModel.SQUARED_LOSS:
# Test that the evaluation loss is reported without input scaling.
self.assertAllClose(
test_loss,
np.mean((test_evaluation["mean"] - test_evaluation["observed"]) ** 2))
# Test predict
train_data_times = train_data[TrainEvalFeatures.TIMES]
train_data_values = train_data[TrainEvalFeatures.VALUES]
test_data_times = test_data[TrainEvalFeatures.TIMES]
test_data_values = test_data[TrainEvalFeatures.VALUES]
predict_times = np.expand_dims(np.concatenate(
[train_data_times[input_window_size:], test_data_times]), 0)
predict_true_values = np.expand_dims(np.concatenate(
[train_data_values[input_window_size:], test_data_values]), 0)
state_times = np.expand_dims(train_data_times[:input_window_size], 0)
state_values = np.expand_dims(
train_data_values[:input_window_size, :], 0)
state_exogenous = state_times[:, :, None][:, :, :0]
def prediction_input_fn():
return ({
PredictionFeatures.TIMES: training.limit_epochs(
predict_times, num_epochs=1),
PredictionFeatures.STATE_TUPLE: (state_times,
state_values,
state_exogenous)
}, {})
(predictions,) = tuple(estimator.predict(input_fn=prediction_input_fn))
predicted_mean = predictions["mean"][:, 0]
true_values = predict_true_values[0, :, 0]
if loss == ar_model.ARModel.NORMAL_LIKELIHOOD_LOSS:
variances = predictions["covariance"][:, 0]
standard_deviations = np.sqrt(variances)
# Note that we may get tighter bounds with more training steps.
errors = np.abs(predicted_mean - true_values) > 4 * standard_deviations
fraction_errors = np.mean(errors)
logging.info("Fraction errors: %f", fraction_errors)
def test_time_regression_squared(self):
self.train_helper(input_window_size=0,
train_steps=350,
loss=ar_model.ARModel.SQUARED_LOSS)
def test_autoregression_squared(self):
self.train_helper(input_window_size=15,
loss=ar_model.ARModel.SQUARED_LOSS)
def test_autoregression_short_input_window(self):
self.train_helper(input_window_size=8,
loss=ar_model.ARModel.SQUARED_LOSS)
def test_autoregression_normal(self):
self.train_helper(input_window_size=10,
loss=ar_model.ARModel.NORMAL_LIKELIHOOD_LOSS,
train_steps=300,
max_loss=50., # Just make sure there are no exceptions.
anomaly_distribution=None)
def test_autoregression_normal_multiple_periods(self):
self.train_helper(input_window_size=10,
loss=ar_model.ARModel.NORMAL_LIKELIHOOD_LOSS,
max_loss=2.0,
multiple_periods=True,
anomaly_distribution=None)
def test_autoregression_normal_anomalies_normal(self):
self.train_helper(
input_window_size=10,
loss=ar_model.ARModel.NORMAL_LIKELIHOOD_LOSS,
anomaly_distribution=ar_model.AnomalyMixtureARModel.GAUSSIAN_ANOMALY)
def test_autoregression_normal_anomalies_cauchy(self):
self.train_helper(
input_window_size=10,
max_loss=1.5,
loss=ar_model.ARModel.NORMAL_LIKELIHOOD_LOSS,
anomaly_distribution=ar_model.AnomalyMixtureARModel.CAUCHY_ANOMALY)
def test_wrong_window_size(self):
estimator = ARRegressor(
periodicities=10, num_features=1,
input_window_size=10, output_window_size=6)
def _bad_window_size_input_fn():
return ({TrainEvalFeatures.TIMES: [[1]],
TrainEvalFeatures.VALUES: [[[1.]]]},
None)
def _good_data():
return ({TrainEvalFeatures.TIMES: np.arange(16)[None, :],
TrainEvalFeatures.VALUES: array_ops.reshape(
np.arange(16), [1, 16, 1])},
None)
with self.assertRaisesRegexp(ValueError, "set window_size=16"):
estimator.train(input_fn=_bad_window_size_input_fn, steps=1)
# Get a checkpoint for evaluation
estimator.train(input_fn=_good_data, steps=1)
with self.assertRaisesRegexp(ValueError, "requires a window of at least"):
estimator.evaluate(input_fn=_bad_window_size_input_fn, steps=1)
def test_predictions_direct_flat(self):
g = ops.Graph()
with g.as_default():
model = ar_model.ARModel(periodicities=2,
num_features=1,
num_time_buckets=10,
input_window_size=2,
output_window_size=2,
prediction_model_factory=functools.partial(
ar_model.FlatPredictionModel,
hidden_layer_sizes=[40, 10]))
with session.Session():
predicted_values = model.predict({
PredictionFeatures.TIMES: [[4, 6, 10]],
PredictionFeatures.STATE_TUPLE: (
[[1, 2]], [[[1.], [2.]]], [[[], []]])
})
variables.global_variables_initializer().run()
self.assertAllEqual(predicted_values["mean"].eval().shape,
[1, 3, 1])
def test_predictions_direct_lstm(self):
g = ops.Graph()
with g.as_default():
model = ar_model.ARModel(periodicities=2,
num_features=1,
num_time_buckets=10,
input_window_size=2,
output_window_size=2,
prediction_model_factory=functools.partial(
ar_model.LSTMPredictionModel,
num_units=16))
with session.Session():
predicted_values = model.predict({
PredictionFeatures.TIMES: [[4, 6, 10]],
PredictionFeatures.STATE_TUPLE: (
[[1, 2]], [[[1.], [2.]]], [[[], []]])
})
variables.global_variables_initializer().run()
self.assertAllEqual(predicted_values["mean"].eval().shape,
[1, 3, 1])
def test_long_eval(self):
g = ops.Graph()
with g.as_default():
model = ar_model.ARModel(periodicities=2,
num_features=1,
num_time_buckets=10,
input_window_size=2,
output_window_size=1)
raw_features = {
TrainEvalFeatures.TIMES: [[1, 3, 5, 7, 11]],
TrainEvalFeatures.VALUES: [[[1.], [2.], [3.], [4.], [5.]]]}
chunked_features, _ = test_utils.AllWindowInputFn(
time_series_reader=input_pipeline.NumpyReader(raw_features),
window_size=3)()
model.initialize_graph()
with variable_scope.variable_scope("armodel") as scope:
raw_evaluation = model.define_loss(
raw_features, mode=estimator_lib.ModeKeys.EVAL)
with variable_scope.variable_scope(scope, reuse=True):
chunked_evaluation = model.define_loss(
chunked_features, mode=estimator_lib.ModeKeys.EVAL)
with session.Session() as sess:
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(sess, coord=coordinator)
variables.global_variables_initializer().run()
raw_evaluation_evaled, chunked_evaluation_evaled = sess.run(
[raw_evaluation, chunked_evaluation])
self.assertAllClose(chunked_evaluation_evaled.loss,
raw_evaluation_evaled.loss)
last_chunk_evaluation_state = [
state[-1, None] for state in
chunked_evaluation_evaled.end_state]
for last_chunk_state_member, raw_state_member in zip(
last_chunk_evaluation_state, raw_evaluation_evaled.end_state):
self.assertAllClose(last_chunk_state_member, raw_state_member)
self.assertAllEqual([[5, 7, 11]],
raw_evaluation_evaled.prediction_times)
for feature_name in raw_evaluation.predictions:
self.assertAllEqual(
              [1, 3, 1],  # batch, window, num_features. The window has 2
                          # cut off for the first input window.
raw_evaluation_evaled.predictions[feature_name].shape)
self.assertAllClose(
np.reshape(chunked_evaluation_evaled.predictions[feature_name],
[-1]),
np.reshape(raw_evaluation_evaled.predictions[feature_name],
[-1]))
coordinator.request_stop()
coordinator.join()
def test_long_eval_discard_indivisible(self):
g = ops.Graph()
with g.as_default():
model = ar_model.ARModel(periodicities=2,
num_features=1,
num_time_buckets=10,
input_window_size=2,
output_window_size=2)
raw_features = {
TrainEvalFeatures.TIMES: [[1, 3, 5, 7, 11]],
TrainEvalFeatures.VALUES: [[[1.], [2.], [3.], [4.], [5.]]]}
model.initialize_graph()
raw_evaluation = model.define_loss(
raw_features, mode=estimator_lib.ModeKeys.EVAL)
with session.Session() as sess:
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(sess, coord=coordinator)
variables.global_variables_initializer().run()
raw_evaluation_evaled = sess.run(raw_evaluation)
self.assertAllEqual([[7, 11]],
raw_evaluation_evaled.prediction_times)
for feature_name in raw_evaluation.predictions:
self.assertAllEqual(
[1, 2, 1], # batch, window, num_features. The window has two cut
# off for the first input window and one discarded so
# that the remainder is divisible into output windows.
raw_evaluation_evaled.predictions[feature_name].shape)
coordinator.request_stop()
coordinator.join()
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/ar_model_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for head."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from absl.testing import parameterized
import numpy
import six
from tensorflow.contrib.estimator.python.estimator import extenders
from tensorflow.contrib.timeseries.examples import lstm as lstm_example
from tensorflow.contrib.timeseries.python.timeseries import ar_model
from tensorflow.contrib.timeseries.python.timeseries import estimators as ts_estimators
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.contrib.timeseries.python.timeseries import head as ts_head_lib
from tensorflow.contrib.timeseries.python.timeseries import input_pipeline
from tensorflow.contrib.timeseries.python.timeseries import model
from tensorflow.contrib.timeseries.python.timeseries import state_management
from tensorflow.core.example import example_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.feature_column import feature_column_lib as feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import adam
from tensorflow.python.training import coordinator as coordinator_lib
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import training as train
class HeadTest(test.TestCase):
def test_labels_provided_error(self):
model_fn = _stub_model_fn()
for mode in [estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL,
estimator_lib.ModeKeys.PREDICT]:
with self.assertRaisesRegexp(ValueError, "received a `labels`"):
model_fn(features={}, labels={"a": "b"}, mode=mode)
with self.assertRaisesRegexp(ValueError, "received a `labels`"):
model_fn(features={}, labels=array_ops.zeros([]), mode=mode)
def test_unknown_mode(self):
model_fn = _stub_model_fn()
with self.assertRaisesRegexp(ValueError, "Unknown mode 'Not a mode'"):
model_fn(features={}, labels={}, mode="Not a mode")
class _TickerModel(object):
num_features = 1
dtype = dtypes.float32
def initialize_graph(self, input_statistics):
pass
def define_loss(self, features, mode):
del mode # unused
return model.ModelOutputs(
loss=features["ticker"],
end_state=(features["ticker"], features["ticker"]),
prediction_times=array_ops.zeros(()),
predictions={"ticker": features["ticker"]})
class EvaluationMetricsTests(test.TestCase):
def test_metrics_consistent(self):
# Tests that the identity metrics used to report in-sample predictions match
# the behavior of standard metrics.
g = ops.Graph()
with g.as_default():
features = {
feature_keys.TrainEvalFeatures.TIMES:
array_ops.zeros((1, 1)),
feature_keys.TrainEvalFeatures.VALUES:
array_ops.zeros((1, 1, 1)),
"ticker":
array_ops.reshape(
math_ops.cast(
variables.VariableV1(
name="ticker",
initial_value=0,
dtype=dtypes.int64,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
.count_up_to(10),
dtype=dtypes.float32), (1, 1, 1))
}
model_fn = ts_head_lib.TimeSeriesRegressionHead(
model=_TickerModel(),
state_manager=state_management.PassthroughStateManager(),
optimizer=train.GradientDescentOptimizer(0.001)).create_estimator_spec
outputs = model_fn(
features=features, labels=None, mode=estimator_lib.ModeKeys.EVAL)
metric_update_ops = [
metric[1] for metric in outputs.eval_metric_ops.values()]
loss_mean, loss_update = metrics.mean(outputs.loss)
metric_update_ops.append(loss_update)
with self.cached_session() as sess:
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(sess, coord=coordinator)
variables.local_variables_initializer().run()
sess.run(metric_update_ops)
loss_evaled, metric_evaled, nested_metric_evaled = sess.run(
(loss_mean, outputs.eval_metric_ops["ticker"][0],
outputs.eval_metric_ops[feature_keys.FilteringResults.STATE_TUPLE][
0][0]))
# The custom model_utils metrics for in-sample predictions should be in
# sync with the Estimator's mean metric for model loss.
self.assertAllClose(0., loss_evaled)
self.assertAllClose((((0.,),),), metric_evaled)
self.assertAllClose((((0.,),),), nested_metric_evaled)
coordinator.request_stop()
coordinator.join()
def test_custom_metrics(self):
"""Tests that the custom metrics can be applied to the estimator."""
model_dir = self.get_temp_dir()
estimator = ts_estimators.TimeSeriesRegressor(
model=lstm_example._LSTMModel(num_features=1, num_units=4),
optimizer=adam.AdamOptimizer(0.001),
config=estimator_lib.RunConfig(tf_random_seed=4),
model_dir=model_dir)
def input_fn():
return {
feature_keys.TrainEvalFeatures.TIMES: [[1, 2, 3], [7, 8, 9]],
feature_keys.TrainEvalFeatures.VALUES:
numpy.array([[[0.], [1.], [0.]], [[2.], [3.], [2.]]])
}
def metrics_fn(predictions, features):
# checking that the inputs are properly passed.
predict = predictions["mean"]
target = features[feature_keys.TrainEvalFeatures.VALUES][:, -1, 0]
return {
"plain_boring_metric386":
(math_ops.reduce_mean(math_ops.abs(predict - target)),
control_flow_ops.no_op()),
"fun_metric101": (math_ops.reduce_sum(predict + target),
control_flow_ops.no_op()),
}
# Evaluation without training is enough for testing custom metrics.
estimator = extenders.add_metrics(estimator, metrics_fn)
evaluation = estimator.evaluate(input_fn, steps=1)
self.assertIn("plain_boring_metric386", evaluation)
self.assertIn("fun_metric101", evaluation)
self.assertIn("average_loss", evaluation)
# The values are deterministic because of fixed tf_random_seed.
    # However, if they become flaky, remove such exact comparisons.
self.assertAllClose(evaluation["plain_boring_metric386"], 1.130380)
self.assertAllClose(evaluation["fun_metric101"], 10.435442)
class _StubModel(object):
num_features = 3
dtype = dtypes.float64
def initialize_graph(self, input_statistics):
del input_statistics # unused
def _stub_model_fn():
return ts_head_lib.TimeSeriesRegressionHead(
model=_StubModel(),
state_manager=state_management.PassthroughStateManager(),
optimizer=train.AdamOptimizer(0.001)).create_estimator_spec
class TrainEvalFeatureCheckingTests(test.TestCase):
def test_no_time_feature(self):
model_fn = _stub_model_fn()
for mode in [estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL]:
with self.assertRaisesRegexp(ValueError, "Expected a '{}' feature".format(
feature_keys.TrainEvalFeatures.TIMES)):
model_fn(
features={feature_keys.TrainEvalFeatures.VALUES: [[[1.]]]},
labels=None,
mode=mode)
def test_no_value_feature(self):
model_fn = _stub_model_fn()
for mode in [estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL]:
with self.assertRaisesRegexp(ValueError, "Expected a '{}' feature".format(
feature_keys.TrainEvalFeatures.VALUES)):
model_fn(
features={feature_keys.TrainEvalFeatures.TIMES: [[1]]},
labels=None,
mode=mode)
def test_bad_time_rank(self):
model_fn = _stub_model_fn()
for mode in [estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL]:
with self.assertRaisesRegexp(ValueError,
"Expected shape.*for feature '{}'".format(
feature_keys.TrainEvalFeatures.TIMES)):
model_fn(
features={
feature_keys.TrainEvalFeatures.TIMES: [[[1]]],
feature_keys.TrainEvalFeatures.VALUES: [[[1.]]]
},
labels=None,
mode=mode)
def test_bad_value_rank(self):
model_fn = _stub_model_fn()
for mode in [estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL]:
with self.assertRaisesRegexp(ValueError,
"Expected shape.*for feature '{}'".format(
feature_keys.TrainEvalFeatures.VALUES)):
model_fn(
features={
feature_keys.TrainEvalFeatures.TIMES: [[1]],
feature_keys.TrainEvalFeatures.VALUES: [[1.]]
},
labels=None,
mode=mode)
def test_bad_value_num_features(self):
model_fn = _stub_model_fn()
for mode in [estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL]:
with self.assertRaisesRegexp(
ValueError, "Expected shape.*, 3.*for feature '{}'".format(
feature_keys.TrainEvalFeatures.VALUES)):
model_fn(
features={
feature_keys.TrainEvalFeatures.TIMES: [[1]],
feature_keys.TrainEvalFeatures.VALUES: [[[1.]]]
},
labels=None,
mode=mode)
def test_bad_exogenous_shape(self):
model_fn = _stub_model_fn()
for mode in [estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL]:
with self.assertRaisesRegexp(
ValueError,
"Features must have shape.*for feature 'exogenous'"):
model_fn(
features={
feature_keys.TrainEvalFeatures.TIMES: [[1]],
feature_keys.TrainEvalFeatures.VALUES: [[[1., 2., 3.]]],
"exogenous": [[1], [2]]
},
labels=None,
mode=mode)
class PredictFeatureCheckingTests(test.TestCase):
def test_no_time_feature(self):
model_fn = _stub_model_fn()
with self.assertRaisesRegexp(ValueError, "Expected a '{}' feature".format(
feature_keys.PredictionFeatures.TIMES)):
model_fn(
features={
feature_keys.PredictionFeatures.STATE_TUPLE: ([[[1.]]], 1.)
},
labels=None,
mode=estimator_lib.ModeKeys.PREDICT)
def test_no_start_state_feature(self):
model_fn = _stub_model_fn()
with self.assertRaisesRegexp(ValueError, "Expected a '{}' feature".format(
feature_keys.PredictionFeatures.STATE_TUPLE)):
model_fn(
features={feature_keys.PredictionFeatures.TIMES: [[1]]},
labels=None,
mode=estimator_lib.ModeKeys.PREDICT)
def test_bad_time_rank(self):
model_fn = _stub_model_fn()
with self.assertRaisesRegexp(ValueError,
"Expected shape.*for feature '{}'".format(
feature_keys.PredictionFeatures.TIMES)):
model_fn(
features={
feature_keys.PredictionFeatures.TIMES: 1,
feature_keys.PredictionFeatures.STATE_TUPLE: (1, (2, 3.))
},
labels=None,
mode=estimator_lib.ModeKeys.PREDICT)
def test_bad_exogenous_shape(self):
model_fn = _stub_model_fn()
with self.assertRaisesRegexp(
ValueError,
"Features must have shape.*for feature 'exogenous'"):
model_fn(
features={
feature_keys.PredictionFeatures.TIMES: [[1]],
feature_keys.PredictionFeatures.STATE_TUPLE: (1, (2, 3.)),
"exogenous": 1.
},
labels=None,
mode=estimator_lib.ModeKeys.PREDICT)
def _custom_time_series_regressor(
model_dir, head_type, exogenous_feature_columns):
return ts_estimators.TimeSeriesRegressor(
model=lstm_example._LSTMModel(
num_features=5, num_units=128,
exogenous_feature_columns=exogenous_feature_columns),
optimizer=adam.AdamOptimizer(0.001),
config=estimator_lib.RunConfig(tf_random_seed=4),
state_manager=state_management.ChainingStateManager(),
head_type=head_type,
model_dir=model_dir)
def _structural_ensemble_regressor(
model_dir, head_type, exogenous_feature_columns):
return ts_estimators.StructuralEnsembleRegressor(
periodicities=None,
num_features=5,
exogenous_feature_columns=exogenous_feature_columns,
head_type=head_type,
model_dir=model_dir)
def _ar_lstm_regressor(
model_dir, head_type, exogenous_feature_columns):
return ts_estimators.TimeSeriesRegressor(
model=ar_model.ARModel(
periodicities=10, input_window_size=10, output_window_size=6,
num_features=5,
exogenous_feature_columns=exogenous_feature_columns,
prediction_model_factory=functools.partial(
ar_model.LSTMPredictionModel,
num_units=10)),
head_type=head_type,
model_dir=model_dir)
class OneShotTests(parameterized.TestCase):
@parameterized.named_parameters(
{"testcase_name": "ar_lstm_regressor",
"estimator_factory": _ar_lstm_regressor},
{"testcase_name": "custom_time_series_regressor",
"estimator_factory": _custom_time_series_regressor},
{"testcase_name": "structural_ensemble_regressor",
"estimator_factory": _structural_ensemble_regressor})
def test_one_shot_prediction_head_export(self, estimator_factory):
def _new_temp_dir():
return os.path.join(test.get_temp_dir(), str(ops.uid()))
model_dir = _new_temp_dir()
categorical_column = feature_column.categorical_column_with_hash_bucket(
key="categorical_exogenous_feature", hash_bucket_size=16)
exogenous_feature_columns = [
feature_column.numeric_column(
"2d_exogenous_feature", shape=(2,)),
feature_column.embedding_column(
categorical_column=categorical_column, dimension=10)]
estimator = estimator_factory(
model_dir=model_dir,
exogenous_feature_columns=exogenous_feature_columns,
head_type=ts_head_lib.OneShotPredictionHead)
train_features = {
feature_keys.TrainEvalFeatures.TIMES: numpy.arange(
20, dtype=numpy.int64),
feature_keys.TrainEvalFeatures.VALUES: numpy.tile(numpy.arange(
20, dtype=numpy.float32)[:, None], [1, 5]),
"2d_exogenous_feature": numpy.ones([20, 2]),
"categorical_exogenous_feature": numpy.array(
["strkey"] * 20)[:, None]
}
train_input_fn = input_pipeline.RandomWindowInputFn(
input_pipeline.NumpyReader(train_features), shuffle_seed=2,
num_threads=1, batch_size=16, window_size=16)
estimator.train(input_fn=train_input_fn, steps=5)
result = estimator.evaluate(input_fn=train_input_fn, steps=1)
self.assertIn("average_loss", result)
self.assertNotIn(feature_keys.State.STATE_TUPLE, result)
input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
export_location = estimator.export_saved_model(_new_temp_dir(),
input_receiver_fn)
graph = ops.Graph()
with graph.as_default():
with session_lib.Session() as session:
signatures = loader.load(
session, [tag_constants.SERVING], export_location)
self.assertEqual([feature_keys.SavedModelLabels.PREDICT],
list(signatures.signature_def.keys()))
predict_signature = signatures.signature_def[
feature_keys.SavedModelLabels.PREDICT]
six.assertCountEqual(
self,
[feature_keys.FilteringFeatures.TIMES,
feature_keys.FilteringFeatures.VALUES,
"2d_exogenous_feature",
"categorical_exogenous_feature"],
predict_signature.inputs.keys())
features = {
feature_keys.TrainEvalFeatures.TIMES: numpy.tile(
numpy.arange(35, dtype=numpy.int64)[None, :], [2, 1]),
feature_keys.TrainEvalFeatures.VALUES: numpy.tile(numpy.arange(
20, dtype=numpy.float32)[None, :, None], [2, 1, 5]),
"2d_exogenous_feature": numpy.ones([2, 35, 2]),
"categorical_exogenous_feature": numpy.tile(numpy.array(
["strkey"] * 35)[None, :, None], [2, 1, 1])
}
feeds = {
graph.as_graph_element(input_value.name): features[input_key]
for input_key, input_value in predict_signature.inputs.items()}
fetches = {output_key: graph.as_graph_element(output_value.name)
for output_key, output_value
in predict_signature.outputs.items()}
output = session.run(fetches, feed_dict=feeds)
self.assertEqual((2, 15, 5), output["mean"].shape)
# Build a parsing input function, then make a tf.Example for it to parse.
export_location = estimator.export_saved_model(
_new_temp_dir(),
estimator.build_one_shot_parsing_serving_input_receiver_fn(
filtering_length=20, prediction_length=15))
graph = ops.Graph()
with graph.as_default():
with session_lib.Session() as session:
example = example_pb2.Example()
times = example.features.feature[feature_keys.TrainEvalFeatures.TIMES]
values = example.features.feature[feature_keys.TrainEvalFeatures.VALUES]
times.int64_list.value.extend(range(35))
for i in range(20):
values.float_list.value.extend(
[float(i) * 2. + feature_number
for feature_number in range(5)])
real_feature = example.features.feature["2d_exogenous_feature"]
        categorical_feature = example.features.feature[
            "categorical_exogenous_feature"]
        for _ in range(35):
          real_feature.float_list.value.extend([1, 1])
          categorical_feature.bytes_list.value.append(b"strkey")
# Serialize the tf.Example for feeding to the Session
examples = [example.SerializeToString()] * 2
signatures = loader.load(
session, [tag_constants.SERVING], export_location)
predict_signature = signatures.signature_def[
feature_keys.SavedModelLabels.PREDICT]
((_, input_value),) = predict_signature.inputs.items()
feeds = {graph.as_graph_element(input_value.name): examples}
fetches = {output_key: graph.as_graph_element(output_value.name)
for output_key, output_value
in predict_signature.outputs.items()}
output = session.run(fetches, feed_dict=feeds)
self.assertEqual((2, 15, 5), output["mean"].shape)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/head_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Estimators for time series models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.contrib.timeseries.python.timeseries import ar_model
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.contrib.timeseries.python.timeseries import head as ts_head_lib
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.contrib.timeseries.python.timeseries import state_management
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import state_space_model
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import structural_ensemble
from tensorflow.contrib.timeseries.python.timeseries.state_space_models.filtering_postprocessor import StateInterpolatingAnomalyDetector
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.estimator.canned import optimizers
from tensorflow.python.estimator.export import export_lib
from tensorflow.python.feature_column import feature_column_lib as feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.training import training as train
from tensorflow.python.util import nest
class TimeSeriesRegressor(estimator_lib.Estimator):
"""An Estimator to fit and evaluate a time series model."""
def __init__(self,
model,
state_manager=None,
optimizer=None,
model_dir=None,
config=None,
head_type=ts_head_lib.TimeSeriesRegressionHead):
"""Initialize the Estimator.
Args:
model: The time series model to wrap (inheriting from TimeSeriesModel).
state_manager: The state manager to use, or (by default)
PassthroughStateManager if none is needed.
optimizer: The optimization algorithm to use when training, inheriting
from tf.train.Optimizer. Defaults to Adam with step size 0.02.
model_dir: See `Estimator`.
config: See `Estimator`.
head_type: The kind of head to use for the model (inheriting from
`TimeSeriesRegressionHead`).
"""
input_statistics_generator = math_utils.InputStatisticsFromMiniBatch(
dtype=model.dtype, num_features=model.num_features)
if state_manager is None:
if isinstance(model, ar_model.ARModel):
state_manager = state_management.FilteringOnlyStateManager()
else:
state_manager = state_management.PassthroughStateManager()
if optimizer is None:
optimizer = train.AdamOptimizer(0.02)
self._model = model
ts_regression_head = head_type(
model=model,
state_manager=state_manager,
optimizer=optimizer,
input_statistics_generator=input_statistics_generator)
model_fn = ts_regression_head.create_estimator_spec
super(TimeSeriesRegressor, self).__init__(
model_fn=model_fn, model_dir=model_dir, config=config)
def _model_start_state_placeholders(self,
batch_size_tensor,
static_batch_size=None):
"""Creates placeholders with zeroed start state for the current model."""
gathered_state = {}
# Models may not know the shape of their state without creating some
# variables/ops. Avoid polluting the default graph by making a new one. We
# use only static metadata from the returned Tensors.
with ops.Graph().as_default():
self._model.initialize_graph()
# Evaluate the initial state as same-dtype "zero" values. These zero
# constants aren't used, but are necessary for feeding to
# placeholder_with_default for the "cold start" case where state is not
# fed to the model.
def _zeros_like_constant(tensor):
return tensor_util.constant_value(array_ops.zeros_like(tensor))
start_state = nest.map_structure(_zeros_like_constant,
self._model.get_start_state())
for prefixed_state_name, state in ts_head_lib.state_to_dictionary(
start_state).items():
state_shape_with_batch = tensor_shape.TensorShape(
(static_batch_size,)).concatenate(state.shape)
default_state_broadcast = array_ops.tile(
state[None, ...],
multiples=array_ops.concat([
batch_size_tensor[None],
array_ops.ones(len(state.shape), dtype=dtypes.int32)
],
axis=0))
gathered_state[prefixed_state_name] = array_ops.placeholder_with_default(
input=default_state_broadcast,
name=prefixed_state_name,
shape=state_shape_with_batch)
return gathered_state
def build_one_shot_parsing_serving_input_receiver_fn(self,
filtering_length,
prediction_length,
default_batch_size=None,
values_input_dtype=None,
truncate_values=False):
"""Build an input_receiver_fn for export_savedmodel accepting tf.Examples.
Only compatible with `OneShotPredictionHead` (see `head`).
Args:
filtering_length: The number of time steps used as input to the model, for
which values are provided. If more than `filtering_length` values are
provided (via `truncate_values`), only the first `filtering_length`
values are used.
prediction_length: The number of time steps requested as predictions from
the model. Times and all exogenous features must be provided for these
steps.
default_batch_size: If specified, must be a scalar integer. Sets the batch
size in the static shape information of all feature Tensors, which means
only this batch size will be accepted by the exported model. If None
(default), static shape information for batch sizes is omitted.
values_input_dtype: An optional dtype specification for values in the
tf.Example protos (either float32 or int64, since these are the numeric
types supported by tf.Example). After parsing, values are cast to the
model's dtype (float32 or float64).
truncate_values: If True, expects `filtering_length + prediction_length`
values to be provided, but only uses the first `filtering_length`. If
False (default), exactly `filtering_length` values must be provided.
Returns:
An input_receiver_fn which may be passed to the Estimator's
export_savedmodel.
Expects features contained in a vector of serialized tf.Examples with
shape [batch size] (dtype `tf.string`), each tf.Example containing
features with the following shapes:
times: [filtering_length + prediction_length] integer
values: [filtering_length, num features] floating point. If
`truncate_values` is True, expects `filtering_length +
prediction_length` values but only uses the first `filtering_length`.
all exogenous features: [filtering_length + prediction_length, ...]
(various dtypes)
"""
if values_input_dtype is None:
values_input_dtype = dtypes.float32
if truncate_values:
values_proto_length = filtering_length + prediction_length
else:
values_proto_length = filtering_length
def _serving_input_receiver_fn():
"""A receiver function to be passed to export_savedmodel."""
times_column = feature_column.numeric_column(
key=feature_keys.TrainEvalFeatures.TIMES, dtype=dtypes.int64)
values_column = feature_column.numeric_column(
key=feature_keys.TrainEvalFeatures.VALUES,
dtype=values_input_dtype,
shape=(self._model.num_features,))
parsed_features_no_sequence = (
feature_column.make_parse_example_spec(
list(self._model.exogenous_feature_columns) +
[times_column, values_column]))
parsed_features = {}
for key, feature_spec in parsed_features_no_sequence.items():
if isinstance(feature_spec, parsing_ops.FixedLenFeature):
if key == feature_keys.TrainEvalFeatures.VALUES:
parsed_features[key] = feature_spec._replace(
shape=((values_proto_length,) + feature_spec.shape))
else:
parsed_features[key] = feature_spec._replace(
shape=((filtering_length + prediction_length,) +
feature_spec.shape))
elif feature_spec.dtype == dtypes.string:
parsed_features[key] = parsing_ops.FixedLenFeature(
shape=(filtering_length + prediction_length,),
dtype=dtypes.string)
else: # VarLenFeature
raise ValueError("VarLenFeatures not supported, got %s for key %s" %
(feature_spec, key))
tfexamples = array_ops.placeholder(
shape=[default_batch_size], dtype=dtypes.string, name="input")
features = parsing_ops.parse_example(
serialized=tfexamples, features=parsed_features)
features[feature_keys.TrainEvalFeatures.TIMES] = array_ops.squeeze(
features[feature_keys.TrainEvalFeatures.TIMES], axis=-1)
features[feature_keys.TrainEvalFeatures.VALUES] = math_ops.cast(
features[feature_keys.TrainEvalFeatures.VALUES],
dtype=self._model.dtype)[:, :filtering_length]
features.update(
self._model_start_state_placeholders(
batch_size_tensor=array_ops.shape(
features[feature_keys.TrainEvalFeatures.TIMES])[0],
static_batch_size=default_batch_size))
return export_lib.ServingInputReceiver(features, {"examples": tfexamples})
return _serving_input_receiver_fn
def build_raw_serving_input_receiver_fn(self,
default_batch_size=None,
default_series_length=None):
"""Build an input_receiver_fn for export_savedmodel which accepts arrays.
Automatically creates placeholders for exogenous `FeatureColumn`s passed to
the model.
Args:
default_batch_size: If specified, must be a scalar integer. Sets the batch
size in the static shape information of all feature Tensors, which means
only this batch size will be accepted by the exported model. If None
(default), static shape information for batch sizes is omitted.
default_series_length: If specified, must be a scalar integer. Sets the
series length in the static shape information of all feature Tensors,
which means only this series length will be accepted by the exported
model. If None (default), static shape information for series length is
omitted.
Returns:
An input_receiver_fn which may be passed to the Estimator's
export_savedmodel.
"""
def _serving_input_receiver_fn():
"""A receiver function to be passed to export_savedmodel."""
placeholders = {}
time_placeholder = array_ops.placeholder(
name=feature_keys.TrainEvalFeatures.TIMES,
dtype=dtypes.int64,
shape=[default_batch_size, default_series_length])
placeholders[feature_keys.TrainEvalFeatures.TIMES] = time_placeholder
# Values are only necessary when filtering. For prediction the default
# value will be ignored.
placeholders[feature_keys.TrainEvalFeatures.VALUES] = (
array_ops.placeholder_with_default(
name=feature_keys.TrainEvalFeatures.VALUES,
input=array_ops.zeros(
shape=[
default_batch_size if default_batch_size else 0,
default_series_length if default_series_length else 0,
self._model.num_features
],
dtype=self._model.dtype),
shape=(default_batch_size, default_series_length,
self._model.num_features)))
if self._model.exogenous_feature_columns:
with ops.Graph().as_default():
# Default placeholders have only an unknown batch dimension. Make them
# in a separate graph, then splice in the series length to the shapes
# and re-create them in the outer graph.
parsed_features = (
feature_column.make_parse_example_spec(
self._model.exogenous_feature_columns))
placeholder_features = parsing_ops.parse_example(
serialized=array_ops.placeholder(
shape=[None], dtype=dtypes.string),
features=parsed_features)
exogenous_feature_shapes = {
key: (value.get_shape(), value.dtype) for key, value
in placeholder_features.items()}
for feature_key, (batch_only_feature_shape,
value_dtype) in (exogenous_feature_shapes.items()):
batch_only_feature_shape = (
batch_only_feature_shape.with_rank_at_least(1).as_list())
feature_shape = ([default_batch_size, default_series_length] +
batch_only_feature_shape[1:])
placeholders[feature_key] = array_ops.placeholder(
dtype=value_dtype, name=feature_key, shape=feature_shape)
batch_size_tensor = array_ops.shape(time_placeholder)[0]
placeholders.update(
self._model_start_state_placeholders(
batch_size_tensor, static_batch_size=default_batch_size))
return export_lib.ServingInputReceiver(placeholders, placeholders)
return _serving_input_receiver_fn
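# A minimal usage sketch, assuming `model` is a `TimeSeriesModel` subclass,
# `input_fn` is a time series input function (for example one built with
# `RandomWindowInputFn`), and `export_dir` is a writable directory; all three
# names are caller-supplied placeholders introduced here for illustration.
# This mirrors how the tests above train, evaluate, and export through
# `build_raw_serving_input_receiver_fn`.
def _example_time_series_regressor_usage(model, input_fn, export_dir):
  """Trains, evaluates, and exports a `TimeSeriesRegressor` wrapping `model`."""
  estimator = TimeSeriesRegressor(
      model=model, optimizer=train.AdamOptimizer(0.001))
  estimator.train(input_fn=input_fn, steps=10)
  evaluation = estimator.evaluate(input_fn=input_fn, steps=1)
  export_location = estimator.export_saved_model(
      export_dir, estimator.build_raw_serving_input_receiver_fn())
  return evaluation, export_location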
class ARRegressor(TimeSeriesRegressor):
"""An Estimator for an (optionally non-linear) autoregressive model.
ARRegressor is a window-based model, inputting fixed windows of length
`input_window_size` and outputting fixed windows of length
`output_window_size`. These two parameters must add up to the window_size
passed to the `Chunker` used to create an `input_fn` for training or
evaluation. `RandomWindowInputFn` is suggested for both training and
evaluation, although it may be seeded for deterministic evaluation.
"""
def __init__(self,
periodicities,
input_window_size,
output_window_size,
num_features,
exogenous_feature_columns=None,
num_time_buckets=10,
loss=ar_model.ARModel.NORMAL_LIKELIHOOD_LOSS,
hidden_layer_sizes=None,
anomaly_prior_probability=None,
anomaly_distribution=None,
optimizer=None,
model_dir=None,
config=None):
"""Initialize the Estimator.
Args:
periodicities: periodicities of the input data, in the same units as the
time feature. Note this can be a single value or a list of values for
multiple periodicities.
input_window_size: Number of past time steps of data to look at when doing
the regression.
output_window_size: Number of future time steps to predict. Note that
setting it to > 1 empirically seems to give a better fit.
num_features: The dimensionality of the time series (one for univariate,
more than one for multivariate).
exogenous_feature_columns: A list of `tf.feature_column`s (for example
`tf.feature_column.embedding_column`) corresponding to exogenous
features which provide extra information to the model but are not part
of the series to be predicted. Passed to
`tf.compat.v1.feature_column.input_layer`.
num_time_buckets: Number of buckets into which to divide (time %
periodicity) for generating time based features.
loss: Loss function to use for training. Currently supported values are
SQUARED_LOSS and NORMAL_LIKELIHOOD_LOSS. Note that for
NORMAL_LIKELIHOOD_LOSS, we train the covariance term as well. For
SQUARED_LOSS, the evaluation loss is reported based on un-scaled
observations and predictions, while the training loss is computed on
normalized data.
hidden_layer_sizes: list of sizes of hidden layers.
anomaly_prior_probability: If specified, constructs a mixture model under
which anomalies (modeled with `anomaly_distribution`) have this prior
probability. See `AnomalyMixtureARModel`.
anomaly_distribution: May not be specified unless
anomaly_prior_probability is specified and is not None. Controls the
distribution of anomalies under the mixture model. Currently either
`ar_model.AnomalyMixtureARModel.GAUSSIAN_ANOMALY` or
`ar_model.AnomalyMixtureARModel.CAUCHY_ANOMALY`. See
`AnomalyMixtureARModel`. Defaults to `GAUSSIAN_ANOMALY`.
optimizer: The optimization algorithm to use when training, inheriting
from tf.train.Optimizer. Defaults to Adagrad with step size 0.1.
model_dir: See `Estimator`.
config: See `Estimator`.
Raises:
ValueError: For invalid combinations of arguments.
"""
if optimizer is None:
optimizer = train.AdagradOptimizer(0.1)
if anomaly_prior_probability is None and anomaly_distribution is not None:
raise ValueError("anomaly_prior_probability is required if "
"anomaly_distribution is specified.")
if anomaly_prior_probability is None:
if anomaly_distribution is None:
anomaly_distribution = ar_model.AnomalyMixtureARModel.GAUSSIAN_ANOMALY
model = ar_model.ARModel(
periodicities=periodicities,
num_features=num_features,
prediction_model_factory=functools.partial(
ar_model.FlatPredictionModel,
hidden_layer_sizes=hidden_layer_sizes),
exogenous_feature_columns=exogenous_feature_columns,
num_time_buckets=num_time_buckets,
input_window_size=input_window_size,
output_window_size=output_window_size,
loss=loss)
else:
if loss != ar_model.ARModel.NORMAL_LIKELIHOOD_LOSS:
raise ValueError(
"AnomalyMixtureARModel only supports "
"ar_model.ARModel.NORMAL_LIKELIHOOD_LOSS for its loss argument.")
model = ar_model.AnomalyMixtureARModel(
periodicities=periodicities,
input_window_size=input_window_size,
output_window_size=output_window_size,
num_features=num_features,
prediction_model_factory=functools.partial(
ar_model.FlatPredictionModel,
hidden_layer_sizes=hidden_layer_sizes),
exogenous_feature_columns=exogenous_feature_columns,
num_time_buckets=num_time_buckets,
anomaly_prior_probability=anomaly_prior_probability,
anomaly_distribution=anomaly_distribution)
state_manager = state_management.FilteringOnlyStateManager()
super(ARRegressor, self).__init__(
model=model,
state_manager=state_manager,
optimizer=optimizer,
model_dir=model_dir,
config=config)
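# A minimal usage sketch of `ARRegressor`, assuming `train_input_fn` is a
# caller-supplied placeholder built with a window size of 16 (e.g.
# `RandomWindowInputFn(..., window_size=16)`), so that
# `input_window_size + output_window_size` matches the input window as the
# class docstring requires.
def _example_ar_regressor_usage(train_input_fn):
  """Fits a univariate `ARRegressor` on windows of length 16."""
  estimator = ARRegressor(
      periodicities=100,
      input_window_size=10,
      output_window_size=6,  # 10 + 6 == 16, the input_fn's window_size.
      num_features=1)
  estimator.train(input_fn=train_input_fn, steps=10)
  return estimator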
# TODO(b/113684821): Add detailed documentation on what the input_fn should do.
# Add an example of making and returning a Dataset object. Determine if
# endogenous features can be passed in as FeatureColumns. Move ARModel's loss
# functions into a more general location.
class LSTMAutoRegressor(TimeSeriesRegressor):
"""An Estimator for an LSTM autoregressive model.
LSTMAutoRegressor is a window-based model, inputting fixed windows of length
`input_window_size` and outputting fixed windows of length
`output_window_size`. These two parameters must add up to the window_size
of data returned by the `input_fn`.
Each periodicity in the `periodicities` arg is divided by the `num_timesteps`
into timesteps that are represented as time features added to the model.
A good heuristic for picking an appropriate periodicity for a given data set
would be the length of cycles in the data. For example, energy usage in a
home is typically cyclic each day. If the time feature in a home energy
usage dataset is in the unit of hours, then 24 would be an appropriate
periodicity. Similarly, a good heuristic for `num_timesteps` is how often the
data is expected to change within the cycle. For the aforementioned home
energy usage dataset and periodicity of 24, then 48 would be a reasonable
value if usage is expected to change every half hour.
Each feature's value for a given example with time t is the difference
between t and the start of the timestep it falls under. If it doesn't fall
under a feature's associated timestep, then that feature's value is zero.
For example: if `periodicities` = (9, 12) and `num_timesteps` = 3, then 6
features would be added to the model, 3 for periodicity 9 and 3 for
periodicity 12.
For an example data point where t = 17:
- It's in the 3rd timestep for periodicity 9 (2nd period is 9-18 and 3rd
timestep is 15-18)
- It's in the 2nd timestep for periodicity 12 (2nd period is 12-24 and
2nd timestep is between 16-20).
Therefore the 6 added features for this row with t = 17 would be:
# Feature name (periodicity#_timestep#), feature value
P9_T1, 0 # not in first timestep
P9_T2, 0 # not in second timestep
P9_T3, 2 # 17 - 15 since 15 is the start of the 3rd timestep
P12_T1, 0 # not in first timestep
P12_T2, 1 # 17 - 16 since 16 is the start of the 2nd timestep
P12_T3, 0 # not in third timestep
Example Code:
```python
extra_feature_columns = (
feature_column.numeric_column("exogenous_variable"),
)
estimator = LSTMAutoRegressor(
periodicities=10,
input_window_size=10,
output_window_size=5,
model_dir="/path/to/model/dir",
num_features=1,
extra_feature_columns=extra_feature_columns,
num_timesteps=50,
num_units=10,
optimizer=tf.compat.v1.train.ProximalAdagradOptimizer(...))
# Input builders
def input_fn_train():
return {
"times": tf.range(15)[None, :],
"values": tf.random.normal(shape=[1, 15, 1])
}
estimator.train(input_fn=input_fn_train, steps=100)
def input_fn_eval():
pass
metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
def input_fn_predict():
pass
predictions = estimator.predict(input_fn=input_fn_predict)
```
"""
def __init__(self,
periodicities,
input_window_size,
output_window_size,
model_dir=None,
num_features=1,
extra_feature_columns=None,
num_timesteps=10,
loss=ar_model.ARModel.NORMAL_LIKELIHOOD_LOSS,
num_units=128,
optimizer="Adam",
config=None):
"""Initialize the Estimator.
Args:
periodicities: periodicities of the input data, in the same units as the
time feature (for example 24 if feeding hourly data with a daily
periodicity, or 60 * 24 if feeding minute-level data with daily
periodicity). Note this can be a single value or a list of values for
multiple periodicities.
input_window_size: Number of past time steps of data to look at when doing
the regression.
output_window_size: Number of future time steps to predict. Note that
setting this value to > 1 empirically seems to give a better fit.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
num_features: The dimensionality of the time series (default value is one
for univariate, more than one for multivariate).
extra_feature_columns: A list of `tf.feature_column`s (for example
`tf.feature_column.embedding_column`) corresponding to features which
provide extra information to the model but are not part of the series to
be predicted.
num_timesteps: Number of buckets into which to divide (time %
periodicity). This value multiplied by the number of periodicities is
the number of time features added to the model.
loss: Loss function to use for training. Currently supported values are
SQUARED_LOSS and NORMAL_LIKELIHOOD_LOSS. Note that for
NORMAL_LIKELIHOOD_LOSS, we train the covariance term as well. For
SQUARED_LOSS, the evaluation loss is reported based on un-scaled
observations and predictions, while the training loss is computed on
normalized data.
num_units: The size of the hidden state in the encoder and decoder LSTM
cells.
optimizer: string, `tf.compat.v1.train.Optimizer` object, or callable that
defines the optimizer algorithm to use for training. Defaults to the
Adam optimizer with a learning rate of 0.01.
config: Optional `estimator.RunConfig` object to configure the runtime
settings.
"""
optimizer = optimizers.get_optimizer_instance(optimizer, learning_rate=0.01)
model = ar_model.ARModel(
periodicities=periodicities,
input_window_size=input_window_size,
output_window_size=output_window_size,
num_features=num_features,
exogenous_feature_columns=extra_feature_columns,
num_time_buckets=num_timesteps,
loss=loss,
prediction_model_factory=functools.partial(
ar_model.LSTMPredictionModel, num_units=num_units))
state_manager = state_management.FilteringOnlyStateManager()
super(LSTMAutoRegressor, self).__init__(
model=model,
state_manager=state_manager,
optimizer=optimizer,
model_dir=model_dir,
config=config,
head_type=ts_head_lib.OneShotPredictionHead)
class StateSpaceRegressor(TimeSeriesRegressor):
"""An Estimator for general state space models."""
def __init__(self,
model,
state_manager=None,
optimizer=None,
model_dir=None,
config=None,
head_type=ts_head_lib.TimeSeriesRegressionHead):
"""See TimeSeriesRegressor. Uses the ChainingStateManager by default."""
if not isinstance(model, state_space_model.StateSpaceModel):
raise ValueError(
"StateSpaceRegressor only supports state space models (children of "
"StateSpaceModel) in its `model` argument, got {}.".format(model))
if state_manager is None:
state_manager = state_management.ChainingStateManager()
super(StateSpaceRegressor, self).__init__(
model=model,
state_manager=state_manager,
optimizer=optimizer,
model_dir=model_dir,
config=config,
head_type=head_type)
class StructuralEnsembleRegressor(StateSpaceRegressor):
"""An Estimator for structural time series models.
"Structural" refers to the fact that this model explicitly accounts for
structure in the data, such as periodicity and trends.
`StructuralEnsembleRegressor` is a state space model. It contains components
for modeling level, local linear trends, periodicity, and mean-reverting
transients via a moving average component. Multivariate series are fit with
full covariance matrices for observation and latent state transition noise,
each feature of the multivariate series having its own latent components.
Note that unlike `ARRegressor`, `StructuralEnsembleRegressor` is sequential,
and so accepts variable window sizes with the same model.
For training, `RandomWindowInputFn` is recommended as an `input_fn`. Model
state is managed through `ChainingStateManager`: since state space models are
inherently sequential, we save state from previous iterations to get
approximate/eventual consistency while achieving good performance through
batched computation.
For evaluation, either pass a significant chunk of the series in a single
window (e.g. set `window_size` to the whole series with
`WholeDatasetInputFn`), or use enough random evaluation iterations to cover
several passes through the whole dataset. Either method will ensure that stale
saved state has been flushed.
"""
def __init__(self,
periodicities,
num_features,
cycle_num_latent_values=11,
moving_average_order=4,
autoregressive_order=0,
exogenous_feature_columns=None,
exogenous_update_condition=None,
dtype=dtypes.float64,
anomaly_prior_probability=None,
optimizer=None,
model_dir=None,
config=None,
head_type=ts_head_lib.TimeSeriesRegressionHead):
"""Initialize the Estimator.
Args:
periodicities: The expected periodicity of the data (for example 24 if
feeding hourly data with a daily periodicity, or 60 * 24 if feeding
minute-level data with daily periodicity). Either a scalar or a list.
This parameter can be any real value, and does not control the size of
        the model. However, increasing this without increasing
        `cycle_num_latent_values` will lead to smoother periodic behavior, as the
same number of distinct values will be cycled through over a longer
period of time.
num_features: The dimensionality of the time series (one for univariate,
more than one for multivariate).
cycle_num_latent_values: Along with `moving_average_order` and
`num_features`, controls the latent state size of the model. Square
matrices of size `num_features * (moving_average_order +
cycle_num_latent_values + 3)` are created and multiplied, so larger
values may be slow. The trade-off is with resolution: cycling between
a smaller number of latent values means that only smoother functions
can be modeled.
moving_average_order: Controls model size (along with
`cycle_num_latent_values` and `autoregressive_order`) and the number of
steps before transient deviations revert to the mean defined by the
period and level/trend components.
autoregressive_order: Each contribution from this component is a linear
combination of this many previous contributions. Also helps to determine
the model size. Learning autoregressive coefficients typically requires
more steps and a smaller step size than other components.
exogenous_feature_columns: A list of `tf.feature_column`s (for example
`tf.feature_column.embedding_column`) corresponding to exogenous
features which provide extra information to the model but are not part
of the series to be predicted. Passed to
`tf.compat.v1.feature_column.input_layer`.
exogenous_update_condition: A function taking two Tensor arguments,
`times` (shape [batch size]) and `features` (a dictionary mapping
exogenous feature keys to Tensors with shapes [batch size, ...]), and
returning a boolean Tensor with shape [batch size] indicating whether
state should be updated using exogenous features for each part of the
batch. Where it is False, no exogenous update is performed. If None
(default), exogenous updates are always performed. Useful for avoiding
"leaky" frequent exogenous updates when sparse updates are desired.
Called only during graph construction. See the "known anomaly" example
for example usage.
dtype: The floating point data type to compute with. float32 may be
faster, but can be problematic for larger models and longer time series.
anomaly_prior_probability: If not None, the model attempts to
automatically detect and ignore anomalies during training. This
parameter then controls the prior probability of an anomaly. Values
closer to 0 mean that points will be discarded less frequently. The
default value (None) means that anomalies are not discarded, which may
be slightly faster.
optimizer: The optimization algorithm to use when training, inheriting
from tf.train.Optimizer. Defaults to Adam with step size 0.02.
model_dir: See `Estimator`.
config: See `Estimator`.
head_type: The kind of head to use for the model (inheriting from
`TimeSeriesRegressionHead`).
"""
if anomaly_prior_probability is not None:
filtering_postprocessor = StateInterpolatingAnomalyDetector(
anomaly_prior_probability=anomaly_prior_probability)
else:
filtering_postprocessor = None
state_space_model_configuration = (
state_space_model.StateSpaceModelConfiguration(
num_features=num_features,
dtype=dtype,
filtering_postprocessor=filtering_postprocessor,
exogenous_feature_columns=exogenous_feature_columns,
exogenous_update_condition=exogenous_update_condition))
model = structural_ensemble.MultiResolutionStructuralEnsemble(
cycle_num_latent_values=cycle_num_latent_values,
moving_average_order=moving_average_order,
autoregressive_order=autoregressive_order,
periodicities=periodicities,
configuration=state_space_model_configuration)
super(StructuralEnsembleRegressor, self).__init__(
model=model,
optimizer=optimizer,
model_dir=model_dir,
config=config,
head_type=head_type)
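# A minimal usage sketch of `StructuralEnsembleRegressor`, assuming hourly data
# with a daily cycle. `train_input_fn` and `whole_dataset_input_fn` are
# caller-supplied placeholders (e.g. built with `RandomWindowInputFn` and
# `WholeDatasetInputFn` respectively), matching the evaluation advice in the
# class docstring above.
def _example_structural_ensemble_usage(train_input_fn, whole_dataset_input_fn):
  """Trains a daily-periodicity structural ensemble and evaluates it."""
  estimator = StructuralEnsembleRegressor(
      periodicities=24,  # Hourly data with a daily cycle.
      num_features=1)
  estimator.train(input_fn=train_input_fn, steps=10)
  return estimator.evaluate(input_fn=whole_dataset_input_fn, steps=1)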
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/estimators.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Auto-Regressive models for time series data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.rnn.python.ops import lstm_ops
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.contrib.timeseries.python.timeseries import model
from tensorflow.contrib.timeseries.python.timeseries import model_utils
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import PredictionFeatures
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import TrainEvalFeatures
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
class FlatPredictionModel(training.Model):
"""Flattens input and output windows and puts them through dense layers.
This model does not operate on its own, but rather is a plugin to
`ARModel`. See `ARModel`'s constructor documentation
(`prediction_model_factory`) for a usage example.
"""
def __init__(self,
num_features,
input_window_size,
output_window_size,
hidden_layer_sizes=None):
"""Construct the flat prediction model.
Args:
num_features: number of input features per time step.
input_window_size: Number of past time steps of data to look at when doing
the regression.
output_window_size: Number of future time steps to predict. Note that
setting it to > 1 empirically seems to give a better fit.
hidden_layer_sizes: list of sizes of hidden layers.
"""
super(FlatPredictionModel, self).__init__()
self._input_flatten = core.Flatten()
self._output_flatten = core.Flatten()
if hidden_layer_sizes:
self._hidden_layers = sequential.Sequential([
core.Dense(layer_size, activation=nn_ops.relu)
for layer_size in hidden_layer_sizes])
else:
self._hidden_layers = None
self._mean_transform = core.Dense(num_features * output_window_size,
name="predicted_mean")
self._covariance_transform = core.Dense(num_features * output_window_size,
name="log_sigma_square")
self._prediction_shape = [-1, output_window_size, num_features]
def call(self, input_window_features, output_window_features):
"""Compute predictions from input and output windows.
Args:
input_window_features: A floating point Tensor with shape [batch size,
input window size, input features]. The batch dimension may not have
static shape information, but the window size and number of input
features are known at graph construction time and recorded in the static
shape information for the `input_window_features` `Tensor`. Note that
`input_window_size` may be zero.
output_window_features: A floating point Tensor with shape [batch size,
output window size, output features]. As with `input_window_features`,
the last two dimensions have static shape information. If there are no
output features, the size of the last dimension will be zero.
Returns:
A dictionary of predictions with keys "mean" and "covariance" (only
diagonal covariances are currently supported). Each has shape
[batch size, output window size, num_features], where num_features is the
same as the constructor argument.
"""
if input_window_features.shape.dims[1].value == 0:
# TODO(allenl): Make reshape()'s static shape information work on
# zero-size Tensors? Currently this special case is required because
# otherwise the Dense layers get unknown last dimensions.
activation = self._output_flatten(output_window_features)
elif output_window_features.shape.dims[2].value == 0:
activation = self._input_flatten(input_window_features)
else:
activation = array_ops.concat(
[self._input_flatten(input_window_features),
self._output_flatten(output_window_features)],
axis=1)
if self._hidden_layers:
activation = self._hidden_layers(activation)
predicted_mean = array_ops.reshape(
self._mean_transform(activation),
self._prediction_shape)
predicted_covariance = array_ops.reshape(
gen_math_ops.exp(self._covariance_transform(activation)),
self._prediction_shape)
return {"mean": predicted_mean,
"covariance": predicted_covariance}
class LSTMPredictionModel(training.Model):
"""A simple encoder/decoder model using an LSTM.
This model does not operate on its own, but rather is a plugin to
`ARModel`. See `ARModel`'s constructor documentation
(`prediction_model_factory`) for a usage example.
"""
def __init__(self,
num_features,
input_window_size,
output_window_size,
num_units=128):
"""Construct the LSTM prediction model.
Args:
num_features: number of input features per time step.
input_window_size: Number of past time steps of data to look at when doing
the regression.
output_window_size: Number of future time steps to predict. Note that
setting it to > 1 empirically seems to give a better fit.
num_units: The number of units in the encoder and decoder LSTM cells.
"""
super(LSTMPredictionModel, self).__init__()
self._encoder = lstm_ops.LSTMBlockFusedCell(
num_units=num_units, name="encoder")
self._decoder = lstm_ops.LSTMBlockFusedCell(
num_units=num_units, name="decoder")
self._mean_transform = core.Dense(num_features,
name="mean_transform")
self._covariance_transform = core.Dense(num_features,
name="covariance_transform")
def call(self, input_window_features, output_window_features):
"""Compute predictions from input and output windows."""
# Convert to time major
input_window_features = array_ops.transpose(input_window_features,
[1, 0, 2])
output_window_features = array_ops.transpose(output_window_features,
[1, 0, 2])
_, encoder_state = self._encoder(
input_window_features, dtype=self.dtype)
decoder_output, _ = self._decoder(
output_window_features, dtype=self.dtype,
initial_state=encoder_state)
# Switch back to batch major
decoder_output = array_ops.transpose(decoder_output, [1, 0, 2])
predicted_mean = self._mean_transform(decoder_output)
predicted_covariance = gen_math_ops.exp(
self._covariance_transform(decoder_output))
return {"mean": predicted_mean,
"covariance": predicted_covariance}
class ARModel(model.TimeSeriesModel):
"""Auto-regressive model, both linear and non-linear.
Features to the model include time and values of input_window_size timesteps,
and times for output_window_size timesteps. These are passed through a
configurable prediction model, and then fed to a loss function (e.g. squared
loss).
Note that this class can also be used to regress against time only by setting
the input_window_size to zero.
Each periodicity in the `periodicities` arg is divided by the
`num_time_buckets` into time buckets that are represented as features added
to the model.
A good heuristic for picking an appropriate periodicity for a given data set
would be the length of cycles in the data. For example, energy usage in a
home is typically cyclic each day. If the time feature in a home energy
usage dataset is in the unit of hours, then 24 would be an appropriate
periodicity. Similarly, a good heuristic for `num_time_buckets` is how often
the data is expected to change within the cycle. For the aforementioned home
energy usage dataset and periodicity of 24, then 48 would be a reasonable
value if usage is expected to change every half hour.
Each feature's value for a given example with time t is the difference
between t and the start of the time bucket it falls under. If it doesn't fall
under a feature's associated time bucket, then that feature's value is zero.
For example: if `periodicities` = (9, 12) and `num_time_buckets` = 3, then 6
features would be added to the model, 3 for periodicity 9 and 3 for
periodicity 12.
For an example data point where t = 17:
- It's in the 3rd time bucket for periodicity 9 (2nd period is 9-18 and 3rd
time bucket is 15-18)
- It's in the 2nd time bucket for periodicity 12 (2nd period is 12-24 and
2nd time bucket is between 16-20).
Therefore the 6 added features for this row with t = 17 would be:
# Feature name (periodicity#_timebucket#), feature value
P9_T1, 0 # not in first time bucket
P9_T2, 0 # not in second time bucket
P9_T3, 2 # 17 - 15 since 15 is the start of the 3rd time bucket
P12_T1, 0 # not in first time bucket
P12_T2, 1 # 17 - 16 since 16 is the start of the 2nd time bucket
P12_T3, 0 # not in third time bucket
"""
SQUARED_LOSS = "squared_loss"
NORMAL_LIKELIHOOD_LOSS = "normal_likelihood_loss"
def __init__(self,
periodicities,
input_window_size,
output_window_size,
num_features,
prediction_model_factory=FlatPredictionModel,
num_time_buckets=10,
loss=NORMAL_LIKELIHOOD_LOSS,
exogenous_feature_columns=None):
"""Constructs an auto-regressive model.
Args:
periodicities: periodicities of the input data, in the same units as the
time feature (for example 24 if feeding hourly data with a daily
periodicity, or 60 * 24 if feeding minute-level data with daily
periodicity). Note this can be a single value or a list of values for
multiple periodicities.
input_window_size: Number of past time steps of data to look at when doing
the regression.
output_window_size: Number of future time steps to predict. Note that
setting it to > 1 empirically seems to give a better fit.
num_features: number of input features per time step.
prediction_model_factory: A callable taking arguments `num_features`,
`input_window_size`, and `output_window_size` and returning a
`tf.keras.Model`. The `Model`'s `call()` takes two arguments: an input
window and an output window, and returns a dictionary of predictions.
See `FlatPredictionModel` for an example. Example usage:
        ```python
        model = ar_model.ARModel(
            periodicities=2, num_features=3,
            prediction_model_factory=functools.partial(
                FlatPredictionModel,
                hidden_layer_sizes=[10, 10]))
        ```
The default model computes predictions as a linear function of flattened
input and output windows.
num_time_buckets: Number of buckets into which to divide (time %
periodicity). This value multiplied by the number of periodicities is
the number of time features added to the model.
loss: Loss function to use for training. Currently supported values are
SQUARED_LOSS and NORMAL_LIKELIHOOD_LOSS. Note that for
NORMAL_LIKELIHOOD_LOSS, we train the covariance term as well. For
SQUARED_LOSS, the evaluation loss is reported based on un-scaled
observations and predictions, while the training loss is computed on
normalized data (if input statistics are available).
exogenous_feature_columns: A list of `tf.feature_column`s (for example
`tf.feature_column.embedding_column`) corresponding to
features which provide extra information to the model but are not part
of the series to be predicted.
"""
self._model_factory = prediction_model_factory
self.input_window_size = input_window_size
self.output_window_size = output_window_size
self.window_size = self.input_window_size + self.output_window_size
self.loss = loss
super(ARModel, self).__init__(
num_features=num_features,
exogenous_feature_columns=exogenous_feature_columns)
if exogenous_feature_columns is not None:
self.exogenous_size = self._get_exogenous_embedding_shape()[-1]
else:
self.exogenous_size = 0
assert num_time_buckets > 0
self._buckets = int(num_time_buckets)
if periodicities is None or not periodicities:
periodicities = []
elif (not isinstance(periodicities, list) and
not isinstance(periodicities, tuple)):
periodicities = [periodicities]
self._periodicities = [int(p) for p in periodicities]
for p in self._periodicities:
assert p > 0
assert len(self._periodicities) or self.input_window_size
assert output_window_size > 0
def initialize_graph(self, input_statistics=None):
super(ARModel, self).initialize_graph(input_statistics=input_statistics)
self._model_scope = variable_scope.variable_scope(
# The trailing slash means we strip all enclosing variable_scopes, which
# unfortunately is necessary because the model gets called inside and
# outside a "while" scope (for prediction and training respectively),
        # and the variable names need to match.
"model/", use_resource=True)
self._model_instance = self._model_factory(
num_features=self.num_features,
input_window_size=self.input_window_size,
output_window_size=self.output_window_size)
def get_start_state(self):
# State which matches the format we'll return later. Typically this will not
# be used by the model directly, but the shapes and dtypes should match so
# that the serving input_receiver_fn gets placeholder shapes correct.
return (array_ops.zeros([self.input_window_size], dtype=dtypes.int64),
array_ops.zeros(
[self.input_window_size, self.num_features], dtype=self.dtype),
array_ops.zeros(
[self.input_window_size, self.exogenous_size],
dtype=self.dtype))
# TODO(allenl,agarwal): Support sampling for AR.
def random_model_parameters(self, seed=None):
pass
def generate(self, number_of_series, series_length,
model_parameters=None, seed=None):
pass
def _predicted_covariance_op(self, activations, num_values):
activation, activation_size = activations[-1]
if self.loss == ARModel.NORMAL_LIKELIHOOD_LOSS:
log_sigma_square = model_utils.fully_connected(
activation,
activation_size,
self.output_window_size * num_values,
name="log_sigma_square",
activation=None)
predicted_covariance = gen_math_ops.exp(log_sigma_square)
predicted_covariance = array_ops.reshape(
predicted_covariance, [-1, self.output_window_size, num_values])
else:
shape = array_ops.stack([
array_ops.shape(activation)[0],
constant_op.constant(self.output_window_size),
constant_op.constant(num_values)
])
predicted_covariance = array_ops.ones(shape=shape, dtype=activation.dtype)
return predicted_covariance
def _predicted_mean_op(self, activations):
activation, activation_size = activations[-1]
predicted_mean = model_utils.fully_connected(
activation,
activation_size,
self.output_window_size * self.num_features,
name="predicted_mean",
activation=None)
return array_ops.reshape(predicted_mean,
[-1, self.output_window_size, self.num_features])
def prediction_ops(self, times, values, exogenous_regressors):
"""Compute model predictions given input data.
Args:
times: A [batch size, self.window_size] integer Tensor, the first
self.input_window_size times in each part of the batch indicating
input features, and the last self.output_window_size times indicating
prediction times.
values: A [batch size, self.input_window_size, self.num_features] Tensor
with input features.
exogenous_regressors: A [batch size, self.window_size,
self.exogenous_size] Tensor with exogenous features.
Returns:
Tuple (predicted_mean, predicted_covariance), where each element is a
Tensor with shape [batch size, self.output_window_size,
self.num_features].
"""
times.get_shape().assert_is_compatible_with([None, self.window_size])
batch_size = array_ops.shape(times)[0]
if self.input_window_size:
values.get_shape().assert_is_compatible_with(
[None, self.input_window_size, self.num_features])
if exogenous_regressors is not None:
exogenous_regressors.get_shape().assert_is_compatible_with(
[None, self.window_size, self.exogenous_size])
# Create input features.
input_window_features = []
input_feature_size = 0
output_window_features = []
output_feature_size = 0
if self._periodicities:
_, time_features = self._compute_time_features(times)
num_time_features = self._buckets * len(self._periodicities)
time_features = array_ops.reshape(
time_features,
[batch_size,
self.window_size,
num_time_features])
input_time_features, output_time_features = array_ops.split(
time_features, (self.input_window_size, self.output_window_size),
axis=1)
input_feature_size += num_time_features
output_feature_size += num_time_features
input_window_features.append(input_time_features)
output_window_features.append(output_time_features)
if self.input_window_size:
inp = array_ops.slice(values, [0, 0, 0], [-1, self.input_window_size, -1])
input_window_features.append(
array_ops.reshape(
inp,
[batch_size, self.input_window_size, self.num_features]))
input_feature_size += self.num_features
if self.exogenous_size:
input_exogenous_features, output_exogenous_features = array_ops.split(
exogenous_regressors,
(self.input_window_size, self.output_window_size),
axis=1)
input_feature_size += self.exogenous_size
output_feature_size += self.exogenous_size
input_window_features.append(input_exogenous_features)
output_window_features.append(output_exogenous_features)
assert input_window_features
input_window_features = array_ops.concat(input_window_features, axis=2)
if output_window_features:
output_window_features = array_ops.concat(output_window_features, axis=2)
else:
output_window_features = array_ops.zeros(
[batch_size, self.output_window_size, 0],
dtype=self.dtype)
static_batch_size = times.get_shape().dims[0].value
input_window_features.set_shape(
[static_batch_size, self.input_window_size, input_feature_size])
output_window_features.set_shape(
[static_batch_size, self.output_window_size, output_feature_size])
return self._output_window_predictions(input_window_features,
output_window_features)
def _output_window_predictions(
self, input_window_features, output_window_features):
with self._model_scope:
predictions = self._model_instance(
input_window_features, output_window_features)
result_shape = [None, self.output_window_size, self.num_features]
for v in predictions.values():
v.set_shape(result_shape)
return predictions
def loss_op(self, targets, prediction_ops):
"""Create loss_op."""
prediction = prediction_ops["mean"]
if self.loss == ARModel.NORMAL_LIKELIHOOD_LOSS:
covariance = prediction_ops["covariance"]
sigma = math_ops.sqrt(gen_math_ops.maximum(covariance, 1e-5))
loss_op = -math_ops.reduce_sum(
math_utils.normal_log_prob(targets, sigma, prediction))
else:
assert self.loss == ARModel.SQUARED_LOSS, self.loss
loss_op = math_ops.reduce_sum(
math_ops.squared_difference(prediction, targets))
loss_op /= math_ops.cast(
math_ops.reduce_prod(array_ops.shape(targets)), loss_op.dtype)
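# Dividing by the total number of target scalars turns the summed loss into a
# per-element mean, keeping it comparable across batch and window sizes.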
return loss_op
def _process_exogenous_features(self, times, features):
embedded = super(ARModel, self)._process_exogenous_features(
times=times, features=features)
if embedded is None:
assert self.exogenous_size == 0
# No embeddings. Return a zero-size [batch, times, 0] array so we don't
# have to special case it downstream.
return array_ops.zeros(
array_ops.concat([array_ops.shape(times), constant_op.constant([0])],
axis=0))
else:
return embedded
# TODO(allenl, agarwal): Consider better ways of warm-starting predictions.
def predict(self, features):
"""Computes predictions multiple steps into the future.
Args:
features: A dictionary with the following key/value pairs:
PredictionFeatures.TIMES: A [batch size, predict window size]
integer Tensor of times, after the window of data indicated by
`STATE_TUPLE`, to make predictions for.
PredictionFeatures.STATE_TUPLE: A tuple of (times, values), times with
shape [batch size, self.input_window_size], values with shape [batch
size, self.input_window_size, self.num_features] representing a
segment of the time series before `TIMES`. This data is used
to start the autoregressive computation. This should have data for
at least self.input_window_size timesteps.
And any exogenous features, with shapes prefixed by shape of `TIMES`.
Returns:
A dictionary with keys "mean" and "covariance". The
values are Tensors of shape [batch_size, predict window size,
num_features] and correspond to the times passed in `TIMES`.
"""
if not self._graph_initialized:
self.initialize_graph()
predict_times = math_ops.cast(
ops.convert_to_tensor(features[PredictionFeatures.TIMES]), dtypes.int32)
exogenous_regressors = self._process_exogenous_features(
times=predict_times,
features={key: value for key, value in features.items()
if key not in [TrainEvalFeatures.TIMES,
TrainEvalFeatures.VALUES,
PredictionFeatures.STATE_TUPLE]})
with ops.control_dependencies(
[check_ops.assert_equal(array_ops.shape(predict_times)[1],
array_ops.shape(exogenous_regressors)[1])]):
exogenous_regressors = array_ops.identity(exogenous_regressors)
batch_size = array_ops.shape(predict_times)[0]
num_predict_values = array_ops.shape(predict_times)[1]
prediction_iterations = ((num_predict_values + self.output_window_size - 1)
// self.output_window_size)
# Pad predict_times and exogenous regressors so that each example has an
# exact multiple of self.output_window_size values.
padding_size = (prediction_iterations * self.output_window_size -
num_predict_values)
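# Worked example (hypothetical numbers): with num_predict_values=7 and
# output_window_size=3, prediction_iterations=3 and padding_size=2, so two
# padded steps are predicted and later discarded.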
predict_times = array_ops.pad(
predict_times, [[0, 0], [0, padding_size]])
exogenous_regressors = array_ops.pad(
exogenous_regressors, [[0, 0], [0, padding_size], [0, 0]])
state = features[PredictionFeatures.STATE_TUPLE]
(state_times, state_values, state_exogenous_regressors) = state
state_times = math_ops.cast(
ops.convert_to_tensor(state_times), dtypes.int32)
state_values = ops.convert_to_tensor(state_values, dtype=self.dtype)
state_exogenous_regressors = ops.convert_to_tensor(
state_exogenous_regressors, dtype=self.dtype)
initial_input_times = predict_times[:, :self.output_window_size]
initial_input_exogenous_regressors = (
exogenous_regressors[:, :self.output_window_size, :])
if self.input_window_size > 0:
initial_input_times = array_ops.concat(
[state_times[:, -self.input_window_size:], initial_input_times], 1)
values_size = array_ops.shape(state_values)[1]
times_size = array_ops.shape(state_times)[1]
with ops.control_dependencies([
check_ops.assert_greater_equal(values_size, self.input_window_size),
check_ops.assert_equal(values_size, times_size)
]):
initial_input_values = state_values[:, -self.input_window_size:, :]
initial_input_exogenous_regressors = array_ops.concat(
[state_exogenous_regressors[:, -self.input_window_size:, :],
initial_input_exogenous_regressors[
:, :self.output_window_size, :]],
axis=1)
else:
initial_input_values = 0
# Iterate over the predict_times, predicting self.output_window_size values
# in each iteration.
def _while_condition(iteration_number, *unused_args):
return math_ops.less(iteration_number, prediction_iterations)
def _while_body(iteration_number, input_times, input_values,
input_exogenous_regressors, mean_ta, covariance_ta):
"""Predict self.output_window_size values."""
prediction_ops = self.prediction_ops(
input_times, input_values, input_exogenous_regressors)
predicted_mean = prediction_ops["mean"]
predicted_covariance = prediction_ops["covariance"]
offset = self.output_window_size * gen_math_ops.minimum(
iteration_number + 1, prediction_iterations - 1)
if self.input_window_size > 0:
if self.output_window_size < self.input_window_size:
new_input_values = array_ops.concat(
[input_values[:, self.output_window_size:, :], predicted_mean], 1)
new_input_exogenous_regressors = array_ops.concat(
[input_exogenous_regressors[:, -self.input_window_size:, :],
exogenous_regressors[
:, offset:offset + self.output_window_size, :]],
axis=1)
new_input_times = array_ops.concat([
input_times[:, -self.input_window_size:],
predict_times[:, offset:offset + self.output_window_size]
], 1)
else:
new_input_values = predicted_mean[:, -self.input_window_size:, :]
new_input_exogenous_regressors = exogenous_regressors[
:,
offset - self.input_window_size:offset + self.output_window_size,
:]
new_input_times = predict_times[
:,
offset - self.input_window_size:offset + self.output_window_size]
else:
new_input_values = input_values
new_input_exogenous_regressors = exogenous_regressors[
:, offset:offset + self.output_window_size, :]
new_input_times = predict_times[:,
offset:offset + self.output_window_size]
new_input_times.set_shape(initial_input_times.get_shape())
new_input_exogenous_regressors.set_shape(
initial_input_exogenous_regressors.get_shape())
new_mean_ta = mean_ta.write(iteration_number, predicted_mean)
if isinstance(covariance_ta, tensor_array_ops.TensorArray):
new_covariance_ta = covariance_ta.write(iteration_number,
predicted_covariance)
else:
new_covariance_ta = covariance_ta
return (iteration_number + 1,
new_input_times,
new_input_values,
new_input_exogenous_regressors,
new_mean_ta,
new_covariance_ta)
# Note that control_flow_ops.while_loop does not accept None as a loop
# variable, hence using 0. where we don't want to predict covariance.
covariance_ta_init = (tensor_array_ops.TensorArray(
dtype=self.dtype, size=prediction_iterations)
if self.loss != ARModel.SQUARED_LOSS else 0.)
mean_ta_init = tensor_array_ops.TensorArray(
dtype=self.dtype, size=prediction_iterations)
_, _, _, _, mean_ta, covariance_ta = control_flow_ops.while_loop(
_while_condition, _while_body, [
0,
initial_input_times,
initial_input_values,
initial_input_exogenous_regressors,
mean_ta_init,
covariance_ta_init
])
def _parse_ta(values_ta):
"""Helper function to parse the returned TensorArrays."""
if not isinstance(values_ta, tensor_array_ops.TensorArray):
return None
predictions_length = prediction_iterations * self.output_window_size
# Shape [prediction_iterations, batch_size, self.output_window_size,
# self.num_features]
values_packed = values_ta.stack()
# Transpose to move batch dimension outside.
output_values = array_ops.reshape(
array_ops.transpose(values_packed, [1, 0, 2, 3]),
array_ops.stack([batch_size, predictions_length, -1]))
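# output_values has shape [batch_size, predictions_length, num_features].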
# Clip to desired size
return output_values[:, :num_predict_values, :]
predicted_mean = _parse_ta(mean_ta)
predicted_covariance = _parse_ta(covariance_ta)
if predicted_covariance is None:
predicted_covariance = array_ops.ones_like(predicted_mean)
# Transform and scale the mean and covariance appropriately.
predicted_mean = self._scale_back_data(predicted_mean)
predicted_covariance = self._scale_back_variance(predicted_covariance)
return {"mean": predicted_mean,
"covariance": predicted_covariance}
def _process_window(self, features, mode, exogenous_regressors):
"""Compute model outputs on a single window of data."""
times = math_ops.cast(features[TrainEvalFeatures.TIMES], dtypes.int64)
values = math_ops.cast(features[TrainEvalFeatures.VALUES], dtype=self.dtype)
exogenous_regressors = math_ops.cast(exogenous_regressors, dtype=self.dtype)
original_values = values
# Extra shape checking for the window size (above that in
# `head.create_estimator_spec`).
expected_times_shape = [None, self.window_size]
if not times.get_shape().is_compatible_with(expected_times_shape):
raise ValueError(
("ARModel with input_window_size={input_window_size} "
"and output_window_size={output_window_size} expects "
"feature '{times_feature}' to have shape (batch_size, "
"{window_size}) (for any batch_size), but got shape {times_shape}. "
"If you are using RandomWindowInputFn, set "
"window_size={window_size} or adjust the input_window_size and "
"output_window_size arguments to ARModel.").format(
input_window_size=self.input_window_size,
output_window_size=self.output_window_size,
times_feature=TrainEvalFeatures.TIMES,
window_size=self.window_size,
times_shape=times.get_shape()))
values = self._scale_data(values)
if self.input_window_size > 0:
input_values = values[:, :self.input_window_size, :]
else:
input_values = None
prediction_ops = self.prediction_ops(
times, input_values, exogenous_regressors)
prediction = prediction_ops["mean"]
covariance = prediction_ops["covariance"]
targets = array_ops.slice(values, [0, self.input_window_size, 0],
[-1, -1, -1])
targets.get_shape().assert_is_compatible_with(prediction.get_shape())
if (mode == estimator_lib.ModeKeys.EVAL
and self.loss == ARModel.SQUARED_LOSS):
# Report an evaluation loss which matches the expected
# (observed - predicted) ** 2.
# Note that this affects only evaluation; the training loss is unaffected.
loss = self.loss_op(
self._scale_back_data(targets),
{"mean": self._scale_back_data(prediction_ops["mean"])})
else:
loss = self.loss_op(targets, prediction_ops)
# Scale back the prediction.
prediction = self._scale_back_data(prediction)
covariance = self._scale_back_variance(covariance)
return model.ModelOutputs(
loss=loss,
end_state=(times[:, -self.input_window_size:],
values[:, -self.input_window_size:, :],
exogenous_regressors[:, -self.input_window_size:, :]),
predictions={"mean": prediction, "covariance": covariance,
"observed": original_values[:, -self.output_window_size:]},
prediction_times=times[:, -self.output_window_size:])
def get_batch_loss(self, features, mode, state):
"""Computes predictions and a loss.
Args:
features: A dictionary (such as is produced by a chunker) with the
following key/value pairs (shapes are given as required for training):
TrainEvalFeatures.TIMES: A [batch size, self.window_size] integer
Tensor with times for each observation. To train on longer
sequences, the data should first be chunked.
TrainEvalFeatures.VALUES: A [batch size, self.window_size,
self.num_features] Tensor with values for each observation.
When evaluating, `TIMES` and `VALUES` must have a window size of at
least self.window_size, but it may be longer, in which case the last
window_size - self.input_window_size times (or fewer if this is not
divisible by self.output_window_size) will be evaluated on with
non-overlapping output windows (and will have associated
predictions). This is primarily to support qualitative
evaluation/plotting, and is not a recommended way to compute evaluation
losses (since there is no overlap in the output windows, which for
window-based models is an undesirable bias).
mode: The tf.estimator.ModeKeys mode to use (TRAIN or EVAL).
state: Unused
Returns:
A model.ModelOutputs object.
Raises:
ValueError: If `mode` is not TRAIN or EVAL, or if static shape information
is incorrect.
"""
features = {feature_name: ops.convert_to_tensor(feature_value)
for feature_name, feature_value in features.items()}
times = features[TrainEvalFeatures.TIMES]
exogenous_regressors = self._process_exogenous_features(
times=times,
features={key: value for key, value in features.items()
if key not in [TrainEvalFeatures.TIMES,
TrainEvalFeatures.VALUES,
PredictionFeatures.STATE_TUPLE]})
if mode == estimator_lib.ModeKeys.TRAIN:
# For training, we require the window size to be self.window_size as
# iterating sequentially on larger windows could introduce a bias.
return self._process_window(
features, mode=mode, exogenous_regressors=exogenous_regressors)
elif mode == estimator_lib.ModeKeys.EVAL:
# For evaluation, we allow the user to pass in a larger window, in which
# case we try to cover as much of the window as possible without
# overlap. Quantitative evaluation is more efficient/correct with fixed
# windows matching self.window_size (as with training), but this looping
# allows easy plotting of "in-sample" predictions.
times.get_shape().assert_has_rank(2)
static_window_size = times.get_shape().dims[1].value
if (static_window_size is not None
and static_window_size < self.window_size):
raise ValueError(
("ARModel requires a window of at least input_window_size + "
"output_window_size to evaluate on (input_window_size={}, "
"output_window_size={}, and got shape {} for feature '{}' (batch "
"size, window size)).").format(
self.input_window_size, self.output_window_size,
times.get_shape(), TrainEvalFeatures.TIMES))
num_iterations = ((array_ops.shape(times)[1] - self.input_window_size)
// self.output_window_size)
output_size = num_iterations * self.output_window_size
# Rather than dealing with overlapping windows of output, discard a bit at
# the beginning if output windows don't cover evenly.
crop_length = output_size + self.input_window_size
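# Worked example (hypothetical numbers): input_window_size=10,
# output_window_size=5, and a 23-step window give num_iterations=2,
# output_size=10, and crop_length=20, so the first 3 steps are dropped.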
features = {feature_name: feature_value[:, -crop_length:]
for feature_name, feature_value in features.items()}
# Note that, unlike the ARModel's predict() while_loop and the
# SequentialTimeSeriesModel while_loop, each iteration here can run in
# parallel, since we are not feeding predictions or state from previous
# iterations.
def _while_condition(iteration_number, loss_ta, mean_ta, covariance_ta):
del loss_ta, mean_ta, covariance_ta # unused
return iteration_number < num_iterations
def _while_body(iteration_number, loss_ta, mean_ta, covariance_ta):
"""Perform a processing step on a single window of data."""
base_offset = iteration_number * self.output_window_size
model_outputs = self._process_window(
features={
feature_name:
feature_value[:, base_offset:base_offset + self.window_size]
for feature_name, feature_value in features.items()},
mode=mode,
exogenous_regressors=exogenous_regressors[
:, base_offset:base_offset + self.window_size])
# This code needs to be updated if new predictions are added in
# self._process_window
assert len(model_outputs.predictions) == 3
assert "mean" in model_outputs.predictions
assert "covariance" in model_outputs.predictions
assert "observed" in model_outputs.predictions
return (iteration_number + 1,
loss_ta.write(
iteration_number, model_outputs.loss),
mean_ta.write(
iteration_number, model_outputs.predictions["mean"]),
covariance_ta.write(
iteration_number, model_outputs.predictions["covariance"]))
_, loss_ta, mean_ta, covariance_ta = control_flow_ops.while_loop(
_while_condition, _while_body,
[0,
tensor_array_ops.TensorArray(dtype=self.dtype, size=num_iterations),
tensor_array_ops.TensorArray(dtype=self.dtype, size=num_iterations),
tensor_array_ops.TensorArray(dtype=self.dtype, size=num_iterations)])
values = math_ops.cast(features[TrainEvalFeatures.VALUES],
dtype=self.dtype)
batch_size = array_ops.shape(times)[0]
prediction_shape = [batch_size, self.output_window_size * num_iterations,
self.num_features]
(previous_state_times,
previous_state_values,
previous_state_exogenous_regressors) = state
# Make sure returned state always has windows of self.input_window_size,
# even if we were passed fewer than self.input_window_size points this
# time.
if self.input_window_size > 0:
new_state_times = array_ops.concat(
[previous_state_times,
math_ops.cast(times, dtype=dtypes.int64)],
axis=1)[:, -self.input_window_size:]
new_state_times.set_shape((None, self.input_window_size))
new_state_values = array_ops.concat(
[previous_state_values,
self._scale_data(values)], axis=1)[:, -self.input_window_size:, :]
new_state_values.set_shape((None, self.input_window_size,
self.num_features))
new_exogenous_regressors = array_ops.concat(
[previous_state_exogenous_regressors,
exogenous_regressors], axis=1)[:, -self.input_window_size:, :]
new_exogenous_regressors.set_shape(
(None,
self.input_window_size,
self.exogenous_size))
else:
# There is no state to keep, and the strided slices above do not handle
# input_window_size=0.
new_state_times = previous_state_times
new_state_values = previous_state_values
new_exogenous_regressors = previous_state_exogenous_regressors
return model.ModelOutputs(
loss=math_ops.reduce_mean(loss_ta.stack(), axis=0),
end_state=(new_state_times,
new_state_values,
new_exogenous_regressors),
predictions={
"mean": array_ops.reshape(
array_ops.transpose(mean_ta.stack(), [1, 0, 2, 3]),
prediction_shape),
"covariance": array_ops.reshape(
array_ops.transpose(covariance_ta.stack(), [1, 0, 2, 3]),
prediction_shape),
"observed": values[:, -output_size:]},
prediction_times=times[:, -output_size:])
else:
raise ValueError(
"Unknown mode '{}' passed to get_batch_loss.".format(mode))
def _compute_time_features(self, time):
"""Compute some features on the time value."""
batch_size = array_ops.shape(time)[0]
num_periods = len(self._periodicities)
# Reshape to 3D.
periods = constant_op.constant(
self._periodicities, shape=[1, 1, num_periods, 1], dtype=time.dtype)
time = array_ops.reshape(time, [batch_size, -1, 1, 1])
window_offset = time / self._periodicities
# Cast to appropriate type and scale to [0, 1) range
mod = (math_ops.cast(time % periods, self.dtype) * self._buckets /
math_ops.cast(periods, self.dtype))
# Bucketize based on some fixed width intervals. For a value t and interval
# [a, b), we return (t - a) if a <= t < b, else 0.
intervals = array_ops.reshape(
math_ops.range(self._buckets, dtype=self.dtype),
[1, 1, 1, self._buckets])
mod = nn_ops.relu(mod - intervals)
mod = array_ops.where(mod < 1.0, mod, array_ops.zeros_like(mod))
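# Worked example (hypothetical values): with self._buckets=10 and a time at
# 0.35 of its period, mod starts at 3.5; after subtracting the intervals and
# masking, only bucket 3 is nonzero, with value 0.5.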
return window_offset, mod
class AnomalyMixtureARModel(ARModel):
"""Model data as a mixture of normal and anomaly distributions.
Note that this model works by changing the loss function to reduce the penalty
when predicting an anomalous target. However, the predictions are still based
on anomalous input features, and this may affect the quality of fit. One
possible solution is to downweight/filter anomalous inputs, but that requires
more sequential processing instead of completely random windows.
"""
GAUSSIAN_ANOMALY = "gaussian"
CAUCHY_ANOMALY = "cauchy"
def __init__(self,
periodicities,
anomaly_prior_probability,
input_window_size,
output_window_size,
num_features,
prediction_model_factory=FlatPredictionModel,
anomaly_distribution=GAUSSIAN_ANOMALY,
num_time_buckets=10,
exogenous_feature_columns=None):
assert (anomaly_prior_probability < 1.0 and
anomaly_prior_probability > 0.0)
self._anomaly_prior_probability = anomaly_prior_probability
assert anomaly_distribution in [
AnomalyMixtureARModel.GAUSSIAN_ANOMALY,
AnomalyMixtureARModel.CAUCHY_ANOMALY]
self._anomaly_distribution = anomaly_distribution
super(AnomalyMixtureARModel, self).__init__(
periodicities=periodicities,
num_features=num_features,
num_time_buckets=num_time_buckets,
input_window_size=input_window_size,
output_window_size=output_window_size,
loss=ARModel.NORMAL_LIKELIHOOD_LOSS,
prediction_model_factory=prediction_model_factory,
exogenous_feature_columns=exogenous_feature_columns)
def _create_anomaly_ops(self, times, values, prediction_ops_dict):
anomaly_log_param = variable_scope.get_variable(
"anomaly_log_param",
shape=[],
dtype=self.dtype,
initializer=init_ops.zeros_initializer())
# Anomaly param is the variance for Gaussian and scale for Cauchy
# distribution.
prediction_ops_dict["anomaly_params"] = gen_math_ops.exp(anomaly_log_param)
def prediction_ops(self, times, values, exogenous_regressors):
prediction_ops_dict = super(AnomalyMixtureARModel, self).prediction_ops(
times, values, exogenous_regressors)
self._create_anomaly_ops(times, values, prediction_ops_dict)
return prediction_ops_dict
def _anomaly_log_prob(self, targets, prediction_ops):
prediction = prediction_ops["mean"]
if self._anomaly_distribution == AnomalyMixtureARModel.GAUSSIAN_ANOMALY:
anomaly_variance = prediction_ops["anomaly_params"]
anomaly_sigma = math_ops.sqrt(
gen_math_ops.maximum(anomaly_variance, 1e-5))
log_prob = math_utils.normal_log_prob(targets, anomaly_sigma, prediction)
else:
assert self._anomaly_distribution == AnomalyMixtureARModel.CAUCHY_ANOMALY
anomaly_scale = prediction_ops["anomaly_params"]
log_prob = math_utils.cauchy_log_prob(targets, anomaly_scale, prediction)
return log_prob
def loss_op(self, targets, prediction_ops):
"""Create loss_op."""
prediction = prediction_ops["mean"]
covariance = prediction_ops["covariance"]
# Normal data log probability.
sigma = math_ops.sqrt(gen_math_ops.maximum(covariance, 1e-5))
log_prob1 = math_utils.normal_log_prob(targets, sigma, prediction)
log_prob1 += math_ops.log(1 - self._anomaly_prior_probability)
# Anomaly log probability.
log_prob2 = self._anomaly_log_prob(targets, prediction_ops)
log_prob2 += math_ops.log(self._anomaly_prior_probability)
# We need to compute log(exp(log_prob1) + exp(log_prob2)). For numerical
# stability, we rewrite the expression as below.
p1 = gen_math_ops.minimum(log_prob1, log_prob2)
p2 = gen_math_ops.maximum(log_prob1, log_prob2)
mixed_log_prob = p2 + math_ops.log(1 + gen_math_ops.exp(p1 - p2))
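# This is the standard two-term log-sum-exp identity:
# log(exp(a) + exp(b)) = max(a, b) + log(1 + exp(min(a, b) - max(a, b))).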
loss_op = -math_ops.reduce_sum(mixed_log_prob)
loss_op /= math_ops.cast(
math_ops.reduce_prod(array_ops.shape(targets)), self.dtype)
return loss_op
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/ar_model.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes for wrapping a model to operate on different data shapes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.contrib.timeseries.python.timeseries.model import ModelOutputs
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import nest
class PassthroughStateManager(object):
"""A minimal wrapper for models which do not need state management."""
def __init__(self):
self._input_statistics = None
self._graph_initialized = False
def initialize_graph(self, model, input_statistics=None):
"""Adds required operations to the graph."""
del model # unused
self._graph_initialized = True
self._input_statistics = input_statistics
def define_loss(self, model, features, mode):
"""Wrap "model" with StateManager-specific operations.
Args:
model: The model (inheriting from TimeSeriesModel) to manage state for.
features: A dictionary with the following key/value pairs:
feature_keys.TrainEvalFeatures.TIMES: A [batch size x window size]
Tensor with times for each observation.
feature_keys.TrainEvalFeatures.VALUES: A [batch size x window size x num
features] Tensor with values for each observation.
mode: The tf.estimator.ModeKeys mode to use (TRAIN or EVAL).
Returns:
A ModelOutputs object.
Raises:
ValueError: If start state was specified.
"""
if feature_keys.State.STATE_TUPLE in features:
raise ValueError(
"Overriding start state is not supported for this model.")
return model.define_loss(features, mode)
class _OverridableStateManager(PassthroughStateManager):
"""Base class for state managers which support overriding model state."""
@abc.abstractmethod
def _define_loss_with_saved_state(self, model, features, mode):
pass
def define_loss(self, model, features, mode):
"""Switches between explicit start state and managed state."""
if feature_keys.FilteringFeatures.STATE_TUPLE in features:
# Explicit start state has been provided, so we should use that.
if mode == estimator_lib.ModeKeys.TRAIN:
raise ValueError(
"Overriding saved state for training is not supported (but a value "
"for feature {} was specified).".format(
feature_keys.FilteringFeatures.STATE_TUPLE))
start_state = features[feature_keys.FilteringFeatures.STATE_TUPLE]
del features[feature_keys.FilteringFeatures.STATE_TUPLE]
return model.get_batch_loss(
features=features, mode=mode, state=start_state)
else:
# No explicit start state; use managed state.
return self._define_loss_with_saved_state(
model=model, features=features, mode=mode)
class FilteringOnlyStateManager(_OverridableStateManager):
"""State manager for models which use state only for filtering.
Window-based models (ARModel) do not require state to be fed during training
(instead requiring a specific window size). Rather than requiring a minimum
window size for filtering, these models maintain this window in their state,
and so need state to be fed.
"""
def _define_loss_with_saved_state(self, model, features, mode):
return model.define_loss(features, mode)
class ChainingStateManager(_OverridableStateManager):
"""Maintains state across a batch for SequentialTimeSeriesModel subclasses.
The batch dimension is treated as indexing sequential chunks of the same
timeseries. End state from each chunk is fed as start state to the next chunk
during the next timestep. This is an approximation to full-batch training for
sequential models, but is typically much faster while still accurately
recovering parameters. The speedup comes from reduced scheduling overhead of
TensorFlow ops, since each operation can do much more work.
"""
def __init__(self, state_saving_interval=20, checkpoint_state=False):
"""Initialize the state manager.
Args:
state_saving_interval: This state manager saves intermediate model state
every `state_saving_interval` times. Larger values reduce memory usage and,
if `checkpoint_state` is enabled, checkpoint size, but models
will need to impute across artificial gaps of up to this size
(i.e. gaps not appearing in the original data). This imputation may
affect training. Set state_saving_interval to 1 to avoid any
artificial imputation.
checkpoint_state: If True, saved intermediate model state will be
written to checkpoints. Checkpoints will then scale with dataset
size. If False, state will be freshly imputed from the beginning of a
series each time the model is restored, which means it may take a few
iterations for state to warm up.
"""
super(ChainingStateManager, self).__init__()
self._checkpoint_state = checkpoint_state
self._state_saving_interval = state_saving_interval
self._start_state = None
self._cached_states = None
def initialize_graph(self, model, input_statistics=None):
"""Adds required operations to the graph."""
super(ChainingStateManager, self).initialize_graph(
model=model, input_statistics=input_statistics)
self._start_state = model.get_start_state()
self._cached_states = math_utils.TupleOfTensorsLookup(
key_dtype=dtypes.int64,
default_values=self._start_state,
empty_key=-1,
deleted_key=-2,
name="cached_states",
checkpoint=self._checkpoint_state)
def _define_loss_with_saved_state(self, model, features, mode):
"""Feeds end state from one training iteration into the next.
Args:
model: The model to wrap. Compatible with children of TimeSeriesModel.
features: Dictionary with Tensor values defining the data to be
processed. The expected key/value pairs are at minimum:
feature_keys.TrainEvalFeatures.TIMES: A [number of chunks x window
size] Tensor with times for each observation, the result of chunking
a single longer time series.
feature_keys.TrainEvalFeatures.VALUES: A [number of chunks x window
size x num features] Tensor with values for each observation,
corresponding to times.
mode: The tf.estimator.ModeKeys mode to use. For EVAL and INFER, no
batching is performed, which may be slow. This is to avoid giving
cached and almost certainly stale values.
Returns:
A ModelOutputs object.
Raises:
ValueError: If initialize_graph has not been called.
"""
if not self._graph_initialized:
raise ValueError("ChainingStateManager requires initialize_graph() to be "
"called before use.")
(loss_op, end_state, batch_predictions) = self._update_cached_states(
model=model,
features=features,
mode=mode)
# Add a batch dimension so state can be used directly (e.g. for predictions)
# without the user manually reshaping it.
last_end_state_flat = [end_state_value[-1][None]
for end_state_value in nest.flatten(end_state)]
batch_predictions["observed"] = features[
feature_keys.TrainEvalFeatures.VALUES]
return ModelOutputs(
loss=loss_op,
end_state=nest.pack_sequence_as(end_state, last_end_state_flat),
predictions=batch_predictions,
prediction_times=features[feature_keys.TrainEvalFeatures.TIMES])
def _get_chunk_number(self, time):
return time // self._state_saving_interval
def _get_cached_states(self, times):
"""Retrieve cached states for a batch of times."""
read_chunk_numbers = self._get_chunk_number(times)
looked_up_state = list(self._cached_states.lookup(
math_ops.cast(read_chunk_numbers, dtypes.int64)))
looked_up_state = tuple(looked_up_state)
# We need to special-case the first chunk in a series to explicitly rely on
# the model's starting state so that gradients flow back to it. Otherwise it
# would affect only initialization, and would not be read from or updated
# during training. Not doing this also isolates that part of the graph,
# leading to errors on model reload if there are trainable variables
# affecting a model's start state.
if self._input_statistics is not None:
start_time = self._input_statistics.start_time
else:
start_time = 0
set_to_start_state = math_ops.equal(read_chunk_numbers,
self._get_chunk_number(start_time))
new_states = []
for start_state_value, cache_variable in zip(
nest.flatten(
math_utils.replicate_state(self._start_state,
array_ops.shape(times)[0])),
nest.flatten(looked_up_state)):
new_states.append(
array_ops.where(set_to_start_state, start_state_value,
cache_variable))
looked_up_state = nest.pack_sequence_as(looked_up_state, new_states)
return looked_up_state
def _update_cached_states(self, model, features, mode):
"""Read, process, and write chunks to the cache."""
times = features[feature_keys.TrainEvalFeatures.TIMES]
looked_up_state = self._get_cached_states(times[:, 0])
(model_loss, intermediate_states,
batch_predictions) = model.per_step_batch_loss(
features=features,
mode=mode,
state=looked_up_state)
# We need to at least write to the bucket after the one we read from.
min_chunk_numbers = self._get_chunk_number(times) + 1
# We write to the bucket that would have been read had the window started at
# the next sample (except for the last sample in the window, which gets
# written to the next bucket). This assumes fixed missing times (i.e. if we
# were presented with times [10, 50] we will never see times [30, 50]).
#
# TODO(allenl): Retrieve the highest time less than the current time rather
# than relying on fixed bucketing.
write_chunk_numbers = math_ops.maximum(
self._get_chunk_number(array_ops.concat(
[times[:, 1:], times[:, -1:] + 1], axis=1)),
min_chunk_numbers)
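# Worked example (hypothetical values): with state_saving_interval=20 and
# times [[18, 19, 20, 21]], the read chunk is 0 and write_chunk_numbers is
# [[1, 1, 2, 2]].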
# Write once for every computed state; this may mean that we write multiple
# times to the same cell, but later writes will take precedence.
save_ops = [
self._cached_states.insert(
keys=write_chunk_numbers,
values=intermediate_states)]
end_state = nest.pack_sequence_as(
intermediate_states,
[state_element[:, -1]
for state_element in nest.flatten(intermediate_states)])
with ops.control_dependencies(save_ops):
# Make sure end states get saved at each iteration
loss_op = array_ops.identity(model_loss)
return loss_op, end_state, batch_predictions
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/state_management.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for testing state space models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
def transition_power_test_template(test_case, model, num_steps):
"""Tests the transition_to_powers function of a state space model."""
transition_matrix = ops.convert_to_tensor(
model.get_state_transition(), dtype=model.dtype)
step_number = array_ops.placeholder(shape=[], dtype=dtypes.int64)
state_dimension = tensor_shape.dimension_value(transition_matrix.shape[0])
previous_matrix = array_ops.placeholder(
shape=[state_dimension, state_dimension], dtype=transition_matrix.dtype)
true_single_step_update = math_ops.matmul(previous_matrix,
transition_matrix)
model_output_tensor = model.transition_to_powers(powers=array_ops.stack(
[step_number, step_number]))
with test_case.test_session():
starting_matrix = linalg_ops.eye(
state_dimension, batch_shape=array_ops.shape(num_steps)).eval()
evaled_current_matrix = starting_matrix
for iteration_number in range(num_steps):
model_output = model_output_tensor.eval(
feed_dict={step_number: iteration_number})
test_case.assertAllClose(
evaled_current_matrix,
model_output[0],
rtol=1e-8 if evaled_current_matrix.dtype == numpy.float64 else 1e-4)
evaled_current_matrix = true_single_step_update.eval(
feed_dict={previous_matrix: evaled_current_matrix})
def noise_accumulator_test_template(test_case, model, num_steps):
"""Tests `model`'s transition_power_noise_accumulator."""
transition_matrix = ops.convert_to_tensor(
model.get_state_transition(), dtype=model.dtype)
noise_transform = ops.convert_to_tensor(
model.get_noise_transform(), dtype=model.dtype)
state_dimension = tensor_shape.dimension_value(transition_matrix.shape[0])
state_noise_dimension = tensor_shape.dimension_value(noise_transform.shape[1])
gen_noise_addition = math_utils.sign_magnitude_positive_definite(
raw=random_ops.random_normal(
shape=[state_noise_dimension, state_noise_dimension],
dtype=model.dtype))
gen_starting_noise = math_utils.sign_magnitude_positive_definite(
random_ops.random_normal(
shape=[state_dimension, state_dimension], dtype=model.dtype))
starting_noise = array_ops.placeholder(
shape=[state_dimension, state_dimension], dtype=model.dtype)
step_number = array_ops.placeholder(shape=[], dtype=dtypes.int64)
starting_transitioned = math_ops.matmul(
math_ops.matmul(transition_matrix, starting_noise),
transition_matrix,
adjoint_b=True)
with test_case.test_session():
evaled_starting_noise = gen_starting_noise.eval()
current_starting_noise_transitioned = evaled_starting_noise
current_noise = evaled_starting_noise
evaled_noise_addition = gen_noise_addition.eval()
evaled_noise_addition_transformed = math_ops.matmul(
math_ops.matmul(noise_transform, evaled_noise_addition),
noise_transform,
adjoint_b=True).eval()
model.state_transition_noise_covariance = evaled_noise_addition
model._window_initializer( # pylint: disable=protected-access
times=math_ops.range(num_steps + 1)[..., None], state=(None, None, 0))
model_update = model.transition_power_noise_accumulator(
num_steps=step_number)
for iteration_number in range(num_steps):
model_new_noise = model_update.eval(
feed_dict={step_number: iteration_number})
test_case.assertAllClose(
current_noise,
model_new_noise + current_starting_noise_transitioned,
rtol=1e-8 if current_noise.dtype == numpy.float64 else 1e-3)
current_starting_noise_transitioned = starting_transitioned.eval(
feed_dict={starting_noise: current_starting_noise_transitioned})
current_noise = (
starting_transitioned.eval(
feed_dict={starting_noise: current_noise})
+ evaled_noise_addition_transformed)
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/state_space_models/test_utils.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements a state space model with level and local linear trends."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import state_space_model
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
class AdderStateSpaceModel(state_space_model.StateSpaceModel):
"""A state space model component with level and slope.
At each timestep, level <- level + slope. Level is observed, slope is not.
"""
def __init__(
self,
use_level_noise=True,
configuration=state_space_model.StateSpaceModelConfiguration()):
"""Configure the model.
Args:
use_level_noise: Whether to model the time series as having level noise.
configuration: A StateSpaceModelConfiguration object.
"""
self.use_level_noise = use_level_noise
super(AdderStateSpaceModel, self).__init__(
configuration=configuration)
def get_prior_mean(self):
"""If un-chunked data is available, set initial level to the first value."""
with variable_scope.variable_scope(self._variable_scope):
if self._input_statistics is not None:
# TODO(allenl): Better support for multivariate series here.
initial_value = array_ops.stack([
math_ops.reduce_mean(
self._scale_data(
self._input_statistics.series_start_moments.mean)),
0.
])
return initial_value + variable_scope.get_variable(
name="prior_state_mean",
shape=initial_value.get_shape(),
initializer=init_ops.zeros_initializer(),
dtype=self.dtype,
trainable=self._configuration.trainable_start_state)
else:
return super(AdderStateSpaceModel, self).get_prior_mean()
def transition_to_powers(self, powers):
"""Computes powers of the adder transition matrix efficiently.
Args:
powers: An integer Tensor, shape [...], with powers to raise the
transition matrix to.
Returns:
A floating point Tensor with shape [..., 2, 2] containing:
transition^power = [[1., power],
[0., 1.]]
"""
paddings = array_ops.concat(
[
array_ops.zeros([array_ops.rank(powers), 2], dtype=dtypes.int32),
[(0, 1), (1, 0)]
],
axis=0)
powers_padded = array_ops.pad(powers[..., None, None], paddings=paddings)
identity_matrices = linalg_ops.eye(
num_rows=2, batch_shape=array_ops.shape(powers), dtype=self.dtype)
return identity_matrices + math_ops.cast(powers_padded, self.dtype)
def transition_power_noise_accumulator(self, num_steps):
"""Computes power sums in closed form."""
def _pack_and_reshape(*values):
return array_ops.reshape(
array_ops.stack(axis=1, values=values),
array_ops.concat(values=[array_ops.shape(num_steps), [2, 2]], axis=0))
num_steps = math_ops.cast(num_steps, self.dtype)
noise_transitions = num_steps - 1
noise_transform = ops.convert_to_tensor(self.get_noise_transform(),
self.dtype)
noise_covariance_transformed = math_ops.matmul(
math_ops.matmul(noise_transform,
self.state_transition_noise_covariance),
noise_transform,
adjoint_b=True)
# Un-packing the transformed noise as:
# [[a b]
# [c d]]
a, b, c, d = array_ops.unstack(
array_ops.reshape(noise_covariance_transformed, [-1, 4]), axis=1)
sum_of_first_n = noise_transitions * (noise_transitions + 1) / 2
sum_of_first_n_squares = sum_of_first_n * (2 * noise_transitions + 1) / 3
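# These are the closed forms sum_{i=1}^{n} i = n (n + 1) / 2 and
# sum_{i=1}^{n} i^2 = n (n + 1) (2 n + 1) / 6 with n = noise_transitions.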
return _pack_and_reshape(
num_steps * a + sum_of_first_n * (b + c) + sum_of_first_n_squares * d,
num_steps * b + sum_of_first_n * d,
num_steps * c + sum_of_first_n * d,
num_steps * d)
def get_state_transition(self):
return [[1., 1.], # Add slope to level
[0., 1.]] # Maintain slope
def get_noise_transform(self):
if self.use_level_noise:
return [[1., 0.],
[0., 1.]]
else:
return [[0.],
[1.]]
def get_observation_model(self, times):
"""Observe level but not slope.
See StateSpaceModel.get_observation_model.
Args:
times: Unused. See the parent class for details.
Returns:
A static, univariate observation model for later broadcasting.
"""
del times # Does not rely on times. Uses broadcasting from the parent.
return constant_op.constant([1., 0.], dtype=self.dtype)
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/state_space_models/level_trend.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements a time series model with seasonality, trends, and transients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import level_trend
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import periodic
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import state_space_model
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import varma
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
def _replicate_level_trend_models(multivariate_configuration,
univariate_configuration):
"""Helper function to construct a multivariate level/trend component."""
with variable_scope.variable_scope("adder"):
# Construct a level and trend model for each feature, with correlated
# transition noise.
adder_features = []
for feature in range(multivariate_configuration.num_features):
with variable_scope.variable_scope("feature{}".format(feature)):
adder_features.append(level_trend.AdderStateSpaceModel(
configuration=univariate_configuration))
adder_part = state_space_model.StateSpaceCorrelatedFeaturesEnsemble(
ensemble_members=adder_features,
configuration=multivariate_configuration)
return adder_part
class StructuralEnsemble(state_space_model.StateSpaceIndependentEnsemble):
r"""A structural state space time series model.
In the spirit of:
Scott, Steven L., and Hal R. Varian. "Predicting the present with bayesian
structural time series." International Journal of Mathematical Modelling and
Numerical Optimisation 5.1-2 (2014): 4-23.
Without the spike-and-slab prior, and with point estimates of parameters
instead of sampling.
The model includes level, trend, seasonality, and a transient moving average.
An observation at time t is drawn according to:
observation_t = level_t + seasonality_t + moving_average_t
+ observation_noise_t
level_t = level_{t-1} + trend_{t-1} + level_noise_t
trend_t = trend_{t-1} + trend_noise_t
seasonality_t = -\sum_{n=1}^{num_seasons-1} seasonality_{t-n} +
seasonality_noise_t
moving_average_t = transient_t
+ \sum_{j=1}^{moving_average_order} ma_coefs_j * transient_{t - j}
`observation_noise`, `level_noise`, `trend noise`, `seasonality_noise`, and
`transient` are (typically scalar) Gaussian random variables whose variance is
learned from data, and that variance is not time dependent in this
implementation. Level noise is optional due to its similarity with observation
noise in some cases. Seasonality is enforced by constraining a full cycle of
seasonal variables to have zero expectation, allowing seasonality to adapt
over time. The moving average coefficients `ma_coefs` are learned.
When presented with a multivariate series (more than one "feature", here
referring to endogenous features of the series), the model is replicated
across these features (one copy per feature of each periodic component, and
one level/trend model per feature), and correlations in transition noise are
learned between these replicated components (see
StateSpaceCorrelatedFeaturesEnsemble). This is in addition to the learned
correlations in observation noise between features. While this is often the
most expressive thing to do with multiple features, it does mean that the
model grows quite quickly, creating and computing with square matrices with
each dimension equal to num_features * (sum(periodicities) +
moving_average_order + 3), meaning that some operations are approximately
cubic in this value.
"""
# TODO(allenl): Implement partial model replication/sharing for multivariate
# series (to save time/memory when the series presented can be modeled as a
# smaller number of underlying series). Likely just a modification of the
# observation model so that each feature of the series is a learned linear
# combination of the replicated models.
def __init__(self,
periodicities,
moving_average_order,
autoregressive_order,
use_level_noise=True,
configuration=state_space_model.StateSpaceModelConfiguration()):
"""Initialize the Basic Structural Time Series model.
Args:
periodicities: Number of time steps for cyclic behavior. May be a list, in
which case one periodic component is created for each element.
moving_average_order: The number of moving average coefficients to use,
which also defines the number of steps after which transient
deviations revert to the mean defined by periodic and level/trend
components.
autoregressive_order: The number of steps back for autoregression.
use_level_noise: Whether to model the time series as having level
noise. See level_noise in the model description above.
configuration: A StateSpaceModelConfiguration object.
"""
component_model_configuration = configuration._replace(
use_observation_noise=False)
univariate_component_model_configuration = (
component_model_configuration._replace(
num_features=1))
adder_part = _replicate_level_trend_models(
multivariate_configuration=component_model_configuration,
univariate_configuration=univariate_component_model_configuration)
with variable_scope.variable_scope("varma"):
varma_part = varma.VARMA(
autoregressive_order=autoregressive_order,
moving_average_order=moving_average_order,
configuration=component_model_configuration)
cycle_parts = []
periodicity_list = nest.flatten(periodicities)
for cycle_number, cycle_periodicity in enumerate(periodicity_list):
# For each specified periodicity, construct models for each feature with
# correlated noise.
with variable_scope.variable_scope("cycle{}".format(cycle_number)):
cycle_features = []
for feature in range(configuration.num_features):
with variable_scope.variable_scope("feature{}".format(feature)):
cycle_features.append(periodic.CycleStateSpaceModel(
periodicity=cycle_periodicity,
configuration=univariate_component_model_configuration))
cycle_parts.append(
state_space_model.StateSpaceCorrelatedFeaturesEnsemble(
ensemble_members=cycle_features,
configuration=component_model_configuration))
super(StructuralEnsemble, self).__init__(
ensemble_members=[adder_part, varma_part] + cycle_parts,
configuration=configuration)
# TODO(allenl): Implement a multi-resolution moving average component to
# decouple model size from the length of transient deviations.
class MultiResolutionStructuralEnsemble(
state_space_model.StateSpaceIndependentEnsemble):
"""A structural ensemble modeling arbitrary periods with a fixed model size.
See periodic.ResolutionCycleModel, which allows a fixed number of latent
values to cycle at multiple/variable resolutions, for more details on the
difference between MultiResolutionStructuralEnsemble and
StructuralEnsemble. With `cycle_num_latent_values` (controlling model size)
equal to `periodicities` (controlling the time over which these values
complete a full cycle), the models are
equivalent. MultiResolutionStructuralEnsemble allows `periodicities` to vary
while the model size remains fixed. Note that high `periodicities` without a
correspondingly high `cycle_num_latent_values` means that the modeled series
must have a relatively smooth periodic component.
Multiple features are handled the same way as in StructuralEnsemble (one
replication per feature, with correlations learned between the replicated
models). This strategy produces a very flexible model, but means that series
with many features may be slow to train.
Model size (the state dimension) is:
num_features * (sum(cycle_num_latent_values)
+ max(moving_average_order + 1, autoregressive_order) + 2)
"""
def __init__(self,
cycle_num_latent_values,
moving_average_order,
autoregressive_order,
periodicities,
use_level_noise=True,
configuration=state_space_model.StateSpaceModelConfiguration()):
"""Initialize the multi-resolution structural ensemble.
Args:
cycle_num_latent_values: Controls the model size and the number of latent
values cycled between (but not the periods over which they cycle).
Reducing this parameter can save significant amounts of memory, but
the tradeoff is with resolution: cycling between a smaller number of
latent values means that only smoother functions can be modeled. For
multivariate series, may either be a scalar integer (in which case it
is applied to all periodic components) or a list with length matching
`periodicities`.
moving_average_order: The number of moving average coefficients to use,
which also defines the number of steps after which transient
deviations revert to the mean defined by periodic and level/trend
components. Adds to model size.
autoregressive_order: The number of steps back for
autoregression. Learning autoregressive coefficients typically
requires more steps and a smaller step size than other components.
periodicities: Same meaning as for StructuralEnsemble: number of steps for
cyclic behavior. Floating point and Tensor values are supported. May
be a list of values, in which case one component is created for each
periodicity. If `periodicities` is a list while
`cycle_num_latent_values` is a scalar, its value is broadcast to each
periodic component. Otherwise they should be lists of the same length,
in which case they are paired.
use_level_noise: See StructuralEnsemble.
configuration: A StateSpaceModelConfiguration object.
Raises:
ValueError: If `cycle_num_latent_values` is neither a scalar nor agrees in
size with `periodicities`.
"""
component_model_configuration = configuration._replace(
use_observation_noise=False)
univariate_component_model_configuration = (
component_model_configuration._replace(
num_features=1))
adder_part = _replicate_level_trend_models(
multivariate_configuration=component_model_configuration,
univariate_configuration=univariate_component_model_configuration)
with variable_scope.variable_scope("varma"):
varma_part = varma.VARMA(
autoregressive_order=autoregressive_order,
moving_average_order=moving_average_order,
configuration=component_model_configuration)
cycle_parts = []
if periodicities is None:
periodicities = []
periodicity_list = nest.flatten(periodicities)
latent_values_list = nest.flatten(cycle_num_latent_values)
if len(periodicity_list) != len(latent_values_list):
if len(latent_values_list) != 1:
raise ValueError(
("`cycle_num_latent_values` must either be a list with the same "
"size as `periodicity` or a scalar. Received length {} "
"`cycle_num_latent_values`, while `periodicities` has length {}.")
.format(len(latent_values_list), len(periodicity_list)))
latent_values_list *= len(periodicity_list)
for cycle_number, (cycle_periodicity, num_latent_values) in enumerate(
zip(periodicity_list, latent_values_list)):
with variable_scope.variable_scope("cycle{}".format(cycle_number)):
cycle_features = []
for feature in range(configuration.num_features):
with variable_scope.variable_scope("feature{}".format(feature)):
cycle_features.append(
periodic.ResolutionCycleModel(
num_latent_values=num_latent_values,
periodicity=cycle_periodicity,
configuration=univariate_component_model_configuration))
cycle_parts.append(
state_space_model.StateSpaceCorrelatedFeaturesEnsemble(
ensemble_members=cycle_features,
configuration=component_model_configuration))
super(MultiResolutionStructuralEnsemble, self).__init__(
ensemble_members=[adder_part, varma_part] + cycle_parts,
configuration=configuration)
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/state_space_models/structural_ensemble.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""State space components for modeling seasonality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import state_space_model
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
class CycleStateSpaceModel(state_space_model.StateSpaceModel):
"""A state space model component which cycles between values.
Stores N values using N - 1 latent values, the Nth being the negative sum of
those explicitly stored. At any given timestep one of these values is
observed. Noise is assumed to affect only one of the transitions.
"""
def __init__(
self,
periodicity,
configuration=state_space_model.StateSpaceModelConfiguration()):
self._periodicity = periodicity
super(CycleStateSpaceModel, self).__init__(configuration=configuration)
def get_state_transition(self):
return self.transition_to_powers(array_ops.ones([], dtype=dtypes.int32))
def get_noise_transform(self):
# transition_power_noise_accumulator makes assumptions about this
# transformation. If the noise transform is modified or overridden,
# transition_power_noise_accumulator must be modified as well (or discarded,
# as it is simply an optimization).
return array_ops.pad(
array_ops.ones([1], dtype=self.dtype),
paddings=[(0, self._periodicity - 2)])[..., None]
def transition_to_powers(self, powers):
"""Computes powers of the cycle transition matrix efficiently.
Args:
powers: An integer Tensor, shape [...], with powers to raise the
transition matrix to.
Returns:
A floating point Tensor with shape [..., self._periodicity - 1,
self._periodicity - 1] containing:
(transition^power)_{i, j} = {
1 if (i - j) % self._periodicity == power % self._periodicity
-1 if (i + 1) % self._periodicity == power % self._periodicity
0 otherwise}
"""
powers %= self._periodicity
range_shape_padded = array_ops.reshape(
math_ops.range(self._periodicity - 1, dtype=powers.dtype),
array_ops.concat(
[
array_ops.ones([array_ops.rank(powers)], dtype=dtypes.int32),
[self._periodicity - 1]
],
axis=0))
is_row_negative = math_ops.equal(range_shape_padded + 1, powers[..., None])
row_indicator_shape = array_ops.shape(is_row_negative)
negative_row_indicator = array_ops.where(is_row_negative, -array_ops.ones(
shape=row_indicator_shape, dtype=self.dtype),
array_ops.zeros(
row_indicator_shape,
dtype=self.dtype))
coord_diff = (range_shape_padded[..., None]
- range_shape_padded[..., None, :])
is_one = math_ops.equal(coord_diff % self._periodicity,
powers[..., None, None])
positive_ones = array_ops.where(is_one,
array_ops.ones(
array_ops.shape(is_one),
dtype=self.dtype),
array_ops.zeros(
array_ops.shape(is_one),
dtype=self.dtype))
return math_ops.cast(positive_ones + negative_row_indicator[..., None],
self.dtype)
def transition_power_noise_accumulator(
self, num_steps, noise_addition_coefficient=1):
r"""Sum the transitioned covariance matrix over a number of steps.
Assumes that state_transition_noise_covariance is a matrix with a single
non-zero value in the upper left.
Args:
num_steps: A [...] shape integer Tensor with numbers of steps to compute
power sums for.
noise_addition_coefficient: A multiplier for the state transition noise
covariance (used in ResolutionCycleModel to compute multiples of full
period sums).
Returns:
The computed power sum, with shape [..., state dimension, state
dimension] containing:
[\sum_{p=0}^{num_steps - 1} (
state_transition^p
* state_transition_noise_covariance
* (state_transition^p)^T)]_{i, j} = {
-contribution_{j + 1} if j == i - 1
contribution_{j + 1} + contribution_{j} if j == i
-contribution_{j} if j == i + 1
0 otherwise
}
contribution_k = noise_scalar
* ((num_steps + self._periodicity - 1 - (k % self._periodicity))
// self._periodicity)
Where contribution_k is the sum of noise_scalar additions to component k
of the periodicity.
"""
noise_addition_scalar = array_ops.squeeze(
self.state_transition_noise_covariance, axis=[-1, -2])
period_range_reshaped = array_ops.reshape(
math_ops.range(self._periodicity, dtype=num_steps.dtype),
array_ops.concat(
[
array_ops.ones([array_ops.rank(num_steps)], dtype=dtypes.int32),
[self._periodicity]
],
axis=0))
reversed_remaining_steps = ((period_range_reshaped
- (num_steps[..., None] - 1))
% self._periodicity)
period_additions_reversed = (ops.convert_to_tensor(
noise_addition_coefficient,
self.dtype)[..., None] * noise_addition_scalar * math_ops.cast(
(num_steps[..., None] + reversed_remaining_steps) //
self._periodicity,
dtype=self.dtype))
period_additions_diag = array_ops.matrix_diag(period_additions_reversed)
upper_band = array_ops.concat(
[
array_ops.zeros_like(period_additions_diag[..., :-1, 0:1]),
-period_additions_diag[..., :-1, 0:-2]
],
axis=-1)
lower_band = array_ops.concat(
[
array_ops.zeros_like(period_additions_diag[..., 0:1, :-1]),
-period_additions_diag[..., 0:-2, :-1]
],
axis=-2)
period_additions_rotated = array_ops.concat(
[
period_additions_reversed[..., -1:],
period_additions_reversed[..., :-2]
],
axis=-1)
diagonal = array_ops.matrix_diag(period_additions_reversed[..., :-1] +
period_additions_rotated)
return diagonal + lower_band + upper_band
def get_observation_model(self, times):
"""Observe only the first of the rotating latent values.
See StateSpaceModel.get_observation_model.
Args:
times: Unused. See the parent class for details.
Returns:
A static, univariate observation model for later broadcasting.
"""
del times # Does not rely on times. Uses broadcasting from the parent.
return array_ops.concat(
values=[
array_ops.ones([1], dtype=self.dtype), array_ops.zeros(
[self._periodicity - 2], dtype=self.dtype)
],
axis=0)
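# Illustrative sketch (not part of the library): the closed form documented in
# `transition_to_powers` above, written with NumPy so it can be compared
# against an explicit matrix power for small cases. The function name
# `_cycle_transition_power_reference` is hypothetical.
def _cycle_transition_power_reference(periodicity, power):
  """Builds (transition^power) directly from the docstring formula."""
  size = periodicity - 1
  result = numpy.zeros([size, size])
  for i in range(size):
    for j in range(size):
      if (i - j) % periodicity == power % periodicity:
        result[i, j] = 1.
      elif (i + 1) % periodicity == power % periodicity:
        result[i, j] = -1.
  return result
# For example, raising the one-step matrix to a power agrees with the formula:
#   numpy.allclose(
#       numpy.linalg.matrix_power(_cycle_transition_power_reference(4, 1), 3),
#       _cycle_transition_power_reference(4, 3))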
class ResolutionCycleModel(CycleStateSpaceModel):
"""A version of CycleStateSpaceModel with variable resolution.
Cycles between "num_latent_values" latent values over a period of
"periodicity", smoothly interpolating. Simply raises the transition matrix
from CycleStateSpaceModel to the power (num_latent_values / periodicity).
Specifically, ResolutionCycleModel uses the following eigendecomposition of
the CycleStateSpaceModel matrix (there are several parameterizations, others
leading to roots of the matrix with complex values):
eigenvectors_{i, j}
= root_of_unity(floor(j / 2) + 1, i * (-1)^(j + 1))
- root_of_unity(floor(j / 2) + 1, (i + 1) * (-1)^(j + 1))
eigenvalues_j = root_of_unity(floor(j / 2) + 1, (-1)^j)
root_of_unity(root_number, to_power)
= exp(to_power * 2 * pi * sqrt(-1) * root_number
/ num_latent_values)
The transition matrix for ResolutionCycleModel is then:
eigenvectors
* diag(eigenvalues^(num_latent_values / periodicity))
* eigenvectors^-1
Since the eigenvalues are paired with their conjugates (conj(e^(sqrt(-1)*x)) =
e^(-sqrt(-1)*x)), the resulting matrix has real components (this is why only
odd numbers of latent values are supported, since the size of the matrix is
one less than the number of latent values and there must be an even number of
eigenvalues to pair them off).
See ./g3doc/periodic_multires_derivation.md for details.
"""
def __init__(
self,
num_latent_values,
periodicity,
near_integer_threshold=1e-8,
configuration=state_space_model.StateSpaceModelConfiguration()):
"""Initialize the ResolutionCycleModel.
Args:
num_latent_values: Controls the representational power and memory usage of
the model. The transition matrix has shape [num_latent_values - 1,
num_latent_values - 1]. Must be an odd integer (see class docstring for
why).
periodicity: The number of steps for cyclic behavior. May be a Tensor, and
need not be an integer (although integer values greater than
num_latent_values have more efficient special cases).
near_integer_threshold: When avoiding singularities, controls how close a
number should be to that singularity before the special case takes over.
configuration: A StateSpaceModelConfiguration object.
Raises:
ValueError: If num_latent_values is not odd.
"""
if num_latent_values % 2 != 1:
raise ValueError("Only odd numbers of latent values are supported.")
self._num_latent_values = num_latent_values
self._true_periodicity = periodicity
self._near_integer_threshold = near_integer_threshold
super(ResolutionCycleModel, self).__init__(
periodicity=num_latent_values,
configuration=configuration)
def _close_to_integer(self, value):
value = math_ops.cast(value, self.dtype)
return math_ops.less(
math_ops.abs(value - gen_math_ops.round(value)),
self._near_integer_threshold)
def transition_to_powers(self, powers):
"""Computes TransitionMatrix^power efficiently.
For an n x n transition matrix we have:
(TransitionMatrix**power)_{i, j} = (-1) ** i * sin(pi * power) / (n + 1)
* ((-1) ** j / sin(pi / (n + 1) * (power - i + j))
+ 1 / sin(pi / (n + 1) * (power - i - 1)))
The sin(pi * power) term is zero whenever "power" is an integer. However,
the 1 / sin(x) terms (cosecants) occasionally (when their arguments are
multiples of pi) cancel out this value. The limit as the argument approaches
an integer value gives the "correct" result, but computing these separately
gives 0 * inf = NaN. Instead, there is a special case for near-integer
values.
Args:
powers: A floating point Tensor of powers to raise the transition matrix
to.
Returns:
A [..., self._num_latent_values - 1, self._num_latent_values - 1] floating
point Tensor with the transition matrix raised to each power in
`powers`.
"""
num_latent_values_float = math_ops.cast(self._num_latent_values, self.dtype)
latent_values_per_period = (num_latent_values_float / math_ops.cast(
self._true_periodicity, dtype=self.dtype))
original_matrix_powers = (math_ops.cast(powers, self.dtype) *
latent_values_per_period)
global_coeff = (math_ops.sin(original_matrix_powers * numpy.pi) /
num_latent_values_float)[..., None, None]
matrix_dimension_range = array_ops.reshape(
math_ops.range(self._num_latent_values - 1),
array_ops.concat(
[
array_ops.ones(
[array_ops.rank(original_matrix_powers)],
dtype=dtypes.int32), [self._num_latent_values - 1]
],
axis=0))
matrix_dimension_range_float = math_ops.cast(matrix_dimension_range,
self.dtype)
alternating = math_ops.cast(1 - 2 * (matrix_dimension_range % 2),
self.dtype)
row_addend = 1. / math_ops.sin(numpy.pi / num_latent_values_float * (
original_matrix_powers[..., None] - matrix_dimension_range_float - 1))
column_minus_row = (matrix_dimension_range_float[..., None, :]
- matrix_dimension_range_float[..., None])
full_matrix_addend = (alternating[..., None, :] / math_ops.sin(
numpy.pi / num_latent_values_float *
(original_matrix_powers[..., None, None] + column_minus_row)))
continuous_construction = global_coeff * alternating[..., None] * (
row_addend[..., None] + full_matrix_addend)
# For integer powers, the above formula is only correct in the limit,
# yielding NaNs as written. We defer to the super-class in such cases, which
# computes integer powers exactly.
return array_ops.where(
self._close_to_integer(original_matrix_powers),
super(ResolutionCycleModel, self).transition_to_powers(
math_ops.cast(
gen_math_ops.round(original_matrix_powers), dtypes.int64)),
continuous_construction)
def transition_power_noise_accumulator(self, num_steps):
"""Sum the transitioned covariance matrix over a number of steps.
Args:
num_steps: An integer Tensor of any shape [...] indicating the number of
steps to compute for each part of the batch.
Returns:
A [..., self._num_latent_values - 1, self._num_latent_values - 1] floating
point Tensor corresponding to each requested number of steps, containing:
sum_{i=1}^{steps} transition^i * noise_covariance
* (transition^i)^T
"""
def _whole_periods_folded():
"""A more efficient special casing for integer periods.
We knock off full periods, leaving at most self._true_periodicity steps to
compute.
Returns:
A tuple of (remaining_whole_steps, current_accumulation):
remaining_whole_steps: An integer Tensor with the same shape as the
`num_steps` argument to `transition_power_noise_accumulator`,
indicating the reduced number of steps which must be computed
sequentially and added to `current_accumulation`.
current_accumulation: A [..., self._num_latent_values - 1,
self._num_latent_values - 1] floating point Tensor corresponding to
the accumulations for steps which were computed in this function.
"""
original_transition_noise_addition_coefficient = (math_ops.cast(
self._true_periodicity, self.dtype) / math_ops.cast(
self._num_latent_values, self.dtype))
full_period_accumulation = super(
ResolutionCycleModel, self).transition_power_noise_accumulator(
noise_addition_coefficient=
original_transition_noise_addition_coefficient,
num_steps=ops.convert_to_tensor(
self._num_latent_values, dtype=num_steps.dtype))
periodicity_integer = math_ops.cast(self._true_periodicity,
num_steps.dtype)
full_periods = math_ops.cast(num_steps // periodicity_integer, self.dtype)
current_accumulation = full_periods[..., None, None] * array_ops.reshape(
full_period_accumulation,
array_ops.concat(
[
array_ops.ones(
[array_ops.rank(full_periods)], dtype=dtypes.int32),
array_ops.shape(full_period_accumulation)
],
axis=0))
remaining_whole_steps = num_steps % periodicity_integer
return remaining_whole_steps, current_accumulation
def _no_whole_period_computation():
"""A less efficient special casing for real valued periods.
This special casing is still preferable to computing using sequential
matrix multiplies (parallelizable, more numerically stable), but is linear
in the number of steps.
Returns:
Same shapes and types as `_whole_periods_folded`, but no folding is done
in this function.
"""
current_accumulation = array_ops.zeros(
array_ops.concat(
[
array_ops.shape(num_steps),
[self._num_latent_values - 1, self._num_latent_values - 1]
],
axis=0),
dtype=self.dtype)
remaining_whole_steps = num_steps
return remaining_whole_steps, current_accumulation
# Decide whether it's feasible to compute whole periods in closed form,
# taking advantage of the fact that a sum over self._true_periodicity steps
# in our transition matrix is proportional to a sum over
# self._num_latent_values steps in the unmodified matrix (because each
# latent value gets the same treatment). This is possible for integer
# self._true_periodicity, since we stay aligned to integer steps. For real
# valued self._true_periodicity, or when the cyclic behavior is a higher
# resolution than 1 per step, taking whole periods leads to misalignment
# with integer steps, which would be difficult to recover from.
remaining_whole_steps, current_accumulation = control_flow_ops.cond(
self._whole_period_folding(), _whole_periods_folded,
_no_whole_period_computation)
steps_to_compute = math_ops.reduce_max(remaining_whole_steps)
remaining_step_noise_additions = self._power_sum_array(steps_to_compute)
noise_addition_scalar = array_ops.squeeze(
self.state_transition_noise_covariance, axis=[-1, -2])
return current_accumulation + noise_addition_scalar * array_ops.gather(
remaining_step_noise_additions, indices=remaining_whole_steps)
def _whole_period_folding(self):
"""Decides whether computing a whole period maintains alignment."""
return math_ops.logical_and(
self._close_to_integer(self._true_periodicity),
math_ops.greater_equal(self._true_periodicity, self._num_latent_values))
def _power_sum_array(self, max_remaining_steps):
r"""Computes \sum_{i=0}^{N-1} A^i B (A^i)^T for N=0..max_remaining_steps.
A is the transition matrix and B is the noise covariance.
This is more efficient in practice than math_utils.power_sums_tensor, since
each A^i B (A^i)^T term has a closed-form expression which does not depend on
the (i - 1)th term, so vectorization can replace explicit looping.
Uses a cumulative sum on the following expression:
(transition^p * transition_covariance * (transition^p)^T)_{i, j}
= (-1)^(i + j) * sin^2(pi * p) / num_latent_values^2
* (1/sin(pi / num_latent_values * (p - i))
+ 1/sin(pi / num_latent_values * (p - i - 1)))
* (1/sin(pi / num_latent_values * (p - j))
+ 1/sin(pi / num_latent_values * (p - j - 1)))
The expression is derived from the eigenvectors and eigenvalues given in
the class docstring (and, as with CycleStateSpaceModel, takes advantage of
the sparsity of the transition covariance).
Args:
max_remaining_steps: A scalar integer Tensor indicating the number of
non-trivial values to compute.
Returns:
A [max_remaining_steps + 1, self._num_latent_values - 1,
self._num_latent_values - 1] floating point Tensor S with cumulative power
sums.
S[N] = \sum_{i=0}^{N-1} A^i B (A^i)^T
S[0] is the zero matrix
S[1] is B
S[2] is A B A^T + B
"""
num_latent_values_float = math_ops.cast(self._num_latent_values, self.dtype)
latent_values_per_period = (num_latent_values_float / math_ops.cast(
self._true_periodicity, dtype=self.dtype))
original_matrix_powers = (math_ops.cast(
math_ops.range(max_remaining_steps),
self.dtype) * latent_values_per_period)
matrix_dimension_range = math_ops.range(
self._num_latent_values - 1)[None, ...]
matrix_dimension_range_float = math_ops.cast(matrix_dimension_range,
self.dtype)
def _cosecant_with_freq(coefficient):
return 1. / math_ops.sin(numpy.pi / num_latent_values_float * coefficient)
power_minus_index = (original_matrix_powers[..., None]
- matrix_dimension_range_float)
mesh_values = (_cosecant_with_freq(power_minus_index)
+ _cosecant_with_freq(power_minus_index - 1.))
meshed = mesh_values[..., None, :] * mesh_values[..., None]
full_matrix_alternating = math_ops.cast(1 - 2 * (
(matrix_dimension_range[..., None, :] +
matrix_dimension_range[..., None]) % 2), self.dtype)
def _sine_discontinuity(value):
"""A special case for dealing with discontinuities.
Decides whether `value` is close to an integer, and if so computes:
lim x->n |sin(x * pi)| / sin(x * pi) = sign(sin(n * pi))
= cos(n * pi)
Args:
value: The floating point Tensor value which may lead to a
discontinuity.
Returns:
A tuple of (is_discontinuous, sign):
is_discontinuous: A boolean Tensor of the same shape as `value`,
indicating whether it is near an integer.
sign: A floating point Tensor indicating the sign of the discontinuity
(being near 1 or -1 when `is_discontinuous` is True), of the same
shape and type as `value`.
"""
normalized = value / num_latent_values_float
is_discontinuous = self._close_to_integer(normalized)
sign = math_ops.cos(normalized * numpy.pi)
return is_discontinuous, sign
index_discontinuous, index_sign = _sine_discontinuity(
original_matrix_powers[..., None]
- matrix_dimension_range_float)
index_minus_discontinuous, index_minus_sign = _sine_discontinuity(
original_matrix_powers[..., None]
- matrix_dimension_range_float
- 1)
ones_mask_vector = math_ops.logical_or(index_discontinuous,
index_minus_discontinuous)
ones_sign_vector = array_ops.where(index_discontinuous, index_sign,
index_minus_sign)
ones_mask = math_ops.logical_and(ones_mask_vector[..., None],
ones_mask_vector[..., None, :])
zeros_mask = self._close_to_integer(original_matrix_powers)
zeroed = array_ops.where(zeros_mask, array_ops.zeros_like(meshed), meshed)
global_coefficient = (math_ops.sin(numpy.pi * original_matrix_powers) /
num_latent_values_float)
masked_meshed = array_ops.where(
ones_mask, ones_sign_vector[..., None] * ones_sign_vector[..., None, :],
zeroed * global_coefficient[..., None, None]**2)
powers_above_zero = full_matrix_alternating * masked_meshed
return array_ops.pad(
math_ops.cumsum(powers_above_zero), [(1, 0), (0, 0), (0, 0)])
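# Illustrative sketch (not part of the library): a brute-force NumPy reference
# for the cumulative power sums that `_power_sum_array` computes in closed
# form, S[N] = \sum_{i=0}^{N-1} A^i B (A^i)^T. Useful for spot-checking small
# cases; `_power_sum_reference` is a hypothetical name.
def _power_sum_reference(transition, noise_covariance, max_steps):
  """Returns S with S[0] = 0, S[1] = B, S[2] = A B A^T + B, and so on."""
  size = transition.shape[0]
  sums = numpy.zeros([max_steps + 1, size, size])
  for step in range(1, max_steps + 1):
    powered = numpy.linalg.matrix_power(transition, step - 1)
    sums[step] = sums[step - 1] + powered.dot(noise_covariance).dot(powered.T)
  return sums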
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/state_space_models/periodic.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Filtering postprocessors for SequentialTimeSeriesModels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import nest
@six.add_metaclass(abc.ABCMeta)
class FilteringStepPostprocessor(object):
"""Base class for processors that are applied after each filter step."""
@abc.abstractmethod
def process_filtering_step(self, current_times, current_values,
predicted_state, filtered_state, outputs):
"""Extends/modifies a filtering step, altering state and loss.
Args:
current_times: A [batch size] integer Tensor of times.
current_values: A [batch size x num features] Tensor of values filtering
is being performed on.
predicted_state: A (possibly nested) list of Tensors indicating model
state which does not take `current_times` and `current_values` into
account.
filtered_state: Same structure as predicted_state, but updated to take
`current_times` and `current_values` into account.
outputs: A dictionary of outputs produced by model filtering
(SequentialTimeSeriesModel._process_filtering_step).
Returns: A tuple of (new_state, updated_outputs);
new_state: Updated state with the same structure as `filtered_state` and
`predicted_state`.
updated_outputs: The `outputs` dictionary, updated with any new outputs
from this filtering postprocessor.
"""
pass
@abc.abstractproperty
def output_names(self):
return []
def cauchy_alternative_to_gaussian(current_times, current_values, outputs):
"""A Cauchy anomaly distribution, centered at a Gaussian prediction.
Performs an entropy-matching approximation of the scale parameters of
independent Cauchy distributions given the covariance matrix of a multivariate
Gaussian in outputs["covariance"], and centers the Cauchy distributions at
outputs["mean"]. This requires that the model that we are creating an
alternative/anomaly distribution for produces a mean and covariance.
Args:
current_times: A [batch size] Tensor of times, unused.
current_values: A [batch size x num features] Tensor of values to evaluate
the anomaly distribution at.
outputs: A dictionary of Tensors with keys "mean" and "covariance"
describing the Gaussian to construct an anomaly distribution from. The
value corresponding to "mean" has shape [batch size x num features], and
the value corresponding to "covariance" has shape [batch size x num
features x num features].
Returns:
A [batch size] Tensor of log likelihoods; the anomaly log PDF evaluated at
`current_values`.
"""
del current_times # unused
cauchy_scale = math_utils.entropy_matched_cauchy_scale(outputs["covariance"])
individual_log_pdfs = math_utils.cauchy_log_prob(
loc=outputs["mean"],
scale=cauchy_scale,
x=current_values)
return math_ops.reduce_sum(individual_log_pdfs, axis=1)
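# Illustrative sketch (not part of the library): the standard univariate Cauchy
# log density which the function above sums across features, written with NumPy
# for reference. `_cauchy_log_prob_reference` is a hypothetical name, and the
# library's `math_utils.cauchy_log_prob` is assumed to compute the same
# quantity elementwise.
def _cauchy_log_prob_reference(loc, scale, x):
  import numpy  # Local import; this sketch is self-contained.
  z = (numpy.asarray(x, dtype=float) - loc) / scale
  return -numpy.log(numpy.pi * scale * (1. + z ** 2))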
def _interpolate_state_linear(first_state, second_state, first_responsibility):
"""Interpolate between two model states linearly."""
interpolated_state_flat = []
for first_state_tensor, second_state_tensor in zip(
nest.flatten(first_state), nest.flatten(second_state)):
assert first_state_tensor.dtype == second_state_tensor.dtype
if first_state_tensor.dtype.is_floating:
# Pad the responsibility shape with ones up to the state's rank so that it
# broadcasts
first_responsibility_padded = array_ops.reshape(
tensor=first_responsibility,
shape=array_ops.concat([
array_ops.shape(first_responsibility), array_ops.ones(
[array_ops.rank(first_state_tensor) - 1], dtype=dtypes.int32)
], 0))
interpolated_state = (
first_responsibility_padded * first_state_tensor
+ (1. - first_responsibility_padded) * second_state_tensor)
interpolated_state.set_shape(first_state_tensor.get_shape())
interpolated_state_flat.append(interpolated_state)
else:
# Integer dtypes are probably representing times, and don't need
# interpolation; just verify that they are identical.
with ops.control_dependencies(
[check_ops.assert_equal(first_state_tensor, second_state_tensor)]):
interpolated_state_flat.append(array_ops.identity(first_state_tensor))
return nest.pack_sequence_as(first_state, interpolated_state_flat)
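# Illustrative sketch (not part of the library): the broadcasting trick used
# above, restated with NumPy. A [batch size] responsibility vector is reshaped
# to [batch size, 1, ..., 1] so it scales per-batch slices of a state tensor of
# arbitrary rank. `_broadcast_interpolate_reference` is a hypothetical name.
def _broadcast_interpolate_reference(first, second, first_responsibility):
  import numpy  # Local import; this sketch is self-contained.
  first = numpy.asarray(first, dtype=float)
  second = numpy.asarray(second, dtype=float)
  responsibility = numpy.reshape(
      first_responsibility, [-1] + [1] * (first.ndim - 1))
  return responsibility * first + (1. - responsibility) * second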
class StateInterpolatingAnomalyDetector(FilteringStepPostprocessor):
"""An anomaly detector which guards model state against outliers.
Smoothly interpolates between a model's predicted and inferred states, based
on the posterior probability of an anomaly, p(anomaly | data). This is useful
if anomalies would otherwise lead to model state which is hard to recover
from (Gaussian state space models suffer from this, for example).
Relies on (1) an alternative distribution, typically with heavier tails than
the model's normal predictions, and (2) a prior probability of an anomaly. The
prior probability acts as a penalty, discouraging the system from marking too
many points as anomalies. The alternative distribution indicates the
probability of a datapoint given that it is an anomaly, and is a heavy-tailed
distribution (Cauchy) centered around the model's predictions by default.
Specifically, we have:
p(anomaly | data) = p(data | anomaly) * anomaly_prior_probability
/ (p(data | not anomaly) * (1 - anomaly_prior_probability)
+ p(data | anomaly) * anomaly_prior_probability)
This is simply Bayes' theorem, where p(data | anomaly) is the
alternative/anomaly distribution, p(data | not anomaly) is the model's
predicted distribution, and anomaly_prior_probability is the prior probability
of an anomaly occurring (user-specified, defaulting to 1%).
Rather than computing p(anomaly | data) directly, we use the odds ratio:
odds_ratio = p(data | anomaly) * anomaly_prior_probability
/ (p(data | not anomaly) * (1 - anomaly_prior_probability))
This has the same information as p(anomaly | data):
odds_ratio = p(anomaly | data) / p(not anomaly | data)
A "responsibility" score is computed for the model based on the log odds
ratio, and state interpolated based on this responsibility:
model_responsibility = 1 / (1 + exp(-responsibility_scaling
* ln(odds_ratio)))
model_state = filtered_model_state * model_responsibility
+ predicted_model_state * (1 - model_responsibility)
loss = model_responsibility
* ln(p(data | not anomaly) * (1 - anomaly_prior_probability))
+ (1 - model_responsibility)
* ln(p(data | anomaly) * anomaly_prior_probability)
"""
output_names = ["anomaly_score"]
def __init__(self,
anomaly_log_likelihood=cauchy_alternative_to_gaussian,
anomaly_prior_probability=0.01,
responsibility_scaling=1.0):
"""Configure the anomaly detector.
Args:
anomaly_log_likelihood: A function taking `current_times`,
`current_values`, and `outputs` (same as the corresponding arguments
to process_filtering_step) and returning a [batch size] Tensor of log
likelihoods under an anomaly distribution.
anomaly_prior_probability: A scalar value, between 0 and 1, indicating the
prior probability of a particular example being an anomaly.
responsibility_scaling: A positive scalar controlling how fast
interpolation transitions between not-anomaly and anomaly; lower
values (closer to 0) create a smoother/slower transition.
"""
self._anomaly_log_likelihood = anomaly_log_likelihood
self._responsibility_scaling = responsibility_scaling
self._anomaly_prior_probability = anomaly_prior_probability
def process_filtering_step(self, current_times, current_values,
predicted_state, filtered_state, outputs):
"""Fall back on `predicted_state` for anomalies.
Args:
current_times: A [batch size] integer Tensor of times.
current_values: A [batch size x num features] Tensor of values filtering
is being performed on.
predicted_state: A (possibly nested) list of Tensors indicating model
state which does not take `current_times` and `current_values` into
account.
filtered_state: Same structure as predicted_state, but updated to take
`current_times` and `current_values` into account.
outputs: A dictionary of outputs produced by model filtering. Must
include `log_likelihood`, a [batch size] Tensor indicating the log
likelihood of the observations under the model's predictions.
Returns:
A tuple of (new_state, updated_outputs);
new_state: Updated state with the same structure as `filtered_state` and
`predicted_state`; predicted_state for anomalies and filtered_state
otherwise (per batch element).
updated_outputs: The `outputs` dictionary, updated with a new "loss"
(the interpolated negative log likelihoods under the model and
anomaly distributions) and "anomaly_score" (the log odds ratio of
each part of the batch being an anomaly).
"""
anomaly_log_likelihood = self._anomaly_log_likelihood(
current_times=current_times,
current_values=current_values,
outputs=outputs)
anomaly_prior_probability = ops.convert_to_tensor(
self._anomaly_prior_probability, dtype=current_values.dtype)
# p(data | anomaly) * p(anomaly)
data_and_anomaly_log_probability = (
anomaly_log_likelihood + math_ops.log(anomaly_prior_probability))
# p(data | no anomaly) * p(no anomaly)
data_and_no_anomaly_log_probability = (
outputs["log_likelihood"] + math_ops.log(1. - anomaly_prior_probability)
)
# A log odds ratio is slightly nicer here than computing p(anomaly | data),
# since it is centered around zero
anomaly_log_odds_ratio = (
data_and_anomaly_log_probability
- data_and_no_anomaly_log_probability)
model_responsibility = math_ops.sigmoid(-self._responsibility_scaling *
anomaly_log_odds_ratio)
# Do a linear interpolation between predicted and inferred model state
# based on the model's "responsibility". If we knew for sure whether
# this was an anomaly or not (binary responsibility), this would be the
# correct thing to do, but given that we don't it's just a
# (differentiable) heuristic.
interpolated_state = _interpolate_state_linear(
first_state=filtered_state,
second_state=predicted_state,
first_responsibility=model_responsibility)
# TODO(allenl): Try different responsibility scalings and interpolation
# methods (e.g. average in probability space rather than log space).
interpolated_log_likelihood = (
model_responsibility * data_and_no_anomaly_log_probability
+ (1. - model_responsibility) * data_and_anomaly_log_probability)
outputs["loss"] = -interpolated_log_likelihood
outputs["anomaly_score"] = anomaly_log_odds_ratio
return (interpolated_state, outputs)
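# Illustrative sketch (not part of the library): the responsibility arithmetic
# from the class docstring above in scalar form, matching the sigmoid used in
# `process_filtering_step`. All names here are hypothetical.
def _responsibility_reference(model_log_likelihood, anomaly_log_likelihood,
                              anomaly_prior_probability=0.01,
                              responsibility_scaling=1.0):
  """Returns (model_responsibility, anomaly_log_odds_ratio)."""
  import numpy  # Local import; this sketch is self-contained.
  anomaly_log_odds_ratio = (
      (anomaly_log_likelihood + numpy.log(anomaly_prior_probability))
      - (model_log_likelihood + numpy.log(1. - anomaly_prior_probability)))
  model_responsibility = 1. / (
      1. + numpy.exp(responsibility_scaling * anomaly_log_odds_ratio))
  return model_responsibility, anomaly_log_odds_ratio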
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/state_space_models/filtering_postprocessor.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Multivariate autoregressive model (vector autoregression).
Implements the following model (num_blocks = max(ar_order, ma_order + 1)):
y(t, 1) = \sum_{i=1}^{ar_order} ar_coefs[i] * y(t - 1, i)
y(t, i) = y(t - 1, i - 1) + ma_coefs[i - 1] * e(t) for 1 < i < num_blocks
y(t, num_blocks) = y(t - 1, num_blocks - 1) + e(t)
Where e(t) are Gaussian with zero mean and learned covariance.
Each element of ar_coefs and ma_coefs is a [num_features x num_features]
matrix. Each y(t, i) is a vector of length num_features. Indices in the above
equations are one-based. Initial conditions y(0, i) come from prior state (which
may either be learned or left as a constant with high prior covariance).
If ar_order > ma_order, the observation model is:
y(t, 1) + observation_noise(t)
If ma_order >= ar_order, it is (to observe the moving average component):
y(t, 1) + y(t, num_blocks) + observation_noise(t)
Where observation_noise(t) are Gaussian with zero mean and learned covariance.
This implementation uses a formulation which puts all of the autoregressive
coefficients in the transition equation for the observed component, which
enables learning using truncated backpropagation. Noise is not applied directly
to the observed component (with the exception of standard observation noise),
which further aids learning of the autoregressive coefficients when VARMA is in
an ensemble with other models (in which case having an observation noise term is
usually unavoidable).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import state_space_model
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
class VARMA(state_space_model.StateSpaceModel):
"""A VARMA model implementation as a special case of the state space model."""
def __init__(self,
autoregressive_order,
moving_average_order,
configuration=state_space_model.StateSpaceModelConfiguration()):
"""Construct a VARMA model.
The size of the latent state for this model is:
num_features * max(autoregressive_order, moving_average_order + 1)
Square matrices of this size are constructed and multiplied.
Args:
autoregressive_order: The maximum autoregressive lag.
moving_average_order: The maximum moving average lag, after which
transient deviations are expected to return to their long-term mean.
configuration: A StateSpaceModelConfiguration object.
"""
self.ar_order = autoregressive_order
self.ma_order = moving_average_order
self.state_num_blocks = max(autoregressive_order, moving_average_order + 1)
super(VARMA, self).__init__(configuration=configuration)
self.state_dimension = self.state_num_blocks * self.num_features
def _define_parameters(self, observation_transition_tradeoff_log=None):
with variable_scope.variable_scope(self._variable_scope):
# TODO(allenl): Evaluate parameter transformations for AR/MA coefficients
# which improve interpretability/stability.
self.ar_coefs = variable_scope.get_variable(
name="ar_coefs",
shape=[self.num_features, self.num_features, self.ar_order],
dtype=self.dtype,
initializer=init_ops.zeros_initializer())
self.ma_coefs = variable_scope.get_variable(
name="ma_coefs",
initializer=array_ops.tile(
linalg_ops.eye(self.num_features, dtype=self.dtype)[None, :, :],
[self.ma_order, 1, 1]),
dtype=self.dtype)
super(VARMA, self)._define_parameters(
observation_transition_tradeoff_log=observation_transition_tradeoff_log)
def get_state_transition(self):
"""Construct state transition matrix from VARMA parameters.
Returns:
the state transition matrix. It has shape
[self.state_dimension, self.state_dimension].
"""
# Pad any unused AR blocks with zeros. The extra state is necessary if
# ma_order >= ar_order.
ar_coefs_padded = array_ops.reshape(
array_ops.pad(self.ar_coefs,
[[0, 0], [0, 0],
[0, self.state_num_blocks - self.ar_order]]),
[self.num_features, self.state_dimension])
shift_matrix = array_ops.pad(
linalg_ops.eye(
(self.state_num_blocks - 1) * self.num_features, dtype=self.dtype),
[[0, 0], [0, self.num_features]])
return array_ops.concat([ar_coefs_padded, shift_matrix], axis=0)
def get_noise_transform(self):
"""Construct state noise transform matrix from VARMA parameters.
Returns:
the state noise transform matrix. It has shape
[self.state_dimension, self.num_features].
"""
# Noise is broadcast, through the moving average coefficients, to
# un-observed parts of the latent state.
ma_coefs_padded = array_ops.reshape(
array_ops.pad(self.ma_coefs,
[[self.state_num_blocks - 1 - self.ma_order, 0], [0, 0],
[0, 0]]),
[(self.state_num_blocks - 1) * self.num_features, self.num_features],
name="noise_transform")
# Deterministically apply noise to the oldest component.
return array_ops.concat(
[ma_coefs_padded,
linalg_ops.eye(self.num_features, dtype=self.dtype)],
axis=0)
def get_observation_model(self, times):
"""Construct observation model matrix from VARMA parameters.
Args:
times: A [batch size] vector indicating the times observation models are
requested for. Unused.
Returns:
the observation model matrix. It has shape
[self.num_features, self.state_dimension].
"""
del times # StateSpaceModel will broadcast along the batch dimension
if self.ar_order > self.ma_order or self.state_num_blocks < 2:
return array_ops.pad(
linalg_ops.eye(self.num_features, dtype=self.dtype),
[[0, 0], [0, self.num_features * (self.state_num_blocks - 1)]],
name="observation_model")
else:
# Add a second observed component which "catches" the accumulated moving
# average errors as they reach the end of the state. If ar_order >
# ma_order, this is unnecessary, since accumulated errors cycle naturally.
return array_ops.concat(
[
array_ops.pad(
linalg_ops.eye(self.num_features, dtype=self.dtype),
[[0, 0], [0,
self.num_features * (self.state_num_blocks - 2)]]),
linalg_ops.eye(self.num_features, dtype=self.dtype)
],
axis=1,
name="observation_model")
def get_state_transition_noise_covariance(
self, minimum_initial_variance=1e-5):
# Most state space models use only an explicit observation noise term to
# model deviations from expectations, and so a low initial transition noise
# parameter is helpful there. Since deviations from expectations are also
# modeled as transition noise in VARMA, we set its initial value based on a
# slight over-estimate of the empirical observation noise.
if self._input_statistics is not None:
feature_variance = self._scale_variance(
self._input_statistics.series_start_moments.variance)
initial_transition_noise_scale = math_ops.log(
math_ops.maximum(
math_ops.reduce_mean(feature_variance), minimum_initial_variance))
else:
initial_transition_noise_scale = 0.
state_noise_transform = ops.convert_to_tensor(
self.get_noise_transform(), dtype=self.dtype)
state_noise_dimension = tensor_shape.dimension_value(
state_noise_transform.shape[1])
return math_utils.variable_covariance_matrix(
state_noise_dimension, "state_transition_noise",
dtype=self.dtype,
initial_overall_scale_log=initial_transition_noise_scale)
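# Illustrative sketch (not part of the library): the observation model shapes
# produced by `get_observation_model` above, restated with NumPy. It observes
# the first block of the state, plus the last block when ma_order >= ar_order.
# `_varma_observation_reference` is a hypothetical name.
def _varma_observation_reference(num_features, ar_order, ma_order):
  import numpy  # Local import; this sketch is self-contained.
  num_blocks = max(ar_order, ma_order + 1)
  observation_model = numpy.zeros([num_features, num_blocks * num_features])
  observation_model[:, :num_features] = numpy.eye(num_features)
  if ar_order <= ma_order and num_blocks >= 2:
    # Also observe the final block, which accumulates moving average errors.
    observation_model[:, -num_features:] = numpy.eye(num_features)
  return observation_model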
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/state_space_models/varma.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the structural state space ensembles."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.contrib import layers
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.timeseries.python.timeseries import estimators
from tensorflow.contrib.timeseries.python.timeseries import input_pipeline
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import TrainEvalFeatures
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import state_space_model
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import structural_ensemble
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
class StructuralEnsembleEstimatorTests(test.TestCase):
def simple_data(self, sample_every, dtype, period, num_samples, num_features):
time = sample_every * numpy.arange(num_samples)
noise = numpy.random.normal(
scale=0.01, size=[num_samples, num_features])
values = noise + numpy.sin(
numpy.arange(num_features)[None, ...]
+ time[..., None] / float(period) * 2.0 * numpy.pi).astype(
dtype.as_numpy_dtype)
return {TrainEvalFeatures.TIMES: numpy.reshape(time, [1, -1]),
TrainEvalFeatures.VALUES: numpy.reshape(
values, [1, -1, num_features])}
def dry_run_train_helper(
self, sample_every, period, num_samples, model_type, model_args,
num_features=1):
numpy.random.seed(1)
dtype = dtypes.float32
features = self.simple_data(
sample_every, dtype=dtype, period=period, num_samples=num_samples,
num_features=num_features)
model = model_type(
configuration=(
state_space_model.StateSpaceModelConfiguration(
num_features=num_features,
dtype=dtype,
covariance_prior_fn=lambda _: 0.)),
**model_args)
class _RunConfig(estimator_lib.RunConfig):
@property
def tf_random_seed(self):
return 4
estimator = estimators.StateSpaceRegressor(model, config=_RunConfig())
train_input_fn = input_pipeline.RandomWindowInputFn(
input_pipeline.NumpyReader(features), num_threads=1, shuffle_seed=1,
batch_size=16, window_size=16)
eval_input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader(features))
estimator.train(input_fn=train_input_fn, max_steps=1)
first_evaluation = estimator.evaluate(input_fn=eval_input_fn, steps=1)
estimator.train(input_fn=train_input_fn, max_steps=3)
second_evaluation = estimator.evaluate(input_fn=eval_input_fn, steps=1)
self.assertLess(second_evaluation["loss"], first_evaluation["loss"])
def test_structural_multivariate(self):
self.dry_run_train_helper(
sample_every=3,
period=5,
num_samples=100,
num_features=3,
model_type=structural_ensemble.StructuralEnsemble,
model_args={
"periodicities": 2,
"moving_average_order": 2,
"autoregressive_order": 1
})
def test_exogenous_input(self):
"""Test that no errors are raised when using exogenous features."""
dtype = dtypes.float64
times = [1, 2, 3, 4, 5, 6]
values = [[0.01], [5.10], [5.21], [0.30], [5.41], [0.50]]
feature_a = [["off"], ["on"], ["on"], ["off"], ["on"], ["off"]]
sparse_column_a = feature_column.sparse_column_with_keys(
column_name="feature_a", keys=["on", "off"])
one_hot_a = layers.one_hot_column(sparse_id_column=sparse_column_a)
regressor = estimators.StructuralEnsembleRegressor(
periodicities=[],
num_features=1,
moving_average_order=0,
exogenous_feature_columns=[one_hot_a],
dtype=dtype)
features = {TrainEvalFeatures.TIMES: times,
TrainEvalFeatures.VALUES: values,
"feature_a": feature_a}
train_input_fn = input_pipeline.RandomWindowInputFn(
input_pipeline.NumpyReader(features),
window_size=6, batch_size=1)
regressor.train(input_fn=train_input_fn, steps=1)
eval_input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader(features))
evaluation = regressor.evaluate(input_fn=eval_input_fn, steps=1)
predict_input_fn = input_pipeline.predict_continuation_input_fn(
evaluation, times=[[7, 8, 9]],
exogenous_features={"feature_a": [[["on"], ["off"], ["on"]]]})
regressor.predict(input_fn=predict_input_fn)
def test_no_periodicity(self):
"""Test that no errors are raised when periodicites is None."""
dtype = dtypes.float64
times = [1, 2, 3, 4, 5, 6]
values = [[0.01], [5.10], [5.21], [0.30], [5.41], [0.50]]
regressor = estimators.StructuralEnsembleRegressor(
periodicities=None,
num_features=1,
moving_average_order=0,
dtype=dtype)
features = {TrainEvalFeatures.TIMES: times,
TrainEvalFeatures.VALUES: values}
train_input_fn = input_pipeline.RandomWindowInputFn(
input_pipeline.NumpyReader(features),
window_size=6, batch_size=1)
regressor.train(input_fn=train_input_fn, steps=1)
eval_input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader(features))
evaluation = regressor.evaluate(input_fn=eval_input_fn, steps=1)
predict_input_fn = input_pipeline.predict_continuation_input_fn(
evaluation, times=[[7, 8, 9]])
regressor.predict(input_fn=predict_input_fn)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/state_space_models/structural_ensemble_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstract base for state space models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import numpy
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.contrib.timeseries.python.timeseries import model
from tensorflow.contrib.timeseries.python.timeseries import model_utils
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import TrainEvalFeatures
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import kalman_filter
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
class StateSpaceModelConfiguration(
collections.namedtuple(
typename="StateSpaceModelConfiguration",
field_names=[
"num_features", "use_observation_noise", "dtype",
"covariance_prior_fn", "bayesian_prior_weighting",
"filtering_postprocessor", "trainable_start_state",
"exogenous_noise_increases", "exogenous_noise_decreases",
"exogenous_feature_columns", "exogenous_update_condition",
"filtering_maximum_posterior_variance_ratio",
"filtering_minimum_posterior_variance",
"transition_covariance_initial_log_scale_bias",
"static_unrolling_window_size_threshold"])):
"""Configuration options for StateSpaceModels."""
def __new__(
cls,
num_features=1,
use_observation_noise=True,
dtype=dtypes.float32,
covariance_prior_fn=math_utils.log_noninformative_covariance_prior,
bayesian_prior_weighting=True,
filtering_postprocessor=None,
trainable_start_state=False,
exogenous_noise_increases=True,
exogenous_noise_decreases=False,
exogenous_feature_columns=None,
exogenous_update_condition=None,
filtering_maximum_posterior_variance_ratio=1e6,
filtering_minimum_posterior_variance=0.,
transition_covariance_initial_log_scale_bias=-5.,
static_unrolling_window_size_threshold=None):
"""Configuration options for StateSpaceModels.
Args:
num_features: Output dimension for model
use_observation_noise: If true, observations are modeled as noisy
functions of the current state. If false, observations are a
deterministic function of the current state. Only applicable to the
top-level model in an ensemble. Consider also changing the
transition_covariance_initial_log_scale_bias when disabling observation
noise, as its default setting assumes that observation noise is part of
the model.
dtype: The float dtype to use when defining the model.
covariance_prior_fn: A function mapping from a covariance matrix to a
scalar value (e.g. log likelihood) which can be summed across
matrices. Defaults to an independent Jeffreys prior on the diagonal
elements (regularizing as log(1. / variance)). To use a flat prior
(i.e. no regularization), set to `lambda _: 0.`. The default is a
relatively uninformative prior on state transition and observation
noise, which encourages low-noise solutions that provide confident
predictions when possible. Without regularization, transition noise
tends to remain high, and multi-step predictions are under-confident.
bayesian_prior_weighting: If True, weights the prior (covariance_prior_fn)
based on an estimate of the full dataset size. If False, weights it
based on the mini-batch window size, which (while statistically
improper) can lead to more desirable low-noise solutions in cases
where the full dataset is large enough to overwhelm the prior.
filtering_postprocessor: A FilteringStepPostprocessor object to use,
useful for ignoring anomalies in training data.
trainable_start_state: If True, start state may depend on trainable
Variables. If False, it will not.
exogenous_noise_increases: If True, exogenous regressors can add to model
state, increasing uncertainty. If both this parameter and
exogenous_noise_decreases are False, exogenous regressors are ignored.
exogenous_noise_decreases: If True, exogenous regressors can "set" model
state, decreasing uncertainty. If both this parameter and
exogenous_noise_increases are False, exogenous regressors are ignored.
exogenous_feature_columns: A list of `tf.feature_column`s (for example
`tf.feature_column.embedding_column`) corresponding to exogenous
features which provide extra information to the model but are not part
of the series to be predicted. Passed to
`tf.compat.v1.feature_column.input_layer`.
exogenous_update_condition: A function taking two Tensor arguments `times`
(shape [batch size]) and `features` (a dictionary mapping exogenous
feature keys to Tensors with shapes [batch size, ...]) and returning a
boolean Tensor with shape [batch size] indicating whether state should
be updated using exogenous features for each part of the batch. Where
it is False, no exogenous update is performed. If None (default),
exogenous updates are always performed. Useful for avoiding "leaky"
frequent exogenous updates when sparse updates are desired. Called
only during graph construction.
filtering_maximum_posterior_variance_ratio: The maximum allowed ratio of
two diagonal entries in a state covariance matrix just prior to
filtering. Lower values mean that filtering will be more numerically
stable, at the cost of artificially increasing estimated uncertainty
in some cases. This parameter can be important when learning a
transition matrix.
filtering_minimum_posterior_variance: The minimum diagonal value in a
state covariance matrix just prior to filtering, preventing numerical
instability due to deterministic beliefs (sometimes an issue when
learning transition matrices). This value should be set several orders
of magnitude below any expected minimum state uncertainty.
transition_covariance_initial_log_scale_bias: Controls the initial
tradeoff between the transition noise covariance matrix and the
observation noise covariance matrix, on a log scale (the elements of
the transition noise covariance matrix are proportional to `e^{X +
transition_covariance_initial_log_scale_bias}` where `X` is learned
and may depend on input statistics, observation noise covariance is
proportional to `e^{Y -
transition_covariance_initial_log_scale_bias}`). For models *with*
observation noise, -5 is a reasonable value. Models which do not use
observation noise, and are not part of an ensemble which does use
observation noise, should have this set to 0 or more to avoid
numerical issues due to filtering with too little noise.
static_unrolling_window_size_threshold: Only relevant for the top-level
StateSpaceModel in an ensemble; enables switching between static and
dynamic looping (if not None, default, meaning that no static
unrolling is performed) based on the window size (windows with this
size and smaller will have their graphs unrolled statically). See the
SequentialTimeSeriesModel constructor for details.
Returns:
A StateSpaceModelConfiguration object.
"""
if exogenous_feature_columns is None:
exogenous_feature_columns = []
return super(StateSpaceModelConfiguration, cls).__new__(
cls, num_features, use_observation_noise, dtype,
covariance_prior_fn, bayesian_prior_weighting,
filtering_postprocessor, trainable_start_state,
exogenous_noise_increases, exogenous_noise_decreases,
exogenous_feature_columns, exogenous_update_condition,
filtering_maximum_posterior_variance_ratio,
filtering_minimum_posterior_variance,
transition_covariance_initial_log_scale_bias,
static_unrolling_window_size_threshold)
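# Illustrative sketch (not part of the library): one way to fill in the
# options documented above. The values are arbitrary examples and the
# `_example_configuration` name is hypothetical.
def _example_configuration():
  """Returns a sample configuration with a flat covariance prior."""
  return StateSpaceModelConfiguration(
      num_features=2,
      dtype=dtypes.float64,
      covariance_prior_fn=lambda _: 0.,  # Flat prior, i.e. no regularization.
      exogenous_feature_columns=[])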
class StateSpaceModel(model.SequentialTimeSeriesModel):
"""Base class for linear state space models.
Sub-classes can specify the model to be learned by overriding
get_state_transition, get_noise_transform, and get_observation_model.
See kalman_filter.py for a detailed description of the class of models covered
by StateSpaceModel.
Briefly, state space models are defined by a state transition equation:
state[t] = StateTransition * state[t-1] + NoiseTransform * StateNoise[t]
+ ExogenousNoiseIncreasing[t]
StateNoise[t] ~ Gaussian(0, StateNoiseCovariance)
ExogenousNoiseIncreasing[t] ~ Gaussian(ExogenousNoiseIncreasingMean[t],
ExogenousNoiseIncreasingCovariance[t])
And an observation model:
observation[t] = ObservationModel * state[t] + ObservationNoise[t]
ObservationNoise[t] ~ Gaussian(0, ObservationNoiseCovariance)
Additionally, exogenous regressors can act as observations, decreasing
uncertainty:
ExogenousNoiseDecreasingObservation[t] ~ Gaussian(
ExogenousNoiseDecreasingMean[t], ExogenousNoiseDecreasingCovariance[t])
Attributes:
kalman_filter: If initialize_graph has been called, the initialized
KalmanFilter to use for inference. None otherwise.
prior_state_mean: If initialize_graph has been called, a
Variable-parameterized Tensor with shape [state dimension];
the initial prior mean for one or more time series. None otherwise.
prior_state_var: If initialize_graph has been called, a
Variable-parameterized Tensor with shape [state dimension x state
dimension]; the initial prior covariance. None otherwise.
state_transition_noise_covariance: If initialize_graph has been called, a
Variable-parameterized Tensor with shape [state noise dimension x state
noise dimension] indicating the amount of noise added at each
transition.
"""
def __init__(self, configuration):
"""Initialize a state space model.
Args:
configuration: A StateSpaceModelConfiguration object.
"""
self._configuration = configuration
if configuration.filtering_postprocessor is not None:
filtering_postprocessor_names = (
configuration.filtering_postprocessor.output_names)
else:
filtering_postprocessor_names = []
super(StateSpaceModel, self).__init__(
train_output_names=(["mean", "covariance", "log_likelihood"]
+ filtering_postprocessor_names),
predict_output_names=["mean", "covariance"],
num_features=configuration.num_features,
normalize_features=True,
dtype=configuration.dtype,
exogenous_feature_columns=configuration.exogenous_feature_columns,
exogenous_update_condition=configuration.exogenous_update_condition,
static_unrolling_window_size_threshold=
configuration.static_unrolling_window_size_threshold)
self._kalman_filter = None
self.prior_state_mean = None
self.prior_state_var = None
self.state_transition_noise_covariance = None
self._total_observation_count = None
self._observation_noise_covariance = None
# Capture the current variable scope and use it to define all model
# variables. Especially useful for ensembles, where variables may be defined
# for every component model in one function call, which would otherwise
# prevent the user from separating variables from different models into
# different scopes.
self._variable_scope = variable_scope.get_variable_scope()
def transition_power_noise_accumulator(self, num_steps):
r"""Sum a transitioned covariance matrix over a number of steps.
Computes
\sum_{i=0}^{num_steps - 1} (
state_transition^i
* state_transition_noise_covariance
* (state_transition^i)^T)
If special cases are available, overriding this function can lead to more
efficient inference.
Args:
num_steps: A [...] shape integer Tensor with numbers of steps to compute
power sums for.
Returns:
The computed power sum, with shape [..., state dimension, state
dimension].
"""
# TODO(allenl): This general case should use cumsum if transition_to_powers
# can be computed in constant time (important for correlated ensembles,
# where transition_power_noise_accumulator special cases cannot be
# aggregated from member models).
noise_transform = ops.convert_to_tensor(self.get_noise_transform(),
self.dtype)
noise_transformed = math_ops.matmul(
math_ops.matmul(noise_transform,
self.state_transition_noise_covariance),
noise_transform,
transpose_b=True)
noise_additions = math_utils.power_sums_tensor(
math_ops.reduce_max(num_steps) + 1,
ops.convert_to_tensor(self.get_state_transition(), dtype=self.dtype),
noise_transformed)
return array_ops.gather(noise_additions, indices=num_steps)
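# Illustrative sketch, not part of the library: the quantity this method
# caches, written out directly in NumPy for a single gap size. The library
# computes the same sums in batch via math_utils.power_sums_tensor; the names
# below are local to this example.
import numpy as np

def _power_noise_sum(transition, noise_transform, state_noise_cov, num_steps):
  """Return sum_{i=0}^{num_steps - 1} A^i (R Q R^T) (A^i)^T."""
  transformed_noise = noise_transform @ state_noise_cov @ noise_transform.T
  total = np.zeros_like(transformed_noise)
  power = np.eye(transition.shape[0])  # A^0
  for _ in range(num_steps):
    total += power @ transformed_noise @ power.T
    power = transition @ power  # Advance to the next power of A.
  return total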
def transition_to_powers(self, powers):
"""Raise the transition matrix to a batch of powers.
Computes state_transition^powers. If special cases are available, overriding
this function can lead to more efficient inference.
Args:
powers: A [...] shape integer Tensor with powers to raise the transition
matrix to.
Returns:
The computed matrix powers, with shape [..., state dimension, state
dimension].
"""
return math_utils.matrix_to_powers(
ops.convert_to_tensor(self.get_state_transition(), dtype=self.dtype),
powers)
def _window_initializer(self, times, state):
"""Prepare to impute across the gaps in a window."""
_, _, priors_from_time = state
times = ops.convert_to_tensor(times)
priors_from_time = ops.convert_to_tensor(priors_from_time)
intra_batch_gaps = array_ops.reshape(times[:, 1:] - times[:, :-1], [-1])
# Ignore negative starting gaps, since there will be transient start times
# as input statistics are computed.
starting_gaps = math_ops.maximum(times[:, 0] - priors_from_time, 0)
# Pre-define transition matrices raised to powers (and their sums) for every
# gap in this window. This avoids duplicate computation (for example many
# steps will use the transition matrix raised to the first power) and
# batches the computation rather than doing it inside the per-step loop.
unique_gaps, _ = array_ops.unique(
array_ops.concat([intra_batch_gaps, starting_gaps], axis=0))
self._window_power_sums = self.transition_power_noise_accumulator(
unique_gaps)
self._window_transition_powers = self.transition_to_powers(unique_gaps)
self._window_gap_sizes = unique_gaps
def _lookup_window_caches(self, caches, indices):
_, window_power_ids = array_ops.unique(
array_ops.concat(
[
self._window_gap_sizes, math_ops.cast(
indices, self._window_gap_sizes.dtype)
],
axis=0))
all_gathered_indices = []
for cache in caches:
gathered_indices = array_ops.gather(
cache, window_power_ids[-array_ops.shape(indices)[0]:])
gathered_indices.set_shape(indices.get_shape().concatenate(
gathered_indices.get_shape()[-2:]))
all_gathered_indices.append(gathered_indices)
return all_gathered_indices
def _cached_transition_powers_and_sums(self, num_steps):
return self._lookup_window_caches(
caches=[self._window_transition_powers, self._window_power_sums],
indices=num_steps)
def _imputation_step(self, current_times, state):
"""Add state transition noise to catch `state` up to `current_times`.
State space models are inherently sequential, so we need to "predict
through" any missing time steps to catch up each element of the batch to its
next observation/prediction time.
Args:
current_times: A [batch size] Tensor of times to impute up to, not
inclusive.
state: A tuple of (mean, covariance, previous_times) having shapes
mean; [batch size x state dimension]
covariance; [batch size x state dimension x state dimension]
previous_times; [batch size]
Returns:
Imputed model state corresponding to the `state` argument.
"""
estimated_state, estimated_state_var, previous_times = state
# Ignore negative imputation intervals due to transient start time
# estimates.
catchup_times = math_ops.maximum(current_times - previous_times, 0)
transition_matrices, transition_noise_sums = ( # pylint: disable=unbalanced-tuple-unpacking
self._cached_transition_powers_and_sums(catchup_times))
estimated_state = self._kalman_filter.predict_state_mean(
estimated_state, transition_matrices)
estimated_state_var = self._kalman_filter.predict_state_var(
estimated_state_var, transition_matrices, transition_noise_sums)
return (estimated_state, estimated_state_var,
previous_times + catchup_times)
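# Illustrative sketch, not part of the library: the effect of imputation on a
# single batch element in NumPy terms. `transition_power` stands for A^k and
# `noise_sum` for the k-step accumulated transition noise; both names are
# assumptions made only for this example.
def _impute_single(mean, covariance, transition_power, noise_sum):
  """Advance a Gaussian state estimate across a gap of k missing steps."""
  imputed_mean = transition_power @ mean
  imputed_covariance = (
      transition_power @ covariance @ transition_power.T + noise_sum)
  return imputed_mean, imputed_covariance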
def _filtering_step(self, current_times, current_values, state, predictions):
"""Compute posteriors and accumulate one-step-ahead predictions.
Args:
current_times: A [batch size] Tensor for times for each observation.
current_values: A [batch size] Tensor of values for each observation.
state: A tuple of (mean, covariance, previous_times) having shapes
mean; [batch size x state dimension]
covariance; [batch size x state dimension x state dimension]
previous_times; [batch size]
predictions: A dictionary containing mean and covariance Tensors, the
output of _prediction_step.
Returns:
A tuple of (posteriors, outputs):
posteriors: Model state updated to take `current_values` into account.
outputs: The `predictions` dictionary updated to include "loss" and
"log_likelihood" entries (loss simply being negative log
likelihood).
"""
estimated_state, estimated_state_covariance, previous_times = state
observation_model = self.get_broadcasted_observation_model(current_times)
imputed_to_current_step_assert = control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(current_times, previous_times)),
["Attempted to perform filtering without imputation/prediction"])
with ops.control_dependencies([imputed_to_current_step_assert]):
estimated_state_covariance = math_utils.clip_covariance(
estimated_state_covariance,
self._configuration.filtering_maximum_posterior_variance_ratio,
self._configuration.filtering_minimum_posterior_variance)
(filtered_state, filtered_state_covariance,
log_prob) = self._kalman_filter.do_filter(
estimated_state=estimated_state,
estimated_state_covariance=estimated_state_covariance,
predicted_observation=predictions["mean"],
predicted_observation_covariance=predictions["covariance"],
observation=current_values,
observation_model=observation_model,
observation_noise=self._observation_noise_covariance)
filtered_state = (filtered_state, filtered_state_covariance, current_times)
log_prob.set_shape(current_times.get_shape())
predictions["loss"] = -log_prob
predictions["log_likelihood"] = log_prob
if self._configuration.filtering_postprocessor is not None:
return self._configuration.filtering_postprocessor.process_filtering_step(
current_times=current_times,
current_values=current_values,
predicted_state=state,
filtered_state=filtered_state,
outputs=predictions)
return (filtered_state, predictions)
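# Illustrative sketch, not part of the library: the core of the Kalman update
# that do_filter performs (omitting the log-likelihood computation), written
# for one batch element in NumPy. The names below are local to this example.
import numpy as np

def _kalman_update(prior_mean, prior_cov, observation, observation_model,
                   observation_noise_cov):
  """Condition a Gaussian state estimate on a single observation."""
  predicted_obs = observation_model @ prior_mean
  predicted_obs_cov = (
      observation_model @ prior_cov @ observation_model.T
      + observation_noise_cov)
  kalman_gain = prior_cov @ observation_model.T @ np.linalg.inv(
      predicted_obs_cov)
  posterior_mean = prior_mean + kalman_gain @ (observation - predicted_obs)
  posterior_cov = prior_cov - kalman_gain @ observation_model @ prior_cov
  return posterior_mean, posterior_cov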
def _scale_back_predictions(self, predictions):
"""Return a window of predictions to input scale."""
predictions["mean"] = self._scale_back_data(predictions["mean"])
predictions["covariance"] = self._scale_back_variance(
predictions["covariance"])
return predictions
def _prediction_step(self, current_times, state):
"""Make a prediction based on `state`.
Computes predictions based on the current `state`, checking that it has
already been updated (in `_imputation_step`) to `current_times`.
Args:
current_times: A [batch size] Tensor for times to make predictions for.
state: A tuple of (mean, covariance, previous_times) having shapes
mean; [batch size x state dimension]
covariance; [batch size x state dimension x state dimension]
previous_times; [batch size]
Returns:
A tuple of (updated state, predictions):
updated state: Model state with added transition noise.
predictions: A dictionary with "mean" and "covariance", having shapes
"mean": [batch size x num features]
"covariance: [batch size x num features x num features]
"""
estimated_state, estimated_state_var, previous_times = state
advanced_to_current_assert = control_flow_ops.Assert(
math_ops.reduce_all(math_ops.less_equal(current_times, previous_times)),
["Attempted to predict without imputation"])
with ops.control_dependencies([advanced_to_current_assert]):
observation_model = self.get_broadcasted_observation_model(current_times)
predicted_obs, predicted_obs_var = (
self._kalman_filter.observed_from_state(
state_mean=estimated_state,
state_var=estimated_state_var,
observation_model=observation_model,
observation_noise=self._observation_noise_covariance))
predicted_obs_var.set_shape(
ops.convert_to_tensor(current_times).get_shape()
.concatenate([self.num_features, self.num_features]))
predicted_obs.set_shape(current_times.get_shape().concatenate(
(self.num_features,)))
predicted_obs_var.set_shape(current_times.get_shape().concatenate(
(self.num_features, self.num_features)))
# Not scaled back to input-scale, since this also feeds into the
# loss. Instead, predictions are scaled back before being returned to the
# user in _scale_back_predictions.
predictions = {
"mean": predicted_obs,
"covariance": predicted_obs_var}
state = (estimated_state, estimated_state_var, current_times)
return (state, predictions)
def _exogenous_noise_decreasing(self, current_times, exogenous_values, state):
"""Update state with exogenous regressors, decreasing uncertainty.
Constructs a mean and covariance based on transformations of
`exogenous_values`, then performs Bayesian inference on the constructed
observation. This has the effect of lowering uncertainty.
This update refines or overrides previous inferences, useful for modeling
exogenous inputs which "set" state, e.g. we dumped boiling water on the
thermometer so we're pretty sure it's 100 degrees C.
Args:
current_times: A [batch size] Tensor of times for the exogenous values
being input.
exogenous_values: A [batch size x exogenous input dimension] Tensor of
exogenous values for each part of the batch.
state: A tuple of (mean, covariance, previous_times) having shapes
mean; [batch size x state dimension]
covariance; [batch size x state dimension x state dimension]
previous_times; [batch size]
Returns:
Updated state taking the exogenous regressors into account (with lower
uncertainty than the input state).
"""
estimated_state, estimated_state_covariance, previous_times = state
state_transition = ops.convert_to_tensor(
self.get_state_transition(), dtype=self.dtype)
state_dimension = tensor_shape.dimension_value(state_transition.shape[0])
# Learning the observation model would be redundant since we transform
# `exogenous_values` to the state space via a linear transformation anyway.
observation_model = linalg_ops.eye(
state_dimension,
batch_shape=array_ops.shape(exogenous_values)[:-1],
dtype=self.dtype)
with variable_scope.variable_scope("exogenous_noise_decreasing_covariance"):
observation_noise = math_utils.transform_to_covariance_matrices(
exogenous_values, state_dimension)
with variable_scope.variable_scope(
"exogenous_noise_decreasing_observation"):
observation = layers.fully_connected(
exogenous_values, state_dimension, activation_fn=None)
# Pretend that we are making an observation with an observation model equal
# to the identity matrix (i.e. a direct observation of the latent state),
# with learned observation noise.
posterior_state, posterior_state_var = (
self._kalman_filter.posterior_from_prior_state(
prior_state=estimated_state,
prior_state_var=estimated_state_covariance,
observation=observation,
observation_model=observation_model,
predicted_observations=(
estimated_state,
# The predicted noise covariance is noise due to current state
# uncertainty plus noise learned based on the exogenous
# observation (a somewhat trivial call to
# self._kalman_filter.observed_from_state has been omitted).
observation_noise + estimated_state_covariance),
observation_noise=observation_noise))
return (posterior_state, posterior_state_var, previous_times)
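# Illustrative note, not part of the library: this decreasing-uncertainty
# update is an ordinary Kalman update in which the "observation" is a learned
# transform of the exogenous values and the observation model is the identity.
# Using the _kalman_update sketch shown after _filtering_step above (where
# learned_transform and learned_noise are placeholders for the fully_connected
# and transform_to_covariance_matrices layers):
#   posterior_mean, posterior_cov = _kalman_update(
#       prior_mean=state_mean,
#       prior_cov=state_covariance,
#       observation=learned_transform(exogenous_values),
#       observation_model=np.eye(state_dimension),
#       observation_noise_cov=learned_noise(exogenous_values))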
def _exogenous_noise_increasing(self, current_times, exogenous_values, state):
"""Update state with exogenous regressors, increasing uncertainty.
Adds to the state mean a linear transformation of `exogenous_values`, and
increases uncertainty by constructing a covariance matrix based on
`exogenous_values` and adding it to the state covariance.
This update is useful for modeling changes relative to the current state,
e.g. the furnace turned on, so the temperature will be increasing at an
additional 1 degree per minute with some uncertainty, and this uncertainty is
added to our current uncertainty in the per-minute change in temperature.
Args:
current_times: A [batch size] Tensor of times for the exogenous values
being input.
exogenous_values: A [batch size x exogenous input dimension] Tensor of
exogenous values for each part of the batch.
state: A tuple of (mean, covariance, previous_times) having shapes
mean; [batch size x state dimension]
covariance; [batch size x state dimension x state dimension]
previous_times; [batch size]
Returns:
Updated state taking the exogenous regressors into account (with higher
uncertainty than the input state).
"""
start_mean, start_covariance, previous_times = state
with variable_scope.variable_scope("exogenous_noise_increasing_mean"):
mean_addition = layers.fully_connected(
exogenous_values,
tensor_shape.dimension_value(start_mean.shape[1]), activation_fn=None)
state_dimension = tensor_shape.dimension_value(start_covariance.shape[1])
with variable_scope.variable_scope("exogenous_noise_increasing_covariance"):
covariance_addition = (
math_utils.transform_to_covariance_matrices(
exogenous_values, state_dimension))
return (start_mean + mean_addition,
start_covariance + covariance_addition,
previous_times)
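# Illustrative sketch, not part of the library: the effect of the
# uncertainty-increasing exogenous update on one batch element. `mean_weights`
# and `covariance_factor` stand in for the learned layers and are assumptions
# made only for this example.
import numpy as np

def _exogenous_increase(mean, covariance, exogenous_values, mean_weights,
                        covariance_factor):
  """Shift the state mean and inflate its covariance based on regressors."""
  mean_addition = mean_weights @ exogenous_values
  # Build a positive semi-definite covariance addition, loosely mirroring the
  # learned transform_to_covariance_matrices call in the method above.
  factor = covariance_factor @ exogenous_values
  covariance_addition = np.outer(factor, factor)
  return mean + mean_addition, covariance + covariance_addition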
def _exogenous_input_step(
self, current_times, current_exogenous_regressors, state):
"""Update state with exogenous regressors.
Allows both increases and decreases in uncertainty.
Args:
current_times: A [batch size] Tensor of times for the exogenous values
being input.
current_exogenous_regressors: A [batch size x exogenous input dimension]
Tensor of exogenous values for each part of the batch.
state: A tuple of (mean, covariance, previous_times) having shapes
mean; [batch size x state dimension]
covariance; [batch size x state dimension x state dimension]
previous_times; [batch size]
Returns:
Updated state taking the exogenous regressors into account.
"""
if self._configuration.exogenous_noise_decreases:
state = self._exogenous_noise_decreasing(
current_times, current_exogenous_regressors, state)
if self._configuration.exogenous_noise_increases:
state = self._exogenous_noise_increasing(
current_times, current_exogenous_regressors, state)
return state
def _loss_additions(self, times, values, mode):
"""Add regularization during training."""
if mode == estimator_lib.ModeKeys.TRAIN:
if (self._input_statistics is not None
and self._configuration.bayesian_prior_weighting):
normalization = 1. / math_ops.cast(
self._input_statistics.total_observation_count, self.dtype)
else:
# If there is no total observation count recorded, or if we are not
# doing Bayesian prior weighting, assume/pretend that the full
# dataset size is the window size.
normalization = 1. / math_ops.cast(
array_ops.shape(times)[1], self.dtype)
transition_contribution = ops.convert_to_tensor(
self._configuration.covariance_prior_fn(
self.state_transition_noise_covariance),
dtype=self.dtype)
if (self._configuration.use_observation_noise
and self._observation_noise_covariance is not None):
observation_contribution = ops.convert_to_tensor(
self._configuration.covariance_prior_fn(
self._observation_noise_covariance),
dtype=self.dtype)
regularization_sum = transition_contribution + observation_contribution
else:
regularization_sum = transition_contribution
return -normalization * regularization_sum
else:
return array_ops.zeros([], dtype=self.dtype)
def _variable_observation_transition_tradeoff_log(self):
"""Define a variable to trade off observation and transition noise."""
return variable_scope.get_variable(
name="observation_transition_tradeoff_log_scale",
initializer=constant_op.constant(
-self._configuration.transition_covariance_initial_log_scale_bias,
dtype=self.dtype),
dtype=self.dtype)
def _define_parameters(self, observation_transition_tradeoff_log=None):
"""Define extra model-specific parameters.
Models should wrap any variables defined here in the model's variable scope.
Args:
observation_transition_tradeoff_log: An ensemble-global parameter
controlling the tradeoff between observation noise and transition
noise. If its value is not None, component transition noise should scale
with e^-observation_transition_tradeoff_log.
"""
with variable_scope.variable_scope(self._variable_scope):
# A scalar which allows the optimizer to quickly shift from observation
# noise to transition noise (this value is subtracted from log transition
# noise and added to log observation noise).
if observation_transition_tradeoff_log is None:
self._observation_transition_tradeoff_log_scale = (
self._variable_observation_transition_tradeoff_log())
else:
self._observation_transition_tradeoff_log_scale = (
observation_transition_tradeoff_log)
self.state_transition_noise_covariance = (
self.get_state_transition_noise_covariance())
def _set_input_statistics(self, input_statistics=None):
super(StateSpaceModel, self).initialize_graph(
input_statistics=input_statistics)
def initialize_graph(self, input_statistics=None):
"""Define variables and ops relevant to the top-level model in an ensemble.
For generic model parameters, _define_parameters() is called recursively on
all members of an ensemble.
Args:
input_statistics: A math_utils.InputStatistics object containing input
statistics. If None, data-independent defaults are used, which may
result in longer or unstable training.
"""
self._set_input_statistics(input_statistics=input_statistics)
self._define_parameters()
with variable_scope.variable_scope(self._variable_scope):
self._observation_noise_covariance = ops.convert_to_tensor(
self.get_observation_noise_covariance(), dtype=self.dtype)
self._kalman_filter = kalman_filter.KalmanFilter(dtype=self.dtype)
(self.prior_state_mean,
self.prior_state_var) = self._make_priors()
def _make_priors(self):
"""Creates and returns model priors."""
prior_state_covariance = self.get_prior_covariance()
prior_state_mean = self.get_prior_mean()
return (prior_state_mean, prior_state_covariance)
def get_prior_covariance(self):
"""Constructs a variable prior covariance with data-based initialization.
Models should wrap any variables defined here in the model's variable scope.
Returns:
A two-dimensional [state dimension, state dimension] floating point Tensor
with a (positive definite) prior state covariance matrix.
"""
with variable_scope.variable_scope(self._variable_scope):
state_dimension = ops.convert_to_tensor(
self.get_state_transition()).get_shape().dims[0].value
if self._configuration.trainable_start_state:
base_covariance = math_utils.variable_covariance_matrix(
state_dimension, "prior_state_var",
dtype=self.dtype)
else:
return linalg_ops.eye(state_dimension, dtype=self.dtype)
if self._input_statistics is not None:
# Make sure initial latent value uncertainty is at least on the same
# scale as noise in the data.
covariance_multiplier = math_ops.reduce_max(
self._scale_variance(
self._input_statistics.series_start_moments.variance))
return base_covariance * gen_math_ops.maximum(
covariance_multiplier, 1.0)
else:
return base_covariance
def get_prior_mean(self):
"""Constructs a Variable-parameterized prior mean.
Models should wrap any variables defined here in the model's variable scope.
Returns:
A one-dimensional floating point Tensor with shape [state dimension]
indicating the prior mean.
"""
with variable_scope.variable_scope(self._variable_scope):
state_transition = ops.convert_to_tensor(
self.get_state_transition(), dtype=self.dtype)
state_dimension = state_transition.get_shape().dims[0].value
return variable_scope.get_variable(
name="prior_state_mean",
shape=[state_dimension],
dtype=self.dtype,
trainable=self._configuration.trainable_start_state)
# TODO(allenl): It would be nice if the generation were done with TensorFlow
# ops, and if the model parameters were somehow set instead of being passed
# around in a dictionary. Maybe unconditional generation should be through a
# special set of initializers?
def random_model_parameters(self, seed=None):
if self.num_features != 1:
raise NotImplementedError("Generation for multivariate state space models"
" is not currently implemented.")
if seed:
numpy.random.seed(seed)
state_dimension, noise_dimension = ops.convert_to_tensor(
self.get_noise_transform()).get_shape().as_list()
transition_var = 1.0 / numpy.random.gamma(shape=10., scale=10.,
size=[noise_dimension])
initial_state = numpy.random.normal(size=[state_dimension])
params_dict = {}
if self.prior_state_mean is not None:
params_dict[self.prior_state_mean] = initial_state
if self.state_transition_noise_covariance is not None:
params_dict[self.state_transition_noise_covariance] = numpy.diag(
transition_var)
if self.prior_state_var is not None:
params_dict[self.prior_state_var] = numpy.zeros(
[state_dimension, state_dimension])
if self._configuration.use_observation_noise:
observation_var = 1.0 / numpy.random.gamma(shape=4, scale=4)
params_dict[self._observation_noise_covariance] = [[observation_var]]
return params_dict
def generate(self, number_of_series, series_length,
model_parameters=None, seed=None, add_observation_noise=None):
if seed is not None:
numpy.random.seed(seed)
if self.num_features != 1:
raise NotImplementedError("Generation for multivariate state space models"
" is not currently implemented.")
if add_observation_noise is None:
add_observation_noise = self._configuration.use_observation_noise
if model_parameters is None:
model_parameters = {}
transitions = ops.convert_to_tensor(
self.get_state_transition(), dtype=self.dtype).eval(
feed_dict=model_parameters)
noise_transform = ops.convert_to_tensor(self.get_noise_transform()).eval(
feed_dict=model_parameters)
noise_dimension = noise_transform.shape[1]
get_passed_or_trained_value = model_utils.parameter_switch(model_parameters)
transition_var = numpy.diag(get_passed_or_trained_value(
self.state_transition_noise_covariance))
transition_std = numpy.sqrt(transition_var)
if add_observation_noise:
observation_var = get_passed_or_trained_value(
self._observation_noise_covariance)[0][0]
observation_std = numpy.sqrt(observation_var)
initial_state = get_passed_or_trained_value(self.prior_state_mean)
current_state = numpy.tile(numpy.expand_dims(initial_state, 0),
[number_of_series, 1])
observations = numpy.zeros([number_of_series, series_length])
observation_models = self.get_broadcasted_observation_model(
times=math_ops.range(series_length)).eval(feed_dict=model_parameters)
for timestep, observation_model in enumerate(observation_models):
current_state = numpy.dot(current_state, transitions.T)
current_state += numpy.dot(
numpy.random.normal(
loc=numpy.zeros([number_of_series, noise_dimension]),
scale=numpy.tile(numpy.expand_dims(transition_std, 0),
[number_of_series, 1])),
noise_transform.T)
observation_mean = numpy.dot(current_state, observation_model[0].T)
if add_observation_noise:
observations[:, timestep] = numpy.random.normal(loc=observation_mean,
scale=observation_std)
else:
observations[:, timestep] = observation_mean
observations = numpy.expand_dims(observations, -1)
times = numpy.tile(
numpy.expand_dims(numpy.arange(observations.shape[1]), 0),
[observations.shape[0], 1])
return {TrainEvalFeatures.TIMES: times,
TrainEvalFeatures.VALUES: observations}
@abc.abstractmethod
def get_state_transition(self):
"""Specifies the state transition model to use.
Returns:
A [state dimension x state dimension] Tensor specifying how states
transition from one timestep to the next.
"""
pass
@abc.abstractmethod
def get_noise_transform(self):
"""Specifies the noise transition model to use.
Returns:
A [state dimension x state noise dimension] Tensor specifying how noise
(generated with shape [state noise dimension]) affects the model's state.
"""
pass
@abc.abstractmethod
def get_observation_model(self, times):
"""Specifies the observation model to use.
Args:
times: A [batch dimension] int32 Tensor with times for each part of the
batch, on which the observation model can depend.
Returns:
This function, when overridden, has three possible return values:
- A [state dimension] Tensor with a static, univariate observation
model.
- A [self.num_features x state dimension] static, multivariate model.
- A [batch dimension x self.num_features x state dimension] observation
model, which may depend on `times`.
See get_broadcasted_observation_model for details of the broadcasting.
"""
pass
def get_broadcasted_observation_model(self, times):
"""Broadcast this model's observation model if necessary.
The model can define a univariate observation model which will be broadcast
over both self.num_features and the batch dimension of `times`.
The model can define a multi-variate observation model which does not depend
on `times`, and it will be broadcast over the batch dimension of `times`.
Finally, the model can define a multi-variate observation model with a batch
dimension, which will not be broadcast.
Args:
times: A [batch dimension] int32 Tensor with times for each part of the
batch, on which the observation model can depend.
Returns:
A [batch dimension x self.num_features x state dimension] Tensor
specifying the observation model to use for each time in `times` and each
feature.
"""
unbroadcasted_model = ops.convert_to_tensor(
self.get_observation_model(times), dtype=self.dtype)
unbroadcasted_shape = (unbroadcasted_model.get_shape()
.with_rank_at_least(1).with_rank_at_most(3))
if unbroadcasted_shape.ndims is None:
# Pass through fully undefined shapes, but make sure they're rank 3 at
# graph eval time
assert_op = control_flow_ops.Assert(
math_ops.equal(array_ops.rank(unbroadcasted_model), 3),
[array_ops.shape(unbroadcasted_model)])
with ops.control_dependencies([assert_op]):
return array_ops.identity(unbroadcasted_model)
if unbroadcasted_shape.ndims == 1:
# Unbroadcasted shape [state dimension]
broadcasted_model = array_ops.tile(
array_ops.reshape(tensor=unbroadcasted_model, shape=[1, 1, -1]),
[array_ops.shape(times)[0], self.num_features, 1])
elif unbroadcasted_shape.ndims == 2:
# Unbroadcasted shape [num features x state dimension]
broadcasted_model = array_ops.tile(
array_ops.expand_dims(unbroadcasted_model, axis=0),
[array_ops.shape(times)[0], 1, 1])
elif unbroadcasted_shape.ndims == 3:
broadcasted_model = unbroadcasted_model
broadcasted_model.get_shape().assert_has_rank(3)
return broadcasted_model
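# Illustrative sketch, not part of the library: the three broadcasting cases
# documented above, reproduced with NumPy for concreteness.
import numpy as np

def _broadcast_observation_model(model, batch_size, num_features):
  """Broadcast rank-1/2/3 observation models to [batch, features, state]."""
  model = np.asarray(model)
  if model.ndim == 1:      # [state dimension]
    return np.tile(model[None, None, :], [batch_size, num_features, 1])
  if model.ndim == 2:      # [num features x state dimension]
    return np.tile(model[None, :, :], [batch_size, 1, 1])
  assert model.ndim == 3   # already [batch x num features x state dimension]
  return model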
def get_state_transition_noise_covariance(
self, minimum_initial_variance=1e-5):
state_noise_transform = ops.convert_to_tensor(
self.get_noise_transform(), dtype=self.dtype)
state_noise_dimension = state_noise_transform.get_shape().dims[1].value
if self._input_statistics is not None:
feature_variance = self._scale_variance(
self._input_statistics.series_start_moments.variance)
initial_transition_noise_scale = math_ops.log(
gen_math_ops.maximum(
math_ops.reduce_mean(feature_variance) / math_ops.cast(
self._input_statistics.total_observation_count, self.dtype),
minimum_initial_variance))
else:
initial_transition_noise_scale = 0.
# Generally high transition noise is undesirable; we want to set it quite
# low to start so that we don't need too much training to get to good
# solutions (i.e. with confident predictions into the future if possible),
# but not so low that training can't yield a high transition noise if the
# data demands it.
initial_transition_noise_scale -= (
self._observation_transition_tradeoff_log_scale)
return math_utils.variable_covariance_matrix(
state_noise_dimension, "state_transition_noise",
dtype=self.dtype,
initial_overall_scale_log=initial_transition_noise_scale)
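# Illustrative sketch, not part of the library: the data-dependent
# initialization above with concrete, made-up input statistics.
import numpy as np
feature_variance = np.array([4.0, 1.0])   # assumed start-of-series variances
total_observation_count = 1000.
minimum_initial_variance = 1e-5
initial_transition_noise_scale = np.log(
    max(feature_variance.mean() / total_observation_count,
        minimum_initial_variance))
# log(2.5 / 1000) is roughly -6, so the transition noise starts small relative
# to the variance observed in the data, as the comment above recommends.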
def get_observation_noise_covariance(self, minimum_initial_variance=1e-5):
if self._configuration.use_observation_noise:
if self._input_statistics is not None:
# Get variance across the first few values in each batch for each
# feature, for an initial observation noise (over-)estimate.
feature_variance = self._scale_variance(
self._input_statistics.series_start_moments.variance)
else:
feature_variance = None
if feature_variance is not None:
feature_variance = gen_math_ops.maximum(feature_variance,
minimum_initial_variance)
return math_utils.variable_covariance_matrix(
size=self.num_features,
dtype=self.dtype,
name="observation_noise_covariance",
initial_diagonal_values=feature_variance,
initial_overall_scale_log=(
self._observation_transition_tradeoff_log_scale))
else:
return array_ops.zeros(
shape=[self.num_features, self.num_features],
name="observation_noise_covariance",
dtype=self.dtype)
def get_start_state(self):
"""Defines and returns a non-batched prior state and covariance."""
# TODO(allenl,vitalyk): Add an option for non-Gaussian priors once extended
# Kalman filtering is implemented (ideally any Distribution object).
if self._input_statistics is not None:
start_time = self._input_statistics.start_time
else:
start_time = array_ops.zeros([], dtype=dtypes.int64)
return (self.prior_state_mean,
self.prior_state_var,
start_time - 1)
def get_features_for_timesteps(self, timesteps):
"""Get features for a batch of timesteps. Default to no features."""
return array_ops.zeros([array_ops.shape(timesteps)[0], 0], dtype=self.dtype)
class StateSpaceEnsemble(StateSpaceModel):
"""Base class for combinations of state space models."""
def __init__(self, ensemble_members, configuration):
"""Initialize the ensemble by specifying its members.
Args:
ensemble_members: A list of StateSpaceModel objects which will be included
in this ensemble.
configuration: A StateSpaceModelConfiguration object.
"""
self._ensemble_members = ensemble_members
super(StateSpaceEnsemble, self).__init__(configuration=configuration)
def _set_input_statistics(self, input_statistics):
super(StateSpaceEnsemble, self)._set_input_statistics(input_statistics)
for member in self._ensemble_members:
member._set_input_statistics(input_statistics) # pylint: disable=protected-access
def _loss_additions(self, times, values, mode):
# Allow sub-models to regularize
return (super(StateSpaceEnsemble, self)._loss_additions(
times, values, mode) + math_ops.add_n([
member._loss_additions(times, values, mode) # pylint: disable=protected-access
for member in self._ensemble_members
]))
def _compute_blocked(self, member_fn, name):
with variable_scope.variable_scope(self._variable_scope):
return math_utils.block_diagonal(
[member_fn(member)
for member in self._ensemble_members],
dtype=self.dtype,
name=name)
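# Illustrative sketch, not part of the library: the block-diagonal stacking
# performed by math_utils.block_diagonal, shown for plain 2-D NumPy matrices.
import numpy as np

def _block_diagonal(matrices):
  """Place each matrix on the diagonal of a larger, otherwise-zero matrix."""
  total_rows = sum(m.shape[0] for m in matrices)
  total_cols = sum(m.shape[1] for m in matrices)
  combined = np.zeros((total_rows, total_cols))
  row, col = 0, 0
  for m in matrices:
    combined[row:row + m.shape[0], col:col + m.shape[1]] = m
    row += m.shape[0]
    col += m.shape[1]
  return combined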
def transition_to_powers(self, powers):
return self._compute_blocked(
member_fn=lambda member: member.transition_to_powers(powers),
name="ensemble_transition_to_powers")
def _define_parameters(self, observation_transition_tradeoff_log=None):
with variable_scope.variable_scope(self._variable_scope):
if observation_transition_tradeoff_log is None:
# Define the tradeoff parameter between observation and transition noise
# once for the whole ensemble, and pass it down to members.
observation_transition_tradeoff_log = (
self._variable_observation_transition_tradeoff_log())
for member in self._ensemble_members:
member._define_parameters(observation_transition_tradeoff_log=( # pylint: disable=protected-access
observation_transition_tradeoff_log))
super(StateSpaceEnsemble, self)._define_parameters(
observation_transition_tradeoff_log
=observation_transition_tradeoff_log)
def random_model_parameters(self, seed=None):
param_union = {}
for i, member in enumerate(self._ensemble_members):
member_params = member.random_model_parameters(
seed=seed + i if seed else None)
param_union.update(member_params)
param_union.update(
super(StateSpaceEnsemble, self).random_model_parameters(seed=seed))
return param_union
def get_prior_mean(self):
return array_ops.concat(
values=[member.get_prior_mean() for member in self._ensemble_members],
axis=0,
name="ensemble_prior_state_mean")
def get_state_transition(self):
return self._compute_blocked(
member_fn=
lambda member: member.get_state_transition(),
name="ensemble_state_transition")
def get_noise_transform(self):
return self._compute_blocked(
member_fn=
lambda member: member.get_noise_transform(),
name="ensemble_noise_transform")
def get_observation_model(self, times):
raise NotImplementedError("No un-broadcasted observation model defined for"
" ensembles.")
def get_broadcasted_observation_model(self, times):
"""Computes a combined observation model based on member models.
The effect is that predicted observations from each model are summed.
Args:
times: A [batch dimension] int32 Tensor with times for each part of the
batch, on which member observation models can depend.
Returns:
A [batch dimension x num features x combined state dimension] Tensor with
the combined observation model.
"""
member_observation_models = [
ops.convert_to_tensor(
member.get_broadcasted_observation_model(times), dtype=self.dtype)
for member in self._ensemble_members
]
return array_ops.concat(values=member_observation_models, axis=2)
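# Illustrative sketch, not part of the library: why concatenating member
# observation models along the state axis sums their predicted observations.
# The matrices and state values are arbitrary assumptions, and the batch
# dimension is dropped, so the concatenation axis is 1 rather than 2.
import numpy as np
observation_model_a = np.array([[1., 0.]])    # member A: [features x state_a]
observation_model_b = np.array([[0.5, 0.5]])  # member B: [features x state_b]
state_a = np.array([2., 3.])
state_b = np.array([4., 6.])
combined_model = np.concatenate(
    [observation_model_a, observation_model_b], axis=1)
combined_state = np.concatenate([state_a, state_b])
# The ensemble prediction equals the sum of the member predictions.
assert np.allclose(combined_model @ combined_state,
                   observation_model_a @ state_a
                   + observation_model_b @ state_b)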
class StateSpaceIndependentEnsemble(StateSpaceEnsemble):
"""Implements ensembles of independent state space models.
Useful for fitting multiple independent state space models together while
keeping their specifications decoupled. The "ensemble" is simply a state space
model with the observation models of its members concatenated, and the
transition matrices and noise transforms stacked in block-diagonal
matrices. This means that the dimensionality of the ensemble's state is the
sum of those of its components, which can lead to slow and memory-intensive
training and inference as the posterior (shape [state dimension x state
dimension]) gets large.
Each individual model j's state at time t is defined by:
state[t, j] = StateTransition[j] * state[t-1, j]
+ NoiseTransform[j] * StateNoise[t, j]
StateNoise[t, j] ~ Gaussian(0, StateNoiseCovariance[j])
and the ensemble observation model is:
observation[t] = Sum { ObservationModel[j] * state[t, j] }
+ ObservationNoise[t]
ObservationNoise[t] ~ Gaussian(0, ObservationNoiseCovariance)
"""
def transition_power_noise_accumulator(self, num_steps):
return self._compute_blocked(
member_fn=lambda m: m.transition_power_noise_accumulator(num_steps),
name="ensemble_power_noise_accumulator")
def get_prior_covariance(self):
"""Construct the ensemble prior covariance based on component models."""
return self._compute_blocked(
member_fn=
lambda member: member.get_prior_covariance(),
name="ensemble_prior_state_covariance")
def get_state_transition_noise_covariance(self):
"""Construct the ensemble transition noise covariance from components."""
return self._compute_blocked(
member_fn=
lambda member: member.state_transition_noise_covariance,
name="ensemble_state_transition_noise")
# TODO(allenl): It would be nice to have replicated feature models which are
# identical batched together to reduce the graph size.
# TODO(allenl): Support for sharing M independent models across N features, with
# N > M.
# TODO(allenl): Stack component prior covariances while allowing cross-model
# correlations to be learned (currently a full covariance prior is learned, but
# custom component model covariances are not used).
class StateSpaceCorrelatedFeaturesEnsemble(StateSpaceEnsemble):
"""An correlated ensemble where each model represents a feature.
Unlike `StateSpaceIndependentEnsemble`, a full state transition noise
covariance matrix is learned for this ensemble; the models are not assumed to
be independent. Rather than concatenating observation models (i.e. summing the
contributions of each model to each feature),
StateSpaceCorrelatedFeaturesEnsemble stacks observation models diagonally,
meaning that each model corresponds to one feature of the series.
Behaves like (and is) a single state space model where:
StateTransition = Diag(StateTransition[j] for models j)
ObservationModel = Diag(ObservationModel[j] for models j)
Note that each ObservationModel[j] is a [1 x S_j] matrix (S_j being the state
dimension of model j), i.e. a univariate model. The combined model is
multivariate, the number of features of the series being equal to the number
of component models in the ensemble.
"""
def __init__(self, ensemble_members, configuration):
"""Specify the ensemble's configuration and component models.
Args:
ensemble_members: A list of `StateSpaceModel` objects, with length equal
to `configuration.num_features`. Each of these models, which must be
univariate, corresponds to a single feature of the time series.
configuration: A StateSpaceModelConfiguration object.
Raises:
ValueError: If the length of `ensemble_members` does not equal the number
of features in the series, or any component is not univariate.
"""
if len(ensemble_members) != configuration.num_features:
raise ValueError(
"The number of members in a StateSpaceCorrelatedFeaturesEnsemble "
"must equal the number of features in the time series.")
for member in ensemble_members:
if member.num_features != 1:
raise ValueError(
"StateSpaceCorrelatedFeaturesEnsemble components must be "
"univariate.")
super(StateSpaceCorrelatedFeaturesEnsemble, self).__init__(
ensemble_members=ensemble_members, configuration=configuration)
def transition_power_noise_accumulator(self, num_steps):
"""Use a noise accumulator special case when possible."""
if len(self._ensemble_members) == 1:
# If this is a univariate series, we should use the special casing built
# into the single component model.
return self._ensemble_members[0].transition_power_noise_accumulator(
num_steps=num_steps)
# If we have multiple features, and therefore multiple models, we have
# introduced correlations which make noise accumulation more
# complicated. Here we fall back to the general case, since we can't just
# aggregate member special cases.
return super(StateSpaceCorrelatedFeaturesEnsemble,
self).transition_power_noise_accumulator(num_steps=num_steps)
def get_broadcasted_observation_model(self, times):
"""Stack observation models diagonally."""
def _member_observation_model(member):
return ops.convert_to_tensor(
member.get_broadcasted_observation_model(times), dtype=self.dtype)
return self._compute_blocked(member_fn=_member_observation_model,
name="feature_ensemble_observation_model")
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/state_space_models/state_space_model.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/state_space_models/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for filtering postprocessors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import filtering_postprocessor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
class FilteringStepPostprocessorTest(test.TestCase):
def test_gaussian_alternative(self):
for float_dtype in [dtypes.float32, dtypes.float64]:
detector = filtering_postprocessor.StateInterpolatingAnomalyDetector(
anomaly_log_likelihood=(filtering_postprocessor
.cauchy_alternative_to_gaussian),
responsibility_scaling=10.)
predicted_state = [
constant_op.constant(
[[40.], [20.]], dtype=float_dtype), constant_op.constant(
[3., 6.], dtype=float_dtype), constant_op.constant([-1, -2])
]
filtered_state = [
constant_op.constant(
[[80.], [180.]], dtype=float_dtype), constant_op.constant(
[1., 2.], dtype=float_dtype), constant_op.constant([-1, -2])
]
interpolated_state, updated_outputs = detector.process_filtering_step(
current_times=constant_op.constant([1, 2]),
current_values=constant_op.constant([[0.], [1.]], dtype=float_dtype),
predicted_state=predicted_state,
filtered_state=filtered_state,
outputs={
"mean":
constant_op.constant([[0.1], [10.]], dtype=float_dtype),
"covariance":
constant_op.constant([[[1.0]], [[1.0]]], dtype=float_dtype),
"log_likelihood":
constant_op.constant([-1., -40.], dtype=float_dtype)
})
# The first batch element is not anomalous, and so should use the inferred
# state. The second is anomalous, and should use the predicted state.
expected_state = [[[80.], [20.]],
[1., 6.],
[-1, -2]]
with self.cached_session():
for interpolated, expected in zip(interpolated_state, expected_state):
self.assertAllClose(expected, interpolated.eval())
self.assertGreater(0., updated_outputs["anomaly_score"][0].eval())
self.assertLess(0., updated_outputs["anomaly_score"][1].eval())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/state_space_models/filtering_postprocessor_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for periodic state space model components."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import periodic
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import state_space_model
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import test_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
class SpecialCaseTests(test.TestCase):
def test_cycle_transition_to_powers(self):
num_steps = 3
dtype = dtypes.float64
periodicity = 3
cycle = periodic.CycleStateSpaceModel(
periodicity=periodicity,
configuration=state_space_model.StateSpaceModelConfiguration(
dtype=dtype))
test_utils.transition_power_test_template(
test_case=self, model=cycle, num_steps=num_steps)
def test_resolution_cycle_transition_to_powers(self):
num_steps = 3
dtype = dtypes.float64
latent_values = 3
periodicity = latent_values - 1
cycle = periodic.ResolutionCycleModel(
num_latent_values=latent_values,
periodicity=periodicity,
configuration=state_space_model.StateSpaceModelConfiguration(
dtype=dtype))
test_utils.transition_power_test_template(
test_case=self, model=cycle, num_steps=num_steps)
def test_cycle_noise_accumulator(self):
num_steps = 3
dtype = dtypes.float64
periodicity = 3
cycle = periodic.CycleStateSpaceModel(
periodicity=periodicity,
configuration=state_space_model.StateSpaceModelConfiguration(
dtype=dtype))
test_utils.noise_accumulator_test_template(
test_case=self, model=cycle, num_steps=num_steps)
def test_resolution_cycle_noise_accumulator(self):
num_steps = 3
dtype = dtypes.float64
latent_values = 3
periodicity = latent_values + 0.1
cycle = periodic.ResolutionCycleModel(
num_latent_values=latent_values,
periodicity=periodicity,
configuration=state_space_model.StateSpaceModelConfiguration(
dtype=dtype))
test_utils.noise_accumulator_test_template(
test_case=self, model=cycle, num_steps=num_steps)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/state_space_models/periodic_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for level and trend state space model components."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import level_trend
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import state_space_model
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import test_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
class SpecialCaseTests(test.TestCase):
def test_adder_transition_to_powers(self):
num_steps = 3
dtype = dtypes.float64
adder = level_trend.AdderStateSpaceModel(
configuration=state_space_model.StateSpaceModelConfiguration(
dtype=dtype))
test_utils.transition_power_test_template(
test_case=self, model=adder, num_steps=num_steps)
def test_adder_noise_accumulator(self):
num_steps = 3
dtype = dtypes.float64
use_level_noise = True
adder = level_trend.AdderStateSpaceModel(
use_level_noise=use_level_noise,
configuration=state_space_model.StateSpaceModelConfiguration(
dtype=dtype))
test_utils.noise_accumulator_test_template(
test_case=self, model=adder, num_steps=num_steps)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/state_space_models/level_trend_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Kalman filtering."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import kalman_filter
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
# Two-dimensional state model with "slope" and "level" components.
STATE_TRANSITION = [
[1., 1.], # Add slope to level
[0., 1.] # Maintain slope
]
# Independent noise for each component
STATE_TRANSITION_NOISE = [[0.1, 0.0], [0.0, 0.2]]
OBSERVATION_MODEL = [[[0.5, 0.0], [0.0, 1.0]]]
OBSERVATION_NOISE = [[0.0001, 0.], [0., 0.0002]]
STATE_NOISE_TRANSFORM = [[1.0, 0.0], [0.0, 1.0]]
def _powers_and_sums_from_transition_matrix(
state_transition, state_transition_noise_covariance,
state_noise_transform, max_gap=1):
def _transition_matrix_powers(powers):
return math_utils.matrix_to_powers(state_transition, powers)
def _power_sums(num_steps):
power_sums_tensor = math_utils.power_sums_tensor(
max_gap + 1, state_transition,
math_ops.matmul(state_noise_transform,
math_ops.matmul(
state_transition_noise_covariance,
state_noise_transform,
adjoint_b=True)))
return array_ops.gather(power_sums_tensor, indices=num_steps)
return (_transition_matrix_powers, _power_sums)
class MultivariateTests(test.TestCase):
def _multivariate_symmetric_covariance_test_template(
self, dtype, simplified_posterior_variance_computation):
"""Check that errors aren't building up asymmetries in covariances."""
kf = kalman_filter.KalmanFilter(dtype=dtype)
observation_noise_covariance = constant_op.constant(
[[1., 0.5], [0.5, 1.]], dtype=dtype)
observation_model = constant_op.constant(
[[[1., 0., 0., 0.], [0., 0., 1., 0.]]], dtype=dtype)
state = array_ops.placeholder(shape=[1, 4], dtype=dtype)
state_var = array_ops.placeholder(shape=[1, 4, 4], dtype=dtype)
observation = array_ops.placeholder(shape=[1, 2], dtype=dtype)
transition_fn, power_sum_fn = _powers_and_sums_from_transition_matrix(
state_transition=constant_op.constant(
[[1., 1., 0., 0.], [0., 1., 0., 0.], [0., 0., 1., 1.],
[0., 0., 0., 1.]],
dtype=dtype),
state_noise_transform=linalg_ops.eye(4, dtype=dtype),
state_transition_noise_covariance=constant_op.constant(
[[1., 0., 0.5, 0.], [0., 1., 0., 0.5], [0.5, 0., 1., 0.],
[0., 0.5, 0., 1.]],
dtype=dtype))
pred_state = kf.predict_state_mean(
prior_state=state, transition_matrices=transition_fn([1]))
pred_state_var = kf.predict_state_var(
prior_state_var=state_var, transition_matrices=transition_fn([1]),
transition_noise_sums=power_sum_fn([1]))
observed_mean, observed_var = kf.observed_from_state(
state_mean=pred_state, state_var=pred_state_var,
observation_model=observation_model,
observation_noise=observation_noise_covariance)
post_state, post_state_var = kf.posterior_from_prior_state(
prior_state=pred_state, prior_state_var=pred_state_var,
observation=observation,
observation_model=observation_model,
predicted_observations=(observed_mean, observed_var),
observation_noise=observation_noise_covariance)
with self.cached_session() as session:
evaled_state = numpy.array([[1., 1., 1., 1.]])
evaled_state_var = numpy.eye(4)[None]
for i in range(500):
evaled_state, evaled_state_var, evaled_observed_var = session.run(
[post_state, post_state_var, observed_var],
feed_dict={state: evaled_state,
state_var: evaled_state_var,
observation: [[float(i), float(i)]]})
self.assertAllClose(evaled_observed_var[0],
evaled_observed_var[0].T)
self.assertAllClose(evaled_state_var[0],
evaled_state_var[0].T)
def test_multivariate_symmetric_covariance_float32(self):
self._multivariate_symmetric_covariance_test_template(
dtypes.float32, simplified_posterior_variance_computation=False)
def test_multivariate_symmetric_covariance_float64(self):
self._multivariate_symmetric_covariance_test_template(
dtypes.float64, simplified_posterior_variance_computation=True)
class KalmanFilterNonBatchTest(test.TestCase):
"""Single-batch KalmanFilter tests."""
def setUp(self):
"""The basic model defined above, with unit batches."""
self.kalman_filter = kalman_filter.KalmanFilter()
self.transition_fn, self.power_sum_fn = (
_powers_and_sums_from_transition_matrix(
state_transition=STATE_TRANSITION,
state_transition_noise_covariance=STATE_TRANSITION_NOISE,
state_noise_transform=STATE_NOISE_TRANSFORM,
max_gap=5))
def test_observed_from_state(self):
"""Compare observation mean and noise to hand-computed values."""
with self.cached_session():
state = constant_op.constant([[2., 1.]])
state_var = constant_op.constant([[[4., 0.], [0., 3.]]])
observed_mean, observed_var = self.kalman_filter.observed_from_state(
state, state_var,
observation_model=OBSERVATION_MODEL,
observation_noise=OBSERVATION_NOISE)
observed_mean_override, observed_var_override = (
self.kalman_filter.observed_from_state(
state, state_var,
observation_model=OBSERVATION_MODEL,
observation_noise=100 * constant_op.constant(
OBSERVATION_NOISE)[None]))
self.assertAllClose(numpy.array([[1., 1.]]),
observed_mean.eval())
self.assertAllClose(numpy.array([[1., 1.]]),
observed_mean_override.eval())
self.assertAllClose(numpy.array([[[1.0001, 0.], [0., 3.0002]]]),
observed_var.eval())
self.assertAllClose(numpy.array([[[1.01, 0.], [0., 3.02]]]),
observed_var_override.eval())
def _posterior_from_prior_state_test_template(
self, state, state_var, observation, observation_model, observation_noise,
expected_state, expected_state_var):
"""Test that repeated observations converge to the expected value."""
predicted_observations = self.kalman_filter.observed_from_state(
state, state_var, observation_model,
observation_noise=observation_noise)
state_update, state_var_update = (
self.kalman_filter.posterior_from_prior_state(
state, state_var, observation,
observation_model=observation_model,
predicted_observations=predicted_observations,
observation_noise=observation_noise))
with self.cached_session() as session:
evaled_state, evaled_state_var = session.run([state, state_var])
for _ in range(300):
evaled_state, evaled_state_var = session.run(
[state_update, state_var_update],
feed_dict={state: evaled_state, state_var: evaled_state_var})
self.assertAllClose(expected_state,
evaled_state,
atol=1e-5)
self.assertAllClose(
expected_state_var,
evaled_state_var,
atol=1e-5)
def test_posterior_from_prior_state_univariate(self):
self._posterior_from_prior_state_test_template(
state=constant_op.constant([[0.3]]),
state_var=constant_op.constant([[[1.]]]),
observation=constant_op.constant([[1.]]),
observation_model=[[[2.]]],
observation_noise=[[[0.01]]],
expected_state=numpy.array([[0.5]]),
expected_state_var=[[[0.]]])
def test_posterior_from_prior_state_univariate_unit_noise(self):
self._posterior_from_prior_state_test_template(
state=constant_op.constant([[0.3]]),
state_var=constant_op.constant([[[1e10]]]),
observation=constant_op.constant([[1.]]),
observation_model=[[[2.]]],
observation_noise=[[[1.0]]],
expected_state=numpy.array([[0.5]]),
expected_state_var=[[[1. / (300. * 2. ** 2)]]])
def test_posterior_from_prior_state_multivariate_2d(self):
self._posterior_from_prior_state_test_template(
state=constant_op.constant([[1.9, 1.]]),
state_var=constant_op.constant([[[1., 0.], [0., 2.]]]),
observation=constant_op.constant([[1., 1.]]),
observation_model=OBSERVATION_MODEL,
observation_noise=OBSERVATION_NOISE,
expected_state=numpy.array([[2., 1.]]),
expected_state_var=[[[0., 0.], [0., 0.]]])
def test_posterior_from_prior_state_multivariate_3d(self):
self._posterior_from_prior_state_test_template(
state=constant_op.constant([[1.9, 1., 5.]]),
state_var=constant_op.constant(
[[[200., 0., 1.], [0., 2000., 0.], [1., 0., 40000.]]]),
observation=constant_op.constant([[1., 1., 3.]]),
observation_model=constant_op.constant(
[[[0.5, 0., 0.],
[0., 10., 0.],
[0., 0., 100.]]]),
observation_noise=linalg_ops.eye(3) / 10000.,
expected_state=numpy.array([[2., .1, .03]]),
expected_state_var=numpy.zeros([1, 3, 3]))
def test_predict_state_mean(self):
"""Compare state mean transitions with simple hand-computed values."""
with self.cached_session():
state = constant_op.constant([[4., 2.]])
state = self.kalman_filter.predict_state_mean(
state, self.transition_fn([1]))
for _ in range(2):
state = self.kalman_filter.predict_state_mean(
state, self.transition_fn([1]))
self.assertAllClose(
numpy.array([[2. * 3. + 4., # Slope * time + base
2.]]),
state.eval())
def test_predict_state_var(self):
"""Compare a variance transition with simple hand-computed values."""
with self.cached_session():
state_var = constant_op.constant([[[1., 0.], [0., 2.]]])
state_var = self.kalman_filter.predict_state_var(
state_var, self.transition_fn([1]), self.power_sum_fn([1]))
self.assertAllClose(
numpy.array([[[3.1, 2.0], [2.0, 2.2]]]),
state_var.eval())
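# Illustrative note, not part of the original test: the expected covariance
# above follows by hand from the module-level constants, with
# A = STATE_TRANSITION, P the prior covariance, L = STATE_NOISE_TRANSFORM,
# and Q = STATE_TRANSITION_NOISE:
#   A P A^T + L Q L^T
#     = [[1, 1], [0, 1]] [[1, 0], [0, 2]] [[1, 0], [1, 1]]
#       + [[0.1, 0], [0, 0.2]]
#     = [[3, 2], [2, 2]] + [[0.1, 0], [0, 0.2]]
#     = [[3.1, 2.0], [2.0, 2.2]]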
def test_do_filter(self):
"""Tests do_filter.
Tests that correct values have high probability and incorrect values
have low probability when there is low uncertainty.
"""
with self.cached_session():
state = constant_op.constant([[4., 2.]])
state_var = constant_op.constant([[[0.0001, 0.], [0., 0.0001]]])
observation = constant_op.constant([[
.5 * (
4. # Base
+ 2.), # State transition
2.
]])
estimated_state = self.kalman_filter.predict_state_mean(
state, self.transition_fn([1]))
estimated_state_covariance = self.kalman_filter.predict_state_var(
state_var, self.transition_fn([1]), self.power_sum_fn([1]))
(predicted_observation,
predicted_observation_covariance) = (
self.kalman_filter.observed_from_state(
estimated_state, estimated_state_covariance,
observation_model=OBSERVATION_MODEL,
observation_noise=OBSERVATION_NOISE))
(_, _, first_log_prob) = self.kalman_filter.do_filter(
estimated_state=estimated_state,
estimated_state_covariance=estimated_state_covariance,
predicted_observation=predicted_observation,
predicted_observation_covariance=predicted_observation_covariance,
observation=observation,
observation_model=OBSERVATION_MODEL,
observation_noise=OBSERVATION_NOISE)
self.assertGreater(first_log_prob.eval()[0], numpy.log(0.99))
def test_predict_n_ahead_mean(self):
with self.cached_session():
original_state = constant_op.constant([[4., 2.]])
n = 5
iterative_state = original_state
for i in range(n):
self.assertAllClose(
iterative_state.eval(),
self.kalman_filter.predict_state_mean(
original_state,
self.transition_fn([i])).eval())
iterative_state = self.kalman_filter.predict_state_mean(
iterative_state,
self.transition_fn([1]))
def test_predict_n_ahead_var(self):
with self.cached_session():
original_var = constant_op.constant([[[2., 3.], [4., 5.]]])
n = 5
iterative_var = original_var
for i in range(n):
self.assertAllClose(
iterative_var.eval(),
self.kalman_filter.predict_state_var(
original_var,
self.transition_fn([i]),
self.power_sum_fn([i])).eval())
iterative_var = self.kalman_filter.predict_state_var(
iterative_var,
self.transition_fn([1]),
self.power_sum_fn([1]))
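# -----------------------------------------------------------------------------
# Illustrative appendix (not part of the original test file): a pure-Python
# cross-check of the two univariate expectations above, assuming only the
# textbook scalar Kalman update
#   s = h*p*h + r,  k = p*h / s,  x' = x + k*(z - h*x),  p' = (1 - k*h)*p
# with h = observation_model, r = observation_noise, z = observation. The
# helper name below is hypothetical.
def _univariate_posterior_sketch():
  """Illustration-only cross-check of the univariate posterior tests above."""
  def _run(x, p, h, r, z, num_steps=300):
    for _ in range(num_steps):
      s = h * p * h + r        # predicted observation variance
      k = p * h / s            # Kalman gain
      x = x + k * (z - h * x)  # posterior mean
      p = (1. - k * h) * p     # posterior variance (simplified form)
    return x, p
  # test_posterior_from_prior_state_univariate: mean -> z / h = 0.5, var -> ~0.
  mean, var = _run(x=0.3, p=1., h=2., r=0.01, z=1.)
  assert abs(mean - 0.5) < 1e-5 and var < 1e-5
  # ..._univariate_unit_noise: with a diffuse prior and unit observation noise,
  # the posterior variance after n observations is ~ r / (n * h**2).
  _, var = _run(x=0.3, p=1e10, h=2., r=1., z=1.)
  assert abs(var - 1. / (300. * 2. ** 2)) < 1e-7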
class KalmanFilterBatchTest(test.TestCase):
"""KalmanFilter tests with more than one element batches."""
def test_do_filter_batch(self):
"""Tests do_filter, in batch mode.
Tests that correct values have high probability and incorrect values
have low probability when there is low uncertainty.
"""
with self.cached_session():
state = constant_op.constant([[4., 2.], [5., 3.], [6., 4.]])
state_var = constant_op.constant(3 * [[[0.0001, 0.], [0., 0.0001]]])
observation = constant_op.constant([
[
.5 * (
4. # Base
+ 2.), # State transition
2.
],
[
.5 * (
5. # Base
+ 3.), # State transition
3.
],
[3.14, 2.71]
]) # Low probability observation
kf = kalman_filter.KalmanFilter()
transition_fn, power_sum_fn = _powers_and_sums_from_transition_matrix(
state_transition=STATE_TRANSITION,
state_transition_noise_covariance=STATE_TRANSITION_NOISE,
state_noise_transform=STATE_NOISE_TRANSFORM,
max_gap=2)
estimated_state = kf.predict_state_mean(state, transition_fn(3*[1]))
estimated_state_covariance = kf.predict_state_var(
state_var, transition_fn(3*[1]), power_sum_fn(3*[1]))
observation_model = array_ops.tile(OBSERVATION_MODEL, [3, 1, 1])
(predicted_observation,
predicted_observation_covariance) = (
kf.observed_from_state(
estimated_state, estimated_state_covariance,
observation_model=observation_model,
observation_noise=OBSERVATION_NOISE))
(state, state_var, log_prob) = kf.do_filter(
estimated_state=estimated_state,
estimated_state_covariance=estimated_state_covariance,
predicted_observation=predicted_observation,
predicted_observation_covariance=predicted_observation_covariance,
observation=observation,
observation_model=observation_model,
observation_noise=OBSERVATION_NOISE)
first_log_prob, second_log_prob, third_log_prob = log_prob.eval()
self.assertGreater(first_log_prob.sum(), numpy.log(0.99))
self.assertGreater(second_log_prob.sum(), numpy.log(0.99))
self.assertLess(third_log_prob.sum(), numpy.log(0.01))
def test_predict_n_ahead_mean(self):
with self.cached_session():
kf = kalman_filter.KalmanFilter()
transition_fn, _ = _powers_and_sums_from_transition_matrix(
state_transition=STATE_TRANSITION,
state_transition_noise_covariance=STATE_TRANSITION_NOISE,
state_noise_transform=STATE_NOISE_TRANSFORM,
max_gap=2)
original_state = constant_op.constant([[4., 2.], [3., 1.], [6., 2.]])
state0 = original_state
state1 = kf.predict_state_mean(state0, transition_fn(3 * [1]))
state2 = kf.predict_state_mean(state1, transition_fn(3 * [1]))
batch_eval = kf.predict_state_mean(
original_state, transition_fn([1, 0, 2])).eval()
self.assertAllClose(state0.eval()[1], batch_eval[1])
self.assertAllClose(state1.eval()[0], batch_eval[0])
self.assertAllClose(state2.eval()[2], batch_eval[2])
def test_predict_n_ahead_var(self):
with self.cached_session():
kf = kalman_filter.KalmanFilter()
transition_fn, power_sum_fn = _powers_and_sums_from_transition_matrix(
state_transition=STATE_TRANSITION,
state_transition_noise_covariance=STATE_TRANSITION_NOISE,
state_noise_transform=STATE_NOISE_TRANSFORM,
max_gap=2)
base_var = 2.0 * numpy.identity(2) + numpy.ones([2, 2])
original_var = constant_op.constant(
numpy.array(
[base_var, 2.0 * base_var, 3.0 * base_var], dtype=numpy.float32))
var0 = original_var
var1 = kf.predict_state_var(
var0, transition_fn(3 * [1]), power_sum_fn(3 * [1]))
var2 = kf.predict_state_var(
var1, transition_fn(3 * [1]), power_sum_fn(3 * [1]))
batch_eval = kf.predict_state_var(
original_var,
transition_fn([1, 0, 2]),
power_sum_fn([1, 0, 2])).eval()
self.assertAllClose(var0.eval()[1], batch_eval[1])
self.assertAllClose(var1.eval()[0], batch_eval[0])
self.assertAllClose(var2.eval()[2], batch_eval[2])
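# -----------------------------------------------------------------------------
# Illustrative appendix (not part of the original test file): a NumPy-only
# sketch of what transition_fn([1, 0, 2]) in the batch tests above expresses,
# i.e. advancing each batch element by a different number of steps via
# per-element matrix powers. The transition matrix `a` is a hypothetical
# example, not the STATE_TRANSITION constant used by the tests.
def _per_element_powers_sketch():
  import numpy
  a = numpy.array([[1., 1.], [0., 1.]])  # hypothetical one-step transition
  states = numpy.array([[4., 2.], [3., 1.], [6., 2.]])
  steps = [1, 0, 2]
  # Batched form: each element is advanced by its own matrix power.
  batched = numpy.stack(
      [numpy.linalg.matrix_power(a, n).dot(s) for n, s in zip(steps, states)])
  # Iterated form: apply the one-step transition steps[i] times to element i.
  iterated = []
  for n, s in zip(steps, states):
    for _ in range(n):
      s = a.dot(s)
    iterated.append(s)
  assert numpy.allclose(batched, numpy.stack(iterated))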
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/state_space_models/kalman_filter_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements Kalman filtering for linear state space models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
# TODO(allenl): support for always-factored covariance matrices
class KalmanFilter(object):
"""Inference on linear state models.
The model for observations in a given state is:
observation(t) = observation_model * state(t)
+ Gaussian(0, observation_noise_covariance)
State updates take the following form:
state(t) = state_transition * state(t-1)
+ state_noise_transform * Gaussian(0, state_transition_noise_covariance)
This is a real-valued analog to hidden Markov models, with linear transitions
and a Gaussian noise model. Given initial conditions, noise, and state
transition, Kalman filtering recursively estimates states and observations,
along with their associated uncertainty. When fed observations, future state
and uncertainty estimates are conditioned on those observations (in a Bayesian
sense).
Typically some "given"s mentioned above (noises) will be unknown, and so
optimizing the Kalman filter's probabilistic predictions with respect to these
parameters is a good approach. The state transition and observation models are
usually known a priori as a modeling decision.
"""
def __init__(self, dtype=dtypes.float32,
simplified_posterior_covariance_computation=False):
"""Initialize the Kalman filter.
Args:
dtype: The data type to use for floating point tensors.
simplified_posterior_covariance_computation: If True, uses an algebraic
simplification of the Kalman filtering posterior covariance update,
which is slightly faster at the cost of numerical stability. The
simplified update is often stable when using double precision on small
models or with fixed transition matrices.
"""
self._simplified_posterior_covariance_computation = (
simplified_posterior_covariance_computation)
self.dtype = dtype
def do_filter(
self, estimated_state, estimated_state_covariance,
predicted_observation, predicted_observation_covariance,
observation, observation_model, observation_noise):
"""Convenience function for scoring predictions.
Scores a prediction against an observation, and computes the updated
posterior over states.
Shapes given below for arguments are for single-model Kalman filtering
(e.g. KalmanFilter). For ensembles, prior_state and prior_state_var are
same-length tuples of values corresponding to each model.
Args:
estimated_state: A prior mean over states [batch size x state dimension]
estimated_state_covariance: Covariance of state prior [batch size x D x
D], with D depending on the Kalman filter implementation (typically
the state dimension).
predicted_observation: A prediction for the observed value, such as that
returned by observed_from_state. A [batch size x num features] Tensor.
predicted_observation_covariance: A covariance matrix corresponding to
`predicted_observation`, a [batch size x num features x num features]
Tensor.
observation: The observed value corresponding to the predictions
given [batch size x observation dimension]
observation_model: The [batch size x observation dimension x model state
dimension] Tensor indicating how a particular state is mapped to
(pre-noise) observations for each part of the batch.
observation_noise: A [batch size x observation dimension x observation
dimension] Tensor or [observation dimension x observation dimension]
Tensor with covariance matrices to use for each part of the batch (a
two-dimensional input will be broadcast).
Returns:
      posterior_state, posterior_state_var: Posterior mean and
        covariance, updated versions of estimated_state and
        estimated_state_covariance.
log_prediction_prob: Log probability of the observations under
the priors, suitable for optimization (should be maximized).
"""
symmetrized_observation_covariance = 0.5 * (
predicted_observation_covariance + array_ops.matrix_transpose(
predicted_observation_covariance))
instability_message = (
"This may occur due to numerically unstable filtering when there is "
"a large difference in posterior variances, or when inferences are "
"near-deterministic. Considering tuning the "
"'filtering_maximum_posterior_variance_ratio' or "
"'filtering_minimum_posterior_variance' parameters in your "
"StateSpaceModelConfiguration, or tuning the transition matrix.")
symmetrized_observation_covariance = numerics.verify_tensor_all_finite(
symmetrized_observation_covariance,
"Predicted observation covariance was not finite. {}".format(
instability_message))
diag = array_ops.matrix_diag_part(symmetrized_observation_covariance)
min_diag = math_ops.reduce_min(diag)
non_negative_assert = control_flow_ops.Assert(
min_diag >= 0.,
[("The predicted observation covariance "
"has a negative diagonal entry. {}").format(instability_message),
min_diag])
with ops.control_dependencies([non_negative_assert]):
observation_covariance_cholesky = linalg_ops.cholesky(
symmetrized_observation_covariance)
log_prediction_prob = math_utils.mvn_tril_log_prob(
loc=predicted_observation,
scale_tril=observation_covariance_cholesky,
x=observation)
(posterior_state,
posterior_state_var) = self.posterior_from_prior_state(
prior_state=estimated_state,
prior_state_var=estimated_state_covariance,
observation=observation,
observation_model=observation_model,
predicted_observations=(predicted_observation,
predicted_observation_covariance),
observation_noise=observation_noise)
return (posterior_state, posterior_state_var, log_prediction_prob)
def predict_state_mean(self, prior_state, transition_matrices):
"""Compute state transitions.
Args:
prior_state: Current estimated state mean [batch_size x state_dimension]
transition_matrices: A [batch size, state dimension, state dimension]
batch of matrices (dtype matching the `dtype` argument to the
constructor) with the transition matrix raised to the power of the
number of steps to be taken (not element-wise; use
math_utils.matrix_to_powers if there is no efficient special case) if
more than one step is desired.
Returns:
State mean advanced based on `transition_matrices` (dimensions matching
first argument).
"""
advanced_state = array_ops.squeeze(
math_ops.matmul(
transition_matrices,
prior_state[..., None]),
axis=[-1])
return advanced_state
def predict_state_var(
self, prior_state_var, transition_matrices, transition_noise_sums):
r"""Compute variance for state transitions.
Computes a noise estimate corresponding to the value returned by
predict_state_mean.
Args:
prior_state_var: Covariance matrix specifying uncertainty of current state
estimate [batch size x state dimension x state dimension]
transition_matrices: A [batch size, state dimension, state dimension]
batch of matrices (dtype matching the `dtype` argument to the
constructor) with the transition matrix raised to the power of the
number of steps to be taken (not element-wise; use
math_utils.matrix_to_powers if there is no efficient special case).
transition_noise_sums: A [batch size, state dimension, state dimension]
Tensor (dtype matching the `dtype` argument to the constructor) with:
\sum_{i=0}^{num_steps - 1} (
state_transition_to_powers_fn(i)
* state_transition_noise_covariance
* state_transition_to_powers_fn(i)^T
)
for the number of steps to be taken in each part of the batch (this
should match `transition_matrices`). Use math_utils.power_sums_tensor
with `tf.gather` if there is no efficient special case.
Returns:
State variance advanced based on `transition_matrices` and
`transition_noise_sums` (dimensions matching first argument).
"""
prior_variance_transitioned = math_ops.matmul(
math_ops.matmul(transition_matrices, prior_state_var),
transition_matrices,
adjoint_b=True)
return prior_variance_transitioned + transition_noise_sums
def posterior_from_prior_state(self, prior_state, prior_state_var,
observation, observation_model,
predicted_observations,
observation_noise):
"""Compute a posterior over states given an observation.
Args:
prior_state: Prior state mean [batch size x state dimension]
prior_state_var: Prior state covariance [batch size x state dimension x
state dimension]
observation: The observed value corresponding to the predictions given
[batch size x observation dimension]
observation_model: The [batch size x observation dimension x model state
dimension] Tensor indicating how a particular state is mapped to
(pre-noise) observations for each part of the batch.
predicted_observations: An (observation mean, observation variance) tuple
computed based on the current state, usually the output of
observed_from_state.
observation_noise: A [batch size x observation dimension x observation
dimension] or [observation dimension x observation dimension] Tensor
with covariance matrices to use for each part of the batch (a
two-dimensional input will be broadcast).
Returns:
Posterior mean and covariance (dimensions matching the first two
arguments).
"""
observed_mean, observed_var = predicted_observations
residual = observation - observed_mean
# TODO(allenl): Can more of this be done using matrix_solve_ls?
kalman_solve_rhs = math_ops.matmul(
observation_model, prior_state_var, adjoint_b=True)
# This matrix_solve adjoint doesn't make a difference symbolically (since
# observed_var is a covariance matrix, and should be symmetric), but
# filtering on multivariate series is unstable without it. See
# test_multivariate_symmetric_covariance_float64 in kalman_filter_test.py
# for an example of the instability (fails with adjoint=False).
kalman_gain_transposed = linalg_ops.matrix_solve(
matrix=observed_var, rhs=kalman_solve_rhs, adjoint=True)
posterior_state = prior_state + array_ops.squeeze(
math_ops.matmul(
kalman_gain_transposed,
array_ops.expand_dims(residual, -1),
adjoint_a=True),
axis=[-1])
gain_obs = math_ops.matmul(
kalman_gain_transposed, observation_model, adjoint_a=True)
identity_extradim = linalg_ops.eye(
array_ops.shape(gain_obs)[1], dtype=gain_obs.dtype)[None]
identity_minus_factor = identity_extradim - gain_obs
if self._simplified_posterior_covariance_computation:
# posterior covariance =
# (I - kalman_gain * observation_model) * prior_state_var
posterior_state_var = math_ops.matmul(identity_minus_factor,
prior_state_var)
else:
observation_noise = ops.convert_to_tensor(observation_noise)
# A Joseph form update, which provides better numeric stability than the
# simplified optimal Kalman gain update, at the cost of a few extra
# operations. Joseph form updates are valid for any gain (not just the
# optimal Kalman gain), and so are more forgiving of numerical errors in
# computing the optimal Kalman gain.
#
# posterior covariance =
# (I - kalman_gain * observation_model) * prior_state_var
# * (I - kalman_gain * observation_model)^T
# + kalman_gain * observation_noise * kalman_gain^T
left_multiplied_state_var = math_ops.matmul(identity_minus_factor,
prior_state_var)
multiplied_state_var = math_ops.matmul(
identity_minus_factor, left_multiplied_state_var, adjoint_b=True)
def _batch_observation_noise_update():
return (multiplied_state_var + math_ops.matmul(
math_ops.matmul(
kalman_gain_transposed, observation_noise, adjoint_a=True),
kalman_gain_transposed))
def _matrix_observation_noise_update():
return (multiplied_state_var + math_ops.matmul(
math_utils.batch_times_matrix(
kalman_gain_transposed, observation_noise, adj_x=True),
kalman_gain_transposed))
if observation_noise.get_shape().ndims is None:
posterior_state_var = control_flow_ops.cond(
math_ops.equal(array_ops.rank(observation_noise), 2),
_matrix_observation_noise_update, _batch_observation_noise_update)
else:
# If static shape information exists, it gets checked in each cond()
# branch, so we need a special case to avoid graph-build-time
# exceptions.
if observation_noise.get_shape().ndims == 2:
posterior_state_var = _matrix_observation_noise_update()
else:
posterior_state_var = _batch_observation_noise_update()
return posterior_state, posterior_state_var
def observed_from_state(self, state_mean, state_var, observation_model,
observation_noise):
"""Compute an observation distribution given a state distribution.
Args:
state_mean: State mean vector [batch size x state dimension]
state_var: State covariance [batch size x state dimension x state
dimension]
observation_model: The [batch size x observation dimension x model state
dimension] Tensor indicating how a particular state is mapped to
(pre-noise) observations for each part of the batch.
observation_noise: A [batch size x observation dimension x observation
dimension] Tensor with covariance matrices to use for each part of the
batch. To remove observation noise, pass a Tensor of zeros (or simply
0, which will broadcast).
Returns:
observed_mean: Observation mean vector [batch size x observation
dimension]
observed_var: Observation covariance [batch size x observation dimension x
observation dimension]
"""
observed_mean = array_ops.squeeze(
math_ops.matmul(
array_ops.expand_dims(state_mean, 1),
observation_model,
adjoint_b=True),
axis=[1])
observed_var = math_ops.matmul(
math_ops.matmul(observation_model, state_var),
observation_model,
adjoint_b=True)
observed_var += observation_noise
return observed_mean, observed_var
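# -----------------------------------------------------------------------------
# Illustrative appendix (not part of the original module): NumPy-only sketches
# of the math implemented by the TensorFlow ops above. All names here are
# hypothetical; the sketches assume only the model equations from the class
# docstring and the textbook simplified / Joseph-form covariance updates named
# in the comments of posterior_from_prior_state.
def _simulate_linear_state_space_sketch(num_steps=100, seed=0):
  """Draws one trajectory from the observation/state equations above."""
  import numpy as np
  rng = np.random.RandomState(seed)
  state_transition = np.array([[1., 1.], [0., 1.]])  # level + slope
  state_noise_transform = np.eye(2)
  state_transition_noise_covariance = 0.01 * np.eye(2)
  observation_model = np.array([[1., 0.]])
  observation_noise_covariance = np.array([[0.1]])
  state = np.array([0., 0.1])
  observations = []
  for _ in range(num_steps):
    state = (state_transition.dot(state)
             + state_noise_transform.dot(rng.multivariate_normal(
                 np.zeros(2), state_transition_noise_covariance)))
    observations.append(observation_model.dot(state) + rng.multivariate_normal(
        np.zeros(1), observation_noise_covariance))
  return np.array(observations)
def _joseph_form_sketch():
  """Checks that the simplified and Joseph updates agree for an exact gain."""
  import numpy as np
  prior_state_var = np.array([[1., 0.], [0., 2.]])      # P
  observation_model = np.array([[5., 1.], [-2., .1]])   # H
  observation_noise = 0.01 * np.eye(2)                  # R
  observed_var = (observation_model.dot(prior_state_var)
                  .dot(observation_model.T) + observation_noise)
  kalman_gain = prior_state_var.dot(observation_model.T).dot(
      np.linalg.inv(observed_var))
  identity_minus_factor = np.eye(2) - kalman_gain.dot(observation_model)
  simplified = identity_minus_factor.dot(prior_state_var)  # (I - KH) P
  joseph = (identity_minus_factor.dot(prior_state_var)
            .dot(identity_minus_factor.T)
            + kalman_gain.dot(observation_noise).dot(kalman_gain.T))
  # With the exact optimal gain the two coincide; the Joseph form additionally
  # stays symmetric and positive semi-definite when the gain is perturbed.
  assert np.allclose(simplified, joseph, atol=1e-8)
  return simplified, joseph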
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/state_space_models/kalman_filter.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for VARMA.
Tests VARMA model building and utility functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import TrainEvalFeatures
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import state_space_model
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import varma
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class MakeModelTest(test.TestCase):
def test_ar_smaller(self):
model = varma.VARMA(
autoregressive_order=0,
moving_average_order=3)
model.initialize_graph()
outputs = model.define_loss(
features={
TrainEvalFeatures.TIMES: constant_op.constant([[1, 2]]),
TrainEvalFeatures.VALUES: constant_op.constant([[[1.], [2.]]])
},
mode=estimator_lib.ModeKeys.TRAIN)
initializer = variables.global_variables_initializer()
with self.cached_session() as sess:
sess.run([initializer])
outputs.loss.eval()
def test_ma_smaller(self):
model = varma.VARMA(
autoregressive_order=6,
moving_average_order=3,
configuration=state_space_model.StateSpaceModelConfiguration(
num_features=7))
model.initialize_graph()
outputs = model.define_loss(
features={
TrainEvalFeatures.TIMES: constant_op.constant([[1, 2]]),
TrainEvalFeatures.VALUES: constant_op.constant(
[[[1.] * 7, [2.] * 7]])
},
mode=estimator_lib.ModeKeys.TRAIN)
initializer = variables.global_variables_initializer()
with self.cached_session() as sess:
sess.run([initializer])
outputs.loss.eval()
def test_make_ensemble_no_errors(self):
with variable_scope.variable_scope("model_one"):
model_one = varma.VARMA(10, 5)
with variable_scope.variable_scope("model_two"):
model_two = varma.VARMA(0, 3)
configuration = state_space_model.StateSpaceModelConfiguration()
ensemble = state_space_model.StateSpaceIndependentEnsemble(
ensemble_members=[model_one, model_two],
configuration=configuration)
ensemble.initialize_graph()
outputs = ensemble.define_loss(
features={
TrainEvalFeatures.TIMES: constant_op.constant([[1, 2]]),
TrainEvalFeatures.VALUES: constant_op.constant([[[1.], [2.]]])},
mode=estimator_lib.ModeKeys.TRAIN)
initializer = variables.global_variables_initializer()
with self.cached_session() as sess:
sess.run([initializer])
outputs.loss.eval()
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/state_space_models/varma_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for state space model infrastructure."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy
from tensorflow.contrib import layers
from tensorflow.contrib.timeseries.python.timeseries import estimators
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.contrib.timeseries.python.timeseries import input_pipeline
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.contrib.timeseries.python.timeseries import saved_model_utils
from tensorflow.contrib.timeseries.python.timeseries import state_management
from tensorflow.contrib.timeseries.python.timeseries import test_utils
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import state_space_model
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import coordinator as coordinator_lib
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import queue_runner_impl
class RandomStateSpaceModel(state_space_model.StateSpaceModel):
def __init__(self,
state_dimension,
state_noise_dimension,
configuration=state_space_model.StateSpaceModelConfiguration()):
self.transition = numpy.random.normal(
size=[state_dimension, state_dimension]).astype(
configuration.dtype.as_numpy_dtype)
self.noise_transform = numpy.random.normal(
size=(state_dimension, state_noise_dimension)).astype(
configuration.dtype.as_numpy_dtype)
# Test batch broadcasting
self.observation_model = numpy.random.normal(
size=(configuration.num_features, state_dimension)).astype(
configuration.dtype.as_numpy_dtype)
super(RandomStateSpaceModel, self).__init__(
configuration=configuration._replace(
covariance_prior_fn=lambda _: 0.))
def get_state_transition(self):
return self.transition
def get_noise_transform(self):
return self.noise_transform
def get_observation_model(self, times):
return self.observation_model
class ConstructionTests(test.TestCase):
def test_initialize_graph_error(self):
with self.assertRaisesRegexp(ValueError, "initialize_graph"):
model = RandomStateSpaceModel(2, 2)
outputs = model.define_loss(
features={
feature_keys.TrainEvalFeatures.TIMES:
constant_op.constant([[1, 2]]),
feature_keys.TrainEvalFeatures.VALUES:
constant_op.constant([[[1.], [2.]]])
},
mode=estimator_lib.ModeKeys.TRAIN)
initializer = variables.global_variables_initializer()
with self.cached_session() as sess:
sess.run([initializer])
outputs.loss.eval()
def test_initialize_graph_state_manager_error(self):
with self.assertRaisesRegexp(ValueError, "initialize_graph"):
model = RandomStateSpaceModel(2, 2)
state_manager = state_management.ChainingStateManager()
outputs = state_manager.define_loss(
model=model,
features={
feature_keys.TrainEvalFeatures.TIMES:
constant_op.constant([[1, 2]]),
feature_keys.TrainEvalFeatures.VALUES:
constant_op.constant([[[1.], [2.]]])
},
mode=estimator_lib.ModeKeys.TRAIN)
initializer = variables.global_variables_initializer()
with self.cached_session() as sess:
sess.run([initializer])
outputs.loss.eval()
class GapTests(test.TestCase):
def _gap_test_template(self, times, values):
random_model = RandomStateSpaceModel(
state_dimension=1, state_noise_dimension=1,
configuration=state_space_model.StateSpaceModelConfiguration(
num_features=1))
random_model.initialize_graph()
input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader({
feature_keys.TrainEvalFeatures.TIMES: times,
feature_keys.TrainEvalFeatures.VALUES: values
}))
features, _ = input_fn()
times = features[feature_keys.TrainEvalFeatures.TIMES]
values = features[feature_keys.TrainEvalFeatures.VALUES]
model_outputs = random_model.get_batch_loss(
features={
feature_keys.TrainEvalFeatures.TIMES: times,
feature_keys.TrainEvalFeatures.VALUES: values
},
mode=None,
state=math_utils.replicate_state(
start_state=random_model.get_start_state(),
batch_size=array_ops.shape(times)[0]))
with self.cached_session() as session:
variables.global_variables_initializer().run()
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(session, coord=coordinator)
model_outputs.loss.eval()
coordinator.request_stop()
coordinator.join()
def test_start_gap(self):
self._gap_test_template(times=[20, 21, 22], values=numpy.arange(3))
def test_mid_gap(self):
self._gap_test_template(times=[2, 60, 61], values=numpy.arange(3))
def test_end_gap(self):
self._gap_test_template(times=[2, 3, 73], values=numpy.arange(3))
def test_all_gaps(self):
self._gap_test_template(times=[2, 4, 8, 16, 32, 64, 128],
values=numpy.arange(7))
class StateSpaceEquivalenceTests(test.TestCase):
def test_savedmodel_state_override(self):
random_model = RandomStateSpaceModel(
state_dimension=5,
state_noise_dimension=4,
configuration=state_space_model.StateSpaceModelConfiguration(
exogenous_feature_columns=[layers.real_valued_column("exogenous")],
dtype=dtypes.float64, num_features=1))
estimator = estimators.StateSpaceRegressor(
model=random_model,
optimizer=gradient_descent.GradientDescentOptimizer(0.1))
combined_input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader({
feature_keys.FilteringFeatures.TIMES: [1, 2, 3, 4],
feature_keys.FilteringFeatures.VALUES: [1., 2., 3., 4.],
"exogenous": [-1., -2., -3., -4.]
}))
estimator.train(combined_input_fn, steps=1)
export_location = estimator.export_saved_model(
self.get_temp_dir(), estimator.build_raw_serving_input_receiver_fn())
with ops.Graph().as_default() as graph:
random_model.initialize_graph()
with self.session(graph=graph) as session:
variables.global_variables_initializer().run()
evaled_start_state = session.run(random_model.get_start_state())
evaled_start_state = [
state_element[None, ...] for state_element in evaled_start_state]
with ops.Graph().as_default() as graph:
with self.session(graph=graph) as session:
signatures = loader.load(
session, [tag_constants.SERVING], export_location)
first_split_filtering = saved_model_utils.filter_continuation(
continue_from={
feature_keys.FilteringResults.STATE_TUPLE: evaled_start_state},
signatures=signatures,
session=session,
features={
feature_keys.FilteringFeatures.TIMES: [1, 2],
feature_keys.FilteringFeatures.VALUES: [1., 2.],
"exogenous": [[-1.], [-2.]]})
second_split_filtering = saved_model_utils.filter_continuation(
continue_from=first_split_filtering,
signatures=signatures,
session=session,
features={
feature_keys.FilteringFeatures.TIMES: [3, 4],
feature_keys.FilteringFeatures.VALUES: [3., 4.],
"exogenous": [[-3.], [-4.]]
})
combined_filtering = saved_model_utils.filter_continuation(
continue_from={
feature_keys.FilteringResults.STATE_TUPLE: evaled_start_state},
signatures=signatures,
session=session,
features={
feature_keys.FilteringFeatures.TIMES: [1, 2, 3, 4],
feature_keys.FilteringFeatures.VALUES: [1., 2., 3., 4.],
"exogenous": [[-1.], [-2.], [-3.], [-4.]]
})
split_predict = saved_model_utils.predict_continuation(
continue_from=second_split_filtering,
signatures=signatures,
session=session,
steps=1,
exogenous_features={
"exogenous": [[[-5.]]]})
combined_predict = saved_model_utils.predict_continuation(
continue_from=combined_filtering,
signatures=signatures,
session=session,
steps=1,
exogenous_features={
"exogenous": [[[-5.]]]})
for state_key, combined_state_value in combined_filtering.items():
if state_key == feature_keys.FilteringResults.TIMES:
continue
self.assertAllClose(
combined_state_value, second_split_filtering[state_key])
for prediction_key, combined_value in combined_predict.items():
self.assertAllClose(combined_value, split_predict[prediction_key])
def _equivalent_to_single_model_test_template(self, model_generator):
with self.cached_session() as session:
random_model = RandomStateSpaceModel(
state_dimension=5,
state_noise_dimension=4,
configuration=state_space_model.StateSpaceModelConfiguration(
dtype=dtypes.float64, num_features=1))
random_model.initialize_graph()
series_length = 10
model_data = random_model.generate(
number_of_series=1, series_length=series_length,
model_parameters=random_model.random_model_parameters())
input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader(model_data))
features, _ = input_fn()
model_outputs = random_model.get_batch_loss(
features=features,
mode=None,
state=math_utils.replicate_state(
start_state=random_model.get_start_state(),
batch_size=array_ops.shape(
features[feature_keys.TrainEvalFeatures.TIMES])[0]))
variables.global_variables_initializer().run()
compare_outputs_evaled_fn = model_generator(
random_model, model_data)
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(session, coord=coordinator)
compare_outputs_evaled = compare_outputs_evaled_fn(session)
model_outputs_evaled = session.run(
(model_outputs.end_state, model_outputs.predictions))
coordinator.request_stop()
coordinator.join()
model_posteriors, model_predictions = model_outputs_evaled
(_, compare_posteriors,
compare_predictions) = compare_outputs_evaled
(model_posterior_mean, model_posterior_var,
model_from_time) = model_posteriors
(compare_posterior_mean, compare_posterior_var,
compare_from_time) = compare_posteriors
self.assertAllClose(model_posterior_mean, compare_posterior_mean[0])
self.assertAllClose(model_posterior_var, compare_posterior_var[0])
self.assertAllClose(model_from_time, compare_from_time)
self.assertEqual(sorted(model_predictions.keys()),
sorted(compare_predictions.keys()))
for prediction_name in model_predictions:
if prediction_name == "loss":
# Chunking means that losses will be different; skip testing them.
continue
        # Compare the last chunk to the corresponding un-chunked model
        # predictions.
last_prediction_chunk = compare_predictions[prediction_name][-1]
comparison_values = last_prediction_chunk.shape[0]
model_prediction = (
model_predictions[prediction_name][0, -comparison_values:])
self.assertAllClose(model_prediction,
last_prediction_chunk)
def _model_equivalent_to_chained_model_test_template(self, chunk_size):
def chained_model_outputs(original_model, data):
input_fn = test_utils.AllWindowInputFn(
input_pipeline.NumpyReader(data), window_size=chunk_size)
state_manager = state_management.ChainingStateManager(
state_saving_interval=1)
features, _ = input_fn()
state_manager.initialize_graph(original_model)
model_outputs = state_manager.define_loss(
model=original_model,
features=features,
mode=estimator_lib.ModeKeys.TRAIN)
def _eval_outputs(session):
for _ in range(50):
# Warm up saved state
model_outputs.loss.eval()
(posterior_mean, posterior_var,
priors_from_time) = model_outputs.end_state
posteriors = ((posterior_mean,), (posterior_var,), priors_from_time)
outputs = (model_outputs.loss, posteriors,
model_outputs.predictions)
chunked_outputs_evaled = session.run(outputs)
return chunked_outputs_evaled
return _eval_outputs
self._equivalent_to_single_model_test_template(chained_model_outputs)
def test_model_equivalent_to_chained_model_chunk_size_one(self):
numpy.random.seed(2)
random_seed.set_random_seed(3)
self._model_equivalent_to_chained_model_test_template(1)
def test_model_equivalent_to_chained_model_chunk_size_five(self):
numpy.random.seed(4)
random_seed.set_random_seed(5)
self._model_equivalent_to_chained_model_test_template(5)
class PredictionTests(test.TestCase):
def _check_predictions(
self, predicted_mean, predicted_covariance, window_size):
self.assertAllEqual(predicted_covariance.shape,
[1, # batch
window_size,
1, # num features
1]) # num features
self.assertAllEqual(predicted_mean.shape,
[1, # batch
window_size,
1]) # num features
for position in range(window_size - 2):
self.assertGreater(predicted_covariance[0, position + 2, 0, 0],
predicted_covariance[0, position, 0, 0])
def test_predictions_direct(self):
dtype = dtypes.float64
with variable_scope.variable_scope(dtype.name):
random_model = RandomStateSpaceModel(
state_dimension=5, state_noise_dimension=4,
configuration=state_space_model.StateSpaceModelConfiguration(
dtype=dtype, num_features=1))
random_model.initialize_graph()
prediction_dict = random_model.predict(features={
feature_keys.PredictionFeatures.TIMES: [[1, 3, 5, 6]],
feature_keys.PredictionFeatures.STATE_TUPLE:
math_utils.replicate_state(
start_state=random_model.get_start_state(), batch_size=1)
})
with self.cached_session():
variables.global_variables_initializer().run()
predicted_mean = prediction_dict["mean"].eval()
predicted_covariance = prediction_dict["covariance"].eval()
self._check_predictions(predicted_mean, predicted_covariance,
window_size=4)
def test_predictions_after_loss(self):
dtype = dtypes.float32
with variable_scope.variable_scope(dtype.name):
random_model = RandomStateSpaceModel(
state_dimension=5, state_noise_dimension=4,
configuration=state_space_model.StateSpaceModelConfiguration(
dtype=dtype, num_features=1))
features = {
feature_keys.TrainEvalFeatures.TIMES: [[1, 2, 3, 4]],
feature_keys.TrainEvalFeatures.VALUES:
array_ops.ones([1, 4, 1], dtype=dtype)
}
passthrough = state_management.PassthroughStateManager()
random_model.initialize_graph()
passthrough.initialize_graph(random_model)
model_outputs = passthrough.define_loss(
model=random_model,
features=features,
mode=estimator_lib.ModeKeys.EVAL)
predictions = random_model.predict({
feature_keys.PredictionFeatures.TIMES: [[5, 7, 8]],
feature_keys.PredictionFeatures.STATE_TUPLE: model_outputs.end_state
})
with self.cached_session():
variables.global_variables_initializer().run()
predicted_mean = predictions["mean"].eval()
predicted_covariance = predictions["covariance"].eval()
self._check_predictions(predicted_mean, predicted_covariance,
window_size=3)
class ExogenousTests(test.TestCase):
def test_noise_increasing(self):
for dtype in [dtypes.float32, dtypes.float64]:
with variable_scope.variable_scope(dtype.name):
random_model = RandomStateSpaceModel(
state_dimension=5, state_noise_dimension=4,
configuration=state_space_model.StateSpaceModelConfiguration(
dtype=dtype, num_features=1))
original_covariance = array_ops.diag(array_ops.ones(shape=[5]))
_, new_covariance, _ = random_model._exogenous_noise_increasing(
current_times=[[1]],
exogenous_values=[[5.]],
state=[
array_ops.ones(shape=[1, 5]), original_covariance[None], [0]
])
with self.cached_session() as session:
variables.global_variables_initializer().run()
evaled_new_covariance, evaled_original_covariance = session.run(
[new_covariance[0], original_covariance])
new_variances = numpy.diag(evaled_new_covariance)
original_variances = numpy.diag(evaled_original_covariance)
for i in range(5):
self.assertGreater(new_variances[i], original_variances[i])
def test_noise_decreasing(self):
for dtype in [dtypes.float32, dtypes.float64]:
with variable_scope.variable_scope(dtype.name):
random_model = RandomStateSpaceModel(
state_dimension=5, state_noise_dimension=4,
configuration=state_space_model.StateSpaceModelConfiguration(
dtype=dtype, num_features=1))
random_model.initialize_graph()
original_covariance = array_ops.diag(
array_ops.ones(shape=[5], dtype=dtype))
_, new_covariance, _ = random_model._exogenous_noise_decreasing(
current_times=[[1]],
exogenous_values=constant_op.constant([[-2.]], dtype=dtype),
state=[
-array_ops.ones(shape=[1, 5], dtype=dtype),
original_covariance[None], [0]
])
with self.cached_session() as session:
variables.global_variables_initializer().run()
evaled_new_covariance, evaled_original_covariance = session.run(
[new_covariance[0], original_covariance])
new_variances = numpy.diag(evaled_new_covariance)
original_variances = numpy.diag(evaled_original_covariance)
for i in range(5):
self.assertLess(new_variances[i], original_variances[i])
class StubStateSpaceModel(state_space_model.StateSpaceModel):
def __init__(self,
transition,
state_noise_dimension,
configuration=state_space_model.StateSpaceModelConfiguration()):
self.transition = transition
self.noise_transform = numpy.random.normal(
size=(transition.shape[0], state_noise_dimension)).astype(numpy.float32)
# Test feature + batch broadcasting
self.observation_model = numpy.random.normal(
size=(transition.shape[0])).astype(numpy.float32)
super(StubStateSpaceModel, self).__init__(
configuration=configuration)
def get_state_transition(self):
return self.transition
def get_noise_transform(self):
return self.noise_transform
def get_observation_model(self, times):
return self.observation_model
GeneratedModel = collections.namedtuple(
"GeneratedModel", ["model", "data", "true_parameters"])
class PosteriorTests(test.TestCase):
def _get_cycle_transition(self, period):
cycle_transition = numpy.zeros([period - 1, period - 1],
dtype=numpy.float32)
cycle_transition[0, :] = -1
cycle_transition[1:, :-1] = numpy.identity(period - 2)
return cycle_transition
_adder_transition = numpy.array([[1, 1],
[0, 1]], dtype=numpy.float32)
def _get_single_model(self):
numpy.random.seed(8)
stub_model = StubStateSpaceModel(
transition=self._get_cycle_transition(5), state_noise_dimension=0)
series_length = 1000
stub_model.initialize_graph()
true_params = stub_model.random_model_parameters()
data = stub_model.generate(
number_of_series=1, series_length=series_length,
model_parameters=true_params)
return GeneratedModel(
model=stub_model, data=data, true_parameters=true_params)
def test_exact_posterior_recovery_no_transition_noise(self):
with self.cached_session() as session:
stub_model, data, true_params = self._get_single_model()
input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader(data))
features, _ = input_fn()
model_outputs = stub_model.get_batch_loss(
features=features,
mode=None,
state=math_utils.replicate_state(
start_state=stub_model.get_start_state(),
batch_size=array_ops.shape(
features[feature_keys.TrainEvalFeatures.TIMES])[0]))
variables.global_variables_initializer().run()
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(session, coord=coordinator)
posterior_mean, posterior_var, posterior_times = session.run(
# Feed the true model parameters so that this test doesn't depend on
# the generated parameters being close to the variable initializations
# (an alternative would be training steps to fit the noise values,
# which would be slow).
model_outputs.end_state, feed_dict=true_params)
coordinator.request_stop()
coordinator.join()
self.assertAllClose(numpy.zeros([1, 4, 4]), posterior_var,
atol=1e-2)
self.assertAllClose(
numpy.dot(
numpy.linalg.matrix_power(
stub_model.transition,
data[feature_keys.TrainEvalFeatures.TIMES].shape[1]),
true_params[stub_model.prior_state_mean]),
posterior_mean[0],
rtol=1e-1)
self.assertAllClose(
math_utils.batch_end_time(
features[feature_keys.TrainEvalFeatures.TIMES]).eval(),
posterior_times)
def test_chained_exact_posterior_recovery_no_transition_noise(self):
with self.cached_session() as session:
stub_model, data, true_params = self._get_single_model()
chunk_size = 10
input_fn = test_utils.AllWindowInputFn(
input_pipeline.NumpyReader(data), window_size=chunk_size)
features, _ = input_fn()
state_manager = state_management.ChainingStateManager(
state_saving_interval=1)
state_manager.initialize_graph(stub_model)
model_outputs = state_manager.define_loss(
model=stub_model,
features=features,
mode=estimator_lib.ModeKeys.TRAIN)
variables.global_variables_initializer().run()
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(session, coord=coordinator)
for _ in range(
data[feature_keys.TrainEvalFeatures.TIMES].shape[1] // chunk_size):
model_outputs.loss.eval()
posterior_mean, posterior_var, posterior_times = session.run(
model_outputs.end_state, feed_dict=true_params)
coordinator.request_stop()
coordinator.join()
self.assertAllClose(numpy.zeros([1, 4, 4]), posterior_var,
atol=1e-2)
self.assertAllClose(
numpy.dot(
numpy.linalg.matrix_power(
stub_model.transition,
data[feature_keys.TrainEvalFeatures.TIMES].shape[1]),
true_params[stub_model.prior_state_mean]),
posterior_mean[0],
rtol=1e-1)
self.assertAllClose(data[feature_keys.TrainEvalFeatures.TIMES][:, -1],
posterior_times)
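# -----------------------------------------------------------------------------
# Illustrative appendix (not part of the original test file): a NumPy-only
# sketch of the cycle transition built by PosteriorTests._get_cycle_transition
# above. The (period - 1) x (period - 1) "dummy seasonal" transition returns
# any state to itself after `period` steps, which is what makes it a cycle.
# The helper name is hypothetical.
def _cycle_transition_period_sketch(period=5):
  import numpy
  transition = numpy.zeros([period - 1, period - 1])
  transition[0, :] = -1
  transition[1:, :-1] = numpy.identity(period - 2)
  # The characteristic polynomial is 1 + z + ... + z**(period - 1), whose roots
  # are the non-unit period-th roots of unity, so transition**period == I.
  assert numpy.allclose(
      numpy.linalg.matrix_power(transition, period),
      numpy.identity(period - 1))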
class TimeDependentStateSpaceModel(state_space_model.StateSpaceModel):
"""A mostly trivial model which predicts values = times + 1."""
def __init__(self, static_unrolling_window_size_threshold=None):
super(TimeDependentStateSpaceModel, self).__init__(
configuration=state_space_model.StateSpaceModelConfiguration(
use_observation_noise=False,
transition_covariance_initial_log_scale_bias=5.,
static_unrolling_window_size_threshold=
static_unrolling_window_size_threshold))
def get_state_transition(self):
return array_ops.ones(shape=[1, 1])
def get_noise_transform(self):
return array_ops.ones(shape=[1, 1])
def get_observation_model(self, times):
return array_ops.reshape(
tensor=math_ops.cast(times + 1, dtypes.float32), shape=[-1, 1, 1])
def make_priors(self):
return (ops.convert_to_tensor([1.]), ops.convert_to_tensor([[0.]]))
class UnknownShapeModel(TimeDependentStateSpaceModel):
def get_observation_model(self, times):
parent_model = super(UnknownShapeModel, self).get_observation_model(times)
return array_ops.placeholder_with_default(
input=parent_model, shape=tensor_shape.unknown_shape())
class TimeDependentTests(test.TestCase):
def _time_dependency_test_template(self, model_type):
"""Test that a time-dependent observation model influences predictions."""
model = model_type()
estimator = estimators.StateSpaceRegressor(
model=model, optimizer=gradient_descent.GradientDescentOptimizer(0.1))
values = numpy.reshape([1., 2., 3., 4.],
newshape=[1, 4, 1])
input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader({
feature_keys.TrainEvalFeatures.TIMES: [[0, 1, 2, 3]],
feature_keys.TrainEvalFeatures.VALUES: values
}))
estimator.train(input_fn=input_fn, max_steps=1)
predicted_values = estimator.evaluate(input_fn=input_fn, steps=1)["mean"]
# Throw out the first value so we don't test the prior
self.assertAllEqual(values[1:], predicted_values[1:])
def test_undefined_shape_time_dependency(self):
self._time_dependency_test_template(UnknownShapeModel)
def test_loop_unrolling(self):
"""Tests running/restoring from a checkpoint with static unrolling."""
model = TimeDependentStateSpaceModel(
# Unroll during training, but not evaluation
static_unrolling_window_size_threshold=2)
estimator = estimators.StateSpaceRegressor(model=model)
times = numpy.arange(100)
values = numpy.arange(100)
dataset = {
feature_keys.TrainEvalFeatures.TIMES: times,
feature_keys.TrainEvalFeatures.VALUES: values
}
train_input_fn = input_pipeline.RandomWindowInputFn(
input_pipeline.NumpyReader(dataset), batch_size=16, window_size=2)
eval_input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader(dataset))
estimator.train(input_fn=train_input_fn, max_steps=1)
estimator.evaluate(input_fn=eval_input_fn, steps=1)
class LevelOnlyModel(state_space_model.StateSpaceModel):
def get_state_transition(self):
return linalg_ops.eye(1, dtype=self.dtype)
def get_noise_transform(self):
return linalg_ops.eye(1, dtype=self.dtype)
def get_observation_model(self, times):
return [1]
class MultivariateLevelModel(
state_space_model.StateSpaceCorrelatedFeaturesEnsemble):
def __init__(self, configuration):
univariate_component_configuration = configuration._replace(
num_features=1)
components = []
for feature in range(configuration.num_features):
with variable_scope.variable_scope("feature{}".format(feature)):
components.append(
LevelOnlyModel(configuration=univariate_component_configuration))
super(MultivariateLevelModel, self).__init__(
ensemble_members=components, configuration=configuration)
class MultivariateTests(test.TestCase):
def test_multivariate(self):
dtype = dtypes.float32
num_features = 3
covariance = numpy.eye(num_features)
# A single off-diagonal has a non-zero value in the true transition
# noise covariance.
covariance[-1, 0] = 1.
covariance[0, -1] = 1.
dataset_size = 100
values = numpy.cumsum(
numpy.random.multivariate_normal(
mean=numpy.zeros(num_features),
cov=covariance,
size=dataset_size),
axis=0)
times = numpy.arange(dataset_size)
model = MultivariateLevelModel(
configuration=state_space_model.StateSpaceModelConfiguration(
num_features=num_features,
dtype=dtype,
use_observation_noise=False,
transition_covariance_initial_log_scale_bias=5.))
estimator = estimators.StateSpaceRegressor(
model=model, optimizer=gradient_descent.GradientDescentOptimizer(0.1))
data = {
feature_keys.TrainEvalFeatures.TIMES: times,
feature_keys.TrainEvalFeatures.VALUES: values
}
train_input_fn = input_pipeline.RandomWindowInputFn(
input_pipeline.NumpyReader(data), batch_size=16, window_size=16)
estimator.train(input_fn=train_input_fn, steps=1)
for component in model._ensemble_members:
# Check that input statistics propagated to component models
self.assertTrue(component._input_statistics)
def test_ensemble_observation_noise(self):
model = MultivariateLevelModel(
configuration=state_space_model.StateSpaceModelConfiguration())
model.initialize_graph()
outputs = model.define_loss(
features={
feature_keys.TrainEvalFeatures.TIMES:
constant_op.constant([[1, 2]]),
feature_keys.TrainEvalFeatures.VALUES:
constant_op.constant([[[1.], [2.]]])
},
mode=estimator_lib.ModeKeys.TRAIN)
initializer = variables.global_variables_initializer()
with self.cached_session() as sess:
sess.run([initializer])
outputs.loss.eval()
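# -----------------------------------------------------------------------------
# Illustrative appendix (not part of the original test file): a NumPy-only
# sketch of the data generated in MultivariateTests.test_multivariate above,
# showing that the single non-zero off-diagonal in the generating covariance
# couples the increments of the first and last features. Names and thresholds
# here are hypothetical.
def _correlated_walk_sketch(num_features=3, dataset_size=100, seed=0):
  import numpy
  rng = numpy.random.RandomState(seed)
  covariance = numpy.eye(num_features)
  covariance[-1, 0] = 1.
  covariance[0, -1] = 1.
  values = numpy.cumsum(
      rng.multivariate_normal(
          mean=numpy.zeros(num_features), cov=covariance, size=dataset_size),
      axis=0)
  increments = numpy.diff(values, axis=0)
  correlations = numpy.corrcoef(increments, rowvar=False)
  # The first and last features share their noise (correlation ~1); the middle
  # feature's increments are essentially uncorrelated with both.
  assert correlations[0, -1] > 0.9
  assert abs(correlations[0, 1]) < 0.5
  return correlations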
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/timeseries/python/timeseries/state_space_models/state_space_model_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that the TensorFlow parts of the known anomaly example run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.examples import known_anomaly
from tensorflow.python.platform import test
class KnownAnomalyExampleTest(test.TestCase):
def test_shapes_and_variance_structural_ar(self):
(times, observed, all_times, mean, upper_limit, lower_limit,
anomaly_locations) = known_anomaly.train_and_evaluate_exogenous(
train_steps=1, estimator_fn=known_anomaly.autoregressive_estimator)
self.assertAllEqual(
anomaly_locations,
[25, 50, 75, 100, 125, 150, 175, 249])
self.assertAllEqual(all_times.shape, mean.shape)
self.assertAllEqual(all_times.shape, upper_limit.shape)
self.assertAllEqual(all_times.shape, lower_limit.shape)
self.assertAllEqual(times.shape, observed.shape)
def test_shapes_and_variance_structural_ssm(self):
(times, observed, all_times, mean, upper_limit, lower_limit,
anomaly_locations) = known_anomaly.train_and_evaluate_exogenous(
train_steps=50, estimator_fn=known_anomaly.state_space_estimator)
self.assertAllEqual(
anomaly_locations,
[25, 50, 75, 100, 125, 150, 175, 249])
self.assertAllEqual([200], times.shape)
self.assertAllEqual([200], observed.shape)
self.assertAllEqual([300], all_times.shape)
self.assertAllEqual([300], mean.shape)
self.assertAllEqual([300], upper_limit.shape)
self.assertAllEqual([300], lower_limit.shape)
# Check that initial predictions are relatively confident.
self.assertLess(upper_limit[210] - lower_limit[210],
3.0 * (upper_limit[200] - lower_limit[200]))
# Check that post-changepoint predictions are less confident
self.assertGreater(upper_limit[290] - lower_limit[290],
3.0 * (upper_limit[240] - lower_limit[240]))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/timeseries/examples/known_anomaly_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that the TensorFlow parts of the prediction example run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
from tensorflow.contrib.timeseries.examples import predict
from tensorflow.python.platform import test
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/period_trend.csv")
class PeriodTrendExampleTest(test.TestCase):
def test_shapes_and_variance_structural(self):
(times, observed, all_times, mean, upper_limit, lower_limit
) = predict.structural_ensemble_train_and_predict(_DATA_FILE)
# Just check that plotting will probably be OK. We can't actually run the
# plotting code since we don't want to pull in matplotlib as a dependency
# for this test.
self.assertAllEqual([500], times.shape)
self.assertAllEqual([500], observed.shape)
self.assertAllEqual([700], all_times.shape)
self.assertAllEqual([700], mean.shape)
self.assertAllEqual([700], upper_limit.shape)
self.assertAllEqual([700], lower_limit.shape)
def test_ar(self):
(times, observed, all_times, mean,
upper_limit, lower_limit) = predict.ar_train_and_predict(_DATA_FILE)
self.assertAllEqual(times.shape, observed.shape)
self.assertAllEqual(all_times.shape, mean.shape)
self.assertAllEqual(all_times.shape, upper_limit.shape)
self.assertAllEqual(all_times.shape, lower_limit.shape)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/timeseries/examples/predict_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that the TensorFlow parts of the multivariate example run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.examples import multivariate
from tensorflow.python.platform import test
class MultivariateExampleTest(test.TestCase):
def test_shapes_structural(self):
times, values = multivariate.multivariate_train_and_sample(
export_directory=self.get_temp_dir(), training_steps=5)
self.assertAllEqual([1100], times.shape)
self.assertAllEqual([1100, 5], values.shape)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/timeseries/examples/multivariate_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An example of training and predicting with a TFTS estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
FLAGS = None
_MODULE_PATH = os.path.dirname(__file__)
_DEFAULT_DATA_FILE = os.path.join(_MODULE_PATH, "data/period_trend.csv")
def structural_ensemble_train_and_predict(csv_file_name):
# Cycle between 5 latent values over a period of 100. This leads to a very
# smooth periodic component (and a small model), which is a good fit for our
# example data. Modeling high-frequency periodic variations will require a
# higher cycle_num_latent_values.
structural = tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=100, num_features=1, cycle_num_latent_values=5)
return train_and_predict(structural, csv_file_name, training_steps=150)
def ar_train_and_predict(csv_file_name):
# An autoregressive model, with periodicity handled as a time-based
# regression. Note that this requires windows of size 16 (input_window_size +
# output_window_size) for training.
ar = tf.contrib.timeseries.ARRegressor(
periodicities=100, input_window_size=10, output_window_size=6,
num_features=1,
# Use the (default) normal likelihood loss to adaptively fit the
# variance. SQUARED_LOSS overestimates variance when there are trends in
# the series.
loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS)
return train_and_predict(ar, csv_file_name, training_steps=600)
def train_and_predict(estimator, csv_file_name, training_steps):
"""A simple example of training and predicting."""
# Read data in the default "time,value" CSV format with no header
reader = tf.contrib.timeseries.CSVReader(csv_file_name)
# Set up windowing and batching for training
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=16, window_size=16)
# Fit model parameters to data
estimator.train(input_fn=train_input_fn, steps=training_steps)
  # Evaluate on the full dataset sequentially, collecting in-sample predictions
  # for a qualitative evaluation. Note that this loads the whole dataset into
  # memory. For quantitative evaluation, use a randomly-windowed input_fn such
  # as RandomWindowInputFn instead.
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Predict starting after the evaluation
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=200)))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, 0]
mean = np.squeeze(np.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
variance = np.squeeze(np.concatenate(
[evaluation["covariance"][0], predictions["covariance"]], axis=0))
all_times = np.concatenate([times, predictions["times"]], axis=0)
upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)
return times, observed, all_times, mean, upper_limit, lower_limit
def make_plot(name, training_times, observed, all_times, mean,
upper_limit, lower_limit):
"""Plot a time series in a new figure."""
pyplot.figure()
pyplot.plot(training_times, observed, "b", label="training series")
pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.plot(all_times, upper_limit, "g", label="forecast upper bound")
pyplot.plot(all_times, lower_limit, "g", label="forecast lower bound")
pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
alpha="0.2")
pyplot.axvline(training_times[-1], color="k", linestyle="--")
pyplot.xlabel("time")
pyplot.ylabel("observations")
pyplot.legend(loc=0)
pyplot.title(name)
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
input_filename = FLAGS.input_filename
if input_filename is None:
input_filename = _DEFAULT_DATA_FILE
make_plot("Structural ensemble",
*structural_ensemble_train_and_predict(input_filename))
make_plot("AR", *ar_train_and_predict(input_filename))
pyplot.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_filename",
type=str,
required=False,
help="Input csv file (omit to use the data/period_trend.csv).")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-master
|
tensorflow/contrib/timeseries/examples/predict.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/timeseries/examples/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example of using an exogenous feature to ignore a known anomaly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
from os import path
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/changepoints.csv")
def state_space_estimator(exogenous_feature_columns):
"""Constructs a StructuralEnsembleRegressor."""
def _exogenous_update_condition(times, features):
del times # unused
    # Make exogenous updates sparse by setting an update condition. This in
    # effect allows missing exogenous features: if the condition evaluates to
    # False, no update is performed. Without such a condition we sometimes end
    # up with "leaky" updates which add unnecessary uncertainty to the model
    # even when there is no changepoint.
return tf.equal(tf.squeeze(features["is_changepoint"], axis=-1), "yes")
return (
tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=12,
# Extract a smooth period by constraining the number of latent values
# being cycled between.
cycle_num_latent_values=3,
num_features=1,
exogenous_feature_columns=exogenous_feature_columns,
exogenous_update_condition=_exogenous_update_condition),
# Use truncated backpropagation with a window size of 64, batching
# together 4 of these windows (random offsets) per training step. Training
# with exogenous features often requires somewhat larger windows.
4, 64)
def autoregressive_estimator(exogenous_feature_columns):
  """Constructs an ARRegressor."""
input_window_size = 8
output_window_size = 2
return (
tf.contrib.timeseries.ARRegressor(
periodicities=12,
num_features=1,
input_window_size=input_window_size,
output_window_size=output_window_size,
exogenous_feature_columns=exogenous_feature_columns),
64, input_window_size + output_window_size)
def train_and_evaluate_exogenous(
estimator_fn, csv_file_name=_DATA_FILE, train_steps=300):
"""Training, evaluating, and predicting on a series with changepoints."""
# Indicate the format of our exogenous feature, in this case a string
# representing a boolean value.
string_feature = tf.feature_column.categorical_column_with_vocabulary_list(
key="is_changepoint", vocabulary_list=["no", "yes"])
# Specify the way this feature is presented to the model, here using a one-hot
# encoding.
one_hot_feature = tf.feature_column.indicator_column(
categorical_column=string_feature)
estimator, batch_size, window_size = estimator_fn(
exogenous_feature_columns=[one_hot_feature])
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
# Indicate the format of our CSV file. First we have two standard columns,
# one for times and one for values. The third column is a custom exogenous
# feature indicating whether each timestep is a changepoint. The
# changepoint feature name must match the string_feature column name
# above.
column_names=(tf.contrib.timeseries.TrainEvalFeatures.TIMES,
tf.contrib.timeseries.TrainEvalFeatures.VALUES,
"is_changepoint"),
# Indicate dtypes for our features.
column_dtypes=(tf.int64, tf.float32, tf.string),
# This CSV has a header line; here we just ignore it.
skip_header_lines=1)
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=batch_size, window_size=window_size)
estimator.train(input_fn=train_input_fn, steps=train_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Create an input_fn for prediction, with a simulated changepoint. Since all
# of the anomalies in the training data are explained by the exogenous
# feature, we should get relatively confident predictions before the indicated
# changepoint (since we are telling the model that no changepoint exists at
# those times) and relatively uncertain predictions after.
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=100,
exogenous_features={
"is_changepoint": [["no"] * 49 + ["yes"] + ["no"] * 50]})))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, 0]
mean = np.squeeze(np.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
variance = np.squeeze(np.concatenate(
[evaluation["covariance"][0], predictions["covariance"]], axis=0))
all_times = np.concatenate([times, predictions["times"]], axis=0)
upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)
# Indicate the locations of the changepoints for plotting vertical lines.
anomaly_locations = []
with open(csv_file_name, "r") as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
if row["is_changepoint"] == "yes":
anomaly_locations.append(int(row["time"]))
anomaly_locations.append(predictions["times"][49])
return (times, observed, all_times, mean, upper_limit, lower_limit,
anomaly_locations)
def make_plot(name, training_times, observed, all_times, mean,
upper_limit, lower_limit, anomaly_locations):
"""Plot the time series and anomalies in a new figure."""
pyplot.figure()
pyplot.plot(training_times, observed, "b", label="training series")
pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.axvline(anomaly_locations[0], linestyle="dotted", label="changepoints")
for anomaly_location in anomaly_locations[1:]:
pyplot.axvline(anomaly_location, linestyle="dotted")
pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
alpha="0.2")
pyplot.axvline(training_times[-1], color="k", linestyle="--")
pyplot.xlabel("time")
pyplot.ylabel("observations")
pyplot.legend(loc=0)
pyplot.title(name)
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
make_plot("Ignoring a known anomaly (state space)",
*train_and_evaluate_exogenous(
estimator_fn=state_space_estimator))
make_plot("Ignoring a known anomaly (autoregressive)",
*train_and_evaluate_exogenous(
estimator_fn=autoregressive_estimator, train_steps=3000))
pyplot.show()
if __name__ == "__main__":
tf.app.run(main=main)
|
tensorflow-master
|
tensorflow/contrib/timeseries/examples/known_anomaly.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A multivariate TFTS example.
Fits a multivariate model, exports it, and visualizes the learned correlations
by iteratively predicting and sampling from the predictions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import tempfile
import numpy
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_level.csv")
def multivariate_train_and_sample(
csv_file_name=_DATA_FILE, export_directory=None, training_steps=500):
"""Trains, evaluates, and exports a multivariate model."""
estimator = tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=[], num_features=5)
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
+ (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5))
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
# Larger window sizes generally produce a better covariance matrix.
reader, batch_size=4, window_size=64)
estimator.train(input_fn=train_input_fn, steps=training_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
current_state = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
values = [current_state["observed"]]
times = [current_state[tf.contrib.timeseries.FilteringResults.TIMES]]
# Export the model so we can do iterative prediction and filtering without
# reloading model checkpoints.
if export_directory is None:
export_directory = tempfile.mkdtemp()
input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
export_location = estimator.export_saved_model(export_directory,
input_receiver_fn)
with tf.Graph().as_default():
numpy.random.seed(1) # Make the example a bit more deterministic
with tf.Session() as session:
signatures = tf.saved_model.loader.load(
session, [tf.saved_model.tag_constants.SERVING], export_location)
for _ in range(100):
current_prediction = (
tf.contrib.timeseries.saved_model_utils.predict_continuation(
continue_from=current_state, signatures=signatures,
session=session, steps=1))
next_sample = numpy.random.multivariate_normal(
# Squeeze out the batch and series length dimensions (both 1).
mean=numpy.squeeze(current_prediction["mean"], axis=(0, 1)),
cov=numpy.squeeze(current_prediction["covariance"], axis=(0, 1)))
# Update model state so that future predictions are conditional on the
# value we just sampled.
filtering_features = {
tf.contrib.timeseries.TrainEvalFeatures.TIMES: current_prediction[
tf.contrib.timeseries.FilteringResults.TIMES],
tf.contrib.timeseries.TrainEvalFeatures.VALUES: next_sample[
None, None, :]}
current_state = (
tf.contrib.timeseries.saved_model_utils.filter_continuation(
continue_from=current_state,
session=session,
signatures=signatures,
features=filtering_features))
values.append(next_sample[None, None, :])
times.append(current_state["times"])
all_observations = numpy.squeeze(numpy.concatenate(values, axis=1), axis=0)
all_times = numpy.squeeze(numpy.concatenate(times, axis=1), axis=0)
return all_times, all_observations
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
all_times, all_observations = multivariate_train_and_sample()
# Show where sampling starts on the plot
pyplot.axvline(1000, linestyle="dotted")
pyplot.plot(all_times, all_observations)
pyplot.show()
if __name__ == "__main__":
tf.app.run(main=main)
|
tensorflow-master
|
tensorflow/contrib/timeseries/examples/multivariate.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A more advanced example, of building an RNN-based time series model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from os import path
import tempfile
import numpy
import tensorflow as tf
from tensorflow.contrib.timeseries.python.timeseries import estimators as ts_estimators
from tensorflow.contrib.timeseries.python.timeseries import model as ts_model
from tensorflow.contrib.timeseries.python.timeseries import state_management
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_periods.csv")
class _LSTMModel(ts_model.SequentialTimeSeriesModel):
"""A time series model-building example using an RNNCell."""
def __init__(self, num_units, num_features, exogenous_feature_columns=None,
dtype=tf.float32):
"""Initialize/configure the model object.
Note that we do not start graph building here. Rather, this object is a
configurable factory for TensorFlow graphs which are run by an Estimator.
Args:
num_units: The number of units in the model's LSTMCell.
num_features: The dimensionality of the time series (features per
timestep).
exogenous_feature_columns: A list of `tf.feature_column`s representing
features which are inputs to the model but are not predicted by
it. These must then be present for training, evaluation, and
prediction.
dtype: The floating point data type to use.
"""
super(_LSTMModel, self).__init__(
# Pre-register the metrics we'll be outputting (just a mean here).
train_output_names=["mean"],
predict_output_names=["mean"],
num_features=num_features,
exogenous_feature_columns=exogenous_feature_columns,
dtype=dtype)
self._num_units = num_units
# Filled in by initialize_graph()
self._lstm_cell = None
self._lstm_cell_run = None
self._predict_from_lstm_output = None
def initialize_graph(self, input_statistics=None):
"""Save templates for components, which can then be used repeatedly.
This method is called every time a new graph is created. It's safe to start
adding ops to the current default graph here, but the graph should be
constructed from scratch.
Args:
input_statistics: A math_utils.InputStatistics object.
"""
super(_LSTMModel, self).initialize_graph(input_statistics=input_statistics)
with tf.variable_scope("", use_resource=True):
# Use ResourceVariables to avoid race conditions.
self._lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=self._num_units)
# Create templates so we don't have to worry about variable reuse.
self._lstm_cell_run = tf.make_template(
name_="lstm_cell",
func_=self._lstm_cell,
create_scope_now_=True)
# Transforms LSTM output into mean predictions.
self._predict_from_lstm_output = tf.make_template(
name_="predict_from_lstm_output",
func_=functools.partial(tf.layers.dense, units=self.num_features),
create_scope_now_=True)
def get_start_state(self):
"""Return initial state for the time series model."""
return (
# Keeps track of the time associated with this state for error checking.
tf.zeros([], dtype=tf.int64),
# The previous observation or prediction.
tf.zeros([self.num_features], dtype=self.dtype),
# The most recently seen exogenous features.
tf.zeros(self._get_exogenous_embedding_shape(), dtype=self.dtype),
# The state of the RNNCell (batch dimension removed since this parent
# class will broadcast).
[tf.squeeze(state_element, axis=0)
for state_element
in self._lstm_cell.zero_state(batch_size=1, dtype=self.dtype)])
def _filtering_step(self, current_times, current_values, state, predictions):
"""Update model state based on observations.
Note that we don't do much here aside from computing a loss. In this case
it's easier to update the RNN state in _prediction_step, since that covers
running the RNN both on observations (from this method) and our own
predictions. This distinction can be important for probabilistic models,
where repeatedly predicting without filtering should lead to low-confidence
predictions.
Args:
current_times: A [batch size] integer Tensor.
current_values: A [batch size, self.num_features] floating point Tensor
with new observations.
state: The model's state tuple.
predictions: The output of the previous `_prediction_step`.
Returns:
A tuple of new state and a predictions dictionary updated to include a
loss (note that we could also return other measures of goodness of fit,
although only "loss" will be optimized).
"""
state_from_time, prediction, exogenous, lstm_state = state
with tf.control_dependencies(
[tf.assert_equal(current_times, state_from_time)]):
# Subtract the mean and divide by the variance of the series. Slightly
# more efficient if done for a whole window (using the normalize_features
# argument to SequentialTimeSeriesModel).
transformed_values = self._scale_data(current_values)
# Use mean squared error across features for the loss.
predictions["loss"] = tf.reduce_mean(
(prediction - transformed_values) ** 2, axis=-1)
# Keep track of the new observation in model state. It won't be run
# through the LSTM until the next _imputation_step.
new_state_tuple = (current_times, transformed_values,
exogenous, lstm_state)
return (new_state_tuple, predictions)
def _prediction_step(self, current_times, state):
"""Advance the RNN state using a previous observation or prediction."""
_, previous_observation_or_prediction, exogenous, lstm_state = state
# Update LSTM state based on the most recent exogenous and endogenous
# features.
inputs = tf.concat([previous_observation_or_prediction, exogenous],
axis=-1)
lstm_output, new_lstm_state = self._lstm_cell_run(
inputs=inputs, state=lstm_state)
next_prediction = self._predict_from_lstm_output(lstm_output)
new_state_tuple = (current_times, next_prediction,
exogenous, new_lstm_state)
return new_state_tuple, {"mean": self._scale_back_data(next_prediction)}
def _imputation_step(self, current_times, state):
"""Advance model state across a gap."""
# Does not do anything special if we're jumping across a gap. More advanced
# models, especially probabilistic ones, would want a special case that
# depends on the gap size.
return state
def _exogenous_input_step(
self, current_times, current_exogenous_regressors, state):
"""Save exogenous regressors in model state for use in _prediction_step."""
state_from_time, prediction, _, lstm_state = state
return (state_from_time, prediction,
current_exogenous_regressors, lstm_state)
def train_and_predict(
csv_file_name=_DATA_FILE, training_steps=200, estimator_config=None,
export_directory=None):
"""Train and predict using a custom time series model."""
# Construct an Estimator from our LSTM model.
categorical_column = tf.feature_column.categorical_column_with_hash_bucket(
key="categorical_exogenous_feature", hash_bucket_size=16)
exogenous_feature_columns = [
# Exogenous features are not part of the loss, but can inform
# predictions. In this example the features have no extra information, but
# are included as an API example.
tf.feature_column.numeric_column(
"2d_exogenous_feature", shape=(2,)),
tf.feature_column.embedding_column(
categorical_column=categorical_column, dimension=10)]
estimator = ts_estimators.TimeSeriesRegressor(
model=_LSTMModel(num_features=5, num_units=128,
exogenous_feature_columns=exogenous_feature_columns),
optimizer=tf.train.AdamOptimizer(0.001), config=estimator_config,
# Set state to be saved across windows.
state_manager=state_management.ChainingStateManager())
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
+ (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5
+ ("2d_exogenous_feature",) * 2
+ ("categorical_exogenous_feature",)),
# Data types other than for `times` need to be specified if they aren't
# float32. In this case one of our exogenous features has string dtype.
column_dtypes=((tf.int64,) + (tf.float32,) * 7 + (tf.string,)))
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=4, window_size=32)
estimator.train(input_fn=train_input_fn, steps=training_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Predict starting after the evaluation
predict_exogenous_features = {
"2d_exogenous_feature": numpy.concatenate(
[numpy.ones([1, 100, 1]), numpy.zeros([1, 100, 1])],
axis=-1),
"categorical_exogenous_feature": numpy.array(
["strkey"] * 100)[None, :, None]}
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=100,
exogenous_features=predict_exogenous_features)))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, :]
predicted_mean = numpy.squeeze(numpy.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
all_times = numpy.concatenate([times, predictions["times"]], axis=0)
# Export the model in SavedModel format. We include a bit of extra boilerplate
# for "cold starting" as if we didn't have any state from the Estimator, which
# is the case when serving from a SavedModel. If Estimator output is
# available, the result of "Estimator.evaluate" can be passed directly to
# `tf.contrib.timeseries.saved_model_utils.predict_continuation` as the
# `continue_from` argument.
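  # A sketch of that warm-start alternative (assuming the `signatures` and
  # `session` from a loaded SavedModel, as in the blocks below):
  #   tf.contrib.timeseries.saved_model_utils.predict_continuation(
  #       continue_from=evaluation, signatures=signatures, session=session,
  #       steps=100, exogenous_features=predict_exogenous_features)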
with tf.Graph().as_default():
filter_feature_tensors, _ = evaluation_input_fn()
with tf.train.MonitoredSession() as session:
# Fetch the series to "warm up" our state, which will allow us to make
# predictions for its future values. This is just a dictionary of times,
# values, and exogenous features mapping to numpy arrays. The use of an
# input_fn is just a convenience for the example; they can also be
# specified manually.
filter_features = session.run(filter_feature_tensors)
if export_directory is None:
export_directory = tempfile.mkdtemp()
input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
export_location = estimator.export_saved_model(export_directory,
input_receiver_fn)
# Warm up and predict using the SavedModel
with tf.Graph().as_default():
with tf.Session() as session:
signatures = tf.saved_model.loader.load(
session, [tf.saved_model.tag_constants.SERVING], export_location)
state = tf.contrib.timeseries.saved_model_utils.cold_start_filter(
signatures=signatures, session=session, features=filter_features)
saved_model_output = (
tf.contrib.timeseries.saved_model_utils.predict_continuation(
continue_from=state, signatures=signatures,
session=session, steps=100,
exogenous_features=predict_exogenous_features))
# The exported model gives the same results as the Estimator.predict()
# call above.
numpy.testing.assert_allclose(
predictions["mean"],
numpy.squeeze(saved_model_output["mean"], axis=0))
return times, observed, all_times, predicted_mean
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
(observed_times, observations,
all_times, predictions) = train_and_predict()
pyplot.axvline(99, linestyle="dotted")
observed_lines = pyplot.plot(
observed_times, observations, label="Observed", color="k")
predicted_lines = pyplot.plot(
all_times, predictions, label="Predicted", color="b")
pyplot.legend(handles=[observed_lines[0], predicted_lines[0]],
loc="upper left")
pyplot.show()
if __name__ == "__main__":
tf.app.run(main=main)
|
tensorflow-master
|
tensorflow/contrib/timeseries/examples/lstm.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that the TensorFlow parts of the LSTM example run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.examples import lstm
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.platform import test
class _SeedRunConfig(estimator_lib.RunConfig):
@property
def tf_random_seed(self):
return 3
class LSTMExampleTest(test.TestCase):
def test_periodicity_learned(self):
(observed_times, observed_values,
all_times, predicted_values) = lstm.train_and_predict(
training_steps=2, estimator_config=_SeedRunConfig(),
export_directory=self.get_temp_dir())
self.assertAllEqual([100], observed_times.shape)
self.assertAllEqual([100, 5], observed_values.shape)
self.assertAllEqual([200], all_times.shape)
self.assertAllEqual([200, 5], predicted_values.shape)
# TODO(allenl): Make the model deterministic so you can check something
# substantive.
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/timeseries/examples/lstm_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Cloud Bigtable Client for TensorFlow.
This contrib package allows TensorFlow to interface directly with Cloud Bigtable
for high-speed data loading.
@@BigtableClient
@@BigtableTable
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.bigtable.python.ops.bigtable_api import BigtableClient
from tensorflow.contrib.bigtable.python.ops.bigtable_api import BigtableTable
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'BigtableClient',
'BigtableTable',
]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/bigtable/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This module contains tests for the bigtable integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/bigtable/python/kernel_tests/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bigtable Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import bigtable
from tensorflow.contrib.bigtable.ops import gen_bigtable_ops
from tensorflow.contrib.bigtable.ops import gen_bigtable_test_ops
from tensorflow.contrib.bigtable.python.ops import bigtable_api
from tensorflow.contrib.util import loader
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.util import compat
_bigtable_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_bigtable_test.so"))
def _ListOfTuplesOfStringsToBytes(values):
return [(compat.as_bytes(i[0]), compat.as_bytes(i[1])) for i in values]
class BigtableOpsTest(test.TestCase):
COMMON_ROW_KEYS = ["r1", "r2", "r3"]
COMMON_VALUES = ["v1", "v2", "v3"]
def setUp(self):
self._client = gen_bigtable_test_ops.bigtable_test_client()
table = gen_bigtable_ops.bigtable_table(self._client, "testtable")
self._table = bigtable.BigtableTable("testtable", None, table)
def _makeSimpleDataset(self):
output_rows = dataset_ops.Dataset.from_tensor_slices(self.COMMON_ROW_KEYS)
output_values = dataset_ops.Dataset.from_tensor_slices(self.COMMON_VALUES)
return dataset_ops.Dataset.zip((output_rows, output_values))
def _writeCommonValues(self, sess):
output_ds = self._makeSimpleDataset()
write_op = self._table.write(output_ds, ["cf1"], ["c1"])
sess.run(write_op)
def runReadKeyTest(self, read_ds):
itr = dataset_ops.make_initializable_iterator(read_ds)
n = itr.get_next()
expected = list(self.COMMON_ROW_KEYS)
expected.reverse()
with self.cached_session() as sess:
self._writeCommonValues(sess)
sess.run(itr.initializer)
for i in range(3):
output = sess.run(n)
want = expected.pop()
self.assertEqual(
compat.as_bytes(want), compat.as_bytes(output),
"Unequal at step %d: want: %s, got: %s" % (i, want, output))
def testReadPrefixKeys(self):
self.runReadKeyTest(self._table.keys_by_prefix_dataset("r"))
def testReadRangeKeys(self):
self.runReadKeyTest(self._table.keys_by_range_dataset("r1", "r4"))
def runScanTest(self, read_ds):
itr = dataset_ops.make_initializable_iterator(read_ds)
n = itr.get_next()
expected_keys = list(self.COMMON_ROW_KEYS)
expected_keys.reverse()
expected_values = list(self.COMMON_VALUES)
expected_values.reverse()
with self.cached_session() as sess:
self._writeCommonValues(sess)
sess.run(itr.initializer)
for i in range(3):
output = sess.run(n)
want = expected_keys.pop()
self.assertEqual(
compat.as_bytes(want), compat.as_bytes(output[0]),
"Unequal keys at step %d: want: %s, got: %s" % (i, want, output[0]))
want = expected_values.pop()
self.assertEqual(
compat.as_bytes(want), compat.as_bytes(output[1]),
"Unequal values at step: %d: want: %s, got: %s" % (i, want,
output[1]))
def testScanPrefixStringCol(self):
self.runScanTest(self._table.scan_prefix("r", cf1="c1"))
def testScanPrefixListCol(self):
self.runScanTest(self._table.scan_prefix("r", cf1=["c1"]))
def testScanPrefixTupleCol(self):
self.runScanTest(self._table.scan_prefix("r", columns=("cf1", "c1")))
def testScanRangeStringCol(self):
self.runScanTest(self._table.scan_range("r1", "r4", cf1="c1"))
def testScanRangeListCol(self):
self.runScanTest(self._table.scan_range("r1", "r4", cf1=["c1"]))
def testScanRangeTupleCol(self):
self.runScanTest(self._table.scan_range("r1", "r4", columns=("cf1", "c1")))
def testLookup(self):
ds = self._table.keys_by_prefix_dataset("r")
ds = ds.apply(self._table.lookup_columns(cf1="c1"))
itr = dataset_ops.make_initializable_iterator(ds)
n = itr.get_next()
expected_keys = list(self.COMMON_ROW_KEYS)
expected_values = list(self.COMMON_VALUES)
expected_tuples = zip(expected_keys, expected_values)
with self.cached_session() as sess:
self._writeCommonValues(sess)
sess.run(itr.initializer)
for i, elem in enumerate(expected_tuples):
output = sess.run(n)
self.assertEqual(
compat.as_bytes(elem[0]), compat.as_bytes(output[0]),
"Unequal keys at step %d: want: %s, got: %s" %
(i, compat.as_bytes(elem[0]), compat.as_bytes(output[0])))
self.assertEqual(
compat.as_bytes(elem[1]), compat.as_bytes(output[1]),
"Unequal values at step %d: want: %s, got: %s" %
(i, compat.as_bytes(elem[1]), compat.as_bytes(output[1])))
def testSampleKeys(self):
ds = self._table.sample_keys()
itr = dataset_ops.make_initializable_iterator(ds)
n = itr.get_next()
expected_key = self.COMMON_ROW_KEYS[0]
with self.cached_session() as sess:
self._writeCommonValues(sess)
sess.run(itr.initializer)
output = sess.run(n)
self.assertEqual(
compat.as_bytes(self.COMMON_ROW_KEYS[0]), compat.as_bytes(output),
"Unequal keys: want: %s, got: %s" % (compat.as_bytes(
self.COMMON_ROW_KEYS[0]), compat.as_bytes(output)))
output = sess.run(n)
self.assertEqual(
compat.as_bytes(self.COMMON_ROW_KEYS[2]), compat.as_bytes(output),
"Unequal keys: want: %s, got: %s" % (compat.as_bytes(
self.COMMON_ROW_KEYS[2]), compat.as_bytes(output)))
with self.assertRaises(errors.OutOfRangeError):
sess.run(n)
def runSampleKeyPairsTest(self, ds, expected_key_pairs):
itr = dataset_ops.make_initializable_iterator(ds)
n = itr.get_next()
with self.cached_session() as sess:
self._writeCommonValues(sess)
sess.run(itr.initializer)
for i, elems in enumerate(expected_key_pairs):
output = sess.run(n)
self.assertEqual(
compat.as_bytes(elems[0]), compat.as_bytes(output[0]),
"Unequal key pair (first element) at step %d; want: %s, got %s" %
(i, compat.as_bytes(elems[0]), compat.as_bytes(output[0])))
self.assertEqual(
compat.as_bytes(elems[1]), compat.as_bytes(output[1]),
"Unequal key pair (second element) at step %d; want: %s, got %s" %
(i, compat.as_bytes(elems[1]), compat.as_bytes(output[1])))
with self.assertRaises(errors.OutOfRangeError):
sess.run(n)
def testSampleKeyPairsSimplePrefix(self):
ds = bigtable_api._BigtableSampleKeyPairsDataset(
self._table, prefix="r", start="", end="")
expected_key_pairs = [("r", "r1"), ("r1", "r3"), ("r3", "s")]
self.runSampleKeyPairsTest(ds, expected_key_pairs)
def testSampleKeyPairsSimpleRange(self):
ds = bigtable_api._BigtableSampleKeyPairsDataset(
self._table, prefix="", start="r1", end="r3")
expected_key_pairs = [("r1", "r3")]
self.runSampleKeyPairsTest(ds, expected_key_pairs)
def testSampleKeyPairsSkipRangePrefix(self):
ds = bigtable_api._BigtableSampleKeyPairsDataset(
self._table, prefix="r2", start="", end="")
expected_key_pairs = [("r2", "r3")]
self.runSampleKeyPairsTest(ds, expected_key_pairs)
def testSampleKeyPairsSkipRangeRange(self):
ds = bigtable_api._BigtableSampleKeyPairsDataset(
self._table, prefix="", start="r2", end="r3")
expected_key_pairs = [("r2", "r3")]
self.runSampleKeyPairsTest(ds, expected_key_pairs)
def testSampleKeyPairsOffsetRanges(self):
ds = bigtable_api._BigtableSampleKeyPairsDataset(
self._table, prefix="", start="r2", end="r4")
expected_key_pairs = [("r2", "r3"), ("r3", "r4")]
self.runSampleKeyPairsTest(ds, expected_key_pairs)
def testSampleKeyPairEverything(self):
ds = bigtable_api._BigtableSampleKeyPairsDataset(
self._table, prefix="", start="", end="")
expected_key_pairs = [("", "r1"), ("r1", "r3"), ("r3", "")]
self.runSampleKeyPairsTest(ds, expected_key_pairs)
def testSampleKeyPairsPrefixAndStartKey(self):
ds = bigtable_api._BigtableSampleKeyPairsDataset(
self._table, prefix="r", start="r1", end="")
itr = dataset_ops.make_initializable_iterator(ds)
with self.cached_session() as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(itr.initializer)
def testSampleKeyPairsPrefixAndEndKey(self):
ds = bigtable_api._BigtableSampleKeyPairsDataset(
self._table, prefix="r", start="", end="r3")
itr = dataset_ops.make_initializable_iterator(ds)
with self.cached_session() as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(itr.initializer)
def testParallelScanPrefix(self):
ds = self._table.parallel_scan_prefix(prefix="r", cf1="c1")
itr = dataset_ops.make_initializable_iterator(ds)
n = itr.get_next()
with self.cached_session() as sess:
self._writeCommonValues(sess)
sess.run(itr.initializer)
expected_values = list(zip(self.COMMON_ROW_KEYS, self.COMMON_VALUES))
actual_values = []
for _ in range(len(expected_values)):
output = sess.run(n)
actual_values.append(output)
with self.assertRaises(errors.OutOfRangeError):
sess.run(n)
self.assertItemsEqual(
_ListOfTuplesOfStringsToBytes(expected_values),
_ListOfTuplesOfStringsToBytes(actual_values))
def testParallelScanRange(self):
ds = self._table.parallel_scan_range(start="r1", end="r4", cf1="c1")
itr = dataset_ops.make_initializable_iterator(ds)
n = itr.get_next()
with self.cached_session() as sess:
self._writeCommonValues(sess)
sess.run(itr.initializer)
expected_values = list(zip(self.COMMON_ROW_KEYS, self.COMMON_VALUES))
actual_values = []
for _ in range(len(expected_values)):
output = sess.run(n)
actual_values.append(output)
with self.assertRaises(errors.OutOfRangeError):
sess.run(n)
self.assertItemsEqual(
_ListOfTuplesOfStringsToBytes(expected_values),
_ListOfTuplesOfStringsToBytes(actual_values))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/bigtable/python/kernel_tests/bigtable_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This module contains the Python API for the Cloud Bigtable integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/bigtable/python/ops/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Python API for TensorFlow's Cloud Bigtable integration.
TensorFlow has support for reading from and writing to Cloud Bigtable. To use
TensorFlow + Cloud Bigtable integration, first create a BigtableClient to
configure your connection to Cloud Bigtable, and then create a BigtableTable
object to allow you to create numerous `tf.data.Dataset`s to read data, or
write a `tf.data.Dataset` object to the underlying Cloud Bigtable table.
For background on Cloud Bigtable, see: https://cloud.google.com/bigtable .
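A minimal sketch of that workflow (the project, instance, table, and column
names below are placeholders):
```python
from tensorflow.contrib.bigtable import BigtableClient
client = BigtableClient(project_id="my-project", instance_id="my-instance")
table = client.table("my-table")
# A `tf.data.Dataset` of (row key, cell value) string pairs, reading column
# "c1" from column family "cf1" for every row key starting with "train_".
dataset = table.scan_prefix("train_", cf1="c1")
```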
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import iteritems
from six import string_types
from tensorflow.contrib.bigtable.ops import gen_bigtable_ops
from tensorflow.contrib.util import loader
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import resource_loader
_bigtable_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_bigtable.so"))
class BigtableClient(object):
"""BigtableClient is the entrypoint for interacting with Cloud Bigtable in TF.
BigtableClient encapsulates a connection to Cloud Bigtable, and exposes the
`table` method to open a Bigtable table.
"""
def __init__(self,
project_id,
instance_id,
connection_pool_size=None,
max_receive_message_size=None):
"""Creates a BigtableClient that can be used to open connections to tables.
Args:
project_id: A string representing the GCP project id to connect to.
instance_id: A string representing the Bigtable instance to connect to.
      connection_pool_size: (Optional.) The number of concurrent connections
        to make to the Cloud Bigtable service.
max_receive_message_size: (Optional.) The maximum bytes received in a
single gRPC response.
Raises:
      ValueError: If the arguments are invalid (e.g. wrong type, or out of
        expected ranges, such as a negative value).
"""
if not isinstance(project_id, str):
raise ValueError("`project_id` must be a string")
self._project_id = project_id
if not isinstance(instance_id, str):
raise ValueError("`instance_id` must be a string")
self._instance_id = instance_id
if connection_pool_size is None:
connection_pool_size = -1
elif connection_pool_size < 1:
raise ValueError("`connection_pool_size` must be positive")
if max_receive_message_size is None:
max_receive_message_size = -1
elif max_receive_message_size < 1:
raise ValueError("`max_receive_message_size` must be positive")
self._connection_pool_size = connection_pool_size
self._resource = gen_bigtable_ops.bigtable_client(
project_id, instance_id, connection_pool_size, max_receive_message_size)
def table(self, name, snapshot=None):
"""Opens a table and returns a `tf.contrib.bigtable.BigtableTable` object.
Args:
name: A `tf.string` `tf.Tensor` name of the table to open.
snapshot: Either a `tf.string` `tf.Tensor` snapshot id, or `True` to
request the creation of a snapshot. (Note: currently unimplemented.)
Returns:
A `tf.contrib.bigtable.BigtableTable` Python object representing the
operations available on the table.
"""
# TODO(saeta): Implement snapshot functionality.
table = gen_bigtable_ops.bigtable_table(self._resource, name)
return BigtableTable(name, snapshot, table)
class BigtableTable(object):
"""Entry point for reading and writing data in Cloud Bigtable.
This BigtableTable class is the Python representation of the Cloud Bigtable
  table within TensorFlow. Methods on this class allow data to be read from and
  written to the Cloud Bigtable service in a flexible and high-performance
  manner.
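  A minimal write sketch (assuming `table` is an open `BigtableTable`,
  `kv_dataset` is a `tf.data.Dataset` of (row key, value) string pairs, and the
  column family "cf1" and qualifier "c1" are placeholders):
  ```python
  write_op = table.write(kv_dataset, ["cf1"], ["c1"])
  with tf.Session() as sess:
    sess.run(write_op)
  ```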
"""
# TODO(saeta): Investigate implementing tf.contrib.lookup.LookupInterface.
# TODO(saeta): Consider variant tensors instead of resources (while supporting
# connection pooling).
def __init__(self, name, snapshot, resource):
self._name = name
self._snapshot = snapshot
self._resource = resource
def lookup_columns(self, *args, **kwargs):
"""Retrieves the values of columns for a dataset of keys.
Example usage:
```python
table = bigtable_client.table("my_table")
    key_dataset = table.keys_by_prefix_dataset("imagenet")
images = key_dataset.apply(table.lookup_columns(("cf1", "image"),
("cf2", "label"),
("cf2", "boundingbox")))
training_data = images.map(parse_and_crop, num_parallel_calls=64).batch(128)
```
Alternatively, you can use keyword arguments to specify the columns to
capture. Example (same as above, rewritten):
```python
table = bigtable_client.table("my_table")
    key_dataset = table.keys_by_prefix_dataset("imagenet")
images = key_dataset.apply(table.lookup_columns(
cf1="image", cf2=("label", "boundingbox")))
training_data = images.map(parse_and_crop, num_parallel_calls=64).batch(128)
```
Note: certain `kwargs` keys are reserved, and thus, some column families
cannot be identified using the `kwargs` syntax. Instead, please use the
`args` syntax. This list includes:
- 'name'
Note: this list can change at any time.
Args:
*args: A list of tuples containing (column family, column name) pairs.
**kwargs: Column families (keys) and column qualifiers (values).
Returns:
A function that can be passed to `tf.data.Dataset.apply` to retrieve the
values of columns for the rows.
"""
table = self # Capture self
normalized = args
if normalized is None:
normalized = []
if isinstance(normalized, tuple):
normalized = list(normalized)
for key, value in iteritems(kwargs):
if key == "name":
continue
if isinstance(value, str):
normalized.append((key, value))
continue
for col in value:
normalized.append((key, col))
def _apply_fn(dataset):
# TODO(saeta): Verify dataset's types are correct!
return _BigtableLookupDataset(dataset, table, normalized)
return _apply_fn
def keys_by_range_dataset(self, start, end):
"""Retrieves all row keys between start and end.
Note: it does NOT retrieve the values of columns.
Args:
start: The start row key. The row keys for rows after start (inclusive)
will be retrieved.
end: (Optional.) The end row key. Rows up to (but not including) end will
be retrieved. If end is None, all subsequent row keys will be retrieved.
Returns:
A `tf.data.Dataset` containing `tf.string` Tensors corresponding to all
of the row keys between `start` and `end`.
"""
# TODO(saeta): Make inclusive / exclusive configurable?
if end is None:
end = ""
return _BigtableRangeKeyDataset(self, start, end)
def keys_by_prefix_dataset(self, prefix):
"""Retrieves the row keys matching a given prefix.
Args:
prefix: All row keys that begin with `prefix` in the table will be
retrieved.
Returns:
      A `tf.data.Dataset` containing `tf.string` Tensors corresponding to all
of the row keys matching that prefix.
"""
return _BigtablePrefixKeyDataset(self, prefix)
def sample_keys(self):
"""Retrieves a sampling of row keys from the Bigtable table.
This dataset is most often used in conjunction with
`tf.data.experimental.parallel_interleave` to construct a set of ranges for
scanning in parallel.
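    A minimal sketch of inspecting the sampled keys directly (assuming a TF 1.x
    session-based workflow with `import tensorflow as tf` and an already-opened
    `table`):
    ```python
    keys = table.sample_keys()
    iterator = keys.make_initializable_iterator()
    next_key = iterator.get_next()
    with tf.Session() as sess:
      sess.run(iterator.initializer)
      try:
        while True:
          print(sess.run(next_key))
      except tf.errors.OutOfRangeError:
        pass
    ```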
Returns:
A `tf.data.Dataset` returning string row keys.
"""
return _BigtableSampleKeysDataset(self)
def scan_prefix(self, prefix, probability=None, columns=None, **kwargs):
"""Retrieves row (including values) from the Bigtable service.
Rows with row-key prefixed by `prefix` will be retrieved.
Specifying the columns to retrieve for each row is done by either using
kwargs or in the columns parameter. To retrieve values of the columns "c1",
and "c2" from the column family "cfa", and the value of the column "c3"
from column family "cfb", the following datasets (`ds1`, and `ds2`) are
equivalent:
```
table = # ...
ds1 = table.scan_prefix("row_prefix", columns=[("cfa", "c1"),
("cfa", "c2"),
("cfb", "c3")])
ds2 = table.scan_prefix("row_prefix", cfa=["c1", "c2"], cfb="c3")
```
Note: only the latest value of a cell will be retrieved.
Args:
prefix: The prefix all row keys must match to be retrieved for prefix-
based scans.
probability: (Optional.) A float between 0 (exclusive) and 1 (inclusive).
        A value less than 1 causes rows to be sampled with the provided
        probability.
columns: The columns to read. Note: most commonly, they are expressed as
kwargs. Use the columns value if you are using column families that are
reserved. The value of columns and kwargs are merged. Columns is a list
of tuples of strings ("column_family", "column_qualifier").
**kwargs: The column families and columns to read. Keys are treated as
column_families, and values can be either lists of strings, or strings
that are treated as the column qualifier (column name).
Returns:
A `tf.data.Dataset` returning the row keys and the cell contents.
Raises:
ValueError: If the configured probability is unexpected.
"""
probability = _normalize_probability(probability)
normalized = _normalize_columns(columns, kwargs)
return _BigtableScanDataset(self, prefix, "", "", normalized, probability)
def scan_range(self, start, end, probability=None, columns=None, **kwargs):
"""Retrieves rows (including values) from the Bigtable service.
Rows with row-keys between `start` and `end` will be retrieved.
Specifying the columns to retrieve for each row is done by either using
kwargs or in the columns parameter. To retrieve values of the columns "c1",
and "c2" from the column family "cfa", and the value of the column "c3"
from column family "cfb", the following datasets (`ds1`, and `ds2`) are
equivalent:
```
table = # ...
ds1 = table.scan_range("row_start", "row_end", columns=[("cfa", "c1"),
("cfa", "c2"),
("cfb", "c3")])
ds2 = table.scan_range("row_start", "row_end", cfa=["c1", "c2"], cfb="c3")
```
Note: only the latest value of a cell will be retrieved.
Args:
start: The start of the range when scanning by range.
end: (Optional.) The end of the range when scanning by range.
probability: (Optional.) A float between 0 (exclusive) and 1 (inclusive).
        A value less than 1 causes rows to be sampled with the provided
        probability.
columns: The columns to read. Note: most commonly, they are expressed as
kwargs. Use the columns value if you are using column families that are
reserved. The value of columns and kwargs are merged. Columns is a list
of tuples of strings ("column_family", "column_qualifier").
**kwargs: The column families and columns to read. Keys are treated as
column_families, and values can be either lists of strings, or strings
that are treated as the column qualifier (column name).
Returns:
A `tf.data.Dataset` returning the row keys and the cell contents.
Raises:
ValueError: If the configured probability is unexpected.
"""
probability = _normalize_probability(probability)
normalized = _normalize_columns(columns, kwargs)
return _BigtableScanDataset(self, "", start, end, normalized, probability)
def parallel_scan_prefix(self,
prefix,
num_parallel_scans=None,
probability=None,
columns=None,
**kwargs):
"""Retrieves row (including values) from the Bigtable service at high speed.
Rows with row-key prefixed by `prefix` will be retrieved. This method is
similar to `scan_prefix`, but by contrast performs multiple sub-scans in
parallel in order to achieve higher performance.
Note: The dataset produced by this method is not deterministic!
Specifying the columns to retrieve for each row is done by either using
kwargs or in the columns parameter. To retrieve values of the columns "c1",
and "c2" from the column family "cfa", and the value of the column "c3"
from column family "cfb", the following datasets (`ds1`, and `ds2`) are
equivalent:
```
    table = ...  # A BigtableTable, e.g. obtained from a BigtableClient.
ds1 = table.parallel_scan_prefix("row_prefix", columns=[("cfa", "c1"),
("cfa", "c2"),
("cfb", "c3")])
ds2 = table.parallel_scan_prefix("row_prefix", cfa=["c1", "c2"], cfb="c3")
```
Note: only the latest value of a cell will be retrieved.
Args:
prefix: The prefix all row keys must match to be retrieved for prefix-
based scans.
num_parallel_scans: (Optional.) The number of concurrent scans against the
Cloud Bigtable instance.
      probability: (Optional.) A float between 0 (exclusive) and 1 (inclusive).
        Values less than 1 cause rows to be sampled (kept) with the given
        probability.
      columns: The columns to read. Note: most commonly, columns are expressed
        as kwargs. Use the `columns` parameter for column family names that are
        reserved (e.g. Python keywords) and therefore cannot be passed as
        kwargs. The values of `columns` and the kwargs are merged. `columns` is
        a list of ("column_family", "column_qualifier") string tuples.
**kwargs: The column families and columns to read. Keys are treated as
column_families, and values can be either lists of strings, or strings
that are treated as the column qualifier (column name).
Returns:
A `tf.data.Dataset` returning the row keys and the cell contents.
Raises:
      ValueError: If the provided probability is not in the range (0, 1].
"""
probability = _normalize_probability(probability)
normalized = _normalize_columns(columns, kwargs)
ds = _BigtableSampleKeyPairsDataset(self, prefix, "", "")
return self._make_parallel_scan_dataset(ds, num_parallel_scans, probability,
normalized)
def parallel_scan_range(self,
start,
end,
num_parallel_scans=None,
probability=None,
columns=None,
**kwargs):
"""Retrieves rows (including values) from the Bigtable service.
Rows with row-keys between `start` and `end` will be retrieved. This method
is similar to `scan_range`, but by contrast performs multiple sub-scans in
parallel in order to achieve higher performance.
Note: The dataset produced by this method is not deterministic!
Specifying the columns to retrieve for each row is done by either using
kwargs or in the columns parameter. To retrieve values of the columns "c1",
and "c2" from the column family "cfa", and the value of the column "c3"
from column family "cfb", the following datasets (`ds1`, and `ds2`) are
equivalent:
```
    table = ...  # A BigtableTable, e.g. obtained from a BigtableClient.
ds1 = table.parallel_scan_range("row_start",
"row_end",
columns=[("cfa", "c1"),
("cfa", "c2"),
("cfb", "c3")])
ds2 = table.parallel_scan_range("row_start", "row_end",
cfa=["c1", "c2"], cfb="c3")
```
Note: only the latest value of a cell will be retrieved.
Args:
start: The start of the range when scanning by range.
end: (Optional.) The end of the range when scanning by range.
num_parallel_scans: (Optional.) The number of concurrent scans against the
Cloud Bigtable instance.
      probability: (Optional.) A float between 0 (exclusive) and 1 (inclusive).
        Values less than 1 cause rows to be sampled (kept) with the given
        probability.
      columns: The columns to read. Note: most commonly, columns are expressed
        as kwargs. Use the `columns` parameter for column family names that are
        reserved (e.g. Python keywords) and therefore cannot be passed as
        kwargs. The values of `columns` and the kwargs are merged. `columns` is
        a list of ("column_family", "column_qualifier") string tuples.
**kwargs: The column families and columns to read. Keys are treated as
column_families, and values can be either lists of strings, or strings
that are treated as the column qualifier (column name).
Returns:
A `tf.data.Dataset` returning the row keys and the cell contents.
Raises:
      ValueError: If the provided probability is not in the range (0, 1].
"""
probability = _normalize_probability(probability)
normalized = _normalize_columns(columns, kwargs)
ds = _BigtableSampleKeyPairsDataset(self, "", start, end)
return self._make_parallel_scan_dataset(ds, num_parallel_scans, probability,
normalized)
def write(self, dataset, column_families, columns, timestamp=None):
"""Writes a dataset to the table.
Args:
      dataset: A `tf.data.Dataset` to be written to this table. It must produce
        a list of `len(columns) + 1` elements, all of which must be strings.
        The first value is used as the row key, and each subsequent value is
        used as the cell value for the corresponding entry in `column_families`
        and `columns`.
column_families: A `tf.Tensor` of `tf.string`s corresponding to the
column names to store the dataset's elements into.
columns: A `tf.Tensor` of `tf.string`s corresponding to the column names
to store the dataset's elements into.
timestamp: (Optional.) An int64 timestamp to write all the values at.
Leave as None to use server-provided timestamps.
Returns:
A `tf.Operation` that can be run to perform the write.
Raises:
ValueError: If there are unexpected or incompatible types, or if the
number of columns and column_families does not match the output of
`dataset`.
"""
if timestamp is None:
timestamp = -1 # Bigtable server provided timestamp.
for tensor_type in nest.flatten(
dataset_ops.get_legacy_output_types(dataset)):
if tensor_type != dtypes.string:
raise ValueError("Not all elements of the dataset were `tf.string`")
for shape in nest.flatten(dataset_ops.get_legacy_output_shapes(dataset)):
if not shape.is_compatible_with(tensor_shape.scalar()):
raise ValueError("Not all elements of the dataset were scalars")
if len(column_families) != len(columns):
raise ValueError("len(column_families) != len(columns)")
if len(nest.flatten(
dataset_ops.get_legacy_output_types(dataset))) != len(columns) + 1:
raise ValueError("A column name must be specified for every component of "
"the dataset elements. (e.g.: len(columns) != "
"len(dataset.output_types))")
return gen_bigtable_ops.dataset_to_bigtable(
self._resource,
dataset._variant_tensor, # pylint: disable=protected-access
column_families,
columns,
timestamp)
def _make_parallel_scan_dataset(self, ds, num_parallel_scans,
normalized_probability, normalized_columns):
"""Builds a parallel dataset from a given range.
Args:
ds: A `_BigtableSampleKeyPairsDataset` returning ranges of keys to use.
num_parallel_scans: The number of concurrent parallel scans to use.
normalized_probability: A number between 0 and 1 for the keep probability.
normalized_columns: The column families and column qualifiers to retrieve.
Returns:
A `tf.data.Dataset` representing the result of the parallel scan.
"""
if num_parallel_scans is None:
num_parallel_scans = 50
ds = ds.shuffle(buffer_size=10000) # TODO(saeta): Make configurable.
def _interleave_fn(start, end):
return _BigtableScanDataset(
self,
prefix="",
start=start,
end=end,
normalized=normalized_columns,
probability=normalized_probability)
    # Note: prefetch_input_elements must be set in order to avoid RPC timeouts.
ds = ds.apply(
interleave_ops.parallel_interleave(
_interleave_fn,
cycle_length=num_parallel_scans,
sloppy=True,
prefetch_input_elements=1))
return ds
def _normalize_probability(probability):
if probability is None:
probability = 1.0
if isinstance(probability, float) and (probability <= 0.0 or
probability > 1.0):
raise ValueError("probability must be in the range (0, 1].")
return probability
def _normalize_columns(columns, provided_kwargs):
"""Converts arguments (columns, and kwargs dict) to C++ representation.
Args:
columns: a datastructure containing the column families and qualifier to
retrieve. Valid types include (1) None, (2) list of tuples, (3) a tuple of
strings.
provided_kwargs: a dictionary containing the column families and qualifiers
to retrieve
Returns:
A list of pairs of column family+qualifier to retrieve.
Raises:
ValueError: If there are no cells to retrieve or the columns are in an
incorrect format.
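  For illustration, a minimal sketch of the normalization (the family and
  qualifier names are placeholders):

  ```
  _normalize_columns([("cfa", "c1")], {"cfb": ["c2", "c3"]})
  # -> [("cfa", "c1"), ("cfb", "c2"), ("cfb", "c3")]
  ```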
"""
normalized = columns
if normalized is None:
normalized = []
if isinstance(normalized, tuple):
if len(normalized) == 2:
normalized = [normalized]
else:
raise ValueError("columns was a tuple of inappropriate length")
for key, value in iteritems(provided_kwargs):
if key == "name":
continue
if isinstance(value, string_types):
normalized.append((key, value))
continue
for col in value:
normalized.append((key, col))
if not normalized:
raise ValueError("At least one column + column family must be specified.")
return normalized
class _BigtableKeyDataset(dataset_ops.DatasetSource):
"""_BigtableKeyDataset is an abstract class representing the keys of a table.
"""
def __init__(self, table, variant_tensor):
"""Constructs a _BigtableKeyDataset.
Args:
table: a Bigtable class.
variant_tensor: DT_VARIANT representation of the dataset.
"""
super(_BigtableKeyDataset, self).__init__(variant_tensor)
self._table = table
@property
def _element_structure(self):
return structure.TensorStructure(dtypes.string, [])
class _BigtablePrefixKeyDataset(_BigtableKeyDataset):
"""_BigtablePrefixKeyDataset represents looking up keys by prefix.
"""
def __init__(self, table, prefix):
self._prefix = prefix
variant_tensor = gen_bigtable_ops.bigtable_prefix_key_dataset(
table=table._resource, # pylint: disable=protected-access
prefix=self._prefix)
super(_BigtablePrefixKeyDataset, self).__init__(table, variant_tensor)
class _BigtableRangeKeyDataset(_BigtableKeyDataset):
"""_BigtableRangeKeyDataset represents looking up keys by range.
"""
def __init__(self, table, start, end):
self._start = start
self._end = end
variant_tensor = gen_bigtable_ops.bigtable_range_key_dataset(
table=table._resource, # pylint: disable=protected-access
start_key=self._start,
end_key=self._end)
super(_BigtableRangeKeyDataset, self).__init__(table, variant_tensor)
class _BigtableSampleKeysDataset(_BigtableKeyDataset):
"""_BigtableSampleKeysDataset represents a sampling of row keys.
"""
# TODO(saeta): Expose the data size offsets into the keys.
def __init__(self, table):
variant_tensor = gen_bigtable_ops.bigtable_sample_keys_dataset(
table=table._resource) # pylint: disable=protected-access
super(_BigtableSampleKeysDataset, self).__init__(table, variant_tensor)
class _BigtableLookupDataset(dataset_ops.DatasetSource):
"""_BigtableLookupDataset represents a dataset that retrieves values for keys.
"""
def __init__(self, dataset, table, normalized):
self._num_outputs = len(normalized) + 1 # 1 for row key
self._dataset = dataset
self._table = table
self._normalized = normalized
self._column_families = [i[0] for i in normalized]
self._columns = [i[1] for i in normalized]
variant_tensor = gen_bigtable_ops.bigtable_lookup_dataset(
keys_dataset=self._dataset._variant_tensor, # pylint: disable=protected-access
table=self._table._resource, # pylint: disable=protected-access
column_families=self._column_families,
columns=self._columns)
super(_BigtableLookupDataset, self).__init__(variant_tensor)
@property
def _element_structure(self):
return structure.NestedStructure(tuple(
[structure.TensorStructure(dtypes.string, [])] * self._num_outputs))
class _BigtableScanDataset(dataset_ops.DatasetSource):
"""_BigtableScanDataset represents a dataset that retrieves keys and values.
"""
def __init__(self, table, prefix, start, end, normalized, probability):
self._table = table
self._prefix = prefix
self._start = start
self._end = end
self._column_families = [i[0] for i in normalized]
self._columns = [i[1] for i in normalized]
self._probability = probability
self._num_outputs = len(normalized) + 1 # 1 for row key
variant_tensor = gen_bigtable_ops.bigtable_scan_dataset(
table=self._table._resource, # pylint: disable=protected-access
prefix=self._prefix,
start_key=self._start,
end_key=self._end,
column_families=self._column_families,
columns=self._columns,
probability=self._probability)
super(_BigtableScanDataset, self).__init__(variant_tensor)
@property
def _element_structure(self):
return structure.NestedStructure(
tuple(
[structure.TensorStructure(dtypes.string, [])] * self._num_outputs))
class _BigtableSampleKeyPairsDataset(dataset_ops.DatasetSource):
"""_BigtableSampleKeyPairsDataset returns key pairs from a Bigtable table.
"""
def __init__(self, table, prefix, start, end):
self._table = table
self._prefix = prefix
self._start = start
self._end = end
variant_tensor = gen_bigtable_ops.bigtable_sample_key_pairs_dataset(
table=self._table._resource, # pylint: disable=protected-access
prefix=self._prefix,
start_key=self._start,
end_key=self._end)
super(_BigtableSampleKeyPairsDataset, self).__init__(variant_tensor)
@property
def _element_structure(self):
return structure.NestedStructure(
(structure.TensorStructure(dtypes.string, []),
structure.TensorStructure(dtypes.string, [])))
|
tensorflow-master
|
tensorflow/contrib/bigtable/python/ops/bigtable_api.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel contrib support.
SavedModel provides a language-neutral format to save machine-learned models
that is recoverable and hermetic. It enables higher-level systems and tools to
produce, consume and transform TensorFlow models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,line-too-long
from tensorflow.contrib.saved_model.python.saved_model.keras_saved_model import *
# pylint: enable=unused-import,wildcard-import,line-too-long
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
"load_keras_model",
"save_keras_model"]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/saved_model/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel contrib support.
SavedModel provides a language-neutral format to save machine-learned models
that is recoverable and hermetic. It enables higher-level systems and tools to
produce, consume and transform TensorFlow models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.saved_model.python.saved_model import *
# pylint: enable=wildcard-import
|
tensorflow-master
|
tensorflow/contrib/saved_model/python/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Utility functions to save/load keras Model to/from SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import saving
# TODO(kathywu): Remove all contrib callers, switch to tf.keras.
save_keras_model = saving.export_saved_model
load_keras_model = saving.load_from_saved_model
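# Example usage (a sketch; `model` and the path below are hypothetical):
#
#   model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
#   model.compile(optimizer="sgd", loss="mse")
#   save_keras_model(model, "/tmp/keras_saved_model")
#   restored_model = load_keras_model("/tmp/keras_saved_model")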
|
tensorflow-master
|
tensorflow/contrib/saved_model/python/saved_model/keras_saved_model.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel contrib support.
SavedModel provides a language-neutral format to save machine-learned models
that is recoverable and hermetic. It enables higher-level systems and tools to
produce, consume and transform TensorFlow models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.saved_model.python.saved_model import keras_saved_model
# pylint: enable=wildcard-import
|
tensorflow-master
|
tensorflow/contrib/saved_model/python/saved_model/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SavedModel Reader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.saved_model.python.saved_model import reader
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
def tearDownModule():
file_io.delete_recursively(test.get_temp_dir())
class ReaderTest(test.TestCase):
def _init_and_validate_variable(self, sess, variable_name, variable_value):
v = variables.Variable(variable_value, name=variable_name)
sess.run(variables.global_variables_initializer())
self.assertEqual(variable_value, v.eval())
def testReadSavedModelValid(self):
saved_model_dir = os.path.join(test.get_temp_dir(), "valid_saved_model")
builder = saved_model_builder.SavedModelBuilder(saved_model_dir)
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING])
builder.save()
actual_saved_model_pb = reader.read_saved_model(saved_model_dir)
self.assertEqual(len(actual_saved_model_pb.meta_graphs), 1)
self.assertEqual(
len(actual_saved_model_pb.meta_graphs[0].meta_info_def.tags), 1)
self.assertEqual(actual_saved_model_pb.meta_graphs[0].meta_info_def.tags[0],
tag_constants.TRAINING)
def testReadSavedModelInvalid(self):
saved_model_dir = os.path.join(test.get_temp_dir(), "invalid_saved_model")
with self.assertRaisesRegexp(
IOError, "SavedModel file does not exist at: %s" % saved_model_dir):
reader.read_saved_model(saved_model_dir)
def testGetSavedModelTagSets(self):
saved_model_dir = os.path.join(test.get_temp_dir(), "test_tags")
builder = saved_model_builder.SavedModelBuilder(saved_model_dir)
# Graph with a single variable. SavedModel invoked to:
# - add with weights.
# - a single tag (from predefined constants).
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING])
# Graph that updates the single variable. SavedModel invoked to:
# - simply add the model (weights are not updated).
# - a single tag (from predefined constants).
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 43)
builder.add_meta_graph([tag_constants.SERVING])
# Graph that updates the single variable. SavedModel is invoked:
# - to add the model (weights are not updated).
# - multiple predefined tags.
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 44)
builder.add_meta_graph([tag_constants.SERVING, tag_constants.GPU])
# Graph that updates the single variable. SavedModel is invoked:
# - to add the model (weights are not updated).
# - multiple predefined tags for serving on TPU.
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 44)
builder.add_meta_graph([tag_constants.SERVING, tag_constants.TPU])
# Graph that updates the single variable. SavedModel is invoked:
# - to add the model (weights are not updated).
# - multiple custom tags.
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 45)
builder.add_meta_graph(["foo", "bar"])
# Save the SavedModel to disk.
builder.save()
actual_tags = reader.get_saved_model_tag_sets(saved_model_dir)
expected_tags = [["train"], ["serve"], ["serve", "gpu"], ["serve", "tpu"],
["foo", "bar"]]
self.assertEqual(expected_tags, actual_tags)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/saved_model/python/saved_model/reader_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel functionality to read a SavedModel from disk."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf import message
from google.protobuf import text_format
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.python.lib.io import file_io
from tensorflow.python.saved_model import constants
from tensorflow.python.util import compat
def read_saved_model(saved_model_dir):
"""Reads the savedmodel.pb or savedmodel.pbtxt file containing `SavedModel`.
Args:
saved_model_dir: Directory containing the SavedModel file.
Returns:
A `SavedModel` protocol buffer.
Raises:
IOError: If the file does not exist, or cannot be successfully parsed.
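  For example, a minimal sketch (the directory path is a placeholder):

  ```
  saved_model = read_saved_model("/tmp/exported_model")
  for meta_graph_def in saved_model.meta_graphs:
    print(meta_graph_def.meta_info_def.tags)
  ```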
"""
# Build the path to the SavedModel in pbtxt format.
path_to_pbtxt = os.path.join(
compat.as_bytes(saved_model_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))
# Build the path to the SavedModel in pb format.
path_to_pb = os.path.join(
compat.as_bytes(saved_model_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
# Ensure that the SavedModel exists at either path.
if not file_io.file_exists(path_to_pbtxt) and not file_io.file_exists(
path_to_pb):
raise IOError("SavedModel file does not exist at: %s" % saved_model_dir)
# Parse the SavedModel protocol buffer.
saved_model = saved_model_pb2.SavedModel()
if file_io.file_exists(path_to_pb):
try:
file_content = file_io.FileIO(path_to_pb, "rb").read()
saved_model.ParseFromString(file_content)
return saved_model
except message.DecodeError as e:
raise IOError("Cannot parse file %s: %s." % (path_to_pb, str(e)))
elif file_io.file_exists(path_to_pbtxt):
try:
file_content = file_io.FileIO(path_to_pbtxt, "rb").read()
text_format.Merge(file_content.decode("utf-8"), saved_model)
return saved_model
except text_format.ParseError as e:
raise IOError("Cannot parse file %s: %s." % (path_to_pbtxt, str(e)))
else:
raise IOError("SavedModel file does not exist at: %s/{%s|%s}" %
(saved_model_dir, constants.SAVED_MODEL_FILENAME_PBTXT,
constants.SAVED_MODEL_FILENAME_PB))
def get_saved_model_tag_sets(saved_model_dir):
"""Retrieves all the tag-sets available in the SavedModel.
Args:
saved_model_dir: Directory containing the SavedModel.
  Returns:
    A list of tag-sets, one per MetaGraphDef in the SavedModel; each tag-set is
    a list of tag strings.
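  For example, a minimal sketch (the directory path is a placeholder):

  ```
  tag_sets = get_saved_model_tag_sets("/tmp/exported_model")
  # e.g. [["serve"], ["serve", "gpu"]]
  ```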
"""
saved_model = read_saved_model(saved_model_dir)
all_tags = []
for meta_graph_def in saved_model.meta_graphs:
all_tags.append(list(meta_graph_def.meta_info_def.tags))
return all_tags
|
tensorflow-master
|
tensorflow/contrib/saved_model/python/saved_model/reader.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for building neural network layers, regularizers, summaries, etc.
@@avg_pool2d
@@avg_pool3d
@@batch_norm
@@convolution
@@convolution1d
@@convolution2d
@@convolution3d
@@conv2d_in_plane
@@convolution2d_in_plane
@@conv2d_transpose
@@convolution2d_transpose
@@conv3d_transpose
@@convolution3d_transpose
@@dense_to_sparse
@@dropout
@@elu
@@embedding_lookup_unique
@@flatten
@@fully_connected
@@GDN
@@gdn
@@images_to_sequence
@@layer_norm
@@linear
@@max_pool2d
@@max_pool3d
@@one_hot_encoding
@@relu
@@relu6
@@repeat
@@recompute_grad
@@RevBlock
@@rev_block
@@safe_embedding_lookup_sparse
@@scale_gradient
@@separable_conv2d
@@separable_convolution2d
@@sequence_to_images
@@softmax
@@spatial_softmax
@@stack
@@unit_norm
@@bow_encoder
@@embed_sequence
@@maxout
@@apply_regularization
@@l1_l2_regularizer
@@l1_regularizer
@@l2_regularizer
@@sum_regularizer
@@xavier_initializer
@@xavier_initializer_conv2d
@@variance_scaling_initializer
@@optimize_loss
@@summarize_activation
@@summarize_tensor
@@summarize_tensors
@@summarize_collection
@@summarize_activations
@@bucketized_column
@@check_feature_columns
@@create_feature_spec_for_parsing
@@crossed_column
@@embedding_column
@@scattered_embedding_column
@@input_from_feature_columns
@@transform_features
@@joint_weighted_sum_from_feature_columns
@@make_place_holder_tensors_for_base_features
@@multi_class_target
@@one_hot_column
@@parse_feature_columns_from_examples
@@parse_feature_columns_from_sequence_examples
@@real_valued_column
@@shared_embedding_columns
@@sparse_column_with_hash_bucket
@@sparse_column_with_integerized_feature
@@sparse_column_with_keys
@@sparse_column_with_vocabulary_file
@@weighted_sparse_column
@@weighted_sum_from_feature_columns
@@infer_real_valued_columns
@@sequence_input_from_feature_columns
@@group_norm
@@instance_norm
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.layers.python.layers import *
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['bias_add',
'conv1d',
'conv2d',
'conv3d',
'elu',
'feature_column',
'group_norm',
'instance_norm',
'legacy_fully_connected',
'legacy_linear',
'legacy_relu',
'OPTIMIZER_CLS_NAMES',
'OPTIMIZER_SUMMARIES',
'regression_target',
'SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY',
'summaries']
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/layers/__init__.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import layers as _layers
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
class AvgPool2DTest(test.TestCase):
def testInvalidDataFormat(self):
height, width = 3, 6
images = np.random.uniform(size=(5, height, width, 3))
with self.assertRaisesRegexp(ValueError,
'data_format has to be either NCHW or NHWC.'):
_layers.avg_pool2d(images, [3, 3], data_format='CHWN')
def testCreateAvgPool(self):
height, width = 3, 6
images = np.random.uniform(size=(5, height, width, 3))
output = _layers.avg_pool2d(images, [3, 3])
self.assertEqual(output.op.name, 'AvgPool2D/AvgPool')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 2, 3])
def testCreateAvgPoolNCHW(self):
height, width = 3, 6
images = np.random.uniform(size=(5, 2, height, width))
output = _layers.avg_pool2d(images, [3, 3], data_format='NCHW')
self.assertListEqual(output.get_shape().as_list(), [5, 2, 1, 2])
def testCollectOutputs(self):
height, width = 3, 6
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.avg_pool2d(images, [3, 3], outputs_collections='outputs')
output_collected = ops.get_collection('outputs')[0]
self.assertEqual(output_collected.aliases, ['AvgPool2D'])
self.assertEqual(output_collected, output)
def testCreateSquareAvgPool(self):
height, width = 3, 6
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.avg_pool2d(images, 3)
self.assertEqual(output.op.name, 'AvgPool2D/AvgPool')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 2, 3])
def testCreateAvgPoolWithScope(self):
height, width = 3, 6
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.avg_pool2d(images, [3, 3], scope='pool1')
self.assertEqual(output.op.name, 'pool1/AvgPool')
def testCreateAvgPoolWithSamePadding(self):
height, width = 3, 6
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.avg_pool2d(images, [3, 3], padding='SAME')
self.assertListEqual(output.get_shape().as_list(), [5, 2, 3, 3])
def testCreateAvgPoolWithSamePaddingNCHW(self):
height, width = 3, 6
images = random_ops.random_uniform((5, 3, height, width), seed=1)
output = _layers.avg_pool2d(
images, [3, 3], padding='SAME', data_format='NCHW')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 2, 3])
def testCreateAvgPoolStrideWithSamePadding(self):
height, width = 3, 6
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.avg_pool2d(images, [3, 3], stride=1, padding='SAME')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
def testGlobalAvgPool(self):
height, width = 3, 6
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.avg_pool2d(images, images.get_shape()[1:3], stride=1)
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
class AvgPool3DTest(test.TestCase):
def testInvalidDataFormat(self):
depth, height, width = 3, 6, 9
images = np.random.uniform(size=(5, depth, height, width, 3))
with self.assertRaisesRegexp(
ValueError, 'data_format has to be either NCDHW or NDHWC.'):
_layers.avg_pool3d(images, [3, 3, 3], data_format='CDHWN')
def testCreateAvgPool(self):
depth, height, width = 3, 6, 9
images = np.random.uniform(size=(5, depth, height, width, 3))
output = _layers.avg_pool3d(images, [3, 3, 3])
self.assertEqual(output.op.name, 'AvgPool3D/AvgPool3D')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 2, 4, 3])
def testCreateAvgPoolNCDHW(self):
depth, height, width = 3, 6, 9
images = np.random.uniform(size=(5, 2, depth, height, width))
output = _layers.avg_pool3d(images, [3, 3, 3], data_format='NCDHW')
    self.assertEqual(output.op.name, 'AvgPool3D/transpose_1')
self.assertListEqual(output.get_shape().as_list(), [5, 2, 1, 2, 4])
def testCollectOutputs(self):
depth, height, width = 3, 6, 9
images = random_ops.random_uniform((5, depth, height, width, 3), seed=1)
output = _layers.avg_pool3d(
images, [3, 3, 3], outputs_collections='outputs')
output_collected = ops.get_collection('outputs')[0]
self.assertEqual(output_collected.aliases, ['AvgPool3D'])
self.assertEqual(output_collected, output)
def testCreateSquareAvgPool(self):
depth, height, width = 3, 6, 9
images = random_ops.random_uniform((5, depth, height, width, 3), seed=1)
output = _layers.avg_pool3d(images, 3)
self.assertEqual(output.op.name, 'AvgPool3D/AvgPool3D')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 2, 4, 3])
def testCreateAvgPoolWithScope(self):
depth, height, width = 3, 6, 9
images = random_ops.random_uniform((5, depth, height, width, 3), seed=1)
output = _layers.avg_pool3d(images, [3, 3, 3], scope='pool1')
self.assertEqual(output.op.name, 'pool1/AvgPool3D')
def testCreateAvgPoolWithSamePadding(self):
depth, height, width = 3, 6, 9
images = random_ops.random_uniform((5, depth, height, width, 3), seed=1)
output = _layers.avg_pool3d(images, [3, 3, 3], padding='SAME')
self.assertListEqual(output.get_shape().as_list(), [5, 2, 3, 5, 3])
def testCreateAvgPoolWithSamePaddingNCDHW(self):
depth, height, width = 3, 6, 9
images = random_ops.random_uniform((5, 3, depth, height, width), seed=1)
output = _layers.avg_pool3d(
images, [3, 3, 3], padding='SAME', data_format='NCDHW')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 2, 3, 5])
def testCreateAvgPoolStrideWithSamePadding(self):
depth, height, width = 3, 6, 9
images = random_ops.random_uniform((5, depth, height, width, 3), seed=1)
output = _layers.avg_pool3d(images, [3, 3, 3], stride=1, padding='SAME')
self.assertListEqual(output.get_shape().as_list(),
[5, depth, height, width, 3])
def testGlobalAvgPool(self):
depth, height, width = 3, 6, 9
images = random_ops.random_uniform((5, depth, height, width, 3), seed=1)
output = _layers.avg_pool3d(images, images.get_shape()[1:4], stride=1)
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 1, 3])
class PoolTest(test.TestCase):
def testCreatePool(self):
height, width = 3, 3
images = np.random.uniform(size=(5, height, width, 3))
output = _layers.pool(images, [3, 3], pooling_type='AVG')
self.assertEqual(output.op.name, 'avg_pool')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
def testCreatePoolNCHW(self):
height, width = 3, 3
images = np.random.uniform(size=(5, 3, height, width))
output = _layers.pool(
images, [3, 3], pooling_type='AVG', data_format='NCHW')
self.assertEqual(output.op.name, 'avg_pool')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 1, 1])
def testCollectOutputs(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.pool(
images, [3, 3], pooling_type='AVG', outputs_collections='outputs')
output_collected = ops.get_collection('outputs')[0]
self.assertEqual(output_collected.aliases, ['avg_pool'])
self.assertEqual(output_collected, output)
def testCreateSquareAvgPool(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.pool(images, 3, pooling_type='AVG')
self.assertEqual(output.op.name, 'avg_pool')
self.assertEqual(output.get_shape().as_list(), [5, 1, 1, 3])
def testCreateMaxPoolWithScope(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.pool(images, [3, 3], pooling_type='MAX', scope='pool1')
self.assertEqual(output.op.name, 'pool1')
def testCreateMaxPoolWithSamePadding(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.pool(images, [3, 3], pooling_type='MAX', padding='SAME')
self.assertEqual(output.get_shape().as_list(), [5, 3, 3, 3])
def testCreateAvgPoolStrideWithSamePadding(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.pool(
images, [3, 3], stride=1, padding='SAME', pooling_type='AVG')
self.assertEqual(output.get_shape().as_list(), [5, height, width, 3])
def testGlobalAvgPool(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.pool(
images, images.get_shape()[1:3], stride=1, pooling_type='AVG')
self.assertEqual(output.get_shape().as_list(), [5, 1, 1, 3])
def testAvgPoolWithStride(self):
height, width = 5, 8
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.pool(images, [2, 3], stride=[1, 2], pooling_type='AVG')
self.assertEqual(output.get_shape().as_list(), [5, 4, 3, 3])
def testAvgPoolWithDilation(self):
height, width = 5, 8
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.pool(
images, [2, 3], dilation_rate=[1, 2], pooling_type='AVG')
self.assertEqual(output.get_shape().as_list(), [5, 4, 4, 3])
def testAvgPoolWithDilationNCHW(self):
height, width = 5, 8
images = random_ops.random_uniform((5, 3, height, width), seed=1)
output = _layers.pool(
images, [2, 3],
dilation_rate=[1, 2],
pooling_type='AVG',
data_format='NCHW')
self.assertEqual(output.get_shape().as_list(), [5, 3, 4, 4])
class BiasAddTest(test.TestCase):
def testCreate(self):
height, width = 3, 3
with self.cached_session():
images = np.random.uniform(size=(5, height, width, 3))
output = _layers.bias_add(images)
self.assertEqual(output.op.name, 'BiasAdd/BiasAdd')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
def testCreateWithActivation(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.bias_add(images, activation_fn=nn_ops.relu)
self.assertEqual(output.op.name, 'BiasAdd/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
def testCreateDimensions(self):
dims = (2, 3, 4)
shape = [5, 2, 3, 4]
with self.cached_session():
for d in dims:
input_shape = shape[:d]
inputs = random_ops.random_uniform(input_shape, seed=1)
output = _layers.bias_add(inputs)
self.assertListEqual(output.get_shape().as_list(), input_shape)
biases = variables.get_variables_by_name('biases')[-1]
self.assertListEqual(biases.get_shape().as_list(), [input_shape[-1]])
class ConvolutionTest(test.TestCase):
def testInvalidShape(self):
with self.cached_session():
images_2d = random_ops.random_uniform((5, 7, 9, 3), seed=1)
with self.assertRaisesRegexp(
ValueError, 'Convolution expects input with rank 5, got 4'):
layers_lib.convolution3d(images_2d, 32, 3)
images_3d = random_ops.random_uniform((5, 6, 7, 9, 3), seed=1)
with self.assertRaisesRegexp(
ValueError, 'Convolution expects input with rank 4, got 5'):
layers_lib.convolution2d(images_3d, 32, 3)
def testInvalidDataFormat(self):
height, width = 7, 9
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'data_format'):
layers_lib.convolution2d(images, 32, 3, data_format='CHWN')
def testCreateConv(self):
height, width = 7, 9
with self.cached_session():
images = np.random.uniform(size=(5, height, width, 4)).astype(np.float32)
output = layers_lib.convolution2d(images, 32, [3, 3])
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
weights = variables.get_variables_by_name('weights')[0]
self.assertListEqual(weights.get_shape().as_list(), [3, 3, 4, 32])
biases = variables.get_variables_by_name('biases')[0]
self.assertListEqual(biases.get_shape().as_list(), [32])
def testCreateConvNCHW(self):
height, width = 7, 9
with self.cached_session():
images = np.random.uniform(size=(5, 4, height, width)).astype(np.float32)
output = layers_lib.convolution2d(images, 32, [3, 3], data_format='NCHW')
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 32, height, width])
weights = variables.get_variables_by_name('weights')[0]
self.assertListEqual(weights.get_shape().as_list(), [3, 3, 4, 32])
biases = variables.get_variables_by_name('biases')[0]
self.assertListEqual(biases.get_shape().as_list(), [32])
def testCreateSquareConv(self):
height, width = 7, 9
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.convolution2d(images, 32, 3)
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testCreateConvWithTensorShape(self):
height, width = 7, 9
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.convolution2d(images, 32, images.get_shape()[1:3])
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testCreateFullyConv(self):
height, width = 7, 9
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 32), seed=1)
output = layers_lib.convolution2d(
images, 64, images.get_shape()[1:3], padding='VALID')
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 64])
biases = variables.get_variables_by_name('biases')[0]
self.assertListEqual(biases.get_shape().as_list(), [64])
def testFullyConvWithCustomGetter(self):
height, width = 7, 9
with self.cached_session():
called = [0]
def custom_getter(getter, *args, **kwargs):
called[0] += 1
return getter(*args, **kwargs)
with variable_scope.variable_scope('test', custom_getter=custom_getter):
images = random_ops.random_uniform((5, height, width, 32), seed=1)
layers_lib.convolution2d(images, 64, images.get_shape()[1:3])
self.assertEqual(called[0], 2) # Custom getter called twice.
def testCreateVerticalConv(self):
height, width = 7, 9
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 4), seed=1)
output = layers_lib.convolution2d(images, 32, [3, 1])
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
weights = variables.get_variables_by_name('weights')[0]
self.assertListEqual(weights.get_shape().as_list(), [3, 1, 4, 32])
biases = variables.get_variables_by_name('biases')[0]
self.assertListEqual(biases.get_shape().as_list(), [32])
def testCreateHorizontalConv(self):
height, width = 7, 9
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 4), seed=1)
output = layers_lib.convolution2d(images, 32, [1, 3])
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
weights = variables.get_variables_by_name('weights')[0]
self.assertListEqual(weights.get_shape().as_list(), [1, 3, 4, 32])
def testCreateConvWithStride(self):
height, width = 6, 8
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.convolution2d(images, 32, [3, 3], stride=2)
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height / 2, width / 2, 32])
def testCreateConvCreatesWeightsAndBiasesVars(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.cached_session():
self.assertFalse(variables.get_variables('conv1/weights'))
self.assertFalse(variables.get_variables('conv1/biases'))
layers_lib.convolution2d(images, 32, [3, 3], scope='conv1')
self.assertTrue(variables.get_variables('conv1/weights'))
self.assertTrue(variables.get_variables('conv1/biases'))
def testCreateConvWithScope(self):
height, width = 7, 9
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.convolution2d(images, 32, [3, 3], scope='conv1')
self.assertEqual(output.op.name, 'conv1/Relu')
def testCreateConvWithCollection(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with ops.name_scope('fe'):
conv = layers_lib.convolution2d(
images, 32, [3, 3], outputs_collections='outputs', scope='Conv')
output_collected = ops.get_collection('outputs')[0]
self.assertEqual(output_collected.aliases, ['Conv'])
self.assertEqual(output_collected, conv)
def testCreateConvWithoutActivation(self):
height, width = 7, 9
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.convolution2d(images, 32, [3, 3], activation_fn=None)
self.assertEqual(output.op.name, 'Conv/BiasAdd')
def testCreateConvValid(self):
height, width = 7, 9
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.convolution2d(images, 32, [3, 3], padding='VALID')
self.assertListEqual(output.get_shape().as_list(), [5, 5, 7, 32])
def testCreateConvWithWD(self):
height, width = 7, 9
weight_decay = 0.01
with self.cached_session() as sess:
images = random_ops.random_uniform((5, height, width, 3), seed=1)
regularizer = regularizers.l2_regularizer(weight_decay)
layers_lib.convolution2d(
images, 32, [3, 3], weights_regularizer=regularizer)
l2_loss = nn_ops.l2_loss(variables.get_variables_by_name('weights')[0])
wd = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertEqual(wd.op.name, 'Conv/kernel/Regularizer/l2_regularizer')
sess.run(variables_lib.global_variables_initializer())
self.assertAlmostEqual(sess.run(wd), weight_decay * l2_loss.eval())
def testCreateConvNoRegularizers(self):
height, width = 7, 9
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
layers_lib.convolution2d(images, 32, [3, 3])
self.assertEqual(
ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES), [])
def testReuseVars(self):
height, width = 7, 9
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
layers_lib.convolution2d(images, 32, [3, 3], scope='conv1')
self.assertEqual(len(variables.get_variables()), 2)
layers_lib.convolution2d(images, 32, [3, 3], scope='conv1', reuse=True)
self.assertEqual(len(variables.get_variables()), 2)
def testNonReuseVars(self):
height, width = 7, 9
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
layers_lib.convolution2d(images, 32, [3, 3])
self.assertEqual(len(variables.get_variables()), 2)
layers_lib.convolution2d(images, 32, [3, 3])
self.assertEqual(len(variables.get_variables()), 4)
def testReuseConvWithWD(self):
height, width = 7, 9
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
weight_decay = regularizers.l2_regularizer(0.01)
with arg_scope(
[layers_lib.convolution2d], weights_regularizer=weight_decay):
layers_lib.convolution2d(images, 32, [3, 3], scope='conv1')
self.assertEqual(len(variables.get_variables()), 2)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
layers_lib.convolution2d(images, 32, [3, 3], scope='conv1', reuse=True)
self.assertEqual(len(variables.get_variables()), 2)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
def testConvWithBatchNorm(self):
height, width = 7, 9
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 32), seed=1)
with arg_scope(
[layers_lib.convolution2d],
normalizer_fn=_layers.batch_norm,
normalizer_params={
'decay': 0.9
}):
net = layers_lib.convolution2d(images, 32, [3, 3])
net = layers_lib.convolution2d(net, 32, [3, 3])
self.assertEqual(len(variables.get_variables()), 8)
self.assertEqual(len(variables.get_variables('Conv/BatchNorm')), 3)
self.assertEqual(len(variables.get_variables('Conv_1/BatchNorm')), 3)
def testReuseConvWithBatchNorm(self):
height, width = 7, 9
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 32), seed=1)
with arg_scope(
[layers_lib.convolution2d],
normalizer_fn=_layers.batch_norm,
normalizer_params={
'decay': 0.9
}):
net = layers_lib.convolution2d(images, 32, [3, 3], scope='Conv')
net = layers_lib.convolution2d(
net, 32, [3, 3], scope='Conv', reuse=True)
self.assertEqual(len(variables.get_variables()), 4)
self.assertEqual(len(variables.get_variables('Conv/BatchNorm')), 3)
self.assertEqual(len(variables.get_variables('Conv_1/BatchNorm')), 0)
def testCreateConvCreatesWeightsAndBiasesVarsWithRateTwo(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.cached_session():
self.assertFalse(variables.get_variables('conv1/weights'))
self.assertFalse(variables.get_variables('conv1/biases'))
layers_lib.convolution2d(images, 32, [3, 3], rate=2, scope='conv1')
self.assertTrue(variables.get_variables('conv1/weights'))
self.assertTrue(variables.get_variables('conv1/biases'))
def testOutputSizeWithRateTwoSamePadding(self):
num_filters = 32
input_size = [5, 10, 12, 3]
expected_size = [5, 10, 12, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.convolution2d(
images, num_filters, [3, 3], rate=2, padding='SAME')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithRateTwoValidPadding(self):
num_filters = 32
input_size = [5, 10, 12, 3]
expected_size = [5, 6, 8, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.convolution2d(
images, num_filters, [3, 3], rate=2, padding='VALID')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithRateTwoThreeValidPadding(self):
num_filters = 32
input_size = [5, 10, 12, 3]
expected_size = [5, 6, 6, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.convolution2d(
images, num_filters, [3, 3], rate=[2, 3], padding='VALID')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
      self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testDynamicOutputSizeWithRateOneValidPadding(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [None, None, None, num_filters]
expected_size_dynamic = [5, 7, 9, num_filters]
with self.cached_session():
images = array_ops.placeholder(np.float32,
[None, None, None, input_size[3]])
output = layers_lib.convolution2d(
images, num_filters, [3, 3], rate=1, padding='VALID')
variables_lib.global_variables_initializer().run()
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), expected_size)
eval_output = output.eval({images: np.zeros(input_size, np.float32)})
self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testDynamicOutputSizeWithRateOneValidPaddingNCHW(self):
if test.is_gpu_available(cuda_only=True):
num_filters = 32
input_size = [5, 3, 9, 11]
expected_size = [None, num_filters, None, None]
expected_size_dynamic = [5, num_filters, 7, 9]
with self.session(use_gpu=True):
images = array_ops.placeholder(np.float32,
[None, input_size[1], None, None])
output = layers_lib.convolution2d(
images,
num_filters, [3, 3],
rate=1,
padding='VALID',
data_format='NCHW')
variables_lib.global_variables_initializer().run()
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), expected_size)
eval_output = output.eval({images: np.zeros(input_size, np.float32)})
self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testDynamicOutputSizeWithRateTwoValidPadding(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [None, None, None, num_filters]
expected_size_dynamic = [5, 5, 7, num_filters]
with self.cached_session():
images = array_ops.placeholder(np.float32,
[None, None, None, input_size[3]])
output = layers_lib.convolution2d(
images, num_filters, [3, 3], rate=2, padding='VALID')
variables_lib.global_variables_initializer().run()
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), expected_size)
eval_output = output.eval({images: np.zeros(input_size, np.float32)})
self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testWithScope(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [5, 5, 7, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.convolution2d(
images, num_filters, [3, 3], rate=2, padding='VALID', scope='conv7')
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'conv7/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testWithScopeWithoutActivation(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [5, 5, 7, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.convolution2d(
images,
num_filters, [3, 3],
rate=2,
padding='VALID',
activation_fn=None,
scope='conv7')
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'conv7/BiasAdd')
self.assertListEqual(list(output.eval().shape), expected_size)
class Convolution2dTransposeTests(test.TestCase):
def testTrainableFlagIsPassedOn(self):
for trainable in [True, False]:
with ops.Graph().as_default():
num_filters = 32
input_size = [5, 10, 12, 3]
images = random_ops.random_uniform(input_size, seed=1)
layers_lib.conv2d_transpose(
images, num_filters, [3, 3], stride=1, trainable=trainable)
model_variables = variables.get_model_variables()
trainable_variables = variables_lib.trainable_variables()
for model_variable in model_variables:
self.assertEqual(trainable, model_variable in trainable_variables)
def testInvalidDataFormat(self):
height, width = 7, 9
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(
ValueError, 'data_format has to be either NCHW or NHWC.'):
_layers.convolution2d_transpose(images, 32, 3, data_format='CHWN')
def testOutputSizeWithStrideOneSamePaddingNCHW(self):
    # The `NCHW` data format is only supported on GPU devices.
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True) as sess:
num_filters = 32
input_size = [5, 3, 10, 12]
expected_size = [5, num_filters, 10, 12]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [3, 3],
stride=1,
padding='SAME',
data_format='NCHW')
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStrideOneValidPaddingNCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True) as sess:
num_filters = 32
input_size = [5, 3, 10, 12]
expected_size = [5, num_filters, 12, 14]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [3, 3],
stride=1,
padding='VALID',
data_format='NCHW')
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStrideTwoValidPaddingNCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True) as sess:
num_filters = 32
input_size = [5, 3, 9, 11]
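        # For conv2d_transpose with VALID padding, out = (in - 1) * stride +
        # kernel, so the 9x11 input with stride 2 and a 3x3 kernel gives 19x23.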
expected_size = [5, num_filters, 19, 23]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [3, 3],
stride=[2, 2],
padding='VALID',
data_format='NCHW')
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith1x1StrideTwoSamePaddingNCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True) as sess:
num_filters = 1
input_size = [1, 1, 1, 1]
expected_size = [1, num_filters, 2, 2]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [2, 2],
stride=[2, 2],
padding='SAME',
data_format='NCHW')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith1x1StrideTwoValidPaddingNCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True) as sess:
num_filters = 1
input_size = [1, 1, 1, 1]
expected_size = [1, num_filters, 2, 2]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [2, 2],
stride=[2, 2],
padding='VALID',
data_format='NCHW')
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith2x2StrideTwoSamePaddingNCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True) as sess:
num_filters = 1
input_size = [1, 1, 2, 2]
expected_size = [1, num_filters, 4, 4]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [2, 2],
stride=[2, 2],
padding='SAME',
data_format='NCHW')
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith2x2StrideTwoValidPaddingNCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True) as sess:
num_filters = 1
input_size = [1, 1, 2, 2]
expected_size = [1, num_filters, 4, 4]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [2, 2],
stride=[2, 2],
padding='VALID',
data_format='NCHW')
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStride2x1NCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True) as sess:
num_filters = 1
input_size = [1, 1, 3, 2]
expected_size = [1, num_filters, 6, 5]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [2, 4],
stride=[2, 1],
padding='VALID',
data_format='NCHW')
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStride2x4NCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True) as sess:
num_filters = 1
input_size = [1, 1, 3, 2]
expected_size = [1, num_filters, 6, 8]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [2, 4],
stride=[2, 4],
padding='VALID',
data_format='NCHW')
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStride2x5NCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True) as sess:
num_filters = 1
input_size = [1, 1, 3, 2]
expected_size = [1, num_filters, 6, 10]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [2, 4],
stride=[2, 5],
padding='VALID',
data_format='NCHW')
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStrideOneSamePadding(self):
num_filters = 32
input_size = [5, 10, 12, 3]
expected_size = [5, 10, 12, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [3, 3], stride=1, padding='SAME')
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStrideOneValidPadding(self):
num_filters = 32
input_size = [5, 10, 12, 3]
expected_size = [5, 12, 14, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [3, 3], stride=1, padding='VALID')
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStrideTwoValidPadding(self):
num_filters = 32
input_size = [5, 9, 11, 3]
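    # VALID transposed convolution: out = (in - 1) * stride + kernel, giving
    # (9 - 1) * 2 + 3 = 19 and (11 - 1) * 2 + 3 = 23.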
expected_size = [5, 19, 23, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [3, 3], stride=[2, 2], padding='VALID')
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith1x1StrideTwoSamePadding(self):
num_filters = 1
input_size = [1, 1, 1, 1]
expected_size = [1, 2, 2, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 2], stride=[2, 2], padding='SAME')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith1x1StrideTwoValidPadding(self):
num_filters = 1
input_size = [1, 1, 1, 1]
expected_size = [1, 2, 2, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 2], stride=[2, 2], padding='VALID')
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith2x2StrideTwoSamePadding(self):
num_filters = 1
input_size = [1, 2, 2, 1]
expected_size = [1, 4, 4, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 2], stride=[2, 2], padding='SAME')
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith2x2StrideTwoValidPadding(self):
num_filters = 1
input_size = [1, 2, 2, 1]
expected_size = [1, 4, 4, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 2], stride=[2, 2], padding='VALID')
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStride2x1(self):
num_filters = 1
input_size = [1, 3, 2, 1]
expected_size = [1, 6, 5, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 4], stride=[2, 1], padding='VALID')
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStride2x4(self):
num_filters = 1
input_size = [1, 3, 2, 1]
expected_size = [1, 6, 8, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 4], stride=[2, 4], padding='VALID')
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStride2x5(self):
num_filters = 1
input_size = [1, 3, 2, 1]
expected_size = [1, 6, 10, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 4], stride=[2, 5], padding='VALID')
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeRandomSizesAndStridesValidPadding(self):
np.random.seed(0)
max_image_size = 10
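    # A conv2d with the same kernel size and stride should exactly undo the
    # VALID-padded conv2d_transpose, recovering the original spatial size.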
for _ in range(10):
num_filters = 1
input_size = [
1,
np.random.randint(1, max_image_size),
np.random.randint(1, max_image_size), 1
]
filter_size = [
np.random.randint(1, input_size[1] + 1),
np.random.randint(1, input_size[2] + 1)
]
stride = [np.random.randint(1, 3), np.random.randint(1, 3)]
ops.reset_default_graph()
graph = ops.Graph()
with graph.as_default():
images = random_ops.random_uniform(input_size, seed=1)
transpose = layers_lib.conv2d_transpose(
images, num_filters, filter_size, stride=stride, padding='VALID')
conv = layers_lib.conv2d(
transpose, num_filters, filter_size, stride=stride, padding='VALID')
with self.session(graph=graph) as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(conv.eval().shape), input_size)
def testDynamicOutputSizeWithStrideTwoValidPadding(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [None, None, None, num_filters]
expected_size_dynamic = [5, 19, 23, num_filters]
images = array_ops.placeholder(np.float32,
[None, None, None, input_size[3]])
output = layers_lib.conv2d_transpose(
images, num_filters, [3, 3], stride=[2, 2], padding='VALID')
self.assertListEqual(output.get_shape().as_list(), expected_size)
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
eval_output = output.eval({images: np.zeros(input_size, np.float32)})
self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testDynamicOutputSizeWithStrideTwoSamePadding(self):
num_filters = 32
input_size = [5, 9, 11, 3]
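    # With SAME padding conv2d_transpose scales each spatial dim by the stride,
    # so the 9x11 input becomes 18x22.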
expected_size = [None, None, None, num_filters]
expected_size_dynamic = [5, 18, 22, num_filters]
with self.cached_session():
images = array_ops.placeholder(np.float32,
[None, None, None, input_size[3]])
output = layers_lib.conv2d_transpose(
images, num_filters, [3, 3], stride=[2, 2], padding='SAME')
variables_lib.global_variables_initializer().run()
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(output.get_shape().as_list(), expected_size)
eval_output = output.eval({images: np.zeros(input_size, np.float32)})
self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testWithScope(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [5, 19, 23, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [3, 3], stride=2, padding='VALID', scope='conv7')
self.assertEqual(output.op.name, 'conv7/Relu')
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
def testWithScopeWithoutActivation(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [5, 19, 23, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [3, 3],
stride=2,
padding='VALID',
activation_fn=None,
scope='conv7')
self.assertEqual(output.op.name, 'conv7/BiasAdd')
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
def testDeconvWithoutBiasesProducesConv2dTranspose(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [5, 19, 23, num_filters]
stride = 2
padding = 'VALID'
with self.cached_session() as sess:
images = random_ops.random_uniform(input_size, seed=1)
output_deconv = layers_lib.conv2d_transpose(
images,
num_filters, [3, 3],
stride=stride,
padding=padding,
activation_fn=None,
scope='conv7')
weights = variables.get_variables_by_name('conv7/weights')[0]
output_conv2d_transpose = nn_ops.conv2d_transpose(
images,
weights,
expected_size, [1, stride, stride, 1],
padding=padding)
sess.run(variables_lib.global_variables_initializer())
output_deconv, output_conv2d_transpose = sess.run(
[output_deconv, output_conv2d_transpose])
self.assertTrue(
np.isclose(output_deconv, output_conv2d_transpose, 1e-5, 1e-5).all())


class ConvolutionInPlaneTest(test.TestCase):

def testHorzConvWithBlankImage(self):
image = array_ops.ones((1, 10, 10, 1))
horz_gradients = layers_lib.conv2d_in_plane(
image,
weights_initializer=init_ops.constant_initializer([1, -1]),
kernel_size=[1, 2],
padding='VALID',
activation_fn=None)
init_op = variables_lib.global_variables_initializer()
with self.cached_session() as sess:
sess.run(init_op)
result = sess.run(horz_gradients)
expected = np.zeros((1, 10, 9, 1))
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
def testHorzConvWithBlankImageAndPlaceholder(self):
image = array_ops.placeholder(dtypes.float32, shape=(None, None, None, 1))
horz_gradients = layers_lib.conv2d_in_plane(
image,
weights_initializer=init_ops.constant_initializer([1, -1]),
kernel_size=[1, 2],
padding='VALID',
activation_fn=None)
init_op = variables_lib.global_variables_initializer()
with self.cached_session() as sess:
sess.run(init_op)
result = sess.run(
horz_gradients, feed_dict={
image: np.ones((1, 10, 10, 1))
})
expected = np.zeros((1, 10, 9, 1))
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
def testHorzConvWithRandomImageMultiBatch(self):
np.random.seed(1)
image = np.random.rand(5, 10, 10, 1)
expected = image[:, :, 0:-1, :] - image[:, :, 1:, :]
tf_image = constant_op.constant(image, dtype=dtypes.float32)
horz_gradients = layers_lib.conv2d_in_plane(
tf_image,
weights_initializer=init_ops.constant_initializer([1, -1]),
kernel_size=[1, 2],
padding='VALID',
activation_fn=None)
init_op = variables_lib.global_variables_initializer()
with self.cached_session() as sess:
sess.run(init_op)
result = sess.run(horz_gradients)
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
def testHorzConvWithRandomImageMultiBatchMultiChannel(self):
np.random.seed(1)
image = np.random.rand(5, 10, 10, 7)
expected = image[:, :, 0:-1, :] - image[:, :, 1:, :]
tf_image = constant_op.constant(image, dtype=dtypes.float32)
horz_gradients = layers_lib.conv2d_in_plane(
tf_image,
weights_initializer=init_ops.constant_initializer([1, -1]),
kernel_size=[1, 2],
padding='VALID',
activation_fn=None)
init_op = variables_lib.global_variables_initializer()
with self.cached_session() as sess:
sess.run(init_op)
result = sess.run(horz_gradients)
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
def testHorzConvWithVaryingImage(self):
image = np.asmatrix(('1.0 2.0 3.0;' '1.1 2.0 4.0;' '-4.3 0.0 8.9'))
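    # A horizontal [1, -1] kernel computes left-minus-right differences along
    # each row, e.g. 1.0 - 2.0 = -1.0 for the first entry.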
expected = np.asmatrix(('-1.0 -1.0;' '-0.9 -2.0;' '-4.3 -8.9'))
expected = np.reshape(np.asarray(expected), (1, 3, 2, 1))
tf_image = constant_op.constant(
image, shape=(1, 3, 3, 1), dtype=dtypes.float32)
horz_gradients = layers_lib.conv2d_in_plane(
tf_image,
weights_initializer=init_ops.constant_initializer([1, -1]),
kernel_size=[1, 2],
padding='VALID',
activation_fn=None)
init_op = variables_lib.global_variables_initializer()
with self.cached_session() as sess:
sess.run(init_op)
result = sess.run(horz_gradients)
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
def testVertConvWithBlankImage(self):
image = array_ops.ones((1, 10, 10, 1))
vert_gradients = layers_lib.conv2d_in_plane(
image,
weights_initializer=init_ops.constant_initializer([1, -1]),
kernel_size=[2, 1],
padding='VALID',
activation_fn=None)
init_op = variables_lib.global_variables_initializer()
with self.cached_session() as sess:
sess.run(init_op)
result = sess.run(vert_gradients)
expected = np.zeros((1, 9, 10, 1))
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
def testVertConvWithVaryingImage(self):
image = np.asmatrix(('1.0 2.0 3.0;' '1.1 2.0 4.0;' '-4.3 0.0 8.9'))
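    # A vertical [1, -1] kernel computes top-minus-bottom differences down each
    # column, e.g. 1.0 - 1.1 = -0.1 for the first entry.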
expected = np.asmatrix(('-0.1 0.0 -1.0;' ' 5.4 2.0 -4.9'))
expected = np.reshape(np.asarray(expected), (1, 2, 3, 1))
tf_image = constant_op.constant(
image, shape=(1, 3, 3, 1), dtype=dtypes.float32)
vert_gradients = layers_lib.conv2d_in_plane(
tf_image,
weights_initializer=init_ops.constant_initializer([1, -1]),
kernel_size=[2, 1],
padding='VALID',
activation_fn=None)
init_op = variables_lib.global_variables_initializer()
with self.cached_session() as sess:
sess.run(init_op)
result = sess.run(vert_gradients)
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
def testConv1dShape(self):
width = 7
with self.cached_session():
images = random_ops.random_uniform((5, width, 3), seed=1)
output = layers_lib.convolution1d(images, 32, 3)
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, width, 32])
def testConvInferSpatialDims(self):
depth, height, width = 7, 9, 11
with self.cached_session():
images = np.random.uniform(size=(5, width, 4)).astype(np.float32)
output = layers_lib.convolution(images, 32, [3])
self.assertListEqual(output.get_shape().as_list(), [5, width, 32])
images = np.random.uniform(size=(5, height, width, 4)).astype(np.float32)
output = layers_lib.convolution(images, 32, [3, 3])
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
images = np.random.uniform(size=(5, depth, height, width,
4)).astype(np.float32)
output = layers_lib.convolution(images, 32, [3, 3, 3])
self.assertListEqual(output.get_shape().as_list(),
[5, depth, height, width, 32])


class DenseToSparseTest(test.TestCase):

def testDenseFromConstantToSparse(self):
expected_constant = np.reshape(np.arange(24, dtype=np.int64), (3, 4, 2))
tensor = constant_op.constant(expected_constant)
sparse = _layers.dense_to_sparse(tensor)
dense = sparse_ops.sparse_to_dense(sparse.indices, sparse.dense_shape,
sparse.values)
with self.cached_session() as sess:
constant = sess.run(dense)
self.assertAllEqual(expected_constant, constant)


class DropoutTest(test.TestCase):

def testCreateDropout(self):
height, width = 3, 3
with self.cached_session():
images = np.random.uniform(size=(5, height, width, 3))
output = _layers.dropout(images)
self.assertEqual(output.op.name, 'Dropout/dropout_1/mul_1')
output.get_shape().assert_is_compatible_with(
ops.convert_to_tensor(images).get_shape())
def testCreateDropoutWithConstantTrue(self):
height, width = 3, 3
with self.cached_session():
is_training = constant_op.constant(True)
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.dropout(images, is_training=is_training)
output.get_shape().assert_is_compatible_with(images.get_shape())
def testCreateDropoutWithConstantFalse(self):
height, width = 3, 3
with self.cached_session():
is_training = constant_op.constant(False)
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.dropout(images, is_training=is_training)
output.get_shape().assert_is_compatible_with(images.get_shape())
def testCreateDropoutWithPlaceholder(self):
height, width = 3, 3
with self.cached_session():
is_training = array_ops.placeholder(dtype=dtypes.bool, shape=[])
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.dropout(images, is_training=is_training)
self.assertEqual(output.op.name, 'Dropout/cond/Merge')
output.get_shape().assert_is_compatible_with(images.get_shape())
def testCollectOutputs(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.dropout(images, outputs_collections='outputs')
c_output = ops.get_collection('outputs')[0]
self.assertEqual(c_output.aliases, ['Dropout'])
self.assertEqual(c_output, output)
def testDropout(self):
height, width = 10, 10
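    # With the default keep_prob of 0.5, roughly half of the strictly positive
    # uniform inputs are zeroed, so the non-zero fraction drops to about half.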
with self.cached_session() as sess:
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
num_elem_initial = math_ops.reduce_mean(
math_ops.cast(images > 0, dtypes.float32))
output = _layers.dropout(images)
num_elem = math_ops.reduce_mean(math_ops.cast(output > 0, dtypes.float32))
num_elem, num_elem_initial = sess.run([num_elem, num_elem_initial])
self.assertLess(num_elem, num_elem_initial / 2 + 0.1)
self.assertGreater(num_elem, num_elem_initial / 2 - 0.1)
def testDropoutSeed(self):
"""Test that providing the same seed produces the same result."""
height, width = 10, 10
with self.cached_session() as sess:
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
output1 = _layers.dropout(images, seed=1)
output2 = _layers.dropout(images, seed=1)
self.assertAllEqual(*sess.run([output1, output2]))
def testCreateDropoutNoTraining(self):
height, width = 3, 3
with self.cached_session() as sess:
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
num_elem_initial = math_ops.reduce_mean(
math_ops.cast(images > 0, dtypes.float32))
output = _layers.dropout(images, is_training=False)
num_elem = math_ops.reduce_mean(math_ops.cast(output > 0, dtypes.float32))
num_elem, num_elem_initial = sess.run([num_elem, num_elem_initial])
self.assertEqual(num_elem, num_elem_initial)
outputs, inputs = sess.run([output, images])
self.assertAllClose(outputs, inputs)
def testCreateFCFollowByDropout(self):
height, width = 3, 3
with self.cached_session() as sess:
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
output = _layers.fully_connected(images, 50)
num_elem_initial = math_ops.reduce_mean(
math_ops.cast(output > 0, dtypes.float32))
output = _layers.dropout(output)
num_elem = math_ops.reduce_mean(math_ops.cast(output > 0, dtypes.float32))
sess.run(variables_lib.global_variables_initializer())
num_elem, num_elem_initial = sess.run([num_elem, num_elem_initial])
self.assertLess(num_elem, num_elem_initial / 2 + 0.1)
self.assertGreater(num_elem, num_elem_initial / 2 - 0.1)
def testCreateFCWithDropout(self):
height, width = 3, 3
with self.cached_session() as sess:
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
output = _layers.fully_connected(
images, 50, normalizer_fn=_layers.dropout)
num_elem = math_ops.reduce_mean(math_ops.cast(output > 0, dtypes.float32))
sess.run(variables_lib.global_variables_initializer())
num_elem = sess.run(num_elem)
self.assertLess(num_elem, 0.5)
self.assertGreater(num_elem, 0.1)


class FlattenTest(test.TestCase):

def testUnknownLastDim(self):
with ops.Graph().as_default() as g, self.session(g):
inputs = array_ops.placeholder(dtype=dtypes.float32)
inputs.set_shape(tensor_shape.TensorShape((5, None)))
output = _layers.flatten(inputs)
self.assertEqual(output.get_shape().as_list(), [5, None])
def testCollectOutputs(self):
height, width = 3, 3
with self.cached_session():
images = np.random.uniform(size=(5, height, width, 3))
output = _layers.flatten(images, outputs_collections='outputs')
c_output = ops.get_collection('outputs')[0]
self.assertEqual(c_output.aliases, ['Flatten'])
self.assertEqual(c_output, output)
def testFlatten4D(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
output = _layers.flatten(images)
self.assertEqual(output.get_shape().num_elements(),
images.get_shape().num_elements())
self.assertEqual(output.get_shape()[0], images.get_shape()[0])
def testFlatten3D(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform(
(5, height, width), seed=1, name='images')
output = _layers.flatten(images)
self.assertEqual(output.get_shape().num_elements(),
images.get_shape().num_elements())
self.assertEqual(output.get_shape()[0], images.get_shape()[0])
def testFlatten0D(self):
with self.cached_session():
scalars = random_ops.random_uniform((5,), seed=1, name='scalars')
output = _layers.flatten(scalars)
self.assertEqual(output.shape, (5, 1))
def testFlattenBatchSize(self):
height, width = 3, 3
with self.cached_session() as sess:
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
inputs = array_ops.placeholder(dtypes.int32, (None, height, width, 3))
output = _layers.flatten(inputs)
self.assertEqual(output.get_shape().as_list(), [None, height * width * 3])
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.size, images.get_shape().num_elements())
self.assertEqual(output.shape[0], images.get_shape()[0])
def testUnknownDims(self):
height = width = depth = 3
with self.cached_session() as sess:
images = random_ops.random_uniform(
(5, height, width, depth), seed=1, name='images')
inputs = array_ops.placeholder(dtypes.int32, (None, None, None, None))
output = _layers.flatten(inputs)
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.size, images.get_shape().num_elements())
self.assertEqual(output.shape[0], images.get_shape()[0])


def _sparsify(array, threshold=0.5):
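  # Zeroes out entries below `threshold` and returns the COO components
  # (indices, values, dense_shape) of the resulting sparse array.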
array[array < threshold] = 0
non_zero = np.where(array)
indices = np.vstack(non_zero).T
values = array[non_zero]
shape = array.shape
return indices, values, shape


class PartialFlattenTest(test.TestCase):

def testDensePartialFlatten(self):
"""Test `_inner_flatten` on `Tensor`s."""
shape = [2, 3, 4, 5, 6]
np.random.seed(5446)
inputs = np.random.randint(0, 100, size=shape)
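    # _inner_flatten(x, new_rank) keeps the first new_rank - 1 dimensions and
    # collapses the remaining ones into a single trailing dimension.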
for new_rank in [1, 2, 3, 4, 5]:
expected_new_shape = (
shape[:new_rank - 1] + [np.prod(shape[new_rank - 1:])])
expected_flattened = np.reshape(inputs, expected_new_shape)
flattened_t = _layers._inner_flatten(inputs, new_rank)
static_shape = flattened_t.get_shape().as_list()
self.assertEqual(static_shape, expected_new_shape)
with self.cached_session() as sess:
flattened = sess.run(flattened_t)
np.testing.assert_array_equal(expected_flattened, flattened)
def testSparsePartialFlatten(self):
"""Test `_inner_flatten` on `SparseTensor`s."""
shape = [4, 3, 11, 6]
np.random.seed(10301)
random_ = np.random.rand(*shape)
indices, values, _ = _sparsify(random_)
for new_rank in [1, 2, 3]:
expected_shape = (shape[:new_rank - 1] + [np.prod(shape[new_rank - 1:])])
reshaped_random_ = np.reshape(random_, expected_shape)
expected_indices, expected_values, _ = _sparsify(reshaped_random_)
inputs_t = sparse_tensor.SparseTensor(indices, values, shape)
flattened_t = _layers._inner_flatten(inputs_t, new_rank)
with self.cached_session() as sess:
flattened = sess.run(flattened_t)
np.testing.assert_array_equal(expected_indices, flattened.indices)
np.testing.assert_array_equal(expected_values, flattened.values)
np.testing.assert_array_equal(expected_shape, flattened.dense_shape)
def testIncompleteShape(self):
"""Test `_inner_flatten` shape inference for incomplete shapes."""
shape = [2, None, 4, None, 5, 6]
inputs = array_ops.placeholder(dtypes.int32)
inputs.set_shape(shape)
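    # The collapsed trailing dimension is only static when every flattened dim
    # is known (e.g. new_rank=5 collapses [5, 6] into 30); otherwise it is None.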
flattened1 = _layers._inner_flatten(inputs, 1)
self.assertEqual([None], flattened1.get_shape().as_list())
flattened2 = _layers._inner_flatten(inputs, 2)
self.assertEqual([2, None], flattened2.get_shape().as_list())
flattened3 = _layers._inner_flatten(inputs, 3)
self.assertEqual([2, None, None], flattened3.get_shape().as_list())
flattened4 = _layers._inner_flatten(inputs, 4)
self.assertEqual([2, None, 4, None], flattened4.get_shape().as_list())
flattened5 = _layers._inner_flatten(inputs, 5)
self.assertEqual([2, None, 4, None, 30], flattened5.get_shape().as_list())
def testDenseFlattenRankAssertion(self):
"""Test `_inner_flatten` rank assertion for dense tensors."""
shape = [2, 3]
new_rank = 3
inputs = array_ops.placeholder(dtypes.int32)
inputs.set_shape(shape)
with self.assertRaisesRegexp(ValueError,
'inputs has rank less than new_rank'):
_layers._inner_flatten(inputs, new_rank)
def testSparseFlattenRankAssertion(self):
"""Test `_inner_flatten` rank assertion for sparse tensors."""
shape = [2, 3]
new_rank = 3
np.random.seed(10301)
random_ = np.random.rand(*shape)
indices, values, _ = _sparsify(random_)
inputs = sparse_tensor.SparseTensor(indices, values, shape)
with self.assertRaisesRegexp(ValueError,
'Inputs has rank less than new_rank'):
_layers._inner_flatten(inputs, new_rank)


class FCTest(test.TestCase):

def testCreateFC(self):
height, width = 3, 3
for layer_fn in (_layers.fully_connected, layers_lib.relu):
with ops.Graph().as_default() as g, self.session(g):
inputs = np.random.uniform(size=(5, height * width * 3))
output = layer_fn(inputs, 32)
self.assertEqual(output.op.name, 'fully_connected/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 32])
weights = variables.get_variables_by_name('weights')[0]
self.assertListEqual(weights.get_shape().as_list(), [3 * 3 * 3, 32])
biases = variables.get_variables_by_name('biases')[0]
self.assertListEqual(biases.get_shape().as_list(), [32])
def testCreateFCWithScope(self):
height, width = 3, 3
with self.cached_session():
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
output = _layers.fully_connected(inputs, 32, scope='fc1')
self.assertEqual(output.op.name, 'fc1/Relu')
def testCreateFCWithCollection(self):
height, width = 3, 3
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
with ops.name_scope('fe'):
fc = _layers.fully_connected(
inputs, 7, outputs_collections='outputs', scope='fc')
output_collected = ops.get_collection('outputs')[0]
self.assertEqual(output_collected.aliases, ['fc'])
self.assertEqual(output_collected, fc)
def testCreateFcCreatesWeightsAndBiasesVars(self):
height, width = 3, 3
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
with self.cached_session():
self.assertFalse(variables.get_variables('fc1/weights'))
self.assertFalse(variables.get_variables('fc1/biases'))
_layers.fully_connected(inputs, 32, scope='fc1')
self.assertTrue(variables.get_variables('fc1/weights'))
self.assertTrue(variables.get_variables('fc1/biases'))
def testReuseVars(self):
height, width = 3, 3
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
with self.cached_session():
_layers.fully_connected(inputs, 32, scope='fc1')
self.assertEqual(len(variables.get_variables('fc1')), 2)
_layers.fully_connected(inputs, 32, scope='fc1', reuse=True)
self.assertEqual(len(variables.get_variables('fc1')), 2)
def testNonReuseVars(self):
height, width = 3, 3
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
with self.cached_session():
_layers.fully_connected(inputs, 32)
self.assertEqual(len(variables.get_variables('fully_connected')), 2)
_layers.fully_connected(inputs, 32)
self.assertEqual(len(variables.get_variables('fully_connected')), 4)
def testReuseWithRegularizer(self):
height, width = 3, 3
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
_layers.fully_connected(
inputs, 32, scope='fc1', weights_regularizer=regularizer)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
self.assertEqual(len(losses.get_regularization_losses()), 1)
_layers.fully_connected(
inputs, 32, scope='fc1', weights_regularizer=regularizer, reuse=True)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
self.assertEqual(len(losses.get_regularization_losses()), 1)
with variable_scope.variable_scope('outer', reuse=False):
_layers.fully_connected(inputs, 32, weights_regularizer=regularizer)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 2)
self.assertEqual(len(losses.get_regularization_losses()), 2)
with variable_scope.variable_scope('outer', reuse=True):
_layers.fully_connected(inputs, 32, weights_regularizer=regularizer)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 2)
self.assertEqual(len(losses.get_regularization_losses()), 2)
def testCreateFCWithoutActivation(self):
height, width = 3, 3
with self.cached_session():
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
output = _layers.fully_connected(inputs, 32, activation_fn=None)
self.assertEqual(output.op.name, 'fully_connected/BiasAdd')
def testCreateFCWithWD(self):
height, width = 3, 3
with self.cached_session() as sess:
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
weight_decay = regularizers.l2_regularizer(0.01)
_layers.fully_connected(inputs, 32, weights_regularizer=weight_decay)
wd = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertEqual(wd.op.name,
'fully_connected/kernel/Regularizer/l2_regularizer')
sess.run(variables_lib.global_variables_initializer())
self.assertLess(sess.run(wd), 0.4)
def testCreateFCWithBD(self):
height, width = 3, 3
with self.cached_session() as sess:
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
bias_decay = regularizers.l2_regularizer(0.01)
_layers.fully_connected(inputs, 32, biases_regularizer=bias_decay)
wd = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertEqual(wd.op.name,
'fully_connected/bias/Regularizer/l2_regularizer')
sess.run(variables_lib.global_variables_initializer())
self.assertLess(sess.run(wd), 0.4)
def testCreateNoRegularizers(self):
height, width = 3, 3
with self.cached_session():
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
_layers.fully_connected(inputs, 32)
self.assertEqual(
ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES), [])
def testReuseFCWithWD(self):
height, width = 3, 3
with self.cached_session():
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
weight_decay = regularizers.l2_regularizer(0.01)
_layers.fully_connected(
inputs, 32, weights_regularizer=weight_decay, scope='FC')
self.assertEqual(len(variables.get_variables()), 2)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
_layers.fully_connected(
inputs, 32, weights_regularizer=weight_decay, scope='FC', reuse=True)
self.assertEqual(len(variables.get_variables()), 2)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
def testFCWithBatchNorm(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform((5, height * width * 3), seed=1)
with arg_scope(
[_layers.fully_connected],
normalizer_fn=_layers.batch_norm,
normalizer_params={
'decay': 0.9
}):
net = _layers.fully_connected(images, 27)
net = _layers.fully_connected(net, 27)
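      # With a normalizer_fn the dense layers create no bias term, so each layer
      # contributes weights plus (beta, moving_mean, moving_variance): 4 each.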
self.assertEqual(len(variables.get_variables()), 8)
self.assertEqual(
len(variables.get_variables('fully_connected/BatchNorm')), 3)
self.assertEqual(
len(variables.get_variables('fully_connected_1/BatchNorm')), 3)
def testReuseFCWithBatchNorm(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform((5, height * width * 3), seed=1)
with arg_scope(
[_layers.fully_connected],
normalizer_fn=_layers.batch_norm,
normalizer_params={
'decay': 0.9
}):
net = _layers.fully_connected(images, 27, scope='fc1')
net = _layers.fully_connected(net, 27, scope='fc1', reuse=True)
self.assertEqual(len(variables.get_variables()), 4)
self.assertEqual(len(variables.get_variables('fc1/BatchNorm')), 3)


class BatchNormTest(test.TestCase):

def _addBesselsCorrection(self, sample_size, expected_var):
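    # Fused batch norm tracks the unbiased sample variance, so the population
    # variance computed with np.var must be scaled by n / (n - 1).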
correction_factor = sample_size / (sample_size - 1)
expected_var *= correction_factor
return expected_var, correction_factor
def testBatchNormCenterFalse(self):
a = array_ops.placeholder(dtype=dtypes.float32, shape=(10, 10, 10, 10))
# Test that center=False builds a valid graph.
_layers.batch_norm(
a, center=False, data_format='NCHW', zero_debias_moving_mean=True)
def testUnknownShape(self):
with ops.Graph().as_default() as g, self.session(g):
inputs = array_ops.placeholder(dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError, 'undefined rank'):
_layers.batch_norm(inputs)
def testInvalidDataFormat(self):
with ops.Graph().as_default() as g, self.session(g):
inputs = array_ops.placeholder(dtype=dtypes.float32)
with self.assertRaisesRegexp(
ValueError, 'data_format has to be either NCHW or NHWC.'):
_layers.batch_norm(inputs, data_format='CHWN')
def testUnknownChannelsDimNHWC(self):
with ops.Graph().as_default() as g, self.session(g):
inputs = array_ops.placeholder(dtype=dtypes.float32)
inputs.set_shape(tensor_shape.TensorShape((5, 3, 3, None)))
with self.assertRaisesRegexp(ValueError, 'undefined'):
_layers.batch_norm(inputs, data_format='NHWC')
def testUnknownChannelsDimNCHW(self):
with ops.Graph().as_default() as g, self.session(g):
inputs = array_ops.placeholder(dtype=dtypes.float32)
inputs.set_shape(tensor_shape.TensorShape((5, None, 3, 3)))
with self.assertRaisesRegexp(ValueError, 'undefined'):
_layers.batch_norm(inputs, data_format='NCHW')
def _testCreateOp(self, fused, dtype=None):
if dtype is None:
dtype = dtypes.float32
height, width = 3, 3
with self.cached_session():
images = np.random.uniform(size=(5, height, width, 3)).astype(
dtype.as_numpy_dtype)
output = _layers.batch_norm(images, fused=fused)
expected_name = ('BatchNorm/FusedBatchNorm'
if fused else 'BatchNorm/batchnorm')
self.assertTrue(output.op.name.startswith(expected_name))
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
self.assertEqual(
ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES), [])
def testCreateOpDefault(self):
self._testCreateOp(False)
def testCreateOpFused(self):
self._testCreateOp(True)
def testCreateOpFusedFloat16(self):
self._testCreateOp(True, dtypes.float16)
def _testCreateOpBetaRegularizer(self, fused=True):
height, width = 3, 3
with self.cached_session():
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
images = np.random.uniform(size=(5, height, width, 3)).astype('f')
_layers.batch_norm(images, param_regularizers={'beta': reg}, fused=fused)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
beta_decay = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertEqual(beta_decay.op.name, 'BatchNorm/beta/Regularizer/mul')
def testCreateOpBetaRegularizerFused(self):
self._testCreateOpBetaRegularizer(fused=True)
def testCreateOpBetaRegularizerNonFused(self):
self._testCreateOpBetaRegularizer(fused=False)
def _testCreateOpGammaRegularizer(self, fused=True):
height, width = 3, 3
with self.cached_session():
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
images = np.random.uniform(size=(5, height, width, 3)).astype('f')
_layers.batch_norm(
images, param_regularizers={'gamma': reg}, scale=True, fused=fused)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
gamma_decay = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertEqual(gamma_decay.op.name, 'BatchNorm/gamma/Regularizer/mul')
def testCreateOpGammaRegularizerFused(self):
self._testCreateOpGammaRegularizer(fused=True)
def testCreateOpGammaRegularizerNonFused(self):
self._testCreateOpGammaRegularizer(fused=False)
def testCreateVariables(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
_layers.batch_norm(images, scale=True)
beta = variables.get_variables_by_name('beta')[0]
gamma = variables.get_variables_by_name('gamma')[0]
self.assertEqual(beta.op.name, 'BatchNorm/beta')
self.assertEqual(gamma.op.name, 'BatchNorm/gamma')
moving_mean = variables.get_variables_by_name('moving_mean')[0]
moving_variance = variables.get_variables_by_name('moving_variance')[0]
self.assertEqual(moving_mean.op.name, 'BatchNorm/moving_mean')
self.assertEqual(moving_variance.op.name, 'BatchNorm/moving_variance')
def testMovingAverageVariables(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
_layers.batch_norm(images, scale=True)
self.assertEqual(len(variables.get_model_variables()), 4)
moving_mean = variables.get_variables_by_name('moving_mean')[0]
moving_variance = variables.get_variables_by_name('moving_variance')[0]
self.assertEqual(moving_mean.op.name, 'BatchNorm/moving_mean')
self.assertEqual(moving_variance.op.name, 'BatchNorm/moving_variance')
def testMovingAverageVariablesZeroDebias(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
_layers.batch_norm(
images, scale=True, zero_debias_moving_mean=True, fused=False)
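      # zero_debias adds two auxiliary slots (the biased accumulator and
      # local_step) on top of beta, gamma, moving_mean and moving_variance.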
self.assertEqual(len(variables.get_model_variables()), 6)
moving_mean = variables.get_variables_by_name('moving_mean')[0]
moving_variance = variables.get_variables_by_name('moving_variance')[0]
biased = variables.get_variables_by_name('biased')[0]
local_step = variables.get_variables_by_name('local_step')[0]
self.assertEqual(moving_mean.op.name, 'BatchNorm/moving_mean')
self.assertEqual(moving_variance.op.name, 'BatchNorm/moving_variance')
self.assertEqual(biased.op.name, 'BatchNorm/BatchNorm/moving_mean/biased')
self.assertEqual(local_step.op.name,
'BatchNorm/BatchNorm/moving_mean/local_step')
def testUpdatesCollection(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
_layers.batch_norm(images, updates_collections='my_update_ops')
update_layers = ops.get_collection('my_update_ops')
update_moving_mean = update_layers[0]
update_moving_variance = update_layers[1]
self.assertEqual(update_moving_mean.op.name, 'BatchNorm/AssignMovingAvg')
self.assertEqual(update_moving_variance.op.name,
'BatchNorm/AssignMovingAvg_1')
def testVariablesCollections(self):
variables_collections = {
'beta': ['beta'],
'gamma': ['gamma'],
'moving_mean': ['moving_mean'],
'moving_variance': ['moving_variance'],
}
images = random_ops.random_uniform((5, 5, 5, 3), seed=1)
_layers.batch_norm(
images, scale=True, variables_collections=variables_collections)
for var_name, collection_names in variables_collections.items():
collection = ops.get_collection(collection_names[0])
self.assertEqual(len(collection), 1)
var_name_in_collection = collection[0].op.name
self.assertEqual(var_name_in_collection, 'BatchNorm/' + var_name)
def testReuseVariables(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
_layers.batch_norm(images, scale=True, scope='bn')
_layers.batch_norm(images, scale=True, scope='bn', reuse=True)
beta = variables.get_variables_by_name('beta')
gamma = variables.get_variables_by_name('gamma')
self.assertEqual(len(beta), 1)
self.assertEqual(len(gamma), 1)
moving_mean = variables.get_variables_by_name('moving_mean')
moving_variance = variables.get_variables_by_name('moving_variance')
moving_vars = moving_mean + moving_variance
self.assertEqual(len(moving_vars), 2)
def testReuseUpdateOps(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with arg_scope([_layers.batch_norm], updates_collections='update_ops'):
_layers.batch_norm(images, scope='bn')
self.assertEqual(len(ops.get_collection('update_ops')), 2)
_layers.batch_norm(images, scope='bn', reuse=True)
self.assertEqual(len(ops.get_collection('update_ops')), 4)
def testCreateMovingVars(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
_ = _layers.batch_norm(images)
moving_mean = variables.get_variables('BatchNorm/moving_mean')
self.assertEqual(len(moving_mean), 1)
self.assertEqual(moving_mean[0].op.name, 'BatchNorm/moving_mean')
moving_variance = variables.get_variables('BatchNorm/moving_variance')
self.assertEqual(len(moving_variance), 1)
self.assertEqual(moving_variance[0].op.name, 'BatchNorm/moving_variance')
def testZeroDebiasMovingMean(self):
height, width = 3, 3
batch_size = 10
channels = 3
np.random.seed(1)
image_shape = (batch_size, height, width, channels)
axis = (0, 1, 2)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=axis)
expected_var = np.var(image_values, axis=axis)
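    # assign_moving_average computes m <- decay * m + (1 - decay) * value, and
    # zero_debias divides out the (1 - decay**t) start-up bias, so with a
    # constant batch the debiased moving_mean equals expected_mean immediately.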
images = constant_op.constant(
image_values, shape=image_shape, dtype=dtypes.float32)
output = _layers.batch_norm(
images,
decay=0.1,
updates_collections=None,
zero_debias_moving_mean=True,
fused=False)
moving_mean = variables.get_variables_by_name('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables_by_name('moving_variance')[0]
biased = variables.get_variables_by_name('biased')[0]
local_step = variables.get_variables_by_name('local_step')[0]
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertAllClose(local_step.eval(), 0)
self.assertAllClose(moving_mean.eval(), [0] * channels)
self.assertAllClose(biased.eval(), [0] * channels)
self.assertAllClose(moving_variance.eval(), [1] * channels)
for i in range(10):
self.assertAllClose(local_step.eval(), i)
sess.run([output])
# In this case moving_mean == expected_mean after each update
self.assertAllClose(moving_mean.eval(), expected_mean)
# After 10 updates with decay 0.1 moving_mean == expected_mean,
# biased == expected_mean and moving_variance == expected_var.
self.assertAllClose(moving_mean.eval(), expected_mean)
self.assertAllClose(moving_variance.eval(), expected_var)
self.assertAllClose(biased.eval(), expected_mean)
def _testNoneUpdatesCollections(self,
fused,
data_format='NHWC',
zero_debias_moving_mean=False):
height, width = 2, 2
batch_size = 10
channels = 3
np.random.seed(1)
use_gpu = fused
with self.session(use_gpu=use_gpu) as sess:
if data_format == 'NHWC':
image_shape = (batch_size, height, width, channels)
axis = (0, 1, 2)
else:
image_shape = (batch_size, channels, height, width)
axis = (0, 2, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=axis)
expected_var = np.var(image_values, axis=axis)
if fused:
# Add Bessel's correction
expected_var, _ = self._addBesselsCorrection(
batch_size * height * width, expected_var)
images = constant_op.constant(
image_values, shape=image_shape, dtype=dtypes.float32)
output = _layers.batch_norm(
images,
decay=0.1,
updates_collections=None,
fused=fused,
data_format=data_format,
zero_debias_moving_mean=zero_debias_moving_mean)
      # The update ops are not added to the UPDATE_OPS collection.
self.assertEqual(ops.get_collection(ops.GraphKeys.UPDATE_OPS), [])
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * channels)
self.assertAllClose(variance, [1] * channels)
for _ in range(10):
sess.run([output])
if zero_debias_moving_mean:
# In this case moving_mean == expected_mean after update
self.assertAllClose(moving_mean.eval(), expected_mean)
mean = moving_mean.eval()
variance = moving_variance.eval()
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
def testNoneUpdatesCollectionsNHWC(self):
self._testNoneUpdatesCollections(False, data_format='NHWC')
def testNoneUpdatesCollectionsNCHW(self):
self._testNoneUpdatesCollections(False, data_format='NCHW')
def testNoneUpdatesCollectionsNHWCZeroDebias(self):
self._testNoneUpdatesCollections(
False, data_format='NHWC', zero_debias_moving_mean=True)
def testNoneUpdatesCollectionsNCHWZeroDebias(self):
self._testNoneUpdatesCollections(
False, data_format='NCHW', zero_debias_moving_mean=True)
def testNoneUpdatesCollectionsFusedNCHW(self):
if test.is_gpu_available(cuda_only=True):
self._testNoneUpdatesCollections(True, data_format='NCHW')
def testNoneUpdatesCollectionsFusedNHWC(self):
self._testNoneUpdatesCollections(True, data_format='NHWC')
def testNoneUpdatesCollectionsFusedNCHWZeroDebias(self):
if test.is_gpu_available(cuda_only=True):
self._testNoneUpdatesCollections(
True, data_format='NCHW', zero_debias_moving_mean=True)
def testNoneUpdatesCollectionsFusedNHWCZeroDebias(self):
self._testNoneUpdatesCollections(
True, data_format='NHWC', zero_debias_moving_mean=True)
def _testDelayedUpdateMovingVars(self,
fused,
data_format='NHWC',
zero_debias_moving_mean=False):
height, width = 2, 2
batch_size = 10
channels = 3
np.random.seed(1)
use_gpu = fused
with self.session(use_gpu=use_gpu) as sess:
if data_format == 'NHWC':
image_shape = (batch_size, height, width, channels)
axis = (0, 1, 2)
else:
image_shape = (batch_size, channels, height, width)
axis = (0, 2, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=axis)
expected_var = np.var(image_values, axis=axis)
if fused:
# Add Bessel's correction
expected_var, correction_factor = self._addBesselsCorrection(
batch_size * height * width, expected_var)
images = constant_op.constant(
image_values, shape=image_shape, dtype=dtypes.float32)
output = _layers.batch_norm(
images,
decay=0.1,
fused=fused,
data_format=data_format,
zero_debias_moving_mean=zero_debias_moving_mean)
update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
      # The update ops are added to the UPDATE_OPS collection.
self.assertEqual(len(update_ops), 2)
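      # Without explicit control dependencies, running `output` alone would not
      # apply the moving-average updates; gating on the collected update ops
      # makes every sess.run([output]) also refresh the moving statistics.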
with ops.control_dependencies(update_ops):
barrier = control_flow_ops.no_op(name='barrier')
output = control_flow_ops.with_dependencies([barrier], output)
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * channels)
self.assertAllClose(variance, [1] * channels)
for _ in range(10):
sess.run([output])
if zero_debias_moving_mean:
# In this case moving_mean == expected_mean after update
self.assertAllClose(moving_mean.eval(), expected_mean)
mean = moving_mean.eval()
variance = moving_variance.eval()
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
self.assertAllClose(mean, expected_mean)
if fused:
# Add Bessel's correction
moving_variance_corrected = moving_variance / correction_factor
correct_moving_variance = state_ops.assign(moving_variance,
moving_variance_corrected)
sess.run(correct_moving_variance)
self.assertAllClose(variance, expected_var)
def testDelayedUpdateMovingVarsNHWC(self):
self._testDelayedUpdateMovingVars(False, data_format='NHWC')
def testDelayedUpdateMovingVarsNCHW(self):
self._testDelayedUpdateMovingVars(False, data_format='NCHW')
def testDelayedUpdateMovingVarsFusedNCHW(self):
if test.is_gpu_available(cuda_only=True):
self._testDelayedUpdateMovingVars(True, data_format='NCHW')
def testDelayedUpdateMovingVarsFusedNHWC(self):
self._testDelayedUpdateMovingVars(True, data_format='NHWC')
def testDelayedUpdateMovingVars(self):
self._testDelayedUpdateMovingVars(False)
def _testEvalMovingVars(self, zero_debias_moving_mean=False):
height, width = 3, 3
with self.cached_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=(0, 1, 2))
expected_var = np.var(image_values, axis=(0, 1, 2))
images = constant_op.constant(
image_values, shape=image_shape, dtype=dtypes.float32)
output = _layers.batch_norm(images, decay=0.1, is_training=False)
self.assertEqual(ops.get_collection(ops.GraphKeys.UPDATE_OPS), [])
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 3)
self.assertAllClose(variance, [1] * 3)
# Simulate assignment from saver restore.
init_assigns = [
state_ops.assign(moving_mean, expected_mean),
state_ops.assign(moving_variance, expected_var)
]
sess.run(init_assigns)
for _ in range(10):
sess.run([output], {images: np.random.rand(*image_shape)})
mean = moving_mean.eval()
variance = moving_variance.eval()
# Although we feed different images, the moving_mean and moving_variance
# shouldn't change.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
def testEvalMovingVars(self):
self._testEvalMovingVars()
def testEvalMovingVarsZeroDebias(self):
self._testEvalMovingVars(True)
def testEvalMovingVarsWithPartitioner(self):
# This test makes sure that the moving-mean and moving-variance logic works
# when `batch_norm` is called within a variable-scope that has a variable
# partitioner.
partitioner = partitioned_variables.fixed_size_partitioner(2, axis=0)
with variable_scope.variable_scope(
variable_scope.get_variable_scope(), partitioner=partitioner):
self.testEvalMovingVars()
def _testReuseVars(self, fused, zero_debias_moving_mean=False):
height, width = 3, 3
batch_size = 10
channels = 3
with self.cached_session() as sess:
image_shape = (batch_size, height, width, channels)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=(0, 1, 2))
expected_var = np.var(image_values, axis=(0, 1, 2))
if fused:
# Add Bessel's correction
expected_var, correction_factor = self._addBesselsCorrection(
batch_size * height * width, expected_var)
images = constant_op.constant(
image_values, shape=image_shape, dtype=dtypes.float32)
output_train = _layers.batch_norm(
images,
decay=0.1,
is_training=True,
scope='BN',
fused=fused,
zero_debias_moving_mean=zero_debias_moving_mean)
output_eval = _layers.batch_norm(
images,
decay=0.1,
is_training=False,
scope='BN',
reuse=True,
fused=fused,
zero_debias_moving_mean=zero_debias_moving_mean)
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
moving_mean = variables.get_variables('BN/moving_mean')[0]
moving_variance = variables.get_variables('BN/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * channels)
self.assertAllClose(variance, [1] * channels)
update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
with ops.control_dependencies(update_ops):
barrier = control_flow_ops.no_op(name='barrier')
train_op = control_flow_ops.with_dependencies([barrier], output_train)
# Before updates the outputs are different for train and eval.
self.assertFalse(
np.allclose(sess.run([output_train]), sess.run([output_eval])))
for _ in range(10):
sess.run([train_op])
mean = moving_mean.eval()
variance = moving_variance.eval()
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
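      # (The underlying update is moving = decay * moving + (1 - decay) *
      # batch_value, so the moving statistics converge geometrically to the
      # batch statistics; after 10 updates with decay 0.1 the residual is of
      # order decay**10 = 1e-10.)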
self.assertAllClose(mean, expected_mean)
if fused:
# Add Bessel's correction
moving_variance_corrected = moving_variance / correction_factor
correct_moving_variance = state_ops.assign(moving_variance,
moving_variance_corrected)
sess.run(correct_moving_variance)
self.assertAllClose(variance, expected_var)
# After convergence output_train and output_eval should be the same.
self.assertAllClose(sess.run([output_train]), sess.run([output_eval]))
def testReuseVarsDefault(self):
self._testReuseVars(False)
def testReuseVarsFused(self):
self._testReuseVars(True)
def testReuseVarsDefaultZeroDebias(self):
self._testReuseVars(False, True)
def testReuseVarsFusedZeroDebias(self):
self._testReuseVars(True, True)
def _testIsTrainingVariable(self,
fused,
data_format='NHWC',
zero_debias_moving_mean=False):
height, width = 2, 2
batch_size = 10
channels = 3
np.random.seed(1)
use_gpu = fused
with self.session(use_gpu=use_gpu) as sess:
if data_format == 'NHWC':
image_shape = (batch_size, height, width, channels)
axis = (0, 1, 2)
else:
image_shape = (batch_size, channels, height, width)
axis = (0, 2, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=axis)
expected_var = np.var(image_values, axis=axis)
if fused:
# Add Bessel's correction
expected_var, correction_factor = self._addBesselsCorrection(
batch_size * height * width, expected_var)
images = constant_op.constant(
image_values, shape=image_shape, dtype=dtypes.float32)
is_training = variables_lib.VariableV1(True)
output = _layers.batch_norm(
images,
decay=0.1,
is_training=is_training,
fused=fused,
data_format=data_format,
zero_debias_moving_mean=zero_debias_moving_mean)
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * channels)
self.assertAllClose(variance, [1] * channels)
      # Before updates the outputs are different depending on is_training.
output_true = sess.run([output], {is_training: True})
output_false = sess.run([output], {is_training: False})
self.assertFalse(np.allclose(output_true, output_false))
update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
with ops.control_dependencies(update_ops):
barrier = control_flow_ops.no_op(name='barrier')
train_op = control_flow_ops.with_dependencies([barrier], output)
for _ in range(10):
sess.run([train_op])
mean = moving_mean.eval()
variance = moving_variance.eval()
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
# After updates to convergence the outputs don't depend on is_training.
output_true = sess.run([output], {is_training: True})
if fused:
# Add Bessel's correction
moving_variance_corrected = moving_variance / correction_factor
correct_moving_variance = state_ops.assign(moving_variance,
moving_variance_corrected)
sess.run(correct_moving_variance)
output_false = sess.run([output], {is_training: False})
self.assertAllClose(output_true, output_false)
def testIsTrainingVariableNHWC(self):
self._testIsTrainingVariable(False, data_format='NHWC')
def testIsTrainingVariableNCHW(self):
self._testIsTrainingVariable(False, data_format='NCHW')
def testIsTrainingVariableNHWCZeroDebias(self):
self._testIsTrainingVariable(
False, data_format='NHWC', zero_debias_moving_mean=True)
def testIsTrainingVariableNCHWZeroDebias(self):
self._testIsTrainingVariable(
False, data_format='NCHW', zero_debias_moving_mean=True)
def testIsTrainingVariableFusedNCHW(self):
if test.is_gpu_available(cuda_only=True):
self._testIsTrainingVariable(True, data_format='NCHW')
def testIsTrainingVariableFusedNHWC(self):
self._testIsTrainingVariable(True, data_format='NHWC')
def testIsTrainingVariableFusedNCHWZeroDebias(self):
if test.is_gpu_available(cuda_only=True):
self._testIsTrainingVariable(
True, data_format='NCHW', zero_debias_moving_mean=True)
def testIsTrainingVariableFusedNHWCZeroDebias(self):
self._testIsTrainingVariable(
True, data_format='NHWC', zero_debias_moving_mean=True)
def testNoUpdatesWhenIsTrainingFalse(self):
height, width = 3, 3
with self.cached_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
images = constant_op.constant(
image_values, shape=image_shape, dtype=dtypes.float32)
output = _layers.batch_norm(images, decay=0.1, is_training=False)
update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
      # update_ops are not added to the UPDATE_OPS collection.
self.assertEqual(len(update_ops), 0)
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 3)
self.assertAllClose(variance, [1] * 3)
# When is_training is False batch_norm doesn't update moving_vars.
for _ in range(10):
sess.run([output])
self.assertAllClose(moving_mean.eval(), [0] * 3)
self.assertAllClose(moving_variance.eval(), [1] * 3)
def testNoneUpdatesCollectionNoTraining(self):
height, width = 3, 3
with self.cached_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
images = constant_op.constant(
image_values, shape=image_shape, dtype=dtypes.float32)
output = _layers.batch_norm(
images, decay=0.1, updates_collections=None, is_training=False)
      # update_ops are not added to the UPDATE_OPS collection.
self.assertEqual(ops.get_collection(ops.GraphKeys.UPDATE_OPS), [])
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 3)
self.assertAllClose(variance, [1] * 3)
# When is_training is False batch_norm doesn't update moving_vars.
for _ in range(10):
sess.run([output])
self.assertAllClose(moving_mean.eval(), [0] * 3)
self.assertAllClose(moving_variance.eval(), [1] * 3)
def _testNoneUpdatesCollectionIsTrainingVariable(self,
fused,
data_format='NHWC'):
height, width = 2, 2
batch_size = 10
channels = 3
np.random.seed(1)
use_gpu = fused
with self.session(use_gpu=use_gpu) as sess:
if data_format == 'NHWC':
image_shape = (batch_size, height, width, channels)
axis = (0, 1, 2)
else:
image_shape = (batch_size, channels, height, width)
axis = (0, 2, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=axis)
expected_var = np.var(image_values, axis=axis)
if fused:
# Add Bessel's correction
expected_var, correction_factor = self._addBesselsCorrection(
batch_size * height * width, expected_var)
images = constant_op.constant(
image_values, shape=image_shape, dtype=dtypes.float32)
is_training = variables_lib.VariableV1(True)
output = _layers.batch_norm(
images,
decay=0.1,
updates_collections=None,
is_training=is_training,
fused=fused,
data_format=data_format)
      # update_ops are not added to the UPDATE_OPS collection.
self.assertEqual(ops.get_collection(ops.GraphKeys.UPDATE_OPS), [])
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * channels)
self.assertAllClose(variance, [1] * channels)
# When is_training is False batch_norm doesn't update moving_vars.
for _ in range(10):
sess.run([output], {is_training: False})
self.assertAllClose(moving_mean.eval(), [0] * channels)
self.assertAllClose(moving_variance.eval(), [1] * channels)
      # Before updates the outputs are different depending on is_training.
output_true = sess.run([output], {is_training: True})
output_false = sess.run([output], {is_training: False})
self.assertFalse(np.allclose(output_true, output_false))
# When is_training is True update moving_vars.
for _ in range(10):
sess.run([output], {is_training: True})
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
self.assertAllClose(moving_mean.eval(), expected_mean)
self.assertAllClose(moving_variance.eval(), expected_var)
# After updates to convergence the outputs don't depend on is_training.
output_true = sess.run([output], {is_training: True})
if fused:
# Add Bessel's correction
moving_variance_corrected = moving_variance / correction_factor
correct_moving_variance = state_ops.assign(moving_variance,
moving_variance_corrected)
sess.run(correct_moving_variance)
output_false = sess.run([output], {is_training: False})
self.assertTrue(np.allclose(output_true, output_false))
def testNoneUpdatesCollectionIsTrainingVariableNHWC(self):
self._testNoneUpdatesCollectionIsTrainingVariable(False, data_format='NHWC')
def testNoneUpdatesCollectionIsTrainingVariableNCHW(self):
self._testNoneUpdatesCollectionIsTrainingVariable(False, data_format='NCHW')
def testNoneUpdatesCollectionIsTrainingVariableFusedNCHW(self):
if test.is_gpu_available(cuda_only=True):
self._testNoneUpdatesCollectionIsTrainingVariable(
True, data_format='NCHW')
def testNoneUpdatesCollectionIsTrainingVariableFusedNHWC(self):
self._testNoneUpdatesCollectionIsTrainingVariable(True, data_format='NHWC')
def _testTrainMovingVars(self, fused, data_format='NHWC'):
    # Test that the gradients remain stable while the moving_mean is updated.
    # Since the moving_mean is used as a shift when computing tf.moments, the
    # gradients could otherwise diverge.
height, width = 7, 7
batch_size = 10
channels = 32
np.random.seed(1)
use_gpu = fused
with self.session(use_gpu=use_gpu) as sess:
if data_format == 'NHWC':
image_shape = (batch_size, height, width, channels)
axis = (0, 1, 2)
else:
image_shape = (batch_size, channels, height, width)
axis = (0, 2, 3)
image_values = np.random.rand(*image_shape) + 256
expected_mean = np.mean(image_values, axis=axis)
expected_var = np.var(image_values, axis=axis)
if fused:
# Add Bessel's correction
expected_var, _ = self._addBesselsCorrection(
batch_size * height * width, expected_var)
images = constant_op.constant(
image_values, shape=image_shape, dtype=dtypes.float32)
output = _layers.batch_norm(
images,
decay=0.2,
updates_collections=None,
is_training=True,
fused=fused,
data_format=data_format)
self.assertEqual(ops.get_collection(ops.GraphKeys.UPDATE_OPS), [])
objective = math_ops.reduce_sum(output)
[images_gradients] = gradients_impl.gradients(objective, images)
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * channels)
self.assertAllClose(variance, [1] * channels)
# Initial input gradients.
images_gradients_value = sess.run(images_gradients)
for _ in range(10):
np_output, new_images_gradients = sess.run([output, images_gradients])
# The outputs should be close to 0.0 mean and 1.0 variance
self.assertAllClose(
np.mean(np_output, axis=axis), [0] * channels,
rtol=0.001,
atol=0.001)
self.assertAllClose(
np.var(np_output, axis=axis), [1] * channels, rtol=0.01, atol=0.01)
# The gradients should change slowly while updating moving_mean.
max_diff = np.max(np.abs(images_gradients_value - new_images_gradients))
self.assertGreaterEqual(max_diff, 0.0)
self.assertLess(max_diff, 5e-5)
self.assertAllClose(moving_mean.eval(), expected_mean)
self.assertAllClose(moving_variance.eval(), expected_var)
def testTrainMovingVarsNHWC(self):
self._testTrainMovingVars(False, data_format='NHWC')
def testTrainMovingVarsNCHW(self):
self._testTrainMovingVars(False, data_format='NCHW')
def testTrainMovingVarsFusedNCHW(self):
if test.is_gpu_available(cuda_only=True):
self._testTrainMovingVars(True, data_format='NCHW')
def testTrainMovingVarsFusedNHWC(self):
self._testTrainMovingVars(True, data_format='NHWC')
def testCustomInitializer(self):
height, width = 3, 3
channels = 3
with self.cached_session() as sess:
images = (np.ones((5, height, width, channels)) * 9.0).astype('f')
beta = init_ops.constant_initializer(
(np.ones(channels) * 5.0).astype('f'))
gamma = init_ops.constant_initializer(
(np.ones(channels) * 2.0).astype('f'))
mean = init_ops.constant_initializer(
(np.ones(channels) * 5.0).astype('f'))
variance = init_ops.constant_initializer(
(np.ones(channels) * 4.0).astype('f'))
output = _layers.batch_norm(
images,
is_training=False,
scale=True,
epsilon=0.0,
param_initializers={
'beta': beta,
'gamma': gamma,
'moving_mean': mean,
'moving_variance': variance,
})
sess.run(variables_lib.global_variables_initializer())
outs = sess.run(output)
self.assertAllClose(outs, images)
def _runBatchNormalizationWithFormat(self, shape, data_format, is_training):
channels = shape[-1]
with self.session(use_gpu=True) as sess:
      images = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
beta = init_ops.constant_initializer(
np.arange(2, channels + 2, dtype=np.float32))
gamma = init_ops.constant_initializer(
np.arange(10, channels + 10, dtype=np.float32) * 2.0)
mean = init_ops.constant_initializer(
np.arange(3, channels + 3, dtype=np.float32) * 5.0)
variance = init_ops.constant_initializer(
np.arange(1, channels + 1, dtype=np.float32) * 4.0)
if data_format == 'NCHW':
# Reshape inputs from NHWC to NCHW format.
images = array_ops.transpose(
images, [0, len(shape) - 1] + list(range(1,
len(shape) - 1)))
output = _layers.batch_norm(
images,
is_training=is_training,
scale=True,
epsilon=0.5,
param_initializers={
'beta': beta,
'gamma': gamma,
'moving_mean': mean,
'moving_variance': variance,
},
data_format=data_format)
if data_format == 'NCHW':
# Reshape outputs from NCHW back to NHWC format.
output = array_ops.transpose(output,
[0] + list(range(2, len(shape))) + [1])
sess.run(variables_lib.global_variables_initializer())
return sess.run(output)
def testNHWCAndNCHWInferenceProduceSameOutput(self):
if test.is_gpu_available(cuda_only=True):
for shape in [[7, 3, 5], [5, 2, 3, 4], [11, 3, 2, 4, 5]]:
nhwc = self._runBatchNormalizationWithFormat(
data_format='NHWC', shape=shape, is_training=False)
nchw = self._runBatchNormalizationWithFormat(
data_format='NCHW', shape=shape, is_training=False)
self.assertAllClose(nhwc, nchw, atol=1e-4, rtol=1e-4)
def testNHWCAndNCHWTrainingProduceSameOutput(self):
if test.is_gpu_available(cuda_only=True):
for shape in [[7, 3, 5], [5, 2, 3, 4], [11, 3, 2, 4, 5]]:
nhwc = self._runBatchNormalizationWithFormat(
data_format='NHWC', shape=shape, is_training=True)
nchw = self._runBatchNormalizationWithFormat(
data_format='NCHW', shape=shape, is_training=True)
self.assertAllClose(nhwc, nchw, atol=1e-4, rtol=1e-4)
def testBatchNormBeta(self):
# Test case for 11673
with self.cached_session() as sess:
a_32 = array_ops.placeholder(dtypes.float32, shape=(10, 10, 10, 10))
_layers.batch_norm(
a_32, center=False, data_format='NCHW', zero_debias_moving_mean=True)
a_16 = array_ops.placeholder(dtypes.float16, shape=(10, 10, 10, 10))
_layers.batch_norm(
a_16, center=False, data_format='NCHW', zero_debias_moving_mean=True)
sess.run(variables_lib.global_variables_initializer())
def testVariablesAreFloat32(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, dtype=dtypes.float16)
_layers.batch_norm(images, scale=True)
beta = variables.get_variables_by_name('beta')[0]
gamma = variables.get_variables_by_name('gamma')[0]
self.assertEqual(beta.dtype, dtypes.float32_ref)
self.assertEqual(gamma.dtype, dtypes.float32_ref)
moving_mean = variables.get_variables_by_name('moving_mean')[0]
moving_variance = variables.get_variables_by_name('moving_variance')[0]
self.assertEqual(moving_mean.dtype, dtypes.float32_ref)
self.assertEqual(moving_variance.dtype, dtypes.float32_ref)
def _runFusedBatchNorm(self, shape, dtype):
channels = shape[1]
    images = np.arange(np.prod(shape), dtype=dtype).reshape(shape)
beta = init_ops.constant_initializer(
np.arange(2, channels + 2, dtype=np.float32))
gamma = init_ops.constant_initializer(
np.arange(10, channels + 10, dtype=np.float32) * 2.0)
mean = init_ops.constant_initializer(
np.arange(3, channels + 3, dtype=np.float32) * 5.0)
variance = init_ops.constant_initializer(
np.arange(1, channels + 1, dtype=np.float32) * 4.0)
output = _layers.batch_norm(
images,
fused=True,
is_training=True,
scale=True,
epsilon=0.5,
param_initializers={
'beta': beta,
'gamma': gamma,
'moving_mean': mean,
'moving_variance': variance,
},
data_format='NCHW')
with self.session(use_gpu=True) as sess:
sess.run(variables_lib.global_variables_initializer())
return sess.run(output)
def testFusedBatchNormFloat16MatchesFloat32(self):
if test.is_gpu_available(cuda_only=True):
shape = [5, 4, 2, 3]
res_32 = self._runFusedBatchNorm(shape, np.float32)
res_16 = self._runFusedBatchNorm(shape, np.float16)
self.assertAllClose(res_32, res_16, rtol=1e-3)
def testAdjustmentCreated(self):
# Tests that the adjustment is appropriately passed to and used by the core
# BN layer.
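    # (`adjustment` takes the input's shape and returns a (scale, shift) pair
    # that the layer applies to the normalized values; the callable below just
    # records what it returned so the shapes and count can be checked.)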
all_adjustments = []
def _create_adjustment(shape):
adjustments = [array_ops.ones(shape[-1:]), array_ops.zeros(shape[-1:])]
all_adjustments.extend(adjustments)
return adjustments
depth = 8
images = array_ops.zeros([10, 5, 5, depth])
output = _layers.batch_norm(
images, is_training=True, adjustment=_create_adjustment)
self.assertListEqual(output.shape.as_list(), images.shape.as_list())
self.assertEqual(len(all_adjustments), 2)
self.assertListEqual(all_adjustments[0].shape.as_list(), [depth])
self.assertListEqual(all_adjustments[1].shape.as_list(), [depth])
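

# A minimal NumPy sketch, mirroring the reference computation inside
# LayerNormTest.doOutputTest below, of the layer normalization those tests
# check. The helper name is illustrative and the function is not called by
# any test.
def _layer_norm_reference_sketch(x, gamma, beta, begin_norm_axis=1, eps=1e-12):
  """Reference layer norm: gamma * (x - mean) / sqrt(var + eps) + beta."""
  moments_axis = tuple(range(begin_norm_axis, x.ndim))
  mean = np.mean(x, axis=moments_axis, keepdims=True)
  var = np.var(x, axis=moments_axis, keepdims=True)
  return gamma * (x - mean) / np.sqrt(var + eps) + beta
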
class LayerNormTest(test.TestCase):
def testUnknownShape(self):
with ops.Graph().as_default() as g, self.session(g):
inputs = array_ops.placeholder(dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError, 'undefined rank'):
_layers.layer_norm(inputs)
def testParamsDimsNotFullyDefined(self):
with ops.Graph().as_default() as g, self.session(g):
inputs = array_ops.placeholder(dtype=dtypes.float32)
inputs.set_shape(tensor_shape.TensorShape((5, 3, 3, None)))
with self.assertRaisesRegexp(ValueError, 'is not fully defined'):
_layers.layer_norm(inputs)
def testCreateOp(self):
height, width = 3, 3
with self.cached_session():
images = np.random.uniform(size=(5, height, width, 3))
output = _layers.layer_norm(images)
self.assertTrue(output.op.name.startswith('LayerNorm/batchnorm'))
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
def testCreateVariables(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
_layers.layer_norm(images)
beta = variables.get_variables_by_name('beta')[0]
gamma = variables.get_variables_by_name('gamma')[0]
self.assertEqual(beta.op.name, 'LayerNorm/beta')
self.assertEqual(gamma.op.name, 'LayerNorm/gamma')
def testReuseVariables(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
_layers.layer_norm(images, scope='ln')
_layers.layer_norm(images, scope='ln', reuse=True)
beta = variables.get_variables_by_name('beta')
gamma = variables.get_variables_by_name('gamma')
self.assertEqual(len(beta), 1)
self.assertEqual(len(gamma), 1)
def testReuseVars(self):
height, width = 3, 3
with self.cached_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
images = constant_op.constant(
image_values, shape=image_shape, dtype=dtypes.float32)
output_train = _layers.layer_norm(images, scope='LN')
output_eval = _layers.layer_norm(images, scope='LN', reuse=True)
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
# output_train and output_eval should be the same.
self.assertAllClose(sess.run([output_train]), sess.run([output_eval]))
def doOutputTest(self,
input_shape,
tol=1e-5,
begin_norm_axis=1,
dtype=dtypes.float64):
eps = 1e-12 if dtype != dtypes.float16 else 1e-3
expected_mean = np.zeros(input_shape[:begin_norm_axis])
expected_var_uncorrected = np.ones(input_shape[:begin_norm_axis])
sigma_list = [1.0, 0.1]
if dtype == dtypes.float16:
# This causes the variance to underflow in float16, and requires that
# variance_epsilon be set appropriately to avoid NaNs in the output.
sigma_list.append(1e-4)
# Note that the mean:variance ratio must be limited to the representable
# range for float16.
for mu in [0.0, 1e2 if dtype != dtypes.float16 else 1e1]:
for sigma in sigma_list:
expected_var = expected_var_uncorrected / (1.0 + eps / sigma**2)
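        # With gamma == 1 and beta == 0 the layer output is
        # (x - mu) / sqrt(var + eps), whose variance is
        # sigma**2 / (sigma**2 + eps) = 1 / (1 + eps / sigma**2), hence the
        # correction above.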
input_values = np.random.randn(*input_shape) * sigma + mu
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
inputs = constant_op.constant(
input_values, shape=input_shape, dtype=dtype)
output_t = _layers.layer_norm(
inputs, begin_norm_axis=begin_norm_axis, scope='LN')
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
# The mean and variance of the output should be close to 0 and 1
# respectively.
if begin_norm_axis < 0:
begin_norm_axis = len(input_shape) + begin_norm_axis
moments_axis = tuple(range(begin_norm_axis, len(input_shape)))
with variable_scope.variable_scope('LN', reuse=True):
beta_var = variable_scope.get_variable('beta', dtype=dtype)
gamma_var = variable_scope.get_variable('gamma', dtype=dtype)
outputs, beta, gamma = sess.run((output_t, beta_var, gamma_var))
# Make sure that there are no NaNs
self.assertFalse(np.isnan(outputs).any())
if outputs.dtype != np.float64:
# Cast to float64 before computing mean/variance to avoid
# overflow and precision issues.
outputs = outputs.astype(np.float64)
mean = np.mean(outputs, axis=moments_axis)
var = np.var(outputs, axis=moments_axis)
# Layer-norm implemented in numpy
expected_out = (
(gamma * (input_values - np.mean(
input_values, axis=moments_axis, keepdims=True)) /
np.sqrt(eps + np.var(
input_values, axis=moments_axis, keepdims=True))) + beta)
self.assertAllClose(expected_mean, mean, atol=tol, rtol=tol)
self.assertAllClose(expected_var, var, atol=tol)
# The full computation gets a bigger tolerance
self.assertAllClose(expected_out, outputs, atol=5 * tol)
def testOutput2DInput(self):
self.doOutputTest((10, 300))
def testOutput2DInputDegenerateNormAxis(self):
with self.assertRaisesRegexp(ValueError, r'must be < rank\(inputs\)'):
self.doOutputTest((10, 300), begin_norm_axis=2)
def testOutput4DInput(self):
self.doOutputTest((100, 10, 10, 3))
def testOutput4DInputNormOnInnermostAxis(self):
# Equivalent tests
self.doOutputTest(
(100, 10, 10, 3), begin_norm_axis=3, tol=1e-4, dtype=dtypes.float64)
self.doOutputTest(
(100, 10, 10, 3), begin_norm_axis=-1, tol=1e-4, dtype=dtypes.float64)
def testOutputSmallInput(self):
self.doOutputTest((10, 10, 10, 30))
def testOutputSmallInputNormOnInnermostAxis(self):
self.doOutputTest((10, 10, 10, 30), begin_norm_axis=3)
def testOutputBigInput(self):
self.doOutputTest((1, 100, 100, 1))
def testOutputBigInputFloat32(self):
self.doOutputTest((1, 100, 1000, 1), tol=1e-4, dtype=dtypes.float32)
def testOutputBigInputFloat16(self):
self.doOutputTest((1, 100, 1000, 1), tol=5e-2, dtype=dtypes.float16)
class GDNTest(test.TestCase):
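  # The closed-form checks below rely on the layer's default initialization
  # (beta at 1 and gamma at 0.1 on the diagonal, as assumed here), for which
  # GDN reduces to y = x / sqrt(1 + 0.1 * x**2) and inverse GDN multiplies by
  # that square root instead of dividing.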
def _runGDN(self, x, shape, inverse, data_format):
inputs = array_ops.placeholder(dtypes.float32, shape)
outputs = _layers.gdn(inputs, inverse=inverse, data_format=data_format)
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
y, = sess.run([outputs], {inputs: x})
return y
def testInvalidDataFormat(self):
x = np.random.uniform(size=(1, 2, 3, 4))
with self.assertRaises(ValueError):
self._runGDN(x, x.shape, False, 'NHWC')
def testUnknownDim(self):
x = np.random.uniform(size=(1, 2, 3, 4))
with self.assertRaises(ValueError):
self._runGDN(x, 4 * [None], False, 'channels_last')
def testChannelsLast(self):
for ndim in [3, 4, 5]:
x = np.random.uniform(size=(1, 2, 3, 4)[:ndim])
y = self._runGDN(x, x.shape, False, 'channels_last')
self.assertEqual(x.shape, y.shape)
self.assertAllClose(y, x / np.sqrt(1 + .1 * (x**2)), rtol=0, atol=1e-6)
def testChannelsFirst(self):
# `bias_add` doesn't support NCHW on CPU.
if test.is_gpu_available(cuda_only=True):
for ndim in [3, 4, 5]:
x = np.random.uniform(size=(4, 3, 2, 1)[:ndim])
y = self._runGDN(x, x.shape, False, 'channels_first')
self.assertEqual(x.shape, y.shape)
self.assertAllClose(y, x / np.sqrt(1 + .1 * (x**2)), rtol=0, atol=1e-6)
def testWrongDims(self):
for ndim in [1, 2, 6]:
x = np.random.uniform(size=(1, 2, 3, 4, 3, 2)[:ndim])
with self.assertRaises(ValueError):
self._runGDN(x, x.shape, False, 'channels_last')
def testIGDN(self):
x = np.random.uniform(size=(1, 2, 3, 4))
y = self._runGDN(x, x.shape, True, 'channels_last')
self.assertEqual(x.shape, y.shape)
self.assertAllClose(y, x * np.sqrt(1 + .1 * (x**2)), rtol=0, atol=1e-6)
class ImagesToSequenceTest(test.TestCase):
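  # The expected shapes below follow from images_to_sequence mapping a batch
  # of images [batch, height, width, channels] to a sequence
  # [width, batch * height, channels]; e.g. (2, 7, 11, 5) -> [11, 14, 5].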
def testInvalidDataFormat(self):
height, width = 7, 11
images = np.random.uniform(size=(5, height, width, 2))
with self.assertRaisesRegexp(ValueError,
'data_format has to be either NCHW or NHWC.'):
_layers.images_to_sequence(images, data_format='CHWN')
def testImagesToSequenceDims(self):
height, width = 7, 11
images = np.random.uniform(size=(2, height, width, 5)).astype(np.float32)
output = _layers.images_to_sequence(images)
self.assertListEqual(output.get_shape().as_list(), [11, 14, 5])
def testImagesToSequenceNCHW(self):
height, width = 7, 11
images = np.random.uniform(size=(2, 5, height, width)).astype(np.float32)
output = _layers.images_to_sequence(images, data_format='NCHW')
self.assertListEqual(output.get_shape().as_list(), [11, 14, 5])
class MaxPool2DTest(test.TestCase):
def testInvalidDataFormat(self):
height, width = 3, 6
images = np.random.uniform(size=(5, height, width, 3))
with self.assertRaisesRegexp(ValueError,
'data_format has to be either NCHW or NHWC.'):
_layers.max_pool2d(images, [3, 3], data_format='CHWN')
def testCreateMaxPool(self):
height, width = 3, 6
images = np.random.uniform(size=(5, height, width, 3)).astype(np.float32)
output = _layers.max_pool2d(images, [3, 3])
self.assertEqual(output.op.name, 'MaxPool2D/MaxPool')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 2, 3])
def testCreateMaxPoolNCHW(self):
height, width = 3, 6
images = np.random.uniform(size=(5, 3, height, width)).astype(np.float32)
output = _layers.max_pool2d(images, [3, 3], data_format='NCHW')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 1, 2])
def testCollectOutputs(self):
height, width = 3, 6
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.max_pool2d(images, [3, 3], outputs_collections='outputs')
output_collected = ops.get_collection('outputs')[0]
self.assertEqual(output_collected.aliases, ['MaxPool2D'])
self.assertEqual(output_collected, output)
def testCreateSquareMaxPool(self):
height, width = 3, 6
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.max_pool2d(images, 3)
self.assertEqual(output.op.name, 'MaxPool2D/MaxPool')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 2, 3])
def testCreateMaxPoolWithScope(self):
height, width = 3, 6
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.max_pool2d(images, [3, 3], scope='pool1')
self.assertEqual(output.op.name, 'pool1/MaxPool')
def testCreateMaxPoolWithSamePadding(self):
height, width = 3, 6
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.max_pool2d(images, [3, 3], padding='SAME')
self.assertListEqual(output.get_shape().as_list(), [5, 2, 3, 3])
def testCreateMaxPoolWithSamePaddingNCHW(self):
height, width = 3, 6
images = random_ops.random_uniform((5, 3, height, width), seed=1)
output = _layers.max_pool2d(
images, [3, 3], padding='SAME', data_format='NCHW')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 2, 3])
def testCreateMaxPoolStrideWithSamePadding(self):
height, width = 3, 6
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.max_pool2d(images, [3, 3], stride=1, padding='SAME')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
def testGlobalMaxPool(self):
height, width = 3, 6
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.max_pool2d(images, images.get_shape()[1:3], stride=1)
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
class MaxPool3DTest(test.TestCase):
def testInvalidDataFormat(self):
depth, height, width = 3, 6, 9
images = np.random.uniform(size=(5, depth, height, width, 3))
with self.assertRaisesRegexp(
ValueError, 'data_format has to be either NCDHW or NDHWC.'):
_layers.max_pool3d(images, [3, 3, 3], data_format='CDHWN')
def testCreateMaxPool(self):
depth, height, width = 3, 6, 9
images = np.random.uniform(size=(5, depth, height, width, 3)).astype(
np.float32)
output = _layers.max_pool3d(images, [3, 3, 3])
self.assertEqual(output.op.name, 'MaxPool3D/MaxPool3D')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 2, 4, 3])
def testCreateMaxPoolNCDHW(self):
depth, height, width = 3, 6, 9
images = np.random.uniform(size=(5, 3, depth, height, width)).astype(
np.float32)
output = _layers.max_pool3d(images, [3, 3, 3], data_format='NCDHW')
    self.assertEqual(output.op.name, 'MaxPool3D/transpose_1')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 1, 2, 4])
def testCollectOutputs(self):
depth, height, width = 3, 6, 9
images = random_ops.random_uniform((5, depth, height, width, 3), seed=1)
output = _layers.max_pool3d(
images, [3, 3, 3], outputs_collections='outputs')
output_collected = ops.get_collection('outputs')[0]
self.assertEqual(output_collected.aliases, ['MaxPool3D'])
self.assertEqual(output_collected, output)
def testCreateSquareMaxPool(self):
depth, height, width = 3, 6, 9
images = random_ops.random_uniform((5, depth, height, width, 3), seed=1)
output = _layers.max_pool3d(images, 3)
self.assertEqual(output.op.name, 'MaxPool3D/MaxPool3D')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 2, 4, 3])
def testCreateMaxPoolWithScope(self):
depth, height, width = 3, 6, 9
images = random_ops.random_uniform((5, depth, height, width, 3), seed=1)
output = _layers.max_pool3d(images, [3, 3, 3], scope='pool1')
self.assertEqual(output.op.name, 'pool1/MaxPool3D')
def testCreateMaxPoolWithSamePadding(self):
depth, height, width = 3, 6, 9
images = random_ops.random_uniform((5, depth, height, width, 3), seed=1)
output = _layers.max_pool3d(images, [3, 3, 3], padding='SAME')
self.assertListEqual(output.get_shape().as_list(), [5, 2, 3, 5, 3])
def testCreateMaxPoolWithSamePaddingNCDHW(self):
depth, height, width = 3, 6, 9
images = random_ops.random_uniform((5, 3, depth, height, width), seed=1)
output = _layers.max_pool3d(
images, [3, 3, 3], padding='SAME', data_format='NCDHW')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 2, 3, 5])
def testCreateMaxPoolStrideWithSamePadding(self):
depth, height, width = 3, 6, 9
images = random_ops.random_uniform((5, depth, height, width, 3), seed=1)
output = _layers.max_pool3d(images, [3, 3, 3], stride=1, padding='SAME')
self.assertListEqual(output.get_shape().as_list(),
[5, depth, height, width, 3])
def testGlobalMaxPool(self):
depth, height, width = 3, 6, 9
images = random_ops.random_uniform((5, depth, height, width, 3), seed=1)
output = _layers.max_pool3d(images, images.get_shape()[1:4], stride=1)
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 1, 3])
class OneHotEncodingTest(test.TestCase):
def testOneHotEncodingCreate(self):
with self.cached_session():
labels = np.array([0, 1, 2])
output = _layers.one_hot_encoding(labels, num_classes=3)
self.assertEqual(output.op.name, 'OneHotEncoding/one_hot')
self.assertListEqual(output.get_shape().as_list(), [3, 3])
def testCollectOutputs(self):
with self.cached_session():
labels = constant_op.constant([0, 1, 2])
output = _layers.one_hot_encoding(
labels, num_classes=3, outputs_collections='outputs')
c_output = ops.get_collection('outputs')[0]
self.assertEqual(c_output.aliases, ['OneHotEncoding'])
self.assertEqual(c_output, output)
def testOneHotEncoding(self):
with self.cached_session():
labels = constant_op.constant([0, 1, 2])
one_hot_labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
output = _layers.one_hot_encoding(labels, num_classes=3)
self.assertAllClose(output.eval(), one_hot_labels.eval())
def testOneHotEncodingInt32(self):
with self.cached_session():
labels = constant_op.constant([0, 1, 2], dtype=dtypes.int32)
one_hot_labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
output = _layers.one_hot_encoding(labels, num_classes=3)
self.assertAllClose(output.eval(), one_hot_labels.eval())
class RepeatTests(test.TestCase):
def testRepeat(self):
height, width = 3, 3
with self.cached_session():
images = np.random.uniform(size=(5, height, width, 3)).astype(np.float32)
output = _layers.repeat(images, 3, layers_lib.conv2d, 32, [3, 3])
self.assertEqual(output.op.name, 'Repeat/convolution2d_3/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 32])
def testRepeatWithScope(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
output = _layers.repeat(
images, 3, layers_lib.conv2d, 32, [3, 3], scope='conv1')
self.assertEqual(output.op.name, 'conv1/conv1_3/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 32])
class SeparableConv2dTest(test.TestCase):
def testCreateConvInt32(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, dtype=dtypes.int32, maxval=12345)
with self.assertRaisesRegexp(TypeError, 'non-floating point type'):
layers_lib.separable_conv2d(images, 32, [3, 3], 2)
def testCreateConvFloat32(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, dtype=dtypes.float32)
output = layers_lib.separable_conv2d(images, 32, [3, 3], 2)
self.assertEqual(output.op.name, 'SeparableConv2d/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testCreateDepthwiseConv(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.separable_conv2d(images, None, [3, 3], 2)
self.assertEqual(output.op.name, 'SeparableConv2d/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 6])
def testCreateConvCreatesWeightsAndBiasesVars(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.cached_session():
self.assertFalse(variables.get_variables('conv1/depthwise_weights'))
self.assertFalse(variables.get_variables('conv1/pointwise_weights'))
self.assertFalse(variables.get_variables('conv1/biases'))
layers_lib.separable_conv2d(images, 32, [3, 3], 4, scope='conv1')
self.assertTrue(variables.get_variables('conv1/depthwise_weights'))
self.assertTrue(variables.get_variables('conv1/pointwise_weights'))
self.assertTrue(variables.get_variables('conv1/biases'))
def testCreateAtrousConvCreatesWeightsAndBiasesVars(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.cached_session():
self.assertFalse(variables.get_variables('conv1/depthwise_weights'))
self.assertFalse(variables.get_variables('conv1/pointwise_weights'))
self.assertFalse(variables.get_variables('conv1/biases'))
layers_lib.separable_conv2d(images, 32, [3, 3], 4, rate=2, scope='conv1')
self.assertTrue(variables.get_variables('conv1/depthwise_weights'))
self.assertTrue(variables.get_variables('conv1/pointwise_weights'))
self.assertTrue(variables.get_variables('conv1/biases'))
def testCreateDepthwiseConvCreatesWeightsAndBiasesVars(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.cached_session():
self.assertFalse(variables.get_variables('conv1/depthwise_weights'))
self.assertFalse(variables.get_variables('conv1/pointwise_weights'))
self.assertFalse(variables.get_variables('conv1/biases'))
layers_lib.separable_conv2d(images, None, [3, 3], 4, scope='conv1')
self.assertTrue(variables.get_variables('conv1/depthwise_weights'))
self.assertFalse(variables.get_variables('conv1/pointwise_weights'))
self.assertTrue(variables.get_variables('conv1/biases'))
def testCreateConvWithScope(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.separable_conv2d(images, 32, [3, 3], 6, scope='conv1')
self.assertEqual(output.op.name, 'conv1/Relu')
def testCreateConvWithoutActivation(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.separable_conv2d(
images, 32, [3, 3], 8, activation_fn=None)
self.assertEqual(output.op.name, 'SeparableConv2d/BiasAdd')
def testCreateConvValid(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.separable_conv2d(
images, 32, [3, 3], 2, padding='VALID')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 32])
def testCreateAtrousConvValid(self):
height, width = 5, 5
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.separable_conv2d(
images, 32, [3, 3], 2, padding='VALID', rate=2)
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 32])
def testCreateDepthwiseConvValid(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.separable_conv2d(
images, None, [3, 3], 2, padding='VALID')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 6])
def testCreateAtrousDepthwiseConvValid(self):
height, width = 5, 5
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.separable_conv2d(
images, None, [3, 3], 2, padding='VALID', rate=2)
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 6])
def testCreateConvWithWeightDecay(self):
random_seed.set_random_seed(0)
height, width = 3, 3
with self.cached_session() as sess:
images = random_ops.random_uniform((5, height, width, 3), seed=1)
regularizer = regularizers.l2_regularizer(0.01)
layers_lib.separable_conv2d(
images,
32, [3, 3],
2,
weights_regularizer=regularizer,
weights_initializer=init_ops.ones_initializer())
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 2)
weight_decay = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertEqual(
weight_decay.op.name,
'SeparableConv2d/depthwise_kernel/Regularizer/l2_regularizer')
sess.run(variables_lib.global_variables_initializer())
depth_weight_one = sess.run(weight_decay)
weight_decay = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[1]
self.assertEqual(
weight_decay.op.name,
'SeparableConv2d/pointwise_kernel/Regularizer/l2_regularizer')
pointwise_weight_one = sess.run(weight_decay)
regularizer = regularizers.l2_regularizer(1.0)
layers_lib.separable_conv2d(
images,
32, [3, 3],
2,
weights_regularizer=regularizer,
weights_initializer=init_ops.ones_initializer())
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 4)
weight_decay = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[2]
sess.run(variables_lib.global_variables_initializer())
depth_weight_two = sess.run(weight_decay)
weight_decay = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[3]
pointwise_weight_two = sess.run(weight_decay)
self.assertAllClose(
[100.0 * depth_weight_one, 100.0 * pointwise_weight_one],
[depth_weight_two, pointwise_weight_two])
def testReuseConvWithWeightDecay(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
regularizer = regularizers.l2_regularizer(0.01)
layers_lib.separable_conv2d(
images, 32, [3, 3], 2, weights_regularizer=regularizer, scope='conv1')
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 2)
layers_lib.separable_conv2d(
images,
32, [3, 3],
2,
weights_regularizer=regularizer,
scope='conv1',
reuse=True)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 2)
def testConvWithBatchNorm(self):
height, width = 3, 3
batch_norm_collection = 'moving_vars'
normalizer_params = {
'variables_collections': {
'beta': [batch_norm_collection],
'gamma': [batch_norm_collection],
'moving_mean': [batch_norm_collection],
'moving_variance': [batch_norm_collection],
}
}
images = random_ops.random_uniform((5, height, width, 3), seed=1)
net = layers_lib.separable_conv2d(
images,
8, [3, 3],
2,
normalizer_fn=_layers.batch_norm,
normalizer_params=normalizer_params,
scope='conv1')
net = layers_lib.separable_conv2d(
net,
32, [3, 3],
2,
normalizer_fn=_layers.batch_norm,
normalizer_params=normalizer_params,
scope='conv2')
self.assertEqual(len(ops.get_collection(batch_norm_collection)), 6)
self.assertEqual(len(variables.get_variables('conv1/BatchNorm')), 3)
self.assertEqual(len(variables.get_variables('conv2/BatchNorm')), 3)
def testConvWithInputsViaPlaceHolder(self):
height, width = 3, 3
images_placeholder = array_ops.placeholder(
dtypes.float32, shape=(None, None, None, 3))
net = layers_lib.separable_conv2d(
images_placeholder,
8, [3, 3],
2,
normalizer_fn=_layers.batch_norm,
normalizer_params={},
scope='conv1')
init_op = variables_lib.global_variables_initializer()
with self.cached_session() as sess:
images = np.random.rand(5, height, width, 3)
sess.run(init_op)
sess.run(net, feed_dict={images_placeholder: images})
def testTrainableFlagIsPassedOn(self):
for trainable in [True, False]:
for num_filters in [None, 8]:
with ops.Graph().as_default():
input_size = [5, 10, 12, 3]
images = random_ops.random_uniform(input_size, seed=1)
layers_lib.separable_conv2d(
images, num_filters, [3, 3], 1, trainable=trainable)
model_variables = variables.get_model_variables()
trainable_variables = variables_lib.trainable_variables()
for model_variable in model_variables:
self.assertEqual(trainable, model_variable in trainable_variables)
def testSepConvNCHW(self):
for num_filters, correct_output_filters in zip((None, 5), (6, 5)):
with self.cached_session():
batch, height, width = 4, 10, 12
kernel_dim, stride = 3, 2
images = random_ops.random_uniform((batch, 3, height, width), seed=1)
output = layers_lib.separable_conv2d(
images,
num_outputs=num_filters,
kernel_size=[kernel_dim, kernel_dim],
depth_multiplier=2,
stride=stride,
padding='VALID',
data_format='NCHW')
self.assertListEqual(output.get_shape().as_list(), [
batch, correct_output_filters, (height - kernel_dim + 1) // stride,
(width - kernel_dim + 1) // stride
])
class ScaleGradientTests(test.TestCase):
"""Simple tests of the scale_gradient function."""
def testBasic(self):
with self.cached_session():
x = np.array([42], np.float32)
gradient_scale = np.array([2], np.float32)
x = ops.convert_to_tensor(x)
y = layers_lib.scale_gradient(x, gradient_scale)
np.testing.assert_array_equal(x.eval(), y.eval())
g_x, = gradients_impl.gradients(y, [x], [np.array([3], np.float32)])
np.testing.assert_array_equal([3 * 2], g_x.eval())
class SequenceToImagesTest(test.TestCase):
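  # sequence_to_images reshapes a sequence [num_time_steps, num_batches,
  # channels] into images with the requested height, i.e.
  # [num_batches // height, height, num_time_steps, channels]; e.g.
  # (11, 14, 5) with height 7 -> [2, 7, 11, 5].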
  def testSequenceToImagesDims(self):
num_batches = 14
num_time_steps = 11
num_channels = 5
desired_height = 7
sequence = np.random.uniform(size=(num_time_steps,
num_batches,
num_channels)).astype(np.float32)
output = _layers.sequence_to_images(sequence, desired_height)
self.assertListEqual(output.get_shape().as_list(), [2, 7, 11, 5])
  def testSequenceToImagesNCHW(self):
num_batches = 14
num_time_steps = 11
num_channels = 5
desired_height = 7
sequence = np.random.uniform(size=(num_time_steps,
num_batches,
num_channels)).astype(np.float32)
output = _layers.sequence_to_images(sequence,
desired_height,
output_data_format='channels_first')
self.assertListEqual(output.get_shape().as_list(), [2, 5, 7, 11])
class SoftmaxTests(test.TestCase):
def setUp(self):
self.low = 1 / (1 + math.e)
self.high = math.e / (1 + math.e)
def testSoftmax2D(self):
logits = constant_op.constant([[0.0, 1], [1, 1], [1, 0]])
prediction = _layers.softmax(logits)
exp_prediction = np.array([[self.low, self.high], [0.5, 0.5],
[self.high, self.low]])
with self.cached_session() as sess:
prediction = sess.run(prediction)
self.assertAllClose(exp_prediction, prediction)
def testSoftmax3D(self):
logits = np.ones((2, 3, 2))
logits[0, 0, 0] = 0
logits[1, 1, 1] = 0
logits = constant_op.constant(logits)
exp_prediction = 0.5 * np.ones((2, 3, 2))
exp_prediction[0, 0, 0] = self.low
exp_prediction[0, 0, 1] = self.high
exp_prediction[1, 1, 0] = self.high
exp_prediction[1, 1, 1] = self.low
prediction = _layers.softmax(logits)
with self.cached_session() as sess:
prediction = sess.run(prediction)
self.assertAllClose(exp_prediction, prediction)
def testSoftmax3DUnknownSize(self):
logits = np.ones((2, 3, 2))
logits[0, 0, 0] = 0
logits[1, 1, 1] = 0
logit_placeholder = array_ops.placeholder(
dtypes.float32, shape=(None, None, 2))
feed_dict = {logit_placeholder: logits}
exp_prediction = 0.5 * np.ones((2, 3, 2))
exp_prediction[0, 0, 0] = self.low
exp_prediction[0, 0, 1] = self.high
exp_prediction[1, 1, 0] = self.high
exp_prediction[1, 1, 1] = self.low
prediction = _layers.softmax(logit_placeholder)
with self.cached_session() as sess:
prediction = sess.run(prediction, feed_dict=feed_dict)
self.assertAllClose(exp_prediction, prediction)
def testSoftmaxUndefinedNthDimension(self):
logits = array_ops.placeholder(dtypes.float32)
with self.assertRaises(ValueError):
_layers.softmax(logits)
class SpatialSoftmaxTests(test.TestCase):
def _SpatialSoftmax(self, x_loc, y_loc, height, width, batch_size, nchannels):
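    """Builds the expected keypoints for one dominant activation per channel.

    spatial_softmax returns, for each channel, the softmax-weighted mean
    image coordinate in [-1, 1]; with a single dominant activation that is
    just the activation's location mapped linearly onto [-1, 1], which is
    what this NumPy reference computes.
    """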
# Convert specified activation locations to range [-1, 1].
height_lin = np.linspace(-1, 1, height)
width_lin = np.linspace(-1, 1, width)
x_lin = np.expand_dims(np.array([height_lin[i] for i in x_loc]), 1)
y_lin = np.expand_dims(np.array([width_lin[i] for i in y_loc]), 1)
np_keypoints = np.array(
[np.concatenate([x_lin, y_lin], axis=1) for i in range(batch_size)])
np_keypoints = np.reshape(np_keypoints, [-1, nchannels * 2])
return np_keypoints
def testSpatialSoftmaxShape(self):
batch_shape = (2, 35, 30, 2)
features = array_ops.placeholder(dtypes.float32, shape=batch_shape)
np_features = np.zeros(batch_shape, dtype=np.float32)
spatial_softmax = _layers.spatial_softmax(features)
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
feed_dict = {features: np_features}
keypoints = sess.run(spatial_softmax, feed_dict)
self.assertAllEqual(keypoints.shape, (batch_shape[0], batch_shape[3] * 2))
def testSpatialSoftmaxShapeNCHW(self):
batch_shape = (2, 2, 35, 35)
features = array_ops.placeholder(dtypes.float32, shape=batch_shape)
np_features = np.zeros(batch_shape, dtype=np.float32)
spatial_softmax = _layers.spatial_softmax(features, data_format='NCHW')
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
feed_dict = {features: np_features}
keypoints = sess.run(spatial_softmax, feed_dict)
self.assertAllEqual(keypoints.shape, (batch_shape[0], batch_shape[1] * 2))
def testTwoMaxActivationsSameChannel(self):
batch_size, height, width, nchannels = (2, 35, 35, 1)
batch_shape = (batch_size, height, width, nchannels)
# Put high equal activations on different locations in the same channel.
features = array_ops.placeholder(dtypes.float32, shape=batch_shape)
spatial_softmax = _layers.spatial_softmax(features)
np_features = np.zeros(batch_shape, dtype=np.float32)
x0, y0 = (10, 10)
x1, y1 = (20, 20)
avg_x = (x0 + x1) // 2
avg_y = (y0 + y1) // 2
np_features[:, x0, y0, :] = 100.
np_features[:, x1, y1, :] = 100.
x_loc = [avg_x]
y_loc = [avg_y]
np_keypoints = self._SpatialSoftmax(x_loc, y_loc, height, width, batch_size,
nchannels)
    # Make sure the expected location keypoints match the actual keypoints.
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
feed_dict = {features: np_features}
keypoints = sess.run(spatial_softmax, feed_dict)
self.assertAllClose(keypoints, np_keypoints)
def testMaxActivationsAtEdges(self):
batch_size, height, width, nchannels = (2, 35, 35, 4)
batch_shape = (batch_size, height, width, nchannels)
# Put high activations on edges of spatial extent.
features = array_ops.placeholder(dtypes.float32, shape=batch_shape)
spatial_softmax = _layers.spatial_softmax(features)
np_features = np.zeros(batch_shape, dtype=np.float32)
edges = [(0, 0), (0, width - 1), (height - 1, 0), (height - 1, width - 1)]
x_loc, y_loc = zip(*edges)
for c in range(nchannels):
np_features[:, x_loc[c], y_loc[c], c] = 100.
np_keypoints = self._SpatialSoftmax(x_loc, y_loc, height, width, batch_size,
nchannels)
    # Make sure the expected location keypoints match the actual keypoints.
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
feed_dict = {features: np_features}
keypoints = sess.run(spatial_softmax, feed_dict)
self.assertAllClose(keypoints, np_keypoints)
def testSpatialSoftmaxVariableSized(self):
batch_size = 2
nchannels = 2
height1, width1 = (35, 30)
height2, width2 = (20, 20)
batch_shape1 = (batch_size, height1, width1, nchannels)
batch_shape2 = (batch_size, height2, width2, nchannels)
variable_sized_shape = (None, None, None, 2)
# Put high activations on single spatial locations.
features = array_ops.placeholder(dtypes.float32, shape=variable_sized_shape)
spatial_softmax = _layers.spatial_softmax(features)
np_features1 = np.zeros(batch_shape1, dtype=np.float32)
np_features2 = np.zeros(batch_shape2, dtype=np.float32)
x_loc = [15, 2]
y_loc = [10, 9]
for c in range(nchannels):
np_features1[:, x_loc[c], y_loc[c], c] = 100.
np_features2[:, x_loc[c], y_loc[c], c] = 100.
np_keypoints1 = self._SpatialSoftmax(x_loc, y_loc, height1, width1,
batch_size, nchannels)
np_keypoints2 = self._SpatialSoftmax(x_loc, y_loc, height2, width2,
batch_size, nchannels)
    # Make sure the expected location keypoints match the actual keypoints.
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
feed_dict = {features: np_features1}
tf_keypoints1 = sess.run(spatial_softmax, feed_dict)
self.assertAllClose(tf_keypoints1, np_keypoints1)
feed_dict = {features: np_features2}
tf_keypoints2 = sess.run(spatial_softmax, feed_dict)
self.assertAllClose(tf_keypoints2, np_keypoints2)
def testSpatialSoftmax(self):
batch_size, height, width, nchannels = (2, 35, 35, 2)
batch_shape = (batch_size, height, width, nchannels)
# Put high activations on single spatial locations.
features = array_ops.placeholder(dtypes.float32, shape=batch_shape)
spatial_softmax = _layers.spatial_softmax(features)
np_features = np.zeros(batch_shape, dtype=np.float32)
x_loc = [15, 2]
y_loc = [10, 28]
for c in range(nchannels):
np_features[:, x_loc[c], y_loc[c], c] = 100.
np_keypoints = self._SpatialSoftmax(x_loc, y_loc, height, width, batch_size,
nchannels)
    # Make sure the expected location keypoints match the actual keypoints.
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
feed_dict = {features: np_features}
keypoints = sess.run(spatial_softmax, feed_dict)
self.assertAllClose(keypoints, np_keypoints)
def testSpatialSoftmaxNCHW(self):
batch_size, nchannels, height, width = (2, 2, 35, 35)
batch_shape = (batch_size, nchannels, height, width)
# Put high activations on single spatial locations.
features = array_ops.placeholder(dtypes.float32, shape=batch_shape)
spatial_softmax = _layers.spatial_softmax(features, data_format='NCHW')
np_features = np.zeros(batch_shape, dtype=np.float32)
x_loc = [15, 2]
y_loc = [10, 28]
for c in range(nchannels):
np_features[:, c, x_loc[c], y_loc[c]] = 100.
np_keypoints = self._SpatialSoftmax(x_loc, y_loc, height, width, batch_size,
nchannels)
    # Make sure the expected location keypoints match the actual keypoints.
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
feed_dict = {features: np_features}
keypoints = sess.run(spatial_softmax, feed_dict)
self.assertAllClose(keypoints, np_keypoints)
def testSpatialSoftmaxToFullyConnected(self):
batch_shape = (2, 35, 35, 2)
features = array_ops.placeholder(dtypes.float32, shape=batch_shape)
spatial_softmax = _layers.spatial_softmax(features)
net = _layers.fully_connected(spatial_softmax, 10)
np_features = np.zeros(batch_shape, dtype=np.float32)
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
feed_dict = {features: np_features}
sess.run(net, feed_dict)
class StackTests(test.TestCase):
def testStackFullyConnected(self):
height, width = 3, 3
with self.cached_session():
images = np.random.uniform(size=(5, height * width * 3))
output = _layers.stack(images, _layers.fully_connected, [10, 20, 30])
self.assertEqual(output.op.name, 'Stack/fully_connected_3/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 30])
def testStackFullyConnectedFailOnReuse(self):
height, width = 3, 3
with self.cached_session():
with variable_scope.variable_scope('test', reuse=True):
images = np.random.uniform(size=(5, height * width * 3))
with self.assertRaises(ValueError):
_layers.stack(images, _layers.fully_connected, [10, 20, 30])
def testStackRelu(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform(
(5, height * width * 3), seed=1, name='images')
output = _layers.stack(images, layers_lib.relu, [10, 20, 30])
self.assertEqual(output.op.name, 'Stack/fully_connected_3/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 30])
def testStackElu(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform(
(5, height * width * 3), seed=1, name='images')
output = _layers.stack(images, layers_lib.elu, [10, 20, 30])
self.assertEqual(output.op.name, 'Stack/fully_connected_3/Elu')
self.assertListEqual(output.get_shape().as_list(), [5, 30])
def testStackConvolution2d(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
output = _layers.stack(
images,
layers_lib.convolution2d, [10, 20, 30],
kernel_size=[3, 3],
padding='SAME')
self.assertEqual(output.op.name, 'Stack/convolution2d_3/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 30])
def testStackWithScope(self):
height, width = 3, 3
with self.cached_session():
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
output = _layers.stack(
images,
layers_lib.convolution2d, [10, 20, 30],
kernel_size=[3, 3],
padding='SAME',
scope='conv1')
self.assertEqual(output.op.name, 'conv1/conv1_3/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 30])
class UnitNormTests(test.TestCase):
def testUnitNormWithRandomMatrix(self):
height, width = 2, 3
for dim in range(3):
random_seed.set_random_seed(0)
image = random_ops.random_uniform((height, width, 3))
output = _layers.unit_norm(image, dim=dim, epsilon=1e-6)
norms = math_ops.sqrt(
math_ops.reduce_sum(math_ops.square(output), axis=dim))
shape = [height, width, 3]
del shape[dim]
expected = np.ones(shape)
with self.cached_session():
actual = norms.eval()
self.assertAllClose(expected, actual, 1e-4, 1e-4)
def testDimEqualToRankRaisesError(self):
height, width = 2, 3
random_seed.set_random_seed(0)
image = random_ops.random_uniform((height, width, 3))
with self.assertRaises(ValueError):
_layers.unit_norm(image, dim=3, epsilon=1e-6)
def testUnknownRankRaisesError(self):
image = array_ops.placeholder(dtypes.float32)
with self.assertRaises(ValueError):
_layers.unit_norm(image, dim=2)
def testKnownRankUnknownDimsSucceeds(self):
height, width = 2, 3
for dim in range(3):
placeholder_value = np.ones((height, width, 3))
shape = [height, width, 3]
del shape[dim]
expected = np.ones(shape)
image = array_ops.placeholder(dtypes.float32, (None, None, 3))
output = _layers.unit_norm(image, dim=dim, epsilon=1e-6)
norms = math_ops.sqrt(
math_ops.reduce_sum(math_ops.square(output), axis=dim))
with self.cached_session():
actual = norms.eval({image: placeholder_value})
self.assertAllClose(expected, actual, 1e-4, 1e-4)
class PoincareNormalizeTest(test.TestCase):
def _PoincareNormalize(self, x, dim, epsilon=1e-5):
if isinstance(dim, list):
norm = np.linalg.norm(x, axis=tuple(dim))
for d in dim:
norm = np.expand_dims(norm, d)
norm_x = ((1. - epsilon) * x) / norm
else:
norm = np.expand_dims(np.apply_along_axis(np.linalg.norm, dim, x), dim)
norm_x = ((1. - epsilon) * x) / norm
return np.where(norm > 1.0 - epsilon, norm_x, x)
def testPoincareNormalize(self):
x_shape = [20, 7, 3]
epsilon = 1e-5
tol = 1e-6
np.random.seed(1)
x_np = np.random.random_sample(x_shape).astype(np.float32)
for dim in range(len(x_shape)):
y_np = self._PoincareNormalize(x_np, dim, epsilon)
with self.cached_session():
x_tf = constant_op.constant(x_np, name='x')
y_tf = _layers.poincare_normalize(x_tf, dim, epsilon)
y_tf_eval = y_tf.eval()
norm = np.linalg.norm(y_np, axis=dim)
self.assertLessEqual(norm.max(), 1. - epsilon + tol)
norm = np.linalg.norm(y_tf_eval, axis=dim)
self.assertLessEqual(norm.max(), 1. - epsilon + tol)
self.assertAllClose(y_np, y_tf_eval)
def testPoincareNormalizeDimArray(self):
x_shape = [20, 7, 3]
epsilon = 1e-5
tol = 1e-6
np.random.seed(1)
x_np = np.random.random_sample(x_shape).astype(np.float32)
dim = [1, 2]
y_np = self._PoincareNormalize(x_np, dim, epsilon)
with self.cached_session():
x_tf = constant_op.constant(x_np, name='x')
y_tf = _layers.poincare_normalize(x_tf, dim, epsilon)
y_tf_eval = y_tf.eval()
norm = np.linalg.norm(y_np, axis=tuple(dim))
self.assertLess(norm.max(), 1. - epsilon + tol)
norm = np.linalg.norm(y_tf_eval, axis=tuple(dim))
self.assertLess(norm.max(), 1. - epsilon + tol)
self.assertAllClose(y_np, y_tf_eval, rtol=1e-6, atol=1e-6)
def testPoincareNormalizeGradient(self):
x_shape = [20, 7, 3]
np.random.seed(1)
x_np = np.random.random_sample(x_shape).astype(np.float64)
for dim in range(len(x_shape)):
with self.cached_session():
x_tf = constant_op.constant(x_np, name='x')
y_tf = _layers.poincare_normalize(x_tf, dim)
err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf,
x_shape)
        print('PoincareNormalize gradient err = %g' % err)
self.assertLess(err, 1e-4)
# TODO(b/28426988): Add separate tests for non-legacy versions.
class LegacyFullyConnectedTest(test.TestCase):
def setUp(self):
test.TestCase.setUp(self)
random_seed.set_random_seed(1234)
self.input = constant_op.constant([[1., 2., 3.], [-4., 15., -6.]])
self.input_3_dim_arr = [[[1., 1.1, 1.2], [2., 2.1, 2.2], [3., 3.1, 3.2],
[4., 4.1, 4.2]], [[5., 5.1, 5.2], [6., 6.1, 6.2],
[7., 7.1, 7.2], [8., 8.1, 8.2]]]
self.input_3_dim = constant_op.constant(self.input_3_dim_arr)
assert not ops.get_collection(ops.GraphKeys.SUMMARIES)
def _fully_connected_basic_use(self, x, num_output_units, expected_shape):
output = _layers.legacy_fully_connected(
x, num_output_units, activation_fn=nn_ops.relu)
with session.Session() as sess:
with self.assertRaises(errors_impl.FailedPreconditionError):
sess.run(output)
variables_lib.global_variables_initializer().run()
out_value, shape_value = sess.run([output, array_ops.shape(output)])
self.assertAllClose(shape_value, expected_shape)
self.assertEqual(output.get_shape().as_list(), expected_shape)
self.assertTrue(np.all(out_value >= 0), 'Relu should have all values >= 0.')
self.assertEqual(2,
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)))
self.assertEqual(
0, len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)))
def test_fully_connected_basic_use(self):
self._fully_connected_basic_use(self.input, 8, [2, 8])
def test_fully_connected_basic_use_multi_dim(self):
for last_dim in [1, 3]:
self.setUp()
self._fully_connected_basic_use(self.input_3_dim, last_dim,
[2, 4, last_dim])
def test_relu_layer_basic_use(self):
output = layers_lib.legacy_relu(self.input, 8)
with session.Session() as sess:
with self.assertRaises(errors_impl.FailedPreconditionError):
sess.run(output)
variables_lib.global_variables_initializer().run()
out_value = sess.run(output)
self.assertEqual(output.get_shape().as_list(), [2, 8])
self.assertTrue(np.all(out_value >= 0), 'Relu should have all values >= 0.')
self.assertEqual(2,
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)))
self.assertEqual(
0, len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)))
def test_variable_reuse_with_scope(self):
with variable_scope.variable_scope('test') as vs:
output1 = layers_lib.legacy_relu(self.input, 8)
output2 = layers_lib.legacy_relu(self.input, 8)
with variable_scope.variable_scope(vs, reuse=True):
output3 = layers_lib.legacy_relu(self.input, 8)
with session.Session() as sess:
variables_lib.global_variables_initializer().run()
out_value1, out_value2, out_value3 = sess.run([output1, output2, output3])
self.assertFalse(np.allclose(out_value1, out_value2))
self.assertAllClose(out_value1, out_value3)
def test_variable_reuse_with_template(self):
tmpl1 = template.make_template(
'test', _layers.legacy_fully_connected, num_output_units=8)
output1 = tmpl1(self.input)
output2 = tmpl1(self.input)
with session.Session() as sess:
variables_lib.global_variables_initializer().run()
out_value1, out_value2 = sess.run([output1, output2])
self.assertAllClose(out_value1, out_value2)
def _custom_initializers(self, x, num_output_units, expected_outputs):
output = layers_lib.legacy_relu(
x,
num_output_units,
weight_init=init_ops.constant_initializer(2.0),
bias_init=init_ops.constant_initializer(1.0))
with session.Session() as sess:
variables_lib.global_variables_initializer().run()
out_value = sess.run(output)
self.assertAllClose(np.array(expected_outputs), out_value)
def test_custom_initializers(self):
self._custom_initializers(self.input, 2, [[13.0, 13.0], [11.0, 11.0]])
def test_custom_initializers_multi_dim(self):
self._custom_initializers(
self.input_3_dim, 2,
[[[7.6, 7.6], [13.6, 13.6], [19.6, 19.6], [25.6, 25.6]],
[[31.6, 31.6], [37.6, 37.6], [43.6, 43.6], [49.6, 49.6]]])
def test_custom_collections(self):
layers_lib.legacy_relu(
self.input,
2,
weight_collections=['unbiased'],
bias_collections=['biased'],
output_collections=['output'])
self.assertEqual(1, len(ops.get_collection('unbiased')))
self.assertEqual(1, len(ops.get_collection('biased')))
self.assertEqual(1, len(ops.get_collection('output')))
self.assertEqual(2, len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
def test_all_custom_collections(self):
layers_lib.legacy_relu(
self.input,
2,
weight_collections=['unbiased', 'all'],
bias_collections=['biased', 'all'])
self.assertEqual(1, len(ops.get_collection('unbiased')))
self.assertEqual(1, len(ops.get_collection('biased')))
self.assertEqual(
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES),
ops.get_collection('all'))
def test_no_bias(self):
layers_lib.legacy_relu(self.input, 2, bias_init=None)
self.assertEqual(1, len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
def test_no_activation(self):
y = _layers.legacy_fully_connected(self.input, 2)
self.assertEqual(2, len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
self.assertEqual('BiasAdd', y.op.type)
def test_no_activation_no_bias(self):
y = _layers.legacy_fully_connected(self.input, 2, bias_init=None)
self.assertEqual(1, len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
self.assertEqual('MatMul', y.op.type)
def test_regularizer(self):
cnt = [0]
tensor = constant_op.constant(5.0)
def test_fn(_):
cnt[0] += 1
return tensor
_layers.legacy_fully_connected(self.input, 2, weight_regularizer=test_fn)
self.assertEqual([tensor],
ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES))
self.assertEqual(1, cnt[0])
def test_regularizer_with_multiple_variables(self):
cnt = [0]
tensor = constant_op.constant(5.0)
def test_fn(_):
cnt[0] += 1
return tensor
_layers.legacy_fully_connected(self.input, 2, weight_regularizer=test_fn)
_layers.legacy_fully_connected(self.input, 2, weight_regularizer=test_fn)
self.assertEqual([tensor, tensor],
ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES))
self.assertEqual(2, cnt[0])
def test_regularizer_with_variable_reuse(self):
cnt = [0]
tensor = constant_op.constant(5.0)
def test_fn(_):
cnt[0] += 1
return tensor
with variable_scope.variable_scope('test') as vs:
_layers.legacy_fully_connected(self.input, 2, weight_regularizer=test_fn)
with variable_scope.variable_scope(vs, reuse=True):
_layers.legacy_fully_connected(self.input, 2, weight_regularizer=test_fn)
self.assertEqual([tensor],
ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES))
self.assertEqual(1, cnt[0])
def test_empty_x_results_in_empty_output(self):
# Empty x is common if someone masks their input with tf.boolean_mask in
# order to drop missing entries, and in a particular batch all entries are
# missing.
with self.cached_session():
x = np.array([]).reshape(0, 3)
self.assertEqual(0, array_ops.size(x).eval())
y = _layers.legacy_fully_connected(x, 2, activation_fn=nn_ops.softmax)
variables_lib.global_variables_initializer().run()
expected_y = np.array([]).reshape(0, 2)
np.testing.assert_array_equal(expected_y, y.eval())
def test_shapes_variable_first_dim(self):
# first dimension is not known statically.
x = array_ops.placeholder(dtypes.float32, shape=[None, 4, 3])
y = _layers.legacy_fully_connected(x, 1)
# in the output we still only know the 2nd and 3rd dimensions statically.
self.assertEqual(y.get_shape().as_list(), [None, 4, 1])
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
# we can feed in input with first dimension 2
shape_value = sess.run(
array_ops.shape(y), feed_dict={
x: self.input_3_dim_arr
})
self.assertAllClose(shape_value, [2, 4, 1])
# we can feed in input with first dimension 1
shape_value = sess.run(
array_ops.shape(y), feed_dict={
x: [self.input_3_dim_arr[0]]
})
self.assertAllClose(shape_value, [1, 4, 1])
# we cannot feed in input with inconsistent dimensions
with self.assertRaises(ValueError):
sess.run(array_ops.shape(y), feed_dict={x: [[[]]]})
def _unknown_dim_invalid_input(self, last_dim):
x = array_ops.placeholder(dtypes.float32, shape=[3, last_dim])
_layers.legacy_fully_connected(x, 2, activation_fn=None)
def test_known_dim_valid_input(self):
self._unknown_dim_invalid_input(last_dim=3)
def test_unknown_dim_invalid_input(self):
with self.assertRaisesRegexp(
ValueError, 'last dimension of x must be known but is None'):
self._unknown_dim_invalid_input(last_dim=None)
def test_1d_invalid_input(self):
with self.cached_session():
with self.assertRaisesRegexp(ValueError,
'rank of x must be at least 2 not: 1'):
x = constant_op.constant([[]], shape=[0])
_layers.legacy_fully_connected(x, 2, activation_fn=nn_ops.softmax)
class MaxOutTest(test.TestCase):
def test_simple(self):
inputs = random_ops.random_uniform((64, 10, 36), seed=1)
graph = _layers.maxout(inputs, num_units=3)
self.assertEqual(graph.get_shape().as_list(), [64, 10, 3])
def test_fully_connected(self):
inputs = random_ops.random_uniform((64, 50), seed=1)
graph = _layers.fully_connected(inputs, 50)
graph = _layers.maxout(graph, num_units=10)
self.assertEqual(graph.get_shape().as_list(), [64, 10])
def test_nchw(self):
inputs = random_ops.random_uniform((10, 100, 100, 3), seed=1)
graph = _layers.conv2d(inputs, 10, 3, padding='SAME')
graph = _layers.maxout(graph, num_units=1)
self.assertEqual(graph.get_shape().as_list(), [10, 100, 100, 1])
def test_invalid_shape(self):
inputs = random_ops.random_uniform((10, 100, 100, 3), seed=1)
graph = _layers.conv2d(inputs, 3, 10)
with self.assertRaisesRegexp(ValueError, 'number of features'):
graph = _layers.maxout(graph, num_units=2)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/layers_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reversible Residual Block.
From
[The Reversible Residual Network: Backpropagation Without Storing
Activations](https://arxiv.org/abs/1707.04585).
Also contains the @recompute_grad decorator, which recomputes the forward
function on the backwards pass.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import re
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.framework.python import ops as contrib_framework_ops
from tensorflow.python.eager import backprop
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops as framework_ops
from tensorflow.python.layers import base
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
__all__ = ["rev_block", "RevBlock", "recompute_grad"]
LAYER_RE = re.compile(".*revlayer_([0-9]*)/([fg])/.*")
_USE_DEFAULT = "__rev_block_lib_default"
_WRONG_VARS_ERR = """\
The variables used on recompute were different from the variables originally
used. The function wrapped with @recompute_grad likely creates its own variable
scope with a default name and has been called twice in the same enclosing scope.
To fix, ensure each call to the function happens in its own unique variable
scope.
"""
def _acc_grads(*lists_of_grads):
"""Accumulates lists of gradients."""
acc_grads = []
for grads in zip(*lists_of_grads):
grads = [g for g in grads if g is not None]
if grads:
acc_grads.append(math_ops.add_n(grads))
else:
acc_grads.append(None)
return acc_grads
def _rev_layer_forward(xs, f, g, f_side_input, g_side_input,
gate_outputs=False):
"""Forward for 1 reversible layer."""
x1, x2 = xs
y1 = x1 + (f(x2, f_side_input) if f_side_input else f(x2))
y2 = x2 + (g(y1, g_side_input) if g_side_input else g(y1))
if gate_outputs:
return control_flow_ops.tuple([y1, y2])
else:
return (y1, y2)
def _rev_layer_backward(ys, grad_ys, f, g, f_vars, f_side_input, g_vars,
g_side_input):
"""Backprop for 1 layer."""
y1, y2 = ys
grad_y1, grad_y2 = grad_ys
# Reconstruct intermediates and inputs (x1, x2)
# stop_gradients required on fn inputs to prevent infinite recursion into this
# grad function on the calls to gradients.
y1_stop = array_ops.stop_gradient(y1)
g_side_input = [array_ops.stop_gradient(t) for t in g_side_input]
gy1 = g(y1_stop, g_side_input) if g_side_input else g(y1_stop)
x2 = y2 - gy1
x2_stop = array_ops.stop_gradient(x2)
f_side_input = [array_ops.stop_gradient(t) for t in f_side_input]
fx2 = f(x2_stop, f_side_input) if f_side_input else f(x2_stop)
x1 = y1 - fx2
  # Compute gradients with respect to the inputs.
  # dL/dy2 * dG(y1)/dy1
grad_gy1_y2 = gradients_impl.gradients(gy1, y1_stop, grad_y2)[0]
grad_x1 = grad_y1 + grad_gy1_y2
grad_x2 = (
gradients_impl.gradients(fx2, x2_stop, grad_y1)[0] + grad_y2 +
gradients_impl.gradients(fx2, x2_stop, grad_gy1_y2)[0])
  # Compute gradients with respect to the variables and side inputs in f and g.
grads1 = gradients_impl.gradients(gy1, g_vars + g_side_input, grad_y2)
grad_g_vars, grad_g_side = grads1[:len(g_vars)], grads1[len(g_vars):]
grads2 = gradients_impl.gradients(fx2, f_vars + f_side_input, grad_y1)
grad_f_y1, grad_f_side1 = grads2[:len(f_vars)], grads2[len(f_vars):]
grads3 = gradients_impl.gradients(fx2, f_vars + f_side_input, grad_gy1_y2)
grad_f_y2, grad_f_side2 = grads3[:len(f_vars)], grads3[len(f_vars):]
grad_f_vars = _acc_grads(grad_f_y1, grad_f_y2)
grad_f_side = _acc_grads(grad_f_side1, grad_f_side2)
# Put returns in a tuple to ensure a constant memory budget (i.e. don't want
# the subsequent layer to start computing and consuming memory based on a
# subset of these values).
outputs = ((x1, x2), (grad_x1, grad_x2), (grad_f_vars, grad_f_side),
(grad_g_vars, grad_g_side))
tupled = control_flow_ops.tuple(nest.flatten(outputs))
return nest.pack_sequence_as(outputs, tupled)
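# Hedged sketch, not part of the original library: with stateless f and g, the
# reconstruction used in _rev_layer_backward inverts _rev_layer_forward exactly,
# i.e. x2 = y2 - g(y1) and x1 = y1 - f(x2). The helper below is illustrative only.
def _example_rev_layer_inversion():
  """Recovers (x1, x2) from (y1, y2) for a single reversible layer."""
  f = lambda t: 2.0 * t
  g = lambda t: t + 1.0
  x1 = array_ops.ones([4, 3])
  x2 = 3.0 * array_ops.ones([4, 3])
  y1, y2 = _rev_layer_forward((x1, x2), f, g, [], [])
  x2_rec = y2 - g(y1)  # equals x2
  x1_rec = y1 - f(x2_rec)  # equals x1
  return x1_rec, x2_rec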
def _rev_block_forward(x1,
x2,
f,
g,
num_layers=1,
f_side_input=None,
g_side_input=None,
gate_outputs=False):
"""Forward for a series of reversible layers."""
out = (x1, x2)
for i in xrange(num_layers):
out = _rev_layer_forward(
out, f[i], g[i], f_side_input, g_side_input, gate_outputs=gate_outputs)
y1, y2 = out
return y1, y2
def _safe_wraps(fn):
if isinstance(fn, functools.partial):
# functools.partial objects cannot be wrapped as they are missing the
# necessary properties (__name__, __module__, __doc__).
def passthrough(f):
return f
return passthrough
return functools.wraps(fn)
def _scope_wrap(fn, scope):
@_safe_wraps(fn)
def wrap(*args, **kwargs):
with variable_scope.variable_scope(scope, use_resource=True):
return fn(*args, **kwargs)
return wrap
class RevBlock(base.Layer):
"""Block of reversible layers. See rev_block."""
def __init__(self,
f,
g,
num_layers=1,
f_side_input=None,
g_side_input=None,
use_efficient_backprop=True,
name="revblock",
**kwargs):
super(RevBlock, self).__init__(name=name, **kwargs)
if isinstance(f, list):
assert len(f) == num_layers
else:
f = [f] * num_layers
if isinstance(g, list):
assert len(g) == num_layers
else:
g = [g] * num_layers
f = [_scope_wrap(fn, "revlayer_%d/f" % i) for i, fn in enumerate(f)]
g = [_scope_wrap(fn, "revlayer_%d/g" % i) for i, fn in enumerate(g)]
self.f = f
self.g = g
self.num_layers = num_layers
self.f_side_input = f_side_input or []
self.g_side_input = g_side_input or []
self._use_efficient_backprop = use_efficient_backprop
def call(self, inputs, forward=True):
vs = variable_scope.get_variable_scope()
vars_before = vs.global_variables()
if forward:
x1, x2 = inputs
out = self._forward(x1, x2)
else:
y1, y2 = inputs
out = self._backward(y1, y2)
# Add any created variables to the Layer's variable stores
new_vars = vs.global_variables()[len(vars_before):]
train_vars = vs.trainable_variables()
for new_var in new_vars:
if new_var in train_vars:
self._trainable_weights.append(new_var)
else:
self._non_trainable_weights.append(new_var)
return out
def forward(self, x1, x2):
return self.apply([x1, x2])
def backward(self, y1, y2):
return self.apply([y1, y2], forward=False)
def build(self, _):
logging.warn("RevBlock constructs its variables on first call, not on "
"build.")
self.built = True
def _make_efficient_grad_fn(self, inputs_, ys_):
def _efficient_grad_fn(*grad_ys, **kwargs):
"""Custom gradient fn for a block of reversible residual layers."""
inputs = inputs_
ys = ys_
variables = kwargs["variables"]
side_inputs = inputs[2:]
f_side_idxs = [None] * len(self.f_side_input)
g_side_idxs = [None] * len(self.g_side_input)
assert len(side_inputs) == len(self.f_side_input) + len(self.g_side_input)
for i, t in enumerate(side_inputs):
if t in self.f_side_input:
f_side_idxs[self.f_side_input.index(t)] = i
elif t in self.g_side_input:
g_side_idxs[self.g_side_input.index(t)] = i
else:
assert False
f_vars = [[] for _ in range(self.num_layers)]
g_vars = [[] for _ in range(self.num_layers)]
f_vars_idxs = [[] for _ in range(self.num_layers)]
g_vars_idxs = [[] for _ in range(self.num_layers)]
for i, ref in enumerate(variables):
# Use the name to identify the layer number and function (f or g)
regex = LAYER_RE.match(ref.name)
layer_no = int(regex.group(1))
fn_name = regex.group(2)
if fn_name == "f":
f_vars[layer_no].append(ref)
f_vars_idxs[layer_no].append(i)
else:
assert fn_name == "g"
g_vars[layer_no].append(ref)
g_vars_idxs[layer_no].append(i)
f_var_grads = []
g_var_grads = []
f_side_grads = []
g_side_grads = []
# Reverse variable containers to go backward
f_vars.reverse()
g_vars.reverse()
f = list(self.f)
g = list(self.g)
f.reverse()
g.reverse()
with variable_scope.variable_scope(self.scope_name, reuse=True):
for i in xrange(self.num_layers):
ys, grad_ys, f_ret, g_ret = _rev_layer_backward(
ys, grad_ys, f[i], g[i], f_vars[i], self.f_side_input, g_vars[i],
self.g_side_input)
grad_f_vars, grad_f_side = f_ret
grad_g_vars, grad_g_side = g_ret
f_var_grads.append(grad_f_vars)
g_var_grads.append(grad_g_vars)
f_side_grads.append(grad_f_side)
g_side_grads.append(grad_g_side)
# Accumulate layer gradients for f_side_input and g_side_input
acc_f_side_grads = _acc_grads(*f_side_grads)
acc_g_side_grads = _acc_grads(*g_side_grads)
# Use the stored idxs to put gradients in the passed-in order.
side_input_grads = [None] * len(side_inputs)
variable_grads = [None] * len(variables)
# Variable gradients were collected in reverse layer order. Reverse to
# match idxs.
f_var_grads.reverse()
g_var_grads.reverse()
for idxs, grads in list(zip(f_vars_idxs, f_var_grads)) + list(
zip(g_vars_idxs, g_var_grads)):
for i, grad in zip(idxs, grads):
variable_grads[i] = grad
for i, grad in zip(f_side_idxs, acc_f_side_grads):
side_input_grads[i] = grad
for i, grad in zip(g_side_idxs, acc_g_side_grads):
side_input_grads[i] = grad
grad_x1, grad_x2 = grad_ys
return [grad_x1, grad_x2] + side_input_grads, variable_grads
return _efficient_grad_fn
def _forward(self, x1, x2):
"""Run forward through the reversible layers."""
side_inputs = [self.f_side_input, self.g_side_input]
flat_side_inputs = nest.flatten(side_inputs)
def _forward_wrap(x1_, x2_, *flat_side_inputs):
f_side, g_side = nest.pack_sequence_as(side_inputs, flat_side_inputs)
return _rev_block_forward(
x1_,
x2_,
self.f,
self.g,
num_layers=self.num_layers,
f_side_input=f_side,
g_side_input=g_side,
gate_outputs=self._use_efficient_backprop)
@custom_gradient.custom_gradient
def _forward_with_custom_grad(*args):
out = _forward_wrap(*args) # pylint: disable=no-value-for-parameter
grad_fn = self._make_efficient_grad_fn(args, out)
return out, grad_fn
if self._use_efficient_backprop:
return _forward_with_custom_grad(x1, x2, *flat_side_inputs)
else:
return _forward_wrap(x1, x2, *flat_side_inputs)
def _backward(self, y1, y2):
"""Run backward through the reversible layers."""
f = list(self.f)
g = list(self.g)
f.reverse()
g.reverse()
for i in xrange(self.num_layers):
gy1 = g[i](y1, self.g_side_input) if self.g_side_input else g[i](y1)
x2 = y2 - gy1
fx2 = f[i](x2, self.f_side_input) if self.f_side_input else f[i](x2)
x1 = y1 - fx2
y1, y2 = x1, x2
return x1, x2
def rev_block(x1,
x2,
f,
g,
num_layers=1,
f_side_input=None,
g_side_input=None,
is_training=True):
"""A block of reversible residual layers.
A reversible residual layer is defined as:
```
y1 = x1 + f(x2, f_side_input)
y2 = x2 + g(y1, g_side_input)
```
A reversible residual block, defined here, is a series of reversible residual
layers.
Limitations:
* f and g must not close over any Tensors; all side inputs to f and g should
be passed in with f_side_input and g_side_input which will be forwarded to
f and g.
* f and g must not change the dimensionality of their inputs in order for the
addition in the equations above to work.
Args:
x1: a float Tensor.
x2: a float Tensor.
f: a function, (Tensor) -> (Tensor) (or list of such of length num_layers).
Should not change the shape of the Tensor. Can make calls to get_variable.
See f_side_input if there are side inputs.
g: a function, (Tensor) -> (Tensor) (or list of such of length num_layers).
Should not change the shape of the Tensor. Can make calls to get_variable.
See g_side_input if there are side inputs.
num_layers: int, number of reversible residual layers. Each layer will
apply f and g according to the equations above, with new variables in each
layer.
f_side_input: list of Tensors, side input to f. If not None, signature of f
should be (Tensor, list<Tensor>) -> (Tensor).
g_side_input: list of Tensors, side input to g. If not None, signature of g
should be (Tensor, list<Tensor>) -> (Tensor).
is_training: bool, whether to actually use the efficient backprop codepath.
Returns:
y1, y2: tuple of float Tensors.
"""
block = RevBlock(
f=f,
g=g,
num_layers=num_layers,
f_side_input=f_side_input,
g_side_input=g_side_input,
use_efficient_backprop=is_training,
_reuse=variable_scope.get_variable_scope().reuse)
return block.forward(x1, x2)
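# Hedged usage sketch, not part of the original library: one way rev_block might
# be wired up with simple matmul f/g functions. The helper and variable names
# below are illustrative assumptions, not an official example.
def _example_rev_block_usage():
  """Builds a 2-layer reversible block over two [batch, 8] halves."""
  def f(t):
    w = variable_scope.get_variable("w_f", [8, 8], dtype=t.dtype)
    return math_ops.matmul(t, w)
  def g(t):
    w = variable_scope.get_variable("w_g", [8, 8], dtype=t.dtype)
    return math_ops.matmul(t, w)
  x = array_ops.ones([4, 16])
  x1, x2 = array_ops.split(x, 2, axis=-1)
  # f and g preserve the [batch, 8] shape, as the additive coupling requires.
  y1, y2 = rev_block(x1, x2, f, g, num_layers=2)
  return y1, y2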
def enable_with_args(dec):
"""A decorator for decorators to enable their usage with or without args."""
@_safe_wraps(dec)
def new_dec(*args, **kwargs):
if len(args) == 1 and not kwargs and callable(args[0]):
# Used as decorator without args
fn = args[0]
return dec(fn)
else:
return lambda fn: dec(fn, *args, **kwargs)
return new_dec
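# Hedged sketch, not part of the original library: a decorator passed through
# enable_with_args can then be applied either bare or with keyword arguments.
# The _example_tag decorator below is an illustrative assumption.
def _example_enable_with_args():
  """Shows both call forms of a decorator wrapped with enable_with_args."""
  @enable_with_args
  def _example_tag(fn, label="default"):
    def wrapped(*args, **kwargs):
      return label, fn(*args, **kwargs)
    return wrapped
  @_example_tag  # used without arguments
  def one():
    return 1
  @_example_tag(label="custom")  # used with a keyword argument
  def two():
    return 2
  assert one() == ("default", 1)
  assert two() == ("custom", 2)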
@enable_with_args
def recompute_grad(fn, use_data_dep=_USE_DEFAULT, tupleize_grads=False):
"""Decorator that recomputes the function on the backwards pass.
To use this function, you must use `ResourceVariable`s (i.e.
  `variable_scope(name, use_resource=True)`), which are the default in Eager mode
and when running on TPU.
Warning: Because the function will be called again on the backwards pass, the
user should be careful to not use ops in their function that mutate state or
have randomness (for example, batch normalization or dropout). If the function
does have such operations, it is recommended that the function take the
`is_recomputing` keyword argument which will be `False` on the forward pass
and `True` on the backwards pass so that it can disable state changes when
`is_recomputing=True` (for example, not updating the moving averages in batch
normalization).
Args:
fn: a function that takes Tensors (all as positional arguments) and returns
a tuple of Tensors. Note that `fn` should not close over any other
Tensors or Variables.
use_data_dep: `bool`, if `True` will use a dummy data dependency to force
the recompute to happen. If `False` will use a control dependency. By
default will be `True` if in an XLA context and `False` otherwise. XLA
ignores control dependencies and so this data dependency is necessary.
tupleize_grads: `bool`, if `True` will use control dependencies to ensure
that all gradients are produced before any are consumed by downstream ops.
If `use_data_dep` is also `True`, will use a data dependency instead of
a control dependency.
Returns:
A wrapped fn that is identical to fn when called, but its activations will
be discarded and recomputed on the backwards pass (i.e. on a call to
tf.gradients).
Raises:
ValueError: if `fn` closes over any Tensors or Variables.
"""
# Check for closed-over Tensors/Variables
if fn.__code__.co_freevars:
closed_over_vars = dict(zip(fn.__code__.co_freevars,
[c.cell_contents for c in fn.__closure__]))
for var_name, value in six.iteritems(closed_over_vars):
if isinstance(value, (framework_ops.Tensor, variables_lib.Variable)):
raise ValueError(
"fn decorated with @recompute_grad closes over Tensor %s "
"(local variable name: %s). The decorated fn must not close over "
"Tensors or Variables because gradients will NOT be computed for "
"them through fn. To ensure correct gradients, make the "
"Tensor an input to fn." % (value.name, var_name))
@_safe_wraps(fn)
def wrapped(*args):
return _recompute_grad(
fn, args, use_data_dep=use_data_dep, tupleize_grads=tupleize_grads)
return wrapped
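# Hedged usage sketch, not part of the original library: the wrapped block below
# behaves identically on the forward pass, and its activations are recomputed
# when gradients are taken. The scope and helper names are assumptions.
def _example_recompute_grad_usage():
  """Wraps a small two-matmul block so its activations are recomputed."""
  @recompute_grad
  def block(x):
    with variable_scope.variable_scope("recompute_example", use_resource=True):
      w = variable_scope.get_variable("w", [16, 16], dtype=x.dtype)
      return math_ops.matmul(math_ops.tanh(math_ops.matmul(x, w)), w)
  x = array_ops.ones([4, 16])
  y = block(x)
  dx = gradients_impl.gradients(y, [x])[0]  # triggers the recompute grad fn
  return y, dx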
def _is_on_tpu():
ctxt = framework_ops.get_default_graph()._get_control_flow_context() # pylint: disable=protected-access
return control_flow_util.GetContainingXLAContext(ctxt) is not None
def _recomputing_grad_fn(compute_fn,
original_args,
original_vars,
output_grads,
grad_fn_variables,
use_data_dep,
tupleize_grads,
arg_scope,
var_scope,
has_is_recompute_kwarg):
"""Grad fn for recompute_grad."""
variables = grad_fn_variables or []
  # Identity ops around the inputs ensure correct gradient graph-walking.
inputs = [array_ops.identity(x) for x in list(original_args)]
# Recompute outputs
# Use a control dependency to ensure that the recompute is not eliminated by
# CSE and that it happens on the backwards pass.
ctrl_dep_grads = [g for g in output_grads if g is not None]
with framework_ops.control_dependencies(ctrl_dep_grads):
if use_data_dep:
inputs = _force_data_dependency(output_grads, inputs)
# Re-enter scopes
with contrib_framework_ops.arg_scope(arg_scope):
with variable_scope.variable_scope(var_scope, reuse=True):
# Re-call the function and ensure that the touched variables are the
# same as in the first call.
with backprop.GradientTape() as tape:
fn_kwargs = {}
if has_is_recompute_kwarg:
fn_kwargs["is_recomputing"] = True
outputs = compute_fn(*inputs, **fn_kwargs)
recompute_vars = set(tape.watched_variables())
if original_vars != recompute_vars:
raise ValueError(_WRONG_VARS_ERR)
if not isinstance(outputs, (list, tuple)):
outputs = [outputs]
outputs = list(outputs)
# Compute gradients
grads = gradients_impl.gradients(outputs, inputs + variables,
output_grads)
if tupleize_grads:
if use_data_dep:
grads = _tuple_with_data_dep(grads)
else:
grads = control_flow_ops.tuple(grads)
grad_inputs = grads[:len(inputs)]
grad_vars = grads[len(inputs):]
return grad_inputs, grad_vars
def _recompute_grad(fn, args, use_data_dep=_USE_DEFAULT, tupleize_grads=False):
"""See recompute_grad."""
has_is_recompute_kwarg = "is_recomputing" in tf_inspect.getargspec(fn).args
for arg in args:
if not isinstance(arg, framework_ops.Tensor):
raise ValueError("All inputs to function must be Tensors")
use_data_dep_ = use_data_dep
if use_data_dep_ == _USE_DEFAULT:
use_data_dep_ = _is_on_tpu()
# Use custom_gradient and return a grad_fn that recomputes on the backwards
# pass.
@custom_gradient.custom_gradient
def fn_with_recompute(*args):
"""Wrapper for fn."""
# Capture the variable and arg scopes so we can re-enter them when
# recomputing.
vs = variable_scope.get_variable_scope()
arg_scope = contrib_framework_ops.current_arg_scope()
# Track all variables touched in the function.
with backprop.GradientTape() as tape:
fn_kwargs = {}
if has_is_recompute_kwarg:
fn_kwargs["is_recomputing"] = False
outputs = fn(*args, **fn_kwargs)
original_vars = set(tape.watched_variables())
def _grad_fn(output_grads, variables=None):
# Validate that custom_gradient passes the right variables into grad_fn.
if original_vars:
assert variables, ("Fn created variables but the variables were not "
"passed to the gradient fn.")
if set(variables) != original_vars:
raise ValueError(_WRONG_VARS_ERR)
return _recomputing_grad_fn(
compute_fn=fn,
original_args=args,
original_vars=original_vars,
output_grads=output_grads,
grad_fn_variables=variables,
use_data_dep=use_data_dep_,
tupleize_grads=tupleize_grads,
arg_scope=arg_scope,
var_scope=vs,
has_is_recompute_kwarg=has_is_recompute_kwarg)
# custom_gradient inspects the signature of the function to determine
# whether the user expects variables passed in the grad_fn. If the function
# created variables, the grad_fn should accept the "variables" kwarg.
if original_vars:
def grad_fn(*output_grads, **kwargs):
return _grad_fn(output_grads, kwargs["variables"])
else:
def grad_fn(*output_grads):
return _grad_fn(output_grads)
return outputs, grad_fn
return fn_with_recompute(*args)
def _underlying_variable_ref(t):
"""Find the underlying variable ref.
Traverses through Identity, ReadVariableOp, and Enter ops.
Stops when op type has Variable or VarHandle in name.
Args:
t: a Tensor
Returns:
a Tensor that is a variable ref, or None on error.
"""
while t.op.type in ["Identity", "ReadVariableOp", "Enter"]:
t = t.op.inputs[0]
op_type = t.op.type
if "Variable" in op_type or "VarHandle" in op_type:
return t
else:
return None
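# Hedged sketch, not part of the original library: reading a resource variable
# yields a ReadVariableOp (possibly behind an Identity), and the walk above
# recovers the VarHandleOp output behind it. Names below are illustrative.
def _example_underlying_variable_ref():
  """Returns the variable handle behind an Identity-wrapped read."""
  with variable_scope.variable_scope("uvr_example", use_resource=True):
    v = variable_scope.get_variable("v", shape=[2])
  read = array_ops.identity(v.value())
  return _underlying_variable_ref(read)  # Tensor from a VarHandleOp, or None.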
def _force_data_dependency(first_compute, then_compute):
"""Force all of `then_compute` to depend on all of `first_compute`.
Uses a dummy data dependency, which is useful when running on TPUs because
XLA ignores control dependencies. Only supports float arguments.
Args:
first_compute: `list<Tensor>`. These will be made to run before the
`Tensor`s `then_compute`.
then_compute: `list<Tensor>`. These will run after all the `Tensor`s in
`first_compute`.
Returns:
`list<Tensor>`, same length as `then_compute`.
Raises:
ValueError: if ranks are unknown or types are not floating.
"""
def _first_element(x):
if x.get_shape().ndims is None:
raise ValueError("Rank of Tensor %s must be known" % x)
ndims = x.get_shape().ndims
begin = framework_ops.convert_to_tensor([0] * ndims, dtype=dtypes.int32)
size = framework_ops.convert_to_tensor([1] * ndims, dtype=dtypes.int32)
return array_ops.reshape(array_ops.slice(x, begin, size), [])
first_compute_sum = math_ops.add_n(
[_first_element(x) for x in first_compute if x is not None])
dtype = first_compute_sum.dtype
if not dtype.is_floating:
raise ValueError("_force_data_dependency only supports floating dtypes.")
epsilon = np.finfo(dtype.as_numpy_dtype).tiny
zero = array_ops.stop_gradient(epsilon * first_compute_sum)
return [
array_ops.identity(x) + zero if x is not None else None
for x in then_compute
]
def _tuple_with_data_dep(tensors):
return _force_data_dependency(tensors, tensors)
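# Hedged sketch, not part of the original library: the tensors returned by
# _force_data_dependency are numerically identical to `then_compute` but carry a
# tiny stop_gradient'ed additive term derived from `first_compute`, so XLA
# cannot schedule them ahead of it. The helper name is an assumption.
def _example_force_data_dependency():
  """Makes `b` depend on `a` without changing its value."""
  a = array_ops.ones([2, 2])
  b = array_ops.ones([3])
  b_dep = _force_data_dependency([a], [b])[0]
  return b_dep  # still evaluates to [1., 1., 1.], now with a data edge from `a`.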
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/rev_block_lib.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for regularizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class ConstantValueTest(test.TestCase):
def test_value(self):
for v in [True, False, 1, 0, 1.0]:
value = utils.constant_value(v)
self.assertEqual(value, v)
def test_constant(self):
for v in [True, False, 1, 0, 1.0]:
c = constant_op.constant(v)
value = utils.constant_value(c)
self.assertEqual(value, v)
with self.cached_session():
self.assertEqual(c.eval(), v)
def test_variable(self):
for v in [True, False, 1, 0, 1.0]:
with ops.Graph().as_default() as g, self.session(g) as sess:
x = variables.Variable(v)
value = utils.constant_value(x)
self.assertEqual(value, None)
sess.run(variables.global_variables_initializer())
self.assertEqual(x.eval(), v)
def test_placeholder(self):
for v in [True, False, 1, 0, 1.0]:
p = array_ops.placeholder(np.dtype(type(v)), [])
x = array_ops.identity(p)
value = utils.constant_value(p)
self.assertEqual(value, None)
with self.cached_session():
self.assertEqual(x.eval(feed_dict={p: v}), v)
class StaticCondTest(test.TestCase):
def test_value(self):
fn1 = lambda: 'fn1'
fn2 = lambda: 'fn2'
expected = lambda v: 'fn1' if v else 'fn2'
for v in [True, False, 1, 0]:
o = utils.static_cond(v, fn1, fn2)
self.assertEqual(o, expected(v))
def test_constant(self):
fn1 = lambda: constant_op.constant('fn1')
fn2 = lambda: constant_op.constant('fn2')
expected = lambda v: b'fn1' if v else b'fn2'
for v in [True, False, 1, 0]:
o = utils.static_cond(v, fn1, fn2)
with self.cached_session():
self.assertEqual(o.eval(), expected(v))
def test_variable(self):
fn1 = lambda: variables.Variable('fn1')
fn2 = lambda: variables.Variable('fn2')
expected = lambda v: b'fn1' if v else b'fn2'
for v in [True, False, 1, 0]:
o = utils.static_cond(v, fn1, fn2)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertEqual(o.eval(), expected(v))
def test_tensors(self):
fn1 = lambda: constant_op.constant(0) - constant_op.constant(1)
fn2 = lambda: constant_op.constant(0) - constant_op.constant(2)
expected = lambda v: -1 if v else -2
for v in [True, False, 1, 0]:
o = utils.static_cond(v, fn1, fn2)
with self.cached_session():
self.assertEqual(o.eval(), expected(v))
class SmartCondStaticTest(test.TestCase):
def test_value(self):
fn1 = lambda: 'fn1'
fn2 = lambda: 'fn2'
expected = lambda v: 'fn1' if v else 'fn2'
for v in [True, False, 1, 0]:
o = utils.smart_cond(constant_op.constant(v), fn1, fn2)
self.assertEqual(o, expected(v))
def test_constant(self):
fn1 = lambda: constant_op.constant('fn1')
fn2 = lambda: constant_op.constant('fn2')
expected = lambda v: b'fn1' if v else b'fn2'
for v in [True, False, 1, 0]:
o = utils.smart_cond(constant_op.constant(v), fn1, fn2)
with self.cached_session():
self.assertEqual(o.eval(), expected(v))
def test_variable(self):
fn1 = lambda: variables.Variable('fn1')
fn2 = lambda: variables.Variable('fn2')
expected = lambda v: b'fn1' if v else b'fn2'
for v in [True, False, 1, 0]:
o = utils.smart_cond(constant_op.constant(v), fn1, fn2)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertEqual(o.eval(), expected(v))
def test_tensors(self):
fn1 = lambda: constant_op.constant(0) - constant_op.constant(1)
fn2 = lambda: constant_op.constant(0) - constant_op.constant(2)
expected = lambda v: -1 if v else -2
for v in [True, False, 1, 0]:
o = utils.smart_cond(constant_op.constant(v), fn1, fn2)
with self.cached_session():
self.assertEqual(o.eval(), expected(v))
class SmartCondDynamicTest(test.TestCase):
def test_value(self):
fn1 = lambda: ops.convert_to_tensor('fn1')
fn2 = lambda: ops.convert_to_tensor('fn2')
expected = lambda v: b'fn1' if v else b'fn2'
p = array_ops.placeholder(dtypes.bool, [])
for v in [True, False, 1, 0]:
o = utils.smart_cond(p, fn1, fn2)
with self.cached_session():
self.assertEqual(o.eval(feed_dict={p: v}), expected(v))
def test_constant(self):
fn1 = lambda: constant_op.constant('fn1')
fn2 = lambda: constant_op.constant('fn2')
expected = lambda v: b'fn1' if v else b'fn2'
p = array_ops.placeholder(dtypes.bool, [])
for v in [True, False, 1, 0]:
o = utils.smart_cond(p, fn1, fn2)
with self.cached_session():
self.assertEqual(o.eval(feed_dict={p: v}), expected(v))
def test_variable(self):
fn1 = lambda: variables.Variable('fn1')
fn2 = lambda: variables.Variable('fn2')
expected = lambda v: b'fn1' if v else b'fn2'
p = array_ops.placeholder(dtypes.bool, [])
for v in [True, False, 1, 0]:
o = utils.smart_cond(p, fn1, fn2)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertEqual(o.eval(feed_dict={p: v}), expected(v))
def test_tensors(self):
fn1 = lambda: constant_op.constant(0) - constant_op.constant(1)
fn2 = lambda: constant_op.constant(0) - constant_op.constant(2)
expected = lambda v: -1 if v else -2
p = array_ops.placeholder(dtypes.bool, [])
for v in [True, False, 1, 0]:
o = utils.smart_cond(p, fn1, fn2)
with self.cached_session():
self.assertEqual(o.eval(feed_dict={p: v}), expected(v))
class CollectNamedOutputsTest(test.TestCase):
def test_collect(self):
t1 = constant_op.constant(1.0, name='t1')
t2 = constant_op.constant(2.0, name='t2')
utils.collect_named_outputs('end_points', 'a1', t1)
utils.collect_named_outputs('end_points', 'a2', t2)
self.assertEqual(ops.get_collection('end_points'), [t1, t2])
def test_aliases(self):
t1 = constant_op.constant(1.0, name='t1')
t2 = constant_op.constant(2.0, name='t2')
utils.collect_named_outputs('end_points', 'a1', t1)
utils.collect_named_outputs('end_points', 'a2', t2)
self.assertEqual(t1.aliases, ['a1'])
self.assertEqual(t2.aliases, ['a2'])
def test_multiple_aliases(self):
t1 = constant_op.constant(1.0, name='t1')
t2 = constant_op.constant(2.0, name='t2')
utils.collect_named_outputs('end_points', 'a11', t1)
utils.collect_named_outputs('end_points', 'a12', t1)
utils.collect_named_outputs('end_points', 'a21', t2)
utils.collect_named_outputs('end_points', 'a22', t2)
self.assertEqual(t1.aliases, ['a11', 'a12'])
self.assertEqual(t2.aliases, ['a21', 'a22'])
def test_gather_aliases(self):
t1 = constant_op.constant(1.0, name='t1')
t2 = constant_op.constant(2.0, name='t2')
t3 = constant_op.constant(2.0, name='t3')
utils.collect_named_outputs('end_points', 'a1', t1)
utils.collect_named_outputs('end_points', 'a2', t2)
ops.add_to_collection('end_points', t3)
aliases = utils.gather_tensors_aliases(ops.get_collection('end_points'))
self.assertEqual(aliases, ['a1', 'a2', 't3'])
def test_convert_collection_to_dict(self):
t1 = constant_op.constant(1.0, name='t1')
t2 = constant_op.constant(2.0, name='t2')
utils.collect_named_outputs('end_points', 'a1', t1)
utils.collect_named_outputs('end_points', 'a21', t2)
utils.collect_named_outputs('end_points', 'a22', t2)
end_points = utils.convert_collection_to_dict('end_points')
self.assertEqual(end_points['a1'], t1)
self.assertEqual(end_points['a21'], t2)
self.assertEqual(end_points['a22'], t2)
def test_convert_collection_to_dict_clear_collection(self):
t1 = constant_op.constant(1.0, name='t1')
t2 = constant_op.constant(2.0, name='t2')
utils.collect_named_outputs('end_points', 'a1', t1)
utils.collect_named_outputs('end_points', 'a21', t2)
utils.collect_named_outputs('end_points', 'a22', t2)
utils.convert_collection_to_dict('end_points', clear_collection=True)
self.assertEqual(ops.get_collection('end_points'), [])
class NPositiveIntegersTest(test.TestCase):
def test_invalid_input(self):
with self.assertRaises(ValueError):
utils.n_positive_integers('3', [1])
with self.assertRaises(ValueError):
utils.n_positive_integers(3.3, [1])
with self.assertRaises(ValueError):
utils.n_positive_integers(-1, [1])
with self.assertRaises(ValueError):
utils.n_positive_integers(0, [1])
with self.assertRaises(ValueError):
utils.n_positive_integers(1, [1, 2])
with self.assertRaises(ValueError):
utils.n_positive_integers(1, [-1])
with self.assertRaises(ValueError):
utils.n_positive_integers(1, [0])
with self.assertRaises(ValueError):
utils.n_positive_integers(2, [1])
with self.assertRaises(ValueError):
utils.n_positive_integers(2, [1, 2, 3])
with self.assertRaises(ValueError):
utils.n_positive_integers(2, ['hello', 2])
with self.assertRaises(ValueError):
utils.n_positive_integers(2, tensor_shape.TensorShape([2, 3, 1]))
with self.assertRaises(ValueError):
utils.n_positive_integers(3, tensor_shape.TensorShape([2, None, 1]))
with self.assertRaises(ValueError):
utils.n_positive_integers(3, tensor_shape.TensorShape(None))
def test_valid_input(self):
self.assertEqual(utils.n_positive_integers(1, 2), (2,))
self.assertEqual(utils.n_positive_integers(2, 2), (2, 2))
self.assertEqual(utils.n_positive_integers(2, (2, 3)), (2, 3))
self.assertEqual(utils.n_positive_integers(3, (2, 3, 1)), (2, 3, 1))
self.assertEqual(
utils.n_positive_integers(3, tensor_shape.TensorShape([2, 3, 1])),
(2, 3, 1))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/utils_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for contrib.layers.python.layers.normalization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import normalization
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class InstanceNormTest(test.TestCase):
def testUnknownShape(self):
inputs = array_ops.placeholder(dtypes.float32)
with self.assertRaisesRegexp(ValueError, 'undefined rank'):
normalization.instance_norm(inputs)
def testBadDataFormat(self):
inputs = array_ops.placeholder(dtypes.float32, shape=(2, 5, 5))
with self.assertRaisesRegexp(ValueError,
'data_format has to be either NCHW or NHWC.'):
normalization.instance_norm(inputs, data_format='NHCW')
def testParamsShapeNotFullyDefinedNCHW(self):
inputs = array_ops.placeholder(dtypes.float32, shape=(3, None, 4))
with self.assertRaisesRegexp(ValueError, 'undefined channels dimension'):
normalization.instance_norm(inputs, data_format='NCHW')
def testParamsShapeNotFullyDefinedNHWC(self):
inputs = array_ops.placeholder(dtypes.float32, shape=(3, 4, None))
with self.assertRaisesRegexp(ValueError, 'undefined channels dimension'):
normalization.instance_norm(inputs, data_format='NHWC')
def testCreateOp(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = normalization.instance_norm(images)
print('name: ', output.op.name)
self.assertStartsWith(
output.op.name, 'InstanceNorm/instancenorm')
self.assertListEqual([5, height, width, 3], output.shape.as_list())
def testCreateOpFloat64(self):
height, width = 3, 3
images = random_ops.random_uniform(
(5, height, width, 3), dtype=dtypes.float64, seed=1)
output = normalization.instance_norm(images)
self.assertStartsWith(
output.op.name, 'InstanceNorm/instancenorm')
self.assertListEqual([5, height, width, 3], output.shape.as_list())
def testCreateOpNoScaleCenter(self):
height, width = 3, 3
images = random_ops.random_uniform(
(5, height, width, 3), dtype=dtypes.float64, seed=1)
output = normalization.instance_norm(images, center=False, scale=False)
self.assertStartsWith(
output.op.name, 'InstanceNorm/instancenorm')
self.assertListEqual([5, height, width, 3], output.shape.as_list())
self.assertEqual(0, len(contrib_variables.get_variables_by_name('beta')))
self.assertEqual(0, len(contrib_variables.get_variables_by_name('gamma')))
def testCreateVariables(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 3), seed=1)
normalization.instance_norm(images, center=True, scale=True)
beta = contrib_variables.get_variables_by_name('beta')[0]
gamma = contrib_variables.get_variables_by_name('gamma')[0]
self.assertEqual('InstanceNorm/beta', beta.op.name)
self.assertEqual('InstanceNorm/gamma', gamma.op.name)
def testReuseVariables(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 3), seed=1)
normalization.instance_norm(images, scale=True, scope='IN')
normalization.instance_norm(images, scale=True, scope='IN', reuse=True)
beta = contrib_variables.get_variables_by_name('beta')
gamma = contrib_variables.get_variables_by_name('gamma')
self.assertEqual(1, len(beta))
self.assertEqual(1, len(gamma))
def testValueCorrectWithReuseVars(self):
height, width = 3, 3
image_shape = (10, height, width, 3)
images = random_ops.random_uniform(image_shape, seed=1)
output_train = normalization.instance_norm(images, scope='IN')
output_eval = normalization.instance_norm(images, scope='IN', reuse=True)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
# output_train and output_eval should be the same.
train_np, eval_np = sess.run([output_train, output_eval])
self.assertAllClose(train_np, eval_np)
def doOutputTest(self, input_shape, data_format, tol=1e-3):
axis = -1 if data_format == 'NHWC' else 1
for mu in (0.0, 1e2):
for sigma in (1.0, 0.1):
# Determine shape of Tensor after normalization.
reduced_shape = (input_shape[0], input_shape[axis])
expected_mean = np.zeros(reduced_shape)
expected_var = np.ones(reduced_shape)
# Determine axes that will be normalized.
reduced_axes = list(range(len(input_shape)))
del reduced_axes[axis]
del reduced_axes[0]
reduced_axes = tuple(reduced_axes)
inputs = random_ops.random_uniform(input_shape, seed=0) * sigma + mu
output_op = normalization.instance_norm(
inputs, center=False, scale=False, data_format=data_format)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
outputs = sess.run(output_op)
# Make sure that there are no NaNs
self.assertFalse(np.isnan(outputs).any())
mean = np.mean(outputs, axis=reduced_axes)
var = np.var(outputs, axis=reduced_axes)
# The mean and variance of each example should be close to 0 and 1
# respectively.
self.assertAllClose(expected_mean, mean, rtol=tol, atol=tol)
self.assertAllClose(expected_var, var, rtol=tol, atol=tol)
def testOutputSmallInput4DNHWC(self):
self.doOutputTest((10, 10, 10, 30), 'NHWC', tol=1e-2)
def testOutputSmallInput4DNCHW(self):
self.doOutputTest((10, 10, 10, 30), 'NCHW', tol=1e-2)
def testOutputBigInput4DNHWC(self):
self.doOutputTest((1, 100, 100, 1), 'NHWC', tol=1e-3)
def testOutputBigInput4DNCHW(self):
self.doOutputTest((1, 100, 100, 1), 'NCHW', tol=1e-3)
def testOutputSmallInput5DNHWC(self):
self.doOutputTest((10, 10, 10, 10, 30), 'NHWC', tol=1e-2)
def testOutputSmallInput5DNCHW(self):
self.doOutputTest((10, 10, 10, 10, 30), 'NCHW', tol=1e-2)
def testOutputBigInput5DNHWC(self):
self.doOutputTest((1, 100, 100, 1, 1), 'NHWC', tol=1e-3)
def testOutputBigInput5DNCHW(self):
self.doOutputTest((1, 100, 100, 1, 1), 'NCHW', tol=1e-3)
class GroupNormTest(test.TestCase):
def testInvalidGroupSize(self):
inputs = array_ops.placeholder(dtypes.float32, shape=(5, 2, 10, 10))
with self.assertRaisesRegexp(ValueError,
'Invalid groups 10 for 2 channels.'):
normalization.group_norm(inputs, groups=10,
reduction_axes=[-2, -1], channels_axis=-3)
def testBadCommensurateGroup(self):
inputs = array_ops.placeholder(dtypes.float32, shape=(5, 4, 10, 10))
with self.assertRaisesRegexp(ValueError,
'4 channels is not commensurate with '
'3 groups.'):
normalization.group_norm(inputs, groups=3,
reduction_axes=[-2, -1], channels_axis=-3)
def testAxisIsBad(self):
inputs = array_ops.placeholder(dtypes.float32, shape=(1, 2, 4, 5))
with self.assertRaisesRegexp(ValueError,
'Axis is out of bounds.'):
normalization.group_norm(inputs, channels_axis=5)
with self.assertRaisesRegexp(ValueError,
'Axis is out of bounds.'):
normalization.group_norm(inputs, reduction_axes=[1, 5])
def testNotMutuallyExclusiveAxis(self):
inputs = array_ops.placeholder(dtypes.float32, shape=(10, 32, 32, 32))
# Specify axis with negative values.
with self.assertRaisesRegexp(ValueError, 'mutually exclusive'):
normalization.group_norm(inputs, channels_axis=-2, reduction_axes=[-2])
# Specify axis with positive values.
with self.assertRaisesRegexp(ValueError, 'mutually exclusive'):
normalization.group_norm(inputs, channels_axis=1, reduction_axes=[1, 3])
# Specify axis with mixed positive and negative values.
with self.assertRaisesRegexp(ValueError, 'mutually exclusive'):
normalization.group_norm(inputs, channels_axis=-2, reduction_axes=[2])
def testUnknownShape(self):
inputs = array_ops.placeholder(dtypes.float32)
with self.assertRaisesRegexp(ValueError, 'undefined rank'):
normalization.group_norm(inputs)
def testParamsShapeNotFullyDefinedReductionAxes(self):
inputs = array_ops.placeholder(dtypes.float32, shape=(1, 32, None, 4))
with self.assertRaisesRegexp(ValueError, 'undefined dimensions'):
normalization.group_norm(inputs)
def testParamsShapeNotFullyDefinedChannelsAxis(self):
inputs = array_ops.placeholder(dtypes.float32, shape=(1, 3, 4, None))
with self.assertRaisesRegexp(ValueError, 'undefined channel dimension'):
normalization.group_norm(inputs, channels_axis=-1,
reduction_axes=[-3, -2])
def testParamsShapeNotFullyDefinedBatchAxis(self):
height, width, groups = 3, 3, 4
inputs = array_ops.placeholder(dtypes.float32,
shape=(None, height, width, 2*groups))
output = normalization.group_norm(inputs, channels_axis=-1,
reduction_axes=[-3, -2], groups=groups)
self.assertListEqual([None, height, width, 2 * groups],
output.shape.as_list())
def testCreateOp(self):
height, width, groups = 3, 3, 4
images = random_ops.random_uniform((5, height, width, 2*groups), seed=1)
output = normalization.group_norm(images, groups=groups, channels_axis=-1,
reduction_axes=[-3, -2])
print('name: ', output.op.name)
self.assertListEqual([5, height, width, 2*groups], output.shape.as_list())
def testCreateOpFloat64(self):
height, width, groups = 3, 3, 5
images = random_ops.random_uniform(
(5, height, width, 4*groups), dtype=dtypes.float64, seed=1)
output = normalization.group_norm(images, groups=groups)
self.assertEqual(dtypes.float64, output.dtype)
self.assertListEqual([5, height, width, 4*groups], output.shape.as_list())
def testCreateOpNoScaleCenter(self):
height, width, groups = 3, 3, 7
images = random_ops.random_uniform(
(5, height, width, 3*groups), dtype=dtypes.float32, seed=1)
output = normalization.group_norm(images, groups=groups, center=False,
scale=False)
self.assertListEqual([5, height, width, 3*groups], output.shape.as_list())
self.assertEqual(0, len(contrib_variables.get_variables_by_name('beta')))
self.assertEqual(0, len(contrib_variables.get_variables_by_name('gamma')))
def testCreateVariables_NHWC(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 8), seed=1)
normalization.group_norm(images, groups=4,
channels_axis=-1, reduction_axes=(-3, -2),
center=True, scale=True)
beta = contrib_variables.get_variables_by_name('beta')[0]
gamma = contrib_variables.get_variables_by_name('gamma')[0]
self.assertEqual('GroupNorm/beta', beta.op.name)
self.assertEqual('GroupNorm/gamma', gamma.op.name)
def testCreateVariables_NCHW(self):
height, width, groups = 3, 3, 4
images = random_ops.random_uniform((5, 2*groups, height, width), seed=1)
normalization.group_norm(images, groups=4,
channels_axis=-3, reduction_axes=(-2, -1),
center=True, scale=True)
beta = contrib_variables.get_variables_by_name('beta')[0]
gamma = contrib_variables.get_variables_by_name('gamma')[0]
self.assertEqual('GroupNorm/beta', beta.op.name)
self.assertEqual('GroupNorm/gamma', gamma.op.name)
def testReuseVariables(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 4), seed=1)
normalization.group_norm(images, groups=2, scale=True, scope='IN')
normalization.group_norm(images, groups=2, scale=True, scope='IN',
reuse=True)
beta = contrib_variables.get_variables_by_name('beta')
gamma = contrib_variables.get_variables_by_name('gamma')
self.assertEqual(1, len(beta))
self.assertEqual(1, len(gamma))
def testValueCorrectWithReuseVars(self):
height, width = 3, 3
image_shape = (10, height, width, 4)
images = random_ops.random_uniform(image_shape, seed=1)
output_train = normalization.group_norm(images, groups=2, scope='IN')
output_eval = normalization.group_norm(images, groups=2, scope='IN',
reuse=True)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
# output_train and output_eval should be the same.
train_np, eval_np = sess.run([output_train, output_eval])
self.assertAllClose(train_np, eval_np)
def doOutputTest(self,
input_shape,
channels_axis=None,
reduction_axes=None,
mean_close_to_zero=False,
groups=2,
tol=1e-2):
# Select the axis for the channel and the dimensions along which statistics
# are accumulated.
if channels_axis < 0:
channels_axis += len(input_shape)
reduced_axes = [channels_axis + 1]
for a in reduction_axes:
if a < 0:
a += len(input_shape)
if a < channels_axis:
reduced_axes.append(a)
else:
reduced_axes.append(a+1)
reduced_axes = tuple(reduced_axes)
# Calculate the final shape for the output Tensor.
axes_before_channels = input_shape[:channels_axis]
axes_after_channels = input_shape[channels_axis+1:]
channels = input_shape[channels_axis]
outputs_shape = (axes_before_channels + [groups, channels // groups] +
axes_after_channels)
# Calculate the final shape for the output statistics.
reduced_shape = []
for i, a in enumerate(outputs_shape):
if i not in reduced_axes:
reduced_shape.append(a)
if mean_close_to_zero:
mu_tuple = (1e-4, 1e-2, 1.0)
sigma_tuple = (1e-2, 0.1, 1.0)
else:
mu_tuple = (1.0, 1e2)
sigma_tuple = (1.0, 0.1)
for mu in mu_tuple:
for sigma in sigma_tuple:
        # After normalization the per-example mean should be ~0 and the variance ~1.
expected_mean = np.zeros(reduced_shape)
expected_var = np.ones(reduced_shape)
inputs = random_ops.random_normal(input_shape, seed=0) * sigma + mu
output_op = normalization.group_norm(
inputs,
groups=groups,
center=False,
scale=False,
channels_axis=channels_axis,
reduction_axes=reduction_axes,
mean_close_to_zero=mean_close_to_zero)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
outputs = sess.run(output_op)
# Make sure that there are no NaNs
self.assertFalse(np.isnan(outputs).any())
outputs = np.reshape(outputs, outputs_shape)
mean = np.mean(outputs, axis=reduced_axes)
var = np.var(outputs, axis=reduced_axes)
# The mean and variance of each example should be close to 0 and 1
# respectively.
self.assertAllClose(expected_mean, mean, rtol=tol, atol=tol)
self.assertAllClose(expected_var, var, rtol=tol, atol=tol)
def doOutputTestForMeanCloseToZero(self,
input_shape,
channels_axis=None,
reduction_axes=None,
groups=2,
tol=5e-2):
self.doOutputTest(
input_shape,
channels_axis=channels_axis,
reduction_axes=reduction_axes,
groups=groups,
tol=tol,
mean_close_to_zero=True)
def testOutputSmallInput4D_NHWC(self):
input_shape = [10, 10, 10, 30]
# Specify axes with positive values.
self.doOutputTest(input_shape, channels_axis=3, reduction_axes=[1, 2])
# Specify axes with negative values.
self.doOutputTest(input_shape, channels_axis=-1, reduction_axes=[-3, -2])
# Specify axes with positive values.
self.doOutputTestForMeanCloseToZero(
input_shape, channels_axis=3, reduction_axes=[1, 2])
# Specify axes with negative values.
self.doOutputTestForMeanCloseToZero(
input_shape, channels_axis=-1, reduction_axes=[-3, -2])
def testOutputSmallInput3D_NHWC(self):
input_shape = [10, 10, 30]
# Specify axes with positive values.
self.doOutputTest(input_shape, channels_axis=2, reduction_axes=[0, 1])
# Specify axes with negative values.
self.doOutputTest(input_shape, channels_axis=-1, reduction_axes=[-3, -2])
# Specify axes with positive values.
self.doOutputTestForMeanCloseToZero(
input_shape, channels_axis=2, reduction_axes=[0, 1])
# Specify axes with negative values.
self.doOutputTestForMeanCloseToZero(
input_shape, channels_axis=-1, reduction_axes=[-3, -2])
def testOutputSmallInput4D_NCHW(self):
input_shape = [10, 10, 10, 30]
# Specify axes with positive values.
self.doOutputTest(input_shape, channels_axis=1, reduction_axes=[2, 3])
# Specify axes with negative values.
self.doOutputTest(input_shape, channels_axis=-3, reduction_axes=[-2, -1])
# Specify axes with positive values.
self.doOutputTestForMeanCloseToZero(
input_shape, channels_axis=1, reduction_axes=[2, 3])
# Specify axes with negative values.
self.doOutputTestForMeanCloseToZero(
input_shape, channels_axis=-3, reduction_axes=[-2, -1])
def testOutputSmallInput3D_NCHW(self):
input_shape = [10, 10, 30]
# Specify axes with positive values.
self.doOutputTest(input_shape, channels_axis=0, reduction_axes=[1, 2])
# Specify axes with negative values.
self.doOutputTest(input_shape, channels_axis=-3, reduction_axes=[-2, -1])
# Specify axes with positive values.
self.doOutputTestForMeanCloseToZero(
input_shape, channels_axis=0, reduction_axes=[1, 2])
# Specify axes with negative values.
self.doOutputTestForMeanCloseToZero(
input_shape, channels_axis=-3, reduction_axes=[-2, -1])
def testOutputBigInput4D_NHWC(self):
self.doOutputTest(
[5, 100, 100, 1], channels_axis=3, reduction_axes=[1, 2], groups=1)
self.doOutputTestForMeanCloseToZero(
[5, 100, 100, 1], channels_axis=3, reduction_axes=[1, 2], groups=1)
def testOutputBigInput4D_NCHW(self):
self.doOutputTest(
[1, 100, 100, 4], channels_axis=1, reduction_axes=[2, 3], groups=4)
self.doOutputTestForMeanCloseToZero(
[1, 100, 100, 4], channels_axis=1, reduction_axes=[2, 3], groups=4)
def testOutputSmallInput2D_NC(self):
self.doOutputTest(
[10, 7 * 100], channels_axis=1, reduction_axes=[], groups=7)
self.doOutputTestForMeanCloseToZero(
[10, 7 * 100], channels_axis=1, reduction_axes=[], groups=7)
def testOutputSmallInput5D_NCXXX(self):
self.doOutputTest(
[10, 10, 20, 40, 5],
channels_axis=1,
reduction_axes=[2, 3, 4],
groups=5)
self.doOutputTestForMeanCloseToZero(
[10, 10, 20, 40, 5],
channels_axis=1,
reduction_axes=[2, 3, 4],
groups=5)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/normalization_test.py
|
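The tests above exercise `group_norm` from `tf.contrib.layers`. As a point of reference, a minimal standalone sketch of the same call pattern is shown below; it assumes a TensorFlow 1.x environment where `tf.contrib` is still importable, and the input shape and group count are arbitrary.
```python
import tensorflow as tf  # TensorFlow 1.x; tf.contrib.layers.group_norm assumed available

# NHWC input with 8 channels, normalized in 4 groups of 2 channels each.
images = tf.random_uniform((5, 3, 3, 8), seed=1)
outputs = tf.contrib.layers.group_norm(
    images, groups=4, channels_axis=-1, reduction_axes=(-3, -2))

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())  # initializes beta and gamma
  print(sess.run(outputs).shape)  # (5, 3, 3, 8)
```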
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for regularizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import summaries as summaries_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class SummariesTest(test.TestCase):
def test_summarize_scalar_tensor(self):
with self.cached_session():
scalar_var = variables.Variable(1)
summary_op = summaries_lib.summarize_tensor(scalar_var)
self.assertEquals(summary_op.op.type, 'ScalarSummary')
def test_summarize_multidim_tensor(self):
with self.cached_session():
tensor_var = variables.Variable([1, 2, 3])
summary_op = summaries_lib.summarize_tensor(tensor_var)
self.assertEquals(summary_op.op.type, 'HistogramSummary')
def test_summarize_activation(self):
with self.cached_session():
var = variables.Variable(1)
op = array_ops.identity(var, name='SummaryTest')
summary_op = summaries_lib.summarize_activation(op)
self.assertEquals(summary_op.op.type, 'HistogramSummary')
names = [op.op.name for op in ops.get_collection(ops.GraphKeys.SUMMARIES)]
self.assertEquals(len(names), 1)
self.assertIn(u'SummaryTest/activation', names)
def test_summarize_activation_relu(self):
with self.cached_session():
var = variables.Variable(1)
op = nn_ops.relu(var, name='SummaryTest')
summary_op = summaries_lib.summarize_activation(op)
self.assertEquals(summary_op.op.type, 'HistogramSummary')
names = [op.op.name for op in ops.get_collection(ops.GraphKeys.SUMMARIES)]
self.assertEquals(len(names), 2)
self.assertIn(u'SummaryTest/zeros', names)
self.assertIn(u'SummaryTest/activation', names)
def test_summarize_activation_relu6(self):
with self.cached_session():
var = variables.Variable(1)
op = nn_ops.relu6(var, name='SummaryTest')
summary_op = summaries_lib.summarize_activation(op)
self.assertEquals(summary_op.op.type, 'HistogramSummary')
names = [op.op.name for op in ops.get_collection(ops.GraphKeys.SUMMARIES)]
self.assertEquals(len(names), 3)
self.assertIn(u'SummaryTest/zeros', names)
self.assertIn(u'SummaryTest/sixes', names)
self.assertIn(u'SummaryTest/activation', names)
def test_summarize_collection_regex(self):
with self.cached_session():
var = variables.Variable(1)
array_ops.identity(var, name='Test1')
ops.add_to_collection('foo', array_ops.identity(var, name='Test2'))
ops.add_to_collection('foo', array_ops.identity(var, name='Foobar'))
ops.add_to_collection('foo', array_ops.identity(var, name='Test3'))
summaries = summaries_lib.summarize_collection('foo', r'Test[123]')
names = [op.op.name for op in summaries]
self.assertEquals(len(names), 2)
self.assertIn(u'Test2_summary', names)
self.assertIn(u'Test3_summary', names)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/summaries_test.py
|
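The summary helpers tested above live in `tensorflow.contrib.layers.python.layers.summaries`. A small usage sketch follows, again assuming a TensorFlow 1.x environment where this contrib module can be imported; the variable names and values are illustrative.
```python
import tensorflow as tf  # TensorFlow 1.x
from tensorflow.contrib.layers.python.layers import summaries as summaries_lib

with tf.Graph().as_default():
  scalar_var = tf.Variable(1.0)
  vector_var = tf.Variable([1.0, 2.0, 3.0])
  # Scalar tensors produce a scalar summary; higher-rank tensors a histogram.
  summaries_lib.summarize_tensor(scalar_var)
  summaries_lib.summarize_tensor(vector_var)
  merged = tf.summary.merge_all()  # gathers ops added to the SUMMARIES collection
```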
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn abstraction.
FeatureColumns provide a high level abstraction for ingesting and representing
features in `Estimator` models.
FeatureColumns are the primary way of encoding features for pre-canned
`Estimator` models.
When using FeatureColumns with `Estimator` models, the type of feature column
you should choose depends on (1) the feature type and (2) the model type.
(1) Feature type:
* Continuous features can be represented by `real_valued_column`.
* Categorical features can be represented by any `sparse_column_with_*`
column (`sparse_column_with_keys`, `sparse_column_with_vocabulary_file`,
`sparse_column_with_hash_bucket`, `sparse_column_with_integerized_feature`).
(2) Model type:
* Deep neural network models (`DNNClassifier`, `DNNRegressor`).
Continuous features can be directly fed into deep neural network models.
age_column = real_valued_column("age")
    To feed sparse features into DNN models, wrap the column with
    `embedding_column` or `one_hot_column`. `one_hot_column` creates a dense
    boolean tensor with an entry for each possible value, so its computation
    cost is linear in the number of possible values rather than in the number
    of values that actually occur in the sparse tensor. Using `one_hot_column`
    is therefore only recommended for features with a small number of possible
    values. For features with many possible values, or for very sparse
    features, `embedding_column` is recommended.
embedded_dept_column = embedding_column(
sparse_column_with_keys("department", ["math", "philosophy", ...]),
dimension=10)
* Wide (aka linear) models (`LinearClassifier`, `LinearRegressor`).
    Sparse features can be fed directly into linear models. When doing so, an
    embedding lookup is used to efficiently perform the sparse matrix
    multiplication.
dept_column = sparse_column_with_keys("department",
["math", "philosophy", "english"])
It is recommended that continuous features be bucketized before being
fed into linear models.
bucketized_age_column = bucketized_column(
source_column=age_column,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
Sparse features can be crossed (also known as conjuncted or combined) in
order to form non-linearities, and then fed into linear models.
cross_dept_age_column = crossed_column(
columns=[department_column, bucketized_age_column],
hash_bucket_size=1000)
Example of building an `Estimator` model using FeatureColumns:
# Define features and transformations
deep_feature_columns = [age_column, embedded_dept_column]
wide_feature_columns = [dept_column, bucketized_age_column,
cross_dept_age_column]
# Build deep model
estimator = DNNClassifier(
feature_columns=deep_feature_columns,
hidden_units=[500, 250, 50])
estimator.train(...)
# Or build a wide model
estimator = LinearClassifier(
feature_columns=wide_feature_columns)
estimator.train(...)
# Or build a wide and deep model!
estimator = DNNLinearCombinedClassifier(
linear_feature_columns=wide_feature_columns,
dnn_feature_columns=deep_feature_columns,
dnn_hidden_units=[500, 250, 50])
estimator.train(...)
FeatureColumns can also be transformed into a generic input layer for
custom models using `input_from_feature_columns` within
`feature_column_ops.py`.
Example of building a non-`Estimator` model using FeatureColumns:
# Building model via layers
deep_feature_columns = [age_column, embedded_dept_column]
columns_to_tensor = parse_feature_columns_from_examples(
serialized=my_data,
feature_columns=deep_feature_columns)
first_layer = input_from_feature_columns(
columns_to_tensors=columns_to_tensor,
feature_columns=deep_feature_columns)
second_layer = fully_connected(first_layer, ...)
See feature_column_ops_test for more examples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import math
import six
from tensorflow.contrib import lookup
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import embedding_ops
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.layers.python.ops import bucketization_op
from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op
from tensorflow.contrib.layers.python.ops import sparse_ops as contrib_sparse_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_py
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
# Imports the core `InputLayer` symbol in contrib during development.
InputLayer = fc_core.InputLayer # pylint: disable=invalid-name
class _LinearEmbeddingLookupArguments(
collections.namedtuple("_LinearEmbeddingLookupArguments", [
"input_tensor", "weight_tensor", "vocab_size", "initializer", "combiner"
])):
"""Represents the information needed from a column for embedding lookup.
Used to compute DNN inputs and weighted sum.
"""
pass
class _DeepEmbeddingLookupArguments(
collections.namedtuple("_DeepEmbeddingLookupArguments", [
"input_tensor", "weight_tensor", "vocab_size", "initializer",
"combiner", "dimension", "shared_embedding_name", "hash_key",
"max_norm", "trainable"
])):
"""Represents the information needed from a column for embedding lookup.
Used to compute DNN inputs and weighted sum.
"""
pass
@six.add_metaclass(abc.ABCMeta)
class _FeatureColumn(object):
"""Represents a feature column abstraction.
  To distinguish the concept of a feature family from a specific binary feature
  within a family, we refer to a feature family like "country" as a feature
  column. For example, "country:US" is a feature that belongs to the "country"
  feature column and has the feature value "US".
  This class is abstract. Users should not instantiate it directly. The
  following classes (_SparseColumn, _RealValuedColumn, ...) are concrete
  implementations.
"""
@abc.abstractproperty
@deprecation.deprecated("2016-09-25", "Should be private.")
def name(self):
"""Returns the name of column or transformed column."""
pass
@abc.abstractproperty
@deprecation.deprecated("2016-09-25", "Should be private.")
def config(self):
"""Returns configuration of the base feature for `tf.io.parse_example`."""
pass
@abc.abstractproperty
@deprecation.deprecated("2016-09-25", "Should be private.")
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
pass
@abc.abstractmethod
@deprecation.deprecated("2016-09-25", "Should be private.")
def insert_transformed_feature(self, columns_to_tensors):
"""Apply transformation and inserts it into columns_to_tensors.
Args:
columns_to_tensors: A mapping from feature columns to tensors. 'string'
key means a base feature (not-transformed). It can have _FeatureColumn
as a key too. That means that _FeatureColumn is already transformed.
"""
raise NotImplementedError(
"Transform is not implemented for {}.".format(self))
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collection=None,
trainable=True,
output_rank=2):
"""Returns a Tensor as an input to the first layer of neural network."""
raise ValueError("Calling an abstract method.")
def _deep_embedding_lookup_arguments(self, input_tensor):
"""Returns arguments to embedding lookup to build an input layer."""
raise NotImplementedError(
"No deep embedding lookup arguments for column {}.".format(self))
# It is expected that classes implement either wide_embedding_lookup_arguments
# or to_dense_tensor to be used in linear models.
# pylint: disable=unused-argument
def _wide_embedding_lookup_arguments(self, input_tensor):
"""Returns arguments to look up embeddings for this column."""
raise NotImplementedError(
"No wide embedding lookup arguments for column {}.".format(self))
# pylint: disable=unused-argument
def _to_dense_tensor(self, input_tensor):
"""Returns a dense tensor representing this column's values."""
raise NotImplementedError(
"No dense tensor representation for column {}.".format(self))
def _checkpoint_path(self):
"""Returns None, or a (path,tensor_name) to load a checkpoint from."""
return None
def _key_without_properties(self, properties):
"""Helper method for self.key() that omits particular properties."""
fields_values = []
# pylint: disable=protected-access
for i, k in enumerate(self._fields):
if k in properties:
# Excludes a property from the key.
# For instance, exclude `initializer` from the key of EmbeddingColumn
# since we don't support users specifying different initializers for
# the same embedding column. Ditto for `normalizer` and
# RealValuedColumn.
# Special treatment is needed since the default str form of a
# function contains its address, which could introduce non-determinism
# in sorting.
continue
fields_values.append("{}={}".format(k, self[i]))
# pylint: enable=protected-access
# This is effectively the same format as str(self), except with our special
# treatment.
return "{}({})".format(type(self).__name__, ", ".join(fields_values))
# TODO(b/30410315): Support warm starting in all feature columns.
class _SparseColumn(
_FeatureColumn,
fc_core._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple("_SparseColumn", [
"column_name", "is_integerized", "bucket_size", "lookup_config",
"combiner", "dtype"
])):
"""Represents a sparse feature column also known as categorical features.
Instances of this class are immutable. A sparse column means features are
  sparse and the dictionary returned by InputBuilder contains a
("column_name", SparseTensor) pair.
One and only one of bucket_size or lookup_config should be set. If
is_integerized is True then bucket_size should be set.
Attributes:
column_name: A string defining sparse column name.
    is_integerized: A bool; if True, the feature type is an integer and the
      feature value itself can be used as the id.
    bucket_size: An int that is > 0. The number of buckets.
    lookup_config: A _SparseIdLookupConfig defining the feature-to-id lookup
      configuration.
combiner: A string specifying how to reduce if the sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum"
the default. "sqrtn" often achieves good accuracy, in particular with
bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
dtype: Type of features, either `tf.string` or `tf.int64`.
Raises:
TypeError: if lookup_config is not a _SparseIdLookupConfig.
    ValueError: if the above expectations about the input are not met.
"""
def __new__(cls,
column_name,
is_integerized=False,
bucket_size=None,
lookup_config=None,
combiner="sum",
dtype=dtypes.string):
if is_integerized and bucket_size is None:
raise ValueError("bucket_size must be set if is_integerized is True. "
"column_name: {}".format(column_name))
if is_integerized and not dtype.is_integer:
raise ValueError("dtype must be an integer if is_integerized is True. "
"dtype: {}, column_name: {}.".format(dtype, column_name))
if dtype != dtypes.string and not dtype.is_integer:
raise ValueError("dtype must be string or integer. "
"dtype: {}, column_name: {}".format(dtype, column_name))
if bucket_size is None and lookup_config is None:
raise ValueError("one of bucket_size or lookup_config must be set. "
"column_name: {}".format(column_name))
if bucket_size is not None and lookup_config:
raise ValueError("one and only one of bucket_size or lookup_config "
"must be set. column_name: {}".format(column_name))
if bucket_size is not None and bucket_size < 1:
raise ValueError("bucket_size must be at least 1. "
"bucket_size: {}, column_name: {}".format(
bucket_size, column_name))
if ((lookup_config) and
(not isinstance(lookup_config, _SparseIdLookupConfig))):
raise TypeError(
"lookup_config must be an instance of _SparseIdLookupConfig. "
"Given one is in type {} for column_name {}".format(
type(lookup_config), column_name))
if (lookup_config and lookup_config.vocabulary_file and
lookup_config.vocab_size is None):
raise ValueError("vocab_size must be defined. "
"column_name: {}".format(column_name))
return super(_SparseColumn, cls).__new__(
cls,
column_name,
is_integerized=is_integerized,
bucket_size=bucket_size,
lookup_config=lookup_config,
combiner=combiner,
dtype=dtype)
@property
def name(self):
return self.column_name
@property
def length(self):
"""Returns vocabulary or hash_bucket size."""
if self.bucket_size is not None:
return self.bucket_size
return self.lookup_config.vocab_size + self.lookup_config.num_oov_buckets
@property
def config(self):
return {self.column_name: parsing_ops.VarLenFeature(self.dtype)}
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
def id_tensor(self, input_tensor):
"""Returns the id tensor from the given transformed input_tensor."""
return input_tensor
# pylint: disable=unused-argument
def weight_tensor(self, input_tensor):
"""Returns the weight tensor from the given transformed input_tensor."""
return None
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
raise ValueError(
"SparseColumn is not supported in DNN. "
"Please use embedding_column or one_hot_column. column: {}".format(
self))
def _wide_embedding_lookup_arguments(self, input_tensor):
return _LinearEmbeddingLookupArguments(
input_tensor=self.id_tensor(input_tensor),
weight_tensor=self.weight_tensor(input_tensor),
vocab_size=self.length,
initializer=init_ops.zeros_initializer(),
combiner=self.combiner)
def _get_input_sparse_tensor(self, input_tensor):
"""sparsify input_tensor if dense."""
if not isinstance(input_tensor, sparse_tensor_py.SparseTensor):
# To avoid making any assumptions about which values are to be ignored,
# we set ignore_value to -1 for numeric tensors to avoid excluding valid
# indices.
if input_tensor.dtype == dtypes.string:
ignore_value = ""
else:
ignore_value = -1
input_tensor = _reshape_real_valued_tensor(input_tensor, 2, self.name)
input_tensor = contrib_sparse_ops.dense_to_sparse_tensor(
input_tensor, ignore_value=ignore_value)
return input_tensor
def is_compatible(self, other_column):
"""Check compatibility of two sparse columns."""
if self.lookup_config and other_column.lookup_config:
return self.lookup_config == other_column.lookup_config
compatible = (
self.length == other_column.length and
(self.dtype == other_column.dtype or
(self.dtype.is_integer and other_column.dtype.is_integer)))
if compatible:
logging.warn("Column {} and {} may not have the same vocabulary.".format(
self.name, other_column.name))
return compatible
@abc.abstractmethod
def _do_transform(self, input_tensor):
pass
def insert_transformed_feature(self, columns_to_tensors):
"""Handles sparse column to id conversion."""
input_tensor = self._get_input_sparse_tensor(columns_to_tensors[self.name])
columns_to_tensors[self] = self._do_transform(input_tensor)
def _transform_feature(self, inputs):
input_tensor = self._get_input_sparse_tensor(inputs.get(self.name))
return self._do_transform(input_tensor)
@property
def _parse_example_spec(self):
return self.config
@property
def _num_buckets(self):
return self.length
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
input_tensor = inputs.get(self)
return fc_core._CategoricalColumn.IdWeightPair( # pylint: disable=protected-access
self.id_tensor(input_tensor), self.weight_tensor(input_tensor))
class _SparseColumnIntegerized(_SparseColumn):
"""See `sparse_column_with_integerized_feature`."""
def _do_transform(self, input_tensor):
sparse_id_values = math_ops.mod(
input_tensor.values, self.bucket_size, name="mod")
return sparse_tensor_py.SparseTensor(input_tensor.indices, sparse_id_values,
input_tensor.dense_shape)
def sparse_column_with_integerized_feature(column_name,
bucket_size,
combiner="sum",
dtype=dtypes.int64):
"""Creates an integerized _SparseColumn.
  Use this when your features are already pre-integerized into int64 IDs, that
  is, when the incoming values are already the IDs desired in the output.
  Integerized means the feature value itself can be used as the id.
  Typically this is used for contiguous ranges of integer indexes, but it
  doesn't have to be; the output value is simply copied from the input
  feature. Be aware, however, that large gaps of unused integers can be
  wasteful downstream (for instance, a one-hot tensor built from these IDs
  will contain entries for the unused integers that are always zero).
Args:
column_name: A string defining sparse column name.
    bucket_size: An int that is >= 1. The number of buckets. It should be
      bigger than the maximum feature value. In other words, features in this
      column should be int64 values in the range [0, bucket_size).
combiner: A string specifying how to reduce if the sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum"
the default. "sqrtn" often achieves good accuracy, in particular with
bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
dtype: Type of features. It should be an integer type. Default value is
dtypes.int64.
Returns:
An integerized _SparseColumn definition.
Raises:
ValueError: bucket_size is less than 1.
ValueError: dtype is not integer.
"""
return _SparseColumnIntegerized(
column_name,
is_integerized=True,
bucket_size=bucket_size,
combiner=combiner,
dtype=dtype)
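# Example (illustrative only; the feature name and bucket size below are
# hypothetical). Features arriving as int64 IDs in [0, 7) are used directly as
# categorical ids:
#   weekday_column = sparse_column_with_integerized_feature(
#       "weekday", bucket_size=7)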
class _SparseColumnHashed(_SparseColumn):
"""See `sparse_column_with_hash_bucket`."""
def __new__(cls,
column_name,
is_integerized=False,
bucket_size=None,
lookup_config=None,
combiner="sum",
dtype=dtypes.string,
hash_keys=None):
if hash_keys is not None:
if not isinstance(hash_keys, list) or not hash_keys:
raise ValueError("hash_keys must be a non-empty list.")
if (any([not isinstance(key_pair, list) for key_pair in hash_keys]) or
any([len(key_pair) != 2 for key_pair in hash_keys]) or
any([not isinstance(key, int) for key in nest.flatten(hash_keys)])):
raise ValueError(
"Each element of hash_keys must be a pair of integers.")
obj = super(_SparseColumnHashed, cls).__new__(
cls,
column_name,
is_integerized=is_integerized,
bucket_size=bucket_size,
lookup_config=lookup_config,
combiner=combiner,
dtype=dtype)
obj.hash_keys = hash_keys
return obj
def _do_transform(self, input_tensor):
if self.dtype.is_integer:
sparse_values = string_ops.as_string(input_tensor.values)
else:
sparse_values = input_tensor.values
if self.hash_keys:
result = []
for key in self.hash_keys:
sparse_id_values = string_ops.string_to_hash_bucket_strong(
sparse_values, self.bucket_size, key)
result.append(
sparse_tensor_py.SparseTensor(input_tensor.indices,
sparse_id_values,
input_tensor.dense_shape))
return sparse_ops.sparse_concat(axis=1, sp_inputs=result, name="lookup")
else:
sparse_id_values = string_ops.string_to_hash_bucket_fast(
sparse_values, self.bucket_size, name="lookup")
return sparse_tensor_py.SparseTensor(input_tensor.indices,
sparse_id_values,
input_tensor.dense_shape)
def sparse_column_with_hash_bucket(column_name,
hash_bucket_size,
combiner="sum",
dtype=dtypes.string,
hash_keys=None):
"""Creates a _SparseColumn with hashed bucket configuration.
Use this when your sparse features are in string or integer format, but you
don't have a vocab file that maps each value to an integer ID.
output_id = Hash(input_feature_string) % bucket_size
  When hash_keys is set, multiple integer IDs are created, one for each key
  pair in `hash_keys`. This is useful for reducing collisions of hashed ids.
Args:
column_name: A string defining sparse column name.
hash_bucket_size: An int that is > 1. The number of buckets.
combiner: A string specifying how to reduce if the sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum"
the default. "sqrtn" often achieves good accuracy, in particular with
bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
dtype: The type of features. Only string and integer types are supported.
    hash_keys: The hash keys to use. It is a list of lists of two uint64s. If
      None, a simple and fast hashing algorithm is used. Otherwise, multiple
      strong hash ids are produced, one for each pair of uint64s in this
      argument.
Returns:
A _SparseColumn with hashed bucket configuration
Raises:
    ValueError: hash_bucket_size is not greater than 1.
ValueError: dtype is neither string nor integer.
"""
return _SparseColumnHashed(
column_name,
bucket_size=hash_bucket_size,
combiner=combiner,
dtype=dtype,
hash_keys=hash_keys)
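# Example (illustrative only; the feature name, bucket size, and hash key pairs
# are hypothetical). Each string value is hashed into [0, hash_bucket_size);
# passing hash_keys produces one hashed id space per key pair, which reduces
# collisions:
#   query_column = sparse_column_with_hash_bucket("query", hash_bucket_size=10000)
#   query_column_strong = sparse_column_with_hash_bucket(
#       "query", hash_bucket_size=10000, hash_keys=[[1, 2], [3, 4]])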
class _SparseColumnKeys(_SparseColumn):
"""See `sparse_column_with_keys`."""
def _do_transform(self, input_tensor):
table = lookup.index_table_from_tensor(
mapping=tuple(self.lookup_config.keys),
default_value=self.lookup_config.default_value,
dtype=self.dtype,
name="lookup")
return table.lookup(input_tensor)
def sparse_column_with_keys(column_name,
keys,
default_value=-1,
combiner="sum",
dtype=dtypes.string):
"""Creates a _SparseColumn with keys.
Look up logic is as follows:
lookup_id = index_of_feature_in_keys if feature in keys else default_value
Args:
column_name: A string defining sparse column name.
keys: A list or tuple defining vocabulary. Must be castable to `dtype`.
default_value: The value to use for out-of-vocabulary feature values.
Default is -1.
combiner: A string specifying how to reduce if the sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum"
the default. "sqrtn" often achieves good accuracy, in particular with
bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
dtype: Type of features. Only integer and string are supported.
Returns:
A _SparseColumnKeys with keys configuration.
"""
keys = tuple(keys)
return _SparseColumnKeys(
column_name,
lookup_config=_SparseIdLookupConfig(
keys=keys, vocab_size=len(keys), default_value=default_value),
combiner=combiner,
dtype=dtype)
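# Example (illustrative only; the feature name and vocabulary are hypothetical).
# Values found in `keys` map to their index; anything else maps to -1:
#   dept_column = sparse_column_with_keys(
#       "department", keys=["math", "philosophy", "english"])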
class _SparseColumnVocabulary(_SparseColumn):
"""See `sparse_column_with_vocabulary_file`."""
def _do_transform(self, st):
if self.dtype.is_integer:
sparse_string_values = string_ops.as_string(st.values)
sparse_string_tensor = sparse_tensor_py.SparseTensor(
st.indices, sparse_string_values, st.dense_shape)
else:
sparse_string_tensor = st
table = lookup.index_table_from_file(
vocabulary_file=self.lookup_config.vocabulary_file,
num_oov_buckets=self.lookup_config.num_oov_buckets,
vocab_size=self.lookup_config.vocab_size,
default_value=self.lookup_config.default_value,
name=self.name + "_lookup")
return table.lookup(sparse_string_tensor)
def sparse_column_with_vocabulary_file(column_name,
vocabulary_file,
num_oov_buckets=0,
vocab_size=None,
default_value=-1,
combiner="sum",
dtype=dtypes.string):
"""Creates a _SparseColumn with vocabulary file configuration.
Use this when your sparse features are in string or integer format, and you
have a vocab file that maps each value to an integer ID.
output_id = LookupIdFromVocab(input_feature_string)
Args:
column_name: A string defining sparse column name.
vocabulary_file: The vocabulary filename.
    num_oov_buckets: The number of out-of-vocabulary buckets. If zero, all
      out-of-vocabulary features will be ignored.
    vocab_size: Number of elements in the vocabulary.
default_value: The value to use for out-of-vocabulary feature values.
Defaults to -1.
combiner: A string specifying how to reduce if the sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum"
the default. "sqrtn" often achieves good accuracy, in particular with
bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
dtype: The type of features. Only string and integer types are supported.
Returns:
A _SparseColumn with vocabulary file configuration.
Raises:
ValueError: vocab_size is not defined.
ValueError: dtype is neither string nor integer.
"""
if vocab_size is None:
raise ValueError("vocab_size should be defined. "
"column_name: {}".format(column_name))
return _SparseColumnVocabulary(
column_name,
lookup_config=_SparseIdLookupConfig(
vocabulary_file=vocabulary_file,
num_oov_buckets=num_oov_buckets,
vocab_size=vocab_size,
default_value=default_value),
combiner=combiner,
dtype=dtype)
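# Example (illustrative only; the feature name, file path, and sizes are
# hypothetical). IDs come from the vocabulary file, with one extra bucket for
# out-of-vocabulary values:
#   country_column = sparse_column_with_vocabulary_file(
#       "country", vocabulary_file="/path/to/countries.txt",
#       vocab_size=195, num_oov_buckets=1)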
class _WeightedSparseColumn(
_FeatureColumn,
fc_core._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple("_WeightedSparseColumn",
["sparse_id_column", "weight_column_name", "dtype"])
):
"""See `weighted_sparse_column`."""
def __new__(cls, sparse_id_column, weight_column_name, dtype):
return super(_WeightedSparseColumn, cls).__new__(cls, sparse_id_column,
weight_column_name, dtype)
@property
def name(self):
return "{}_weighted_by_{}".format(self.sparse_id_column.name,
self.weight_column_name)
@property
def length(self):
"""Returns id size."""
return self.sparse_id_column.length
@property
def config(self):
config = _get_feature_config(self.sparse_id_column)
config.update(
{self.weight_column_name: parsing_ops.VarLenFeature(self.dtype)})
return config
@property
def lookup_config(self):
return self.sparse_id_column.lookup_config
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
def id_tensor(self, input_tensor):
"""Returns the id tensor from the given transformed input_tensor."""
return input_tensor[0]
def weight_tensor(self, input_tensor):
"""Returns the weight tensor from the given transformed input_tensor."""
return input_tensor[1]
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
raise ValueError(
"WeightedSparseColumn is not supported in DNN. "
"Please use embedding_column or one_hot_column. column: {}".format(
self))
def _wide_embedding_lookup_arguments(self, input_tensor):
return _LinearEmbeddingLookupArguments(
input_tensor=self.id_tensor(input_tensor),
weight_tensor=self.weight_tensor(input_tensor),
vocab_size=self.length,
initializer=init_ops.zeros_initializer(),
combiner=self.sparse_id_column.combiner)
def _do_transform(self, id_tensor, weight_tensor):
if not isinstance(weight_tensor, sparse_tensor_py.SparseTensor):
# The weight tensor can be a regular Tensor. In such case, sparsify it.
weight_tensor = contrib_sparse_ops.dense_to_sparse_tensor(weight_tensor)
if not self.dtype.is_floating:
weight_tensor = math_ops.cast(weight_tensor, dtypes.float32)
return tuple([id_tensor, weight_tensor])
def insert_transformed_feature(self, columns_to_tensors):
"""Inserts a tuple with the id and weight tensors."""
if self.sparse_id_column not in columns_to_tensors:
self.sparse_id_column.insert_transformed_feature(columns_to_tensors)
weight_tensor = columns_to_tensors[self.weight_column_name]
columns_to_tensors[self] = self._do_transform(
columns_to_tensors[self.sparse_id_column], weight_tensor)
def _transform_feature(self, inputs):
return self._do_transform(
inputs.get(self.sparse_id_column), inputs.get(self.weight_column_name))
@property
def _parse_example_spec(self):
return self.config
@property
def _num_buckets(self):
return self.length
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
input_tensor = inputs.get(self)
return fc_core._CategoricalColumn.IdWeightPair( # pylint: disable=protected-access
self.id_tensor(input_tensor), self.weight_tensor(input_tensor))
def is_compatible(self, other_column):
"""Check compatibility with other sparse column."""
if isinstance(other_column, _WeightedSparseColumn):
return self.sparse_id_column.is_compatible(other_column.sparse_id_column)
return self.sparse_id_column.is_compatible(other_column)
def weighted_sparse_column(sparse_id_column,
weight_column_name,
dtype=dtypes.float32):
"""Creates a _SparseColumn by combining sparse_id_column with a weight column.
Example:
```python
sparse_feature = sparse_column_with_hash_bucket(column_name="sparse_col",
hash_bucket_size=1000)
weighted_feature = weighted_sparse_column(sparse_id_column=sparse_feature,
weight_column_name="weights_col")
```
  This configuration assumes that the model's input dictionary contains the
  following two items:
* (key="sparse_col", value=sparse_tensor) where sparse_tensor is
a SparseTensor.
* (key="weights_col", value=weights_tensor) where weights_tensor
is a SparseTensor.
  The following are assumed to be true:
* sparse_tensor.indices = weights_tensor.indices
* sparse_tensor.dense_shape = weights_tensor.dense_shape
Args:
sparse_id_column: A `_SparseColumn` which is created by
`sparse_column_with_*` functions.
weight_column_name: A string defining a sparse column name which represents
weight or value of the corresponding sparse id feature.
dtype: Type of weights, such as `tf.float32`. Only floating and integer
weights are supported.
Returns:
A _WeightedSparseColumn composed of two sparse features: one represents id,
the other represents weight (value) of the id feature in that example.
Raises:
ValueError: if dtype is not convertible to float.
"""
if not (dtype.is_integer or dtype.is_floating):
raise ValueError(
"dtype is not convertible to float. Given {}".format(dtype))
return _WeightedSparseColumn(sparse_id_column, weight_column_name, dtype)
class _OneHotColumn(
_FeatureColumn,
fc_core._DenseColumn, # pylint: disable=protected-access
collections.namedtuple("_OneHotColumn", ["sparse_id_column"])):
"""Represents a one-hot column for use in deep networks.
Args:
sparse_id_column: A _SparseColumn which is created by `sparse_column_with_*`
function.
"""
@property
def name(self):
return "{}_one_hot".format(self.sparse_id_column.name)
@property
def length(self):
"""Returns vocabulary or hash_bucket size."""
return self.sparse_id_column.length
@property
def config(self):
"""Returns the parsing config of the origin column."""
return _get_feature_config(self.sparse_id_column)
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
def insert_transformed_feature(self, columns_to_tensors):
"""Used by the Transformer to prevent double transformations."""
if self.sparse_id_column not in columns_to_tensors:
self.sparse_id_column.insert_transformed_feature(columns_to_tensors)
columns_to_tensors[self] = columns_to_tensors[self.sparse_id_column]
def _to_dnn_input_layer(self,
transformed_input_tensor,
unused_weight_collections=None,
unused_trainable=False,
output_rank=2):
"""Returns a Tensor as an input to the first layer of neural network.
Args:
transformed_input_tensor: A tensor that has undergone the transformations
in `insert_transformed_feature`. Rank should be >= `output_rank`.
unused_weight_collections: Unused. One hot encodings are not variable.
unused_trainable: Unused. One hot encodings are not trainable.
output_rank: the desired rank of the output `Tensor`.
Returns:
A multi-hot Tensor to be fed into the first layer of neural network.
Raises:
ValueError: When using one_hot_column with weighted_sparse_column.
This is not yet supported.
"""
# Reshape ID column to `output_rank`.
sparse_id_column = self.sparse_id_column.id_tensor(transformed_input_tensor)
# pylint: disable=protected-access
sparse_id_column = layers._inner_flatten(sparse_id_column, output_rank)
weight_tensor = self.sparse_id_column.weight_tensor(
transformed_input_tensor)
if weight_tensor is not None:
weighted_column = sparse_ops.sparse_merge(
sp_ids=sparse_id_column,
sp_values=weight_tensor,
vocab_size=self.length)
# Remove (?, -1) index
weighted_column = sparse_ops.sparse_slice(
weighted_column, array_ops.zeros_like(weighted_column.dense_shape),
weighted_column.dense_shape)
dense_tensor = sparse_ops.sparse_tensor_to_dense(weighted_column)
batch_shape = array_ops.shape(dense_tensor)[:-1]
dense_tensor_shape = array_ops.concat([batch_shape, [self.length]],
axis=0)
dense_tensor = array_ops.reshape(dense_tensor, dense_tensor_shape)
return dense_tensor
dense_id_tensor = sparse_ops.sparse_tensor_to_dense(
sparse_id_column, default_value=-1)
# One hot must be float for tf.concat reasons since all other inputs to
# input_layer are float32.
one_hot_id_tensor = array_ops.one_hot(
dense_id_tensor, depth=self.length, on_value=1.0, off_value=0.0)
# Reduce to get a multi-hot per example.
return math_ops.reduce_sum(one_hot_id_tensor, axis=[output_rank - 1])
@property
def _variable_shape(self):
return tensor_shape.TensorShape([self.length])
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
return inputs.get(self)
def _transform_feature(self, inputs):
return self._to_dnn_input_layer(inputs.get(self.sparse_id_column))
@property
def _parse_example_spec(self):
return self.config
class _EmbeddingColumn(
_FeatureColumn,
fc_core._DenseColumn, # pylint: disable=protected-access
collections.namedtuple("_EmbeddingColumn", [
"sparse_id_column", "dimension", "combiner", "initializer",
"ckpt_to_load_from", "tensor_name_in_ckpt", "shared_embedding_name",
"shared_vocab_size", "max_norm", "trainable"
])):
"""Represents an embedding column.
Args:
sparse_id_column: A `_SparseColumn` which is created by
`sparse_column_with_*` or `weighted_sparse_column` functions.
dimension: An integer specifying dimension of the embedding.
combiner: A string specifying how to reduce if there are multiple entries in
a single row. Currently "mean", "sqrtn" and "sum" are supported, with
"mean" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column:
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.compat.v1.truncated_normal_initializer` with mean 0.0 and standard
deviation 1/sqrt(sparse_id_column.length).
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
shared_embedding_name: (Optional). The common name for shared embedding.
shared_vocab_size: (Optional). The common vocab_size used for shared
embedding space.
max_norm: (Optional). If not None, embedding values are l2-normalized to the
value of max_norm.
trainable: (Optional). Should the embedding be trainable. Default is True.
Raises:
ValueError: if `initializer` is specified and is not callable. Also,
if only one of `ckpt_to_load_from` and `tensor_name_in_ckpt` is specified.
"""
def __new__(cls,
sparse_id_column,
dimension,
combiner="mean",
initializer=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
shared_embedding_name=None,
shared_vocab_size=None,
max_norm=None,
trainable=True):
if initializer is not None and not callable(initializer):
raise ValueError("initializer must be callable if specified. "
"Embedding of column_name: {}".format(
sparse_id_column.name))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError("Must specify both `ckpt_to_load_from` and "
"`tensor_name_in_ckpt` or none of them.")
if initializer is None:
logging.warn("The default stddev value of initializer was changed from "
"\"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core "
"implementation (tf.feature_column.embedding_column).")
stddev = 1 / math.sqrt(sparse_id_column.length)
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=stddev)
return super(_EmbeddingColumn,
cls).__new__(cls, sparse_id_column, dimension, combiner,
initializer, ckpt_to_load_from,
tensor_name_in_ckpt, shared_embedding_name,
shared_vocab_size, max_norm, trainable)
@property
def name(self):
if self.shared_embedding_name is None:
return "{}_embedding".format(self.sparse_id_column.name)
else:
return "{}_shared_embedding".format(self.sparse_id_column.name)
@property
def length(self):
"""Returns id size."""
if self.shared_vocab_size is None:
return self.sparse_id_column.length
else:
return self.shared_vocab_size
@property
def config(self):
return _get_feature_config(self.sparse_id_column)
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return self._key_without_properties(["initializer"])
def insert_transformed_feature(self, columns_to_tensors):
if self.sparse_id_column not in columns_to_tensors:
self.sparse_id_column.insert_transformed_feature(columns_to_tensors)
columns_to_tensors[self] = columns_to_tensors[self.sparse_id_column]
def _deep_embedding_lookup_arguments(self, input_tensor):
return _DeepEmbeddingLookupArguments(
input_tensor=self.sparse_id_column.id_tensor(input_tensor),
weight_tensor=self.sparse_id_column.weight_tensor(input_tensor),
vocab_size=self.length,
dimension=self.dimension,
initializer=self.initializer,
combiner=self.combiner,
shared_embedding_name=self.shared_embedding_name,
hash_key=None,
max_norm=self.max_norm,
trainable=self.trainable)
def _checkpoint_path(self):
if self.ckpt_to_load_from is not None:
return self.ckpt_to_load_from, self.tensor_name_in_ckpt
return None
# pylint: disable=unused-argument
def _wide_embedding_lookup_arguments(self, input_tensor):
raise ValueError("Column {} is not supported in linear models. "
"Please use sparse_column.".format(self))
@property
def _variable_shape(self):
return tensor_shape.TensorShape([self.dimension])
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
return _embeddings_from_arguments(
self, self._deep_embedding_lookup_arguments(inputs.get(self)),
weight_collections, trainable)
def _transform_feature(self, inputs):
return inputs.get(self.sparse_id_column)
@property
def _parse_example_spec(self):
return self.config
def _is_variable(v):
"""Returns true if `v` is a variable."""
return isinstance(
v, (variables.Variable, resource_variable_ops.ResourceVariable))
def _embeddings_from_arguments(column,
args,
weight_collections,
trainable,
output_rank=2):
"""Returns embeddings for a column based on the computed arguments.
Args:
    column: the feature column from which the arguments were computed.
args: the _DeepEmbeddingLookupArguments for this column.
weight_collections: collections to store weights in.
trainable: whether these embeddings should be trainable.
output_rank: the desired rank of the returned `Tensor`. Inner dimensions will
be combined to produce the desired rank.
Returns:
the embeddings.
Raises:
ValueError: if not possible to create.
"""
# pylint: disable=protected-access
input_tensor = layers._inner_flatten(args.input_tensor, output_rank)
weight_tensor = None
if args.weight_tensor is not None:
weight_tensor = layers._inner_flatten(args.weight_tensor, output_rank)
# pylint: enable=protected-access
# This option is only enabled for scattered_embedding_column.
if args.hash_key:
embeddings = contrib_variables.model_variable(
name="weights",
shape=[args.vocab_size],
dtype=dtypes.float32,
initializer=args.initializer,
trainable=(trainable and args.trainable),
collections=weight_collections)
return embedding_ops.scattered_embedding_lookup_sparse(
embeddings,
input_tensor,
args.dimension,
hash_key=args.hash_key,
combiner=args.combiner,
name="lookup")
if args.shared_embedding_name is not None:
shared_embedding_collection_name = ("SHARED_EMBEDDING_COLLECTION_" +
args.shared_embedding_name.upper())
graph = ops.get_default_graph()
shared_embedding_collection = (
graph.get_collection_ref(shared_embedding_collection_name))
shape = [args.vocab_size, args.dimension]
if shared_embedding_collection:
if len(shared_embedding_collection) > 1:
raise ValueError("Collection %s can only contain one "
"(partitioned) variable." %
shared_embedding_collection_name)
else:
embeddings = shared_embedding_collection[0]
if embeddings.get_shape() != shape:
raise ValueError("The embedding variable with name {} already "
"exists, but its shape does not match required "
"embedding shape here. Please make sure to use "
"different shared_embedding_name for different "
"shared embeddings.".format(
args.shared_embedding_name))
else:
embeddings = contrib_variables.model_variable(
name=args.shared_embedding_name,
shape=shape,
dtype=dtypes.float32,
initializer=args.initializer,
trainable=(trainable and args.trainable),
collections=weight_collections)
graph.add_to_collection(shared_embedding_collection_name, embeddings)
else:
embeddings = contrib_variables.model_variable(
name="weights",
shape=[args.vocab_size, args.dimension],
dtype=dtypes.float32,
initializer=args.initializer,
trainable=(trainable and args.trainable),
collections=weight_collections)
if _is_variable(embeddings):
embeddings = [embeddings]
else:
embeddings = embeddings._get_variable_list() # pylint: disable=protected-access
# pylint: disable=protected-access
_maybe_restore_from_checkpoint(column._checkpoint_path(), embeddings)
return embedding_ops.safe_embedding_lookup_sparse(
embeddings,
input_tensor,
sparse_weights=weight_tensor,
combiner=args.combiner,
name=column.name + "weights",
max_norm=args.max_norm)
def _maybe_restore_from_checkpoint(checkpoint_path, variable):
if checkpoint_path is not None:
path, tensor_name = checkpoint_path
weights_to_restore = variable
if len(variable) == 1:
weights_to_restore = variable[0]
checkpoint_utils.init_from_checkpoint(path,
{tensor_name: weights_to_restore})
def one_hot_column(sparse_id_column):
"""Creates an `_OneHotColumn` for a one-hot or multi-hot repr in a DNN.
Args:
sparse_id_column: A _SparseColumn which is created by
`sparse_column_with_*` or crossed_column functions. Note that `combiner`
defined in `sparse_id_column` is ignored.
Returns:
An _OneHotColumn.
"""
return _OneHotColumn(sparse_id_column)
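# Example (illustrative only; the underlying sparse column is hypothetical).
# The resulting dense column yields a multi-hot vector of width
# sparse_id_column.length when fed to a DNN input layer:
#   dept_one_hot = one_hot_column(
#       sparse_column_with_keys("department", ["math", "philosophy", "english"]))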
def embedding_column(sparse_id_column,
dimension,
combiner="mean",
initializer=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True):
"""Creates an `_EmbeddingColumn` for feeding sparse data into a DNN.
Args:
sparse_id_column: A `_SparseColumn` which is created by for example
`sparse_column_with_*` or crossed_column functions. Note that `combiner`
defined in `sparse_id_column` is ignored.
dimension: An integer specifying dimension of the embedding.
combiner: A string specifying how to reduce if there are multiple entries in
a single row. Currently "mean", "sqrtn" and "sum" are supported, with
"mean" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column:
* "sum": do not normalize
* "mean": do l1 normalization
* "sqrtn": do l2 normalization
For more information: `tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.compat.v1.truncated_normal_initializer` with mean 0.0 and standard
deviation 1/sqrt(sparse_id_column.length).
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
max_norm: (Optional). If not None, embedding values are l2-normalized to the
value of max_norm.
trainable: (Optional). Should the embedding be trainable. Default is True
Returns:
An `_EmbeddingColumn`.
"""
return _EmbeddingColumn(
sparse_id_column,
dimension,
combiner,
initializer,
ckpt_to_load_from,
tensor_name_in_ckpt,
max_norm=max_norm,
trainable=trainable)
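# Example (illustrative only; the sparse column and dimension are hypothetical).
# Each id in the hashed "query" column is mapped to a trainable 16-dimensional
# vector, with multiple ids per example reduced by the "mean" combiner:
#   query_embedding = embedding_column(
#       sparse_column_with_hash_bucket("query", hash_bucket_size=10000),
#       dimension=16)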
def shared_embedding_columns(sparse_id_columns,
dimension,
combiner="mean",
shared_embedding_name=None,
initializer=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True):
"""Creates a list of `_EmbeddingColumn` sharing the same embedding.
Args:
sparse_id_columns: An iterable of `_SparseColumn`, such as those created by
`sparse_column_with_*` or crossed_column functions. Note that `combiner`
defined in each sparse_id_column is ignored.
dimension: An integer specifying dimension of the embedding.
combiner: A string specifying how to reduce if there are multiple entries in
a single row. Currently "mean", "sqrtn" and "sum" are supported, with
"mean" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column:
* "sum": do not normalize
* "mean": do l1 normalization
* "sqrtn": do l2 normalization
For more information: `tf.embedding_lookup_sparse`.
shared_embedding_name: (Optional). A string specifying the name of shared
embedding weights. This will be needed if you want to reference the shared
embedding separately from the generated `_EmbeddingColumn`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.compat.v1.truncated_normal_initializer` with mean 0.0 and standard
deviation 1/sqrt(sparse_id_columns[0].length).
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
max_norm: (Optional). If not None, embedding values are l2-normalized to the
value of max_norm.
trainable: (Optional). Should the embedding be trainable. Default is True
Returns:
A tuple of `_EmbeddingColumn` with shared embedding space.
Raises:
ValueError: if sparse_id_columns is empty, or its elements are not
compatible with each other.
    TypeError: if `sparse_id_columns` is not a sequence or is a string, or if
      at least one element of `sparse_id_columns` is not a `_SparseColumn` or
      a `_WeightedSparseColumn`.
"""
if (not isinstance(sparse_id_columns, collections.Sequence) or
isinstance(sparse_id_columns, six.string_types)):
raise TypeError(
"sparse_id_columns must be a non-string sequence (ex: list or tuple) "
"instead of type {}.".format(type(sparse_id_columns)))
if len(sparse_id_columns) < 1:
raise ValueError("The input sparse_id_columns should have at least one "
"element.")
for sparse_id_column in sparse_id_columns:
if not (isinstance(sparse_id_column, _SparseColumn) or
isinstance(sparse_id_column, _WeightedSparseColumn)):
raise TypeError(
"Elements of sparse_id_columns must be _SparseColumn or "
"_WeightedSparseColumn, but {} is not.".format(sparse_id_column))
if len(sparse_id_columns) == 1:
return [
_EmbeddingColumn(
sparse_id_columns[0],
dimension,
combiner,
initializer,
ckpt_to_load_from,
tensor_name_in_ckpt,
shared_embedding_name,
max_norm=max_norm,
trainable=trainable)
]
else:
# Check compatibility of sparse_id_columns
compatible = True
for column in sparse_id_columns[1:]:
if isinstance(sparse_id_columns[0], _WeightedSparseColumn):
compatible = compatible and sparse_id_columns[0].is_compatible(column)
else:
compatible = compatible and column.is_compatible(sparse_id_columns[0])
if not compatible:
raise ValueError("The input sparse id columns are not compatible.")
# Construct the shared name and size for shared embedding space.
if not shared_embedding_name:
# Sort the columns so that shared_embedding_name will be deterministic
# even if users pass in unsorted columns from a dict or something.
# Since they are different classes, ordering is SparseColumns first,
# then WeightedSparseColumns.
sparse_columns = []
weighted_sparse_columns = []
for column in sparse_id_columns:
if isinstance(column, _SparseColumn):
sparse_columns.append(column)
else:
weighted_sparse_columns.append(column)
sorted_columns = sorted(sparse_columns) + sorted(
weighted_sparse_columns, key=lambda x: x.name)
if len(sorted_columns) <= 3:
shared_embedding_name = "_".join(
[column.name for column in sorted_columns])
else:
shared_embedding_name = "_".join(
[column.name for column in sorted_columns[0:3]])
shared_embedding_name += (
"_plus_{}_others".format(len(sorted_columns) - 3))
shared_embedding_name += "_shared_embedding"
shared_vocab_size = sparse_id_columns[0].length
embedded_columns = []
for column in sparse_id_columns:
embedded_columns.append(
_EmbeddingColumn(
column,
dimension,
combiner,
initializer,
ckpt_to_load_from,
tensor_name_in_ckpt,
shared_embedding_name,
shared_vocab_size,
max_norm=max_norm,
trainable=trainable))
return tuple(embedded_columns)
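# Illustrative usage sketch (hypothetical helper, not part of the original
# module): two hashed sparse columns that share a single embedding table via
# `shared_embedding_columns`. It assumes `sparse_column_with_hash_bucket`,
# defined earlier in this module; both columns use the same bucket size so
# they are compatible, as required above.
def _shared_embedding_columns_example():
  query_words = sparse_column_with_hash_bucket("query_words",
                                               hash_bucket_size=10000)
  title_words = sparse_column_with_hash_bucket("title_words",
                                               hash_bucket_size=10000)
  # Both columns are embedded into the same 16-dimensional space; the shared
  # weight name is derived deterministically from the sorted column names.
  return shared_embedding_columns([query_words, title_words], dimension=16)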
class _ScatteredEmbeddingColumn(
_FeatureColumn,
fc_core._DenseColumn, # pylint: disable=protected-access
collections.namedtuple("_ScatteredEmbeddingColumn", [
"column_name", "size", "dimension", "hash_key", "combiner",
"initializer"
])):
"""See `scattered_embedding_column`."""
def __new__(cls,
column_name,
size,
dimension,
hash_key,
combiner="sqrtn",
initializer=None):
if initializer is not None and not callable(initializer):
raise ValueError("initializer must be callable if specified. "
"column_name: {}".format(column_name))
if initializer is None:
stddev = 0.1
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=stddev)
return super(_ScatteredEmbeddingColumn,
cls).__new__(cls, column_name, size, dimension, hash_key,
combiner, initializer)
@property
def name(self):
return "{}_scattered_embedding".format(self.column_name)
@property
def config(self):
return {self.column_name: parsing_ops.VarLenFeature(dtypes.string)}
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return self._key_without_properties(["initializer"])
def insert_transformed_feature(self, columns_to_tensors):
columns_to_tensors[self] = columns_to_tensors[self.column_name]
def _deep_embedding_lookup_arguments(self, input_tensor):
return _DeepEmbeddingLookupArguments(
input_tensor=input_tensor,
weight_tensor=None,
vocab_size=self.size,
initializer=self.initializer,
combiner=self.combiner,
dimension=self.dimension,
shared_embedding_name=None,
hash_key=self.hash_key,
max_norm=None,
trainable=True)
@property
def _variable_shape(self):
return tensor_shape.TensorShape([self.dimension])
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
return _embeddings_from_arguments(
self, self._deep_embedding_lookup_arguments(inputs.get(self)),
weight_collections, trainable)
def _transform_feature(self, inputs):
return inputs.get(self.column_name)
@property
def _parse_example_spec(self):
return self.config
def scattered_embedding_column(column_name,
size,
dimension,
hash_key,
combiner="mean",
initializer=None):
"""Creates an embedding column of a sparse feature using parameter hashing.
  This is a useful shorthand when you have a sparse feature that you want to
  embed, but instead of a dedicated embedding table, each embedding dimension
  is looked up from a shared parameter vector using a different hash.
Specifically, the i-th embedding component of a value v is found by retrieving
an embedding weight whose index is a fingerprint of the pair (v,i).
An embedding column with sparse_column_with_hash_bucket such as
embedding_column(
sparse_column_with_hash_bucket(column_name, bucket_size),
dimension)
could be replaced by
scattered_embedding_column(
column_name,
size=bucket_size * dimension,
dimension=dimension,
hash_key=tf.contrib.layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)
  for the same number of embedding parameters. This can reduce the impact of
  collisions, at the cost of slower training.
Args:
column_name: A string defining sparse column name.
size: An integer specifying the number of parameters in the embedding layer.
dimension: An integer specifying dimension of the embedding.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp.
combiner: A string specifying how to reduce if there are multiple entries in
a single row. Currently "mean", "sqrtn" and "sum" are supported, with
"mean" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column:
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.compat.v1.truncated_normal_initializer` with mean 0 and standard
deviation 0.1.
Returns:
A _ScatteredEmbeddingColumn.
Raises:
ValueError: if dimension or size is not a positive integer; or if combiner
is not supported.
"""
if (dimension < 1) or (size < 1):
raise ValueError("Dimension and size must be greater than 0. "
"dimension: {}, size: {}, column_name: {}".format(
dimension, size, column_name))
if combiner not in ("mean", "sqrtn", "sum"):
raise ValueError("Combiner must be one of 'mean', 'sqrtn' or 'sum'. "
"combiner: {}, column_name: {}".format(
combiner, column_name))
return _ScatteredEmbeddingColumn(column_name, size, dimension, hash_key,
combiner, initializer)
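# Illustrative usage sketch (hypothetical helper, not part of the original
# module): a scattered (parameter-hashed) embedding replacing a conventional
# hash-bucket embedding, as described in the docstring above. The hash_key
# below is an arbitrary 64-bit constant chosen only for the example.
def _scattered_embedding_column_example():
  bucket_size = 100000
  dimension = 16
  return scattered_embedding_column(
      "query_words",
      size=bucket_size * dimension,  # same parameter budget as a dense table
      dimension=dimension,
      hash_key=0xDECAFCAFFE)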
def _reshape_real_valued_tensor(input_tensor, output_rank, column_name=None):
"""Reshaping logic for dense, numeric `Tensors`.
  The following rules are applied:
  1. If `output_rank > input_rank + 1`, raise a `ValueError`.
  2. If `output_rank == input_rank + 1`, expand `input_tensor` by one
     dimension and return.
  3. If `output_rank == input_rank`, return `input_tensor`.
  4. If `output_rank < input_rank`, flatten the inner dimensions of
     `input_tensor` and return a `Tensor` with rank `output_rank`.
Args:
input_tensor: a dense `Tensor` to be reshaped.
output_rank: the desired rank of the reshaped `Tensor`.
column_name: (optional) the name of the associated column. Used for error
messages.
Returns:
A `Tensor` with the same entries as `input_tensor` and rank `output_rank`.
Raises:
ValueError: if `output_rank > input_rank + 1`.
"""
input_rank = input_tensor.get_shape().ndims
if input_rank is not None:
if output_rank > input_rank + 1:
error_string = ("Rank of input Tensor ({}) should be the same as "
"output_rank ({}). For example, sequence data should "
"typically be 3 dimensional (rank 3) while non-sequence "
"data is typically 2 dimensional (rank 2).".format(
input_rank, output_rank))
if column_name is not None:
        error_string = (
            "Error while processing column {}. ".format(column_name) +
            error_string)
raise ValueError(error_string)
if output_rank == input_rank + 1:
logging.warning(
"Rank of input Tensor ({}) should be the same as output_rank ({}) "
"for column. Will attempt to expand dims. It is highly recommended "
"that you resize your input, as this behavior may change.".format(
input_rank, output_rank))
return array_ops.expand_dims(input_tensor, -1, name="expand_dims")
if output_rank == input_rank:
return input_tensor
# Here, either `input_rank` is unknown or it is greater than `output_rank`.
return layers._inner_flatten(input_tensor, output_rank) # pylint: disable=protected-access
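# Illustrative sketch of the reshaping rules above (hypothetical helper, not
# part of the original module): a rank-3 tensor is flattened to rank 2 and a
# rank-1 tensor is expanded to rank 2.
def _reshape_real_valued_tensor_example():
  rank3 = array_ops.ones([2, 3, 4])
  # output_rank < input_rank: inner dimensions are flattened -> shape [2, 12].
  flattened = _reshape_real_valued_tensor(rank3, output_rank=2)
  rank1 = array_ops.ones([5])
  # output_rank == input_rank + 1: a trailing dim is added -> shape [5, 1].
  expanded = _reshape_real_valued_tensor(rank1, output_rank=2)
  return flattened, expanded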
class _RealValuedVarLenColumn(
_FeatureColumn,
collections.namedtuple(
"_RealValuedVarLenColumn",
["column_name", "default_value", "dtype", "normalizer", "is_sparse"])):
"""Represents a real valued feature column for variable length Features.
Instances of this class are immutable.
  If is_sparse=False, the dictionary returned by InputBuilder contains a
  ("column_name", Tensor) pair with a Tensor shape of (batch_size, None), where
  entries are padded to the longest sequence in the batch. If is_sparse=True,
  the dictionary contains a ("column_name", SparseTensor) pair instead with
  shape inferred after parsing.
"""
@property
def name(self):
return self.column_name
@property
def config(self):
if self.is_sparse:
return {self.column_name: parsing_ops.VarLenFeature(self.dtype)}
else:
return {
self.column_name:
parsing_ops.FixedLenSequenceFeature(
[],
self.dtype,
allow_missing=True,
default_value=self.default_value)
}
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return self._key_without_properties(["normalizer"])
@property
def normalizer_fn(self):
"""Returns the function used to normalize the column."""
return self.normalizer
def _normalized_input_tensor(self, input_tensor):
"""Returns the input tensor after custom normalization is applied."""
if self.normalizer is None:
return input_tensor
if self.is_sparse:
return sparse_tensor_py.SparseTensor(input_tensor.indices,
self.normalizer(input_tensor.values),
input_tensor.dense_shape)
else:
return self.normalizer(input_tensor)
def insert_transformed_feature(self, columns_to_tensors):
"""Apply transformation and inserts it into columns_to_tensors.
Args:
columns_to_tensors: A mapping from feature columns to tensors. 'string'
key means a base feature (not-transformed). It can have _FeatureColumn
as a key too. That means that _FeatureColumn is already transformed.
"""
# Transform the input tensor according to the normalizer function.
input_tensor = self._normalized_input_tensor(columns_to_tensors[self.name])
columns_to_tensors[self] = math_ops.cast(input_tensor, dtypes.float32)
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
return _reshape_real_valued_tensor(
self._to_dense_tensor(input_tensor), output_rank, self.name)
def _to_dense_tensor(self, input_tensor):
if not self.is_sparse:
return input_tensor
raise ValueError("Set is_sparse to False if you want a dense Tensor for "
"column_name: {}".format(self.name))
@experimental
def _real_valued_var_len_column(column_name,
default_value=None,
dtype=dtypes.float32,
normalizer=None,
is_sparse=False):
"""Creates a `_RealValuedVarLenColumn` for variable-length numeric data.
  Note that this is not integrated with any of the DNNEstimators, except the
  RNN-based ones: DynamicRNNEstimator and StateSavingRNNEstimator.
It can either create a parsing config for a SparseTensor (with is_sparse=True)
or a padded Tensor.
The (dense_)shape of the result will be [batch_size, None], which can be used
with is_sparse=False as input into an RNN (see DynamicRNNEstimator or
StateSavingRNNEstimator) or with is_sparse=True as input into a tree (see
gtflow).
Use real_valued_column if the Feature has a fixed length. Use some
SparseColumn for columns to be embedded / one-hot-encoded.
Args:
column_name: A string defining real valued column name.
default_value: A scalar value compatible with dtype. Needs to be specified
if is_sparse=False.
dtype: Defines the type of values. Default value is tf.float32. Needs to be
convertible to tf.float32.
normalizer: If not None, a function that can be used to normalize the value
of the real valued column after default_value is applied for parsing.
Normalizer function takes the input tensor as its argument, and returns
      the output tensor. (e.g. lambda x: (x - 3.0) / 4.2). Note that for
      is_sparse=True, the normalizer will be run on the values of the
      `SparseTensor`.
is_sparse: A boolean defining whether to create a SparseTensor or a Tensor.
Returns:
    A `_RealValuedVarLenColumn`.
Raises:
TypeError: if default_value is not a scalar value compatible with dtype.
TypeError: if dtype is not convertible to tf.float32.
ValueError: if default_value is None and is_sparse is False.
"""
if not (dtype.is_integer or dtype.is_floating):
raise TypeError("dtype must be convertible to float. "
"dtype: {}, column_name: {}".format(dtype, column_name))
if default_value is None and not is_sparse:
raise ValueError("default_value must be provided when is_sparse=False to "
"parse a padded Tensor. "
"column_name: {}".format(column_name))
if isinstance(default_value, list):
raise ValueError(
"Only scalar default value. default_value: {}, column_name: {}".format(
default_value, column_name))
if default_value is not None:
if dtype.is_integer:
default_value = int(default_value)
elif dtype.is_floating:
default_value = float(default_value)
return _RealValuedVarLenColumn(column_name, default_value, dtype, normalizer,
is_sparse)
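# Illustrative usage sketch (hypothetical helper, not part of the original
# module): the two parsing modes of `_real_valued_var_len_column`.
def _real_valued_var_len_column_example():
  # Padded dense parsing: missing values are filled with default_value, and
  # the parse spec is a FixedLenSequenceFeature with allow_missing=True.
  padded_prices = _real_valued_var_len_column("prices", default_value=0.0,
                                              is_sparse=False)
  # Sparse parsing: the parse spec is a VarLenFeature and no default is needed.
  sparse_prices = _real_valued_var_len_column("prices_sparse", is_sparse=True)
  return padded_prices, sparse_prices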
class _RealValuedColumn(
_FeatureColumn,
fc_core._DenseColumn, # pylint: disable=protected-access
collections.namedtuple(
"_RealValuedColumn",
["column_name", "dimension", "default_value", "dtype", "normalizer"])):
"""Represents a real valued feature column also known as continuous features.
Instances of this class are immutable. The dictionary returned by InputBuilder
contains a ("column_name", Tensor) pair with a Tensor shape of
(batch_size, dimension).
"""
def __new__(cls, column_name, dimension, default_value, dtype, normalizer):
if default_value is not None:
default_value = tuple(default_value)
return super(_RealValuedColumn,
cls).__new__(cls, column_name, dimension, default_value, dtype,
normalizer)
@property
def name(self):
return self.column_name
@property
def config(self):
default_value = self.default_value
if default_value is not None:
default_value = list(default_value)
return {
self.column_name:
parsing_ops.FixedLenFeature([self.dimension], self.dtype,
default_value)
}
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return self._key_without_properties(["normalizer"])
@property
def normalizer_fn(self):
"""Returns the function used to normalize the column."""
return self.normalizer
def _normalized_input_tensor(self, input_tensor):
"""Returns the input tensor after custom normalization is applied."""
return (self.normalizer(input_tensor)
if self.normalizer is not None else input_tensor)
def insert_transformed_feature(self, columns_to_tensors):
"""Apply transformation and inserts it into columns_to_tensors.
Args:
columns_to_tensors: A mapping from feature columns to tensors. 'string'
key means a base feature (not-transformed). It can have _FeatureColumn
as a key too. That means that _FeatureColumn is already transformed.
"""
# Transform the input tensor according to the normalizer function.
input_tensor = self._normalized_input_tensor(columns_to_tensors[self.name])
columns_to_tensors[self] = math_ops.cast(input_tensor, dtypes.float32)
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
input_tensor = self._to_dense_tensor(input_tensor)
if input_tensor.dtype != dtypes.float32:
input_tensor = math_ops.cast(input_tensor, dtypes.float32)
return _reshape_real_valued_tensor(input_tensor, output_rank, self.name)
def _to_dense_tensor(self, input_tensor):
return input_tensor
@property
def _variable_shape(self):
return tensor_shape.TensorShape([self.dimension])
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
return inputs.get(self)
def _transform_feature(self, inputs):
return math_ops.cast(
self._normalized_input_tensor(inputs.get(self.name)), dtypes.float32)
@property
def _parse_example_spec(self):
return self.config
def real_valued_column(column_name,
dimension=1,
default_value=None,
dtype=dtypes.float32,
normalizer=None):
"""Creates a `_RealValuedColumn` for dense numeric data.
Args:
column_name: A string defining real valued column name.
dimension: An integer specifying dimension of the real valued column. The
default is 1.
default_value: A single value compatible with dtype or a list of values
compatible with dtype which the column takes on during tf.Example parsing
if data is missing. When dimension is not None, a default value of None
will cause tf.io.parse_example to fail if an example does not contain this
column. If a single value is provided, the same value will be applied as
the default value for every dimension. If a list of values is provided,
the length of the list should be equal to the value of `dimension`. Only
scalar default value is supported in case dimension is not specified.
dtype: defines the type of values. Default value is tf.float32. Must be a
non-quantized, real integer or floating point type.
normalizer: If not None, a function that can be used to normalize the value
of the real valued column after default_value is applied for parsing.
Normalizer function takes the input tensor as its argument, and returns
the output tensor. (e.g. lambda x: (x - 3.0) / 4.2). Note that for
variable length columns, the normalizer should expect an input_tensor of
type `SparseTensor`.
Returns:
A _RealValuedColumn.
Raises:
TypeError: if dimension is not an int
ValueError: if dimension is not a positive integer
TypeError: if default_value is a list but its length is not equal to the
value of `dimension`.
TypeError: if default_value is not compatible with dtype.
ValueError: if dtype is not convertible to tf.float32.
"""
if dimension is None:
raise TypeError("dimension must be an integer. Use the "
"_real_valued_var_len_column for variable length features."
"dimension: {}, column_name: {}".format(
dimension, column_name))
if not isinstance(dimension, int):
raise TypeError("dimension must be an integer. "
"dimension: {}, column_name: {}".format(
dimension, column_name))
if dimension < 1:
raise ValueError("dimension must be greater than 0. "
"dimension: {}, column_name: {}".format(
dimension, column_name))
if not (dtype.is_integer or dtype.is_floating):
raise ValueError("dtype must be convertible to float. "
"dtype: {}, column_name: {}".format(dtype, column_name))
if default_value is None:
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if isinstance(default_value, int):
if dtype.is_integer:
default_value = ([default_value for _ in range(dimension)]
if dimension else [default_value])
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if dtype.is_floating:
default_value = float(default_value)
default_value = ([default_value for _ in range(dimension)]
if dimension else [default_value])
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if isinstance(default_value, float):
if dtype.is_floating and (not dtype.is_integer):
default_value = ([default_value for _ in range(dimension)]
if dimension else [default_value])
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if isinstance(default_value, list):
if len(default_value) != dimension:
raise ValueError(
"The length of default_value must be equal to dimension. "
"default_value: {}, dimension: {}, column_name: {}".format(
default_value, dimension, column_name))
# Check if the values in the list are all integers or are convertible to
# floats.
is_list_all_int = True
is_list_all_float = True
for v in default_value:
if not isinstance(v, int):
is_list_all_int = False
if not (isinstance(v, float) or isinstance(v, int)):
is_list_all_float = False
if is_list_all_int:
if dtype.is_integer:
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
elif dtype.is_floating:
default_value = [float(v) for v in default_value]
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if is_list_all_float:
if dtype.is_floating and (not dtype.is_integer):
default_value = [float(v) for v in default_value]
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
raise TypeError("default_value must be compatible with dtype. "
"default_value: {}, dtype: {}, column_name: {}".format(
default_value, dtype, column_name))
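# Illustrative usage sketch (hypothetical helper, not part of the original
# module): scalar and multi-dimensional real valued columns.
def _real_valued_column_example():
  # A scalar float feature; parsed as FixedLenFeature([1], tf.float32).
  age = real_valued_column("age")
  # A 784-dimensional feature with a per-dimension default of 0.0, so parsing
  # does not fail when the feature is missing from an Example.
  pixels = real_valued_column("pixels", dimension=784, default_value=0.0)
  return age, pixels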
class _BucketizedColumn(
_FeatureColumn,
fc_core._CategoricalColumn, # pylint: disable=protected-access
fc_core._DenseColumn, # pylint: disable=protected-access
collections.namedtuple("_BucketizedColumn",
["source_column", "boundaries"])):
"""Represents a bucketization transformation also known as binning.
Instances of this class are immutable. Values in `source_column` will be
bucketized based on `boundaries`.
For example, if the inputs are:
boundaries = [0, 10, 100]
source_column = [[-5], [150], [10], [0], [4], [19]]
then the bucketized feature will be:
output = [[0], [3], [2], [1], [1], [2]]
Attributes:
source_column: A _RealValuedColumn defining dense column.
boundaries: A list or tuple of floats specifying the boundaries. It has to
      be sorted. [a, b, c] defines the following buckets: (-inf., a), [a, b), [b,
c), [c, inf.)
Raises:
ValueError: if 'boundaries' is empty or not sorted.
"""
def __new__(cls, source_column, boundaries):
if not isinstance(source_column, _RealValuedColumn):
raise TypeError("source_column must be an instance of _RealValuedColumn. "
"source_column: {}".format(source_column))
if source_column.dimension is None:
raise ValueError("source_column must have a defined dimension. "
"source_column: {}".format(source_column))
if (not isinstance(boundaries, list) and
not isinstance(boundaries, tuple)) or not boundaries:
raise ValueError("boundaries must be a non-empty list or tuple. "
"boundaries: {}".format(boundaries))
    # We allow bucket boundaries to be monotonically increasing
    # (i.e. a[i+1] >= a[i]). When two bucket boundaries are the same, we
# de-duplicate.
sanitized_boundaries = []
for i in range(len(boundaries) - 1):
if boundaries[i] == boundaries[i + 1]:
continue
elif boundaries[i] < boundaries[i + 1]:
sanitized_boundaries.append(boundaries[i])
else:
raise ValueError("boundaries must be a sorted list. "
"boundaries: {}".format(boundaries))
sanitized_boundaries.append(boundaries[len(boundaries) - 1])
return super(_BucketizedColumn, cls).__new__(cls, source_column,
tuple(sanitized_boundaries))
@property
def name(self):
return "{}_bucketized".format(self.source_column.name)
@property
def length(self):
"""Returns total number of buckets."""
return len(self.boundaries) + 1
@property
def config(self):
return self.source_column.config
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
if output_rank != 2:
raise ValueError("BucketizedColumn currently only supports output_rank=2")
return array_ops.reshape(
array_ops.one_hot(
math_ops.cast(input_tensor, dtypes.int64),
self.length,
1.,
0.,
name="one_hot"), [-1, self.length * self.source_column.dimension],
name="reshape")
def to_sparse_tensor(self, input_tensor):
"""Creates a SparseTensor from the bucketized Tensor."""
dimension = self.source_column.dimension
batch_size = array_ops.shape(input_tensor, name="shape")[0]
if dimension > 1:
i1 = array_ops.reshape(
array_ops.tile(
array_ops.expand_dims(
math_ops.range(0, batch_size), 1, name="expand_dims"),
[1, dimension],
name="tile"), [-1],
name="reshape")
i2 = array_ops.tile(
math_ops.range(0, dimension), [batch_size], name="tile")
# Flatten the bucket indices and unique them across dimensions
# E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
bucket_indices = array_ops.reshape(
input_tensor, [-1], name="reshape") + self.length * i2
else:
# Simpler indices when dimension=1
i1 = math_ops.range(0, batch_size)
i2 = array_ops.zeros([batch_size], dtype=dtypes.int32, name="zeros")
bucket_indices = array_ops.reshape(input_tensor, [-1], name="reshape")
indices = math_ops.cast(
array_ops.transpose(array_ops.stack((i1, i2))), dtypes.int64)
shape = math_ops.cast(
array_ops.stack([batch_size, dimension]), dtypes.int64)
sparse_id_values = sparse_tensor_py.SparseTensor(indices, bucket_indices,
shape)
return sparse_id_values
def _wide_embedding_lookup_arguments(self, input_tensor):
return _LinearEmbeddingLookupArguments(
input_tensor=self.to_sparse_tensor(input_tensor),
weight_tensor=None,
vocab_size=self.length * self.source_column.dimension,
initializer=init_ops.zeros_initializer(),
combiner="sum")
def _transform_feature(self, inputs):
"""Handles cross transformation."""
# Bucketize the source column.
return bucketization_op.bucketize(
inputs.get(self.source_column),
boundaries=list(self.boundaries),
name="bucketize")
def insert_transformed_feature(self, columns_to_tensors):
"""Handles sparse column to id conversion."""
columns_to_tensors[self] = self._transform_feature(
_LazyBuilderByColumnsToTensor(columns_to_tensors))
@property
def _parse_example_spec(self):
return self.config
@property
def _num_buckets(self):
return self.length * self.source_column.dimension
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
return fc_core._CategoricalColumn.IdWeightPair( # pylint: disable=protected-access
self.to_sparse_tensor(inputs.get(self)), None)
@property
def _variable_shape(self):
return tensor_shape.TensorShape(
[self.length * self.source_column.dimension])
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
return self._to_dnn_input_layer(
inputs.get(self), weight_collections, trainable)
def bucketized_column(source_column, boundaries):
"""Creates a _BucketizedColumn for discretizing dense input.
Args:
source_column: A _RealValuedColumn defining dense column.
boundaries: A list or tuple of floats specifying the boundaries. It has to
be sorted.
Returns:
A _BucketizedColumn.
Raises:
ValueError: if 'boundaries' is empty or not sorted.
"""
return _BucketizedColumn(source_column, boundaries)
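# Illustrative usage sketch (hypothetical helper, not part of the original
# module), mirroring the example in the `_BucketizedColumn` docstring above.
def _bucketized_column_example():
  price = real_valued_column("price")
  # boundaries [0, 10, 100] produce four buckets:
  #   (-inf, 0) -> 0, [0, 10) -> 1, [10, 100) -> 2, [100, inf) -> 3
  return bucketized_column(price, boundaries=[0., 10., 100.])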
class _CrossedColumn(
_FeatureColumn,
fc_core._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple("_CrossedColumn", [
"columns", "hash_bucket_size", "hash_key", "combiner",
"ckpt_to_load_from", "tensor_name_in_ckpt"
])):
"""Represents a cross transformation also known as conjunction or combination.
Instances of this class are immutable. It crosses given `columns`. Crossed
column output will be hashed to hash_bucket_size.
  Conceptually, the transformation can be thought of as:
Hash(cartesian product of features in columns) % `hash_bucket_size`
For example, if the columns are
SparseTensor referred by first column: shape = [2, 2]
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
    SparseTensor referred by second column: shape = [2, 1]
[0, 0]: "d"
[1, 0]: "e"
then crossed feature will look like:
shape = [2, 2]
[0, 0]: Hash64("d", Hash64("a")) % hash_bucket_size
[1, 0]: Hash64("e", Hash64("b")) % hash_bucket_size
[1, 1]: Hash64("e", Hash64("c")) % hash_bucket_size
Attributes:
columns: An iterable of _FeatureColumn. Items can be an instance of
_SparseColumn, _CrossedColumn, or _BucketizedColumn.
hash_bucket_size: An int that is > 1. The number of buckets.
combiner: A string specifying how to reduce if there are multiple entries in
a single row. Currently "mean", "sqrtn" and "sum" are supported, with
"sum" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column:
* "sum": do not normalize
* "mean": do l1 normalization
* "sqrtn": do l2 normalization
For more information: `tf.embedding_lookup_sparse`.
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
Raises:
TypeError: if all items in columns are not an instance of _SparseColumn,
_CrossedColumn, or _BucketizedColumn.
ValueError: if hash_bucket_size is not > 1 or len(columns) is not > 1. Also,
if only one of `ckpt_to_load_from` and `tensor_name_in_ckpt` is specified.
"""
@staticmethod
def _assert_is_crossable(column):
if isinstance(column, (_SparseColumn, _CrossedColumn, _BucketizedColumn)):
return
raise TypeError("columns must be a set of _SparseColumn, "
"_CrossedColumn, or _BucketizedColumn instances. "
"(column {} is a {})".format(column,
column.__class__.__name__))
def __new__(cls,
columns,
hash_bucket_size,
hash_key,
combiner="sum",
ckpt_to_load_from=None,
tensor_name_in_ckpt=None):
for column in columns:
_CrossedColumn._assert_is_crossable(column)
if len(columns) < 2:
raise ValueError("columns must contain at least 2 elements. "
"columns: {}".format(columns))
if hash_bucket_size < 2:
raise ValueError("hash_bucket_size must be at least 2. "
"hash_bucket_size: {}".format(hash_bucket_size))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError("Must specify both `ckpt_to_load_from` and "
"`tensor_name_in_ckpt` or none of them.")
sorted_columns = sorted([column for column in columns],
key=lambda column: column.name)
return super(_CrossedColumn,
cls).__new__(cls, tuple(sorted_columns), hash_bucket_size,
hash_key, combiner, ckpt_to_load_from,
tensor_name_in_ckpt)
@property
def name(self):
sorted_names = sorted([column.name for column in self.columns])
return "_X_".join(sorted_names)
@property
def config(self):
config = {}
for column in self.columns:
config.update(_get_feature_config(column))
return config
@property
def length(self):
"""Returns total number of buckets."""
return self.hash_bucket_size
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
def id_tensor(self, input_tensor):
"""Returns the id tensor from the given transformed input_tensor."""
return input_tensor
def weight_tensor(self, input_tensor):
"""Returns the weight tensor from the given transformed input_tensor."""
del input_tensor
return None
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
del input_tensor
del weight_collections
del trainable
del output_rank
raise ValueError("CrossedColumn is not supported in DNN. "
"Please use embedding_column. column: {}".format(self))
def _checkpoint_path(self):
if self.ckpt_to_load_from is not None:
return self.ckpt_to_load_from, self.tensor_name_in_ckpt
return None
def _wide_embedding_lookup_arguments(self, input_tensor):
return _LinearEmbeddingLookupArguments(
input_tensor=input_tensor,
weight_tensor=None,
vocab_size=self.length,
initializer=init_ops.zeros_initializer(),
combiner=self.combiner)
def _transform_feature(self, inputs):
"""Handles cross transformation."""
def _collect_leaf_level_columns(cross):
"""Collects base columns contained in the cross."""
leaf_level_columns = []
for c in cross.columns:
if isinstance(c, _CrossedColumn):
leaf_level_columns.extend(_collect_leaf_level_columns(c))
else:
leaf_level_columns.append(c)
return leaf_level_columns
feature_tensors = []
for c in _collect_leaf_level_columns(self):
if isinstance(c, _SparseColumn):
feature_tensors.append(inputs.get(c.name))
else:
if isinstance(c, _BucketizedColumn):
feature_tensors.append(c.to_sparse_tensor(inputs.get(c)))
else:
feature_tensors.append(inputs.get(c))
return sparse_feature_cross_op.sparse_feature_cross(
feature_tensors,
hashed_output=True,
num_buckets=self.hash_bucket_size,
hash_key=self.hash_key,
name="cross")
def insert_transformed_feature(self, columns_to_tensors):
"""Handles sparse column to id conversion."""
columns_to_tensors[self] = self._transform_feature(
_LazyBuilderByColumnsToTensor(columns_to_tensors))
@property
def _parse_example_spec(self):
return self.config
@property
def _num_buckets(self):
return self.length
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
return fc_core._CategoricalColumn.IdWeightPair(inputs.get(self), None) # pylint: disable=protected-access
class _LazyBuilderByColumnsToTensor(object):
def __init__(self, columns_to_tensors):
self._columns_to_tensors = columns_to_tensors
def get(self, key):
"""Gets the transformed feature column."""
if key in self._columns_to_tensors:
return self._columns_to_tensors[key]
if isinstance(key, str):
raise ValueError(
"features dictionary doesn't contain key ({})".format(key))
if not isinstance(key, _FeatureColumn):
raise TypeError('"key" must be either a "str" or "_FeatureColumn". '
"Provided: {}".format(key))
key.insert_transformed_feature(self._columns_to_tensors)
return self._columns_to_tensors[key]
def crossed_column(columns,
hash_bucket_size,
combiner="sum",
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
hash_key=None):
"""Creates a _CrossedColumn for performing feature crosses.
Args:
columns: An iterable of _FeatureColumn. Items can be an instance of
_SparseColumn, _CrossedColumn, or _BucketizedColumn.
hash_bucket_size: An int that is > 1. The number of buckets.
combiner: A string specifying how to reduce if there are multiple entries in
a single row. Currently "mean", "sqrtn" and "sum" are supported, with
"sum" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column:
* "sum": do not normalize
* "mean": do l1 normalization
* "sqrtn": do l2 normalization
For more information: `tf.embedding_lookup_sparse`.
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp
(optional).
Returns:
A _CrossedColumn.
Raises:
TypeError: if any item in columns is not an instance of _SparseColumn,
_CrossedColumn, or _BucketizedColumn, or
hash_bucket_size is not an int.
ValueError: if hash_bucket_size is not > 1 or
len(columns) is not > 1.
"""
return _CrossedColumn(
columns,
hash_bucket_size,
hash_key,
combiner=combiner,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt)
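# Illustrative usage sketch (hypothetical helper, not part of the original
# module): crossing two hashed sparse columns into a single hashed feature,
# typically consumed by a linear model. It assumes
# `sparse_column_with_hash_bucket`, defined earlier in this module.
def _crossed_column_example():
  country = sparse_column_with_hash_bucket("country", hash_bucket_size=100)
  language = sparse_column_with_hash_bucket("language", hash_bucket_size=100)
  # Each (country, language) pair is fingerprinted and hashed into 10000
  # buckets.
  return crossed_column([country, language], hash_bucket_size=10000)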
class DataFrameColumn(_FeatureColumn,
collections.namedtuple("DataFrameColumn",
["column_name", "series"])):
"""Represents a feature column produced from a `DataFrame`.
Instances of this class are immutable. A `DataFrame` column may be dense or
sparse, and may have any shape, with the constraint that dimension 0 is
batch_size.
Args:
column_name: a name for this column
series: a `Series` to be wrapped, which has already had its base features
substituted with `PredefinedSeries`.
"""
def __new__(cls, column_name, series):
return super(DataFrameColumn, cls).__new__(cls, column_name, series)
@property
def name(self):
return self.column_name
@property
def config(self):
return self.series.required_base_features()
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return self.name
def insert_transformed_feature(self, columns_to_tensors):
# The cache must already contain mappings from the expected base feature
# names to Tensors.
# Passing columns_to_tensors as the cache here means that multiple outputs
# of the transform will be cached, keyed by the repr of their associated
# TransformedSeries.
# The specific requested output ends up in columns_to_tensors twice: once
# keyed by the TransformedSeries repr, and once keyed by this
# DataFrameColumn instance.
columns_to_tensors[self] = self.series.build(columns_to_tensors)
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
if input_tensor.dtype != dtypes.float32:
input_tensor = math_ops.cast(input_tensor, dtypes.float32)
return _reshape_real_valued_tensor(input_tensor, output_rank, self.name)
def _to_dense_tensor(self, input_tensor):
return self._to_dnn_input_layer(input_tensor)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def _get_feature_config(feature_column):
"""Returns configuration for the base feature defined in feature_column."""
if not isinstance(feature_column, _FeatureColumn):
raise TypeError(
"feature_columns should only contain instances of _FeatureColumn. "
"Given column is {}".format(feature_column))
if isinstance(feature_column,
(_SparseColumn, _WeightedSparseColumn, _EmbeddingColumn,
_RealValuedColumn, _RealValuedVarLenColumn, _BucketizedColumn,
_CrossedColumn, _OneHotColumn, _ScatteredEmbeddingColumn)):
return feature_column.config
raise TypeError("Not supported _FeatureColumn type. "
"Given column is {}".format(feature_column))
def create_feature_spec_for_parsing(feature_columns):
"""Helper that prepares features config from input feature_columns.
The returned feature config can be used as arg 'features' in tf.parse_example.
Typical usage example:
```python
# Define features and transformations
feature_a = sparse_column_with_vocabulary_file(...)
feature_b = real_valued_column(...)
feature_c_bucketized = bucketized_column(real_valued_column("feature_c"), ...)
feature_a_x_feature_c = crossed_column(
columns=[feature_a, feature_c_bucketized], ...)
feature_columns = set(
[feature_b, feature_c_bucketized, feature_a_x_feature_c])
batch_examples = tf.io.parse_example(
serialized=serialized_examples,
features=create_feature_spec_for_parsing(feature_columns))
```
For the above example, create_feature_spec_for_parsing would return the dict:
{
"feature_a": parsing_ops.VarLenFeature(tf.string),
"feature_b": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
"feature_c": parsing_ops.FixedLenFeature([1], dtype=tf.float32)
}
Args:
feature_columns: An iterable containing all the feature columns. All items
should be instances of classes derived from _FeatureColumn, unless
feature_columns is a dict -- in which case, this should be true of all
values in the dict.
Returns:
A dict mapping feature keys to FixedLenFeature or VarLenFeature values.
"""
if isinstance(feature_columns, dict):
feature_columns = feature_columns.values()
features_config = {}
for column in feature_columns:
features_config.update(_get_feature_config(column))
return features_config
def _create_sequence_feature_spec_for_parsing(sequence_feature_columns,
allow_missing_by_default=False):
"""Prepares a feature spec for parsing `tf.SequenceExample`s.
Args:
sequence_feature_columns: an iterable containing all the feature columns.
All items should be instances of classes derived from `_FeatureColumn`.
allow_missing_by_default: whether to set `allow_missing=True` by default for
`FixedLenSequenceFeature`s.
Returns:
A dict mapping feature keys to `FixedLenSequenceFeature` or `VarLenFeature`.
"""
feature_spec = create_feature_spec_for_parsing(sequence_feature_columns)
sequence_feature_spec = {}
for key, feature in feature_spec.items():
if isinstance(feature, parsing_ops.VarLenFeature):
sequence_feature = feature
elif (isinstance(feature, parsing_ops.FixedLenFeature) or
isinstance(feature, parsing_ops.FixedLenSequenceFeature)):
default_is_set = feature.default_value is not None
if default_is_set:
logging.warning(
'Found default value {} for feature "{}". Ignoring this value and '
"setting `allow_missing=True` instead.".format(
feature.default_value, key))
sequence_feature = parsing_ops.FixedLenSequenceFeature(
shape=feature.shape,
dtype=feature.dtype,
allow_missing=(allow_missing_by_default or default_is_set))
else:
raise TypeError("Unsupported feature type: {}".format(
type(feature).__name__))
sequence_feature_spec[key] = sequence_feature
return sequence_feature_spec
def make_place_holder_tensors_for_base_features(feature_columns):
"""Returns placeholder tensors for inference.
Args:
feature_columns: An iterable containing all the feature columns. All items
should be instances of classes derived from _FeatureColumn.
Returns:
A dict mapping feature keys to SparseTensors (sparse columns) or
placeholder Tensors (dense columns).
"""
# Get dict mapping features to FixedLenFeature or VarLenFeature values.
dict_for_parse_example = create_feature_spec_for_parsing(feature_columns)
placeholders = {}
for column_name, column_type in dict_for_parse_example.items():
if isinstance(column_type, parsing_ops.VarLenFeature):
# Sparse placeholder for sparse tensors.
placeholders[column_name] = array_ops.sparse_placeholder(
column_type.dtype, name="Placeholder_{}".format(column_name))
else:
# Simple placeholder for dense tensors.
placeholders[column_name] = array_ops.placeholder(
column_type.dtype,
shape=(None, column_type.shape[0]),
name="Placeholder_{}".format(column_name))
return placeholders
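# Illustrative usage sketch (hypothetical helper, not part of the original
# module): building inference-time placeholders for the base features of a few
# columns defined with the helpers above.
def _make_place_holder_tensors_example():
  country = sparse_column_with_hash_bucket("country", hash_bucket_size=100)
  age = real_valued_column("age")
  # Returns {"country": a sparse string placeholder,
  #          "age": a dense float placeholder of shape [None, 1]}.
  return make_place_holder_tensors_for_base_features([country, age])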
class _SparseIdLookupConfig(
collections.namedtuple("_SparseIdLookupConfig", [
"vocabulary_file", "keys", "num_oov_buckets", "vocab_size",
"default_value"
])):
"""Defines lookup configuration for a sparse feature.
  An immutable object that defines the lookup table configuration used by
  tf.feature_to_id_v2.
Attributes:
vocabulary_file: The vocabulary filename. vocabulary_file cannot be combined
with keys.
keys: A 1-D string iterable that specifies the mapping of strings to
      indices. It means a feature in keys will map to its index in keys.
num_oov_buckets: The number of out-of-vocabulary buckets. If zero all out of
vocabulary features will be ignored.
vocab_size: Number of the elements in the vocabulary.
default_value: The value to use for out-of-vocabulary feature values.
Defaults to -1.
"""
def __new__(cls,
vocabulary_file=None,
keys=None,
num_oov_buckets=0,
vocab_size=None,
default_value=-1):
return super(_SparseIdLookupConfig,
cls).__new__(cls, vocabulary_file, keys, num_oov_buckets,
vocab_size, default_value)
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/feature_column.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for optimizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.layers.python.layers import optimizers as optimizers_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
def _setup_model():
x = array_ops.placeholder(dtypes.float32, [])
var = variable_scope.get_variable(
"test", [], initializer=init_ops.constant_initializer(10))
loss = math_ops.abs(var * x)
global_step = variable_scope.get_variable(
"global_step", [],
trainable=False,
dtype=dtypes.int64,
initializer=init_ops.constant_initializer(
0, dtype=dtypes.int64))
return x, var, loss, global_step
def _no_op_learning_rate_decay_fn(lr, global_step):
assert lr is not None
assert global_step is not None
return lr
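# A minimal sketch (hypothetical, not one of the original tests) of the typical
# `optimize_loss` wiring exercised by the test cases below: a loss, a global
# step, the no-op decay function defined above, and gradient clipping.
def _optimize_loss_example():
  x, var, loss, global_step = _setup_model()
  train_op = optimizers_lib.optimize_loss(
      loss,
      global_step,
      learning_rate=0.1,
      optimizer="SGD",
      learning_rate_decay_fn=_no_op_learning_rate_decay_fn,
      clip_gradients=1.0)
  return x, var, train_op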
class OptimizersTest(test.TestCase):
def testSGDOptimizer(self):
optimizers = [
"SGD", gradient_descent.GradientDescentOptimizer,
gradient_descent.GradientDescentOptimizer(learning_rate=0.1),
lambda lr: gradient_descent.GradientDescentOptimizer(learning_rate=lr),
"Momentum"
]
for optimizer in optimizers:
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
x, var, loss, global_step = _setup_model()
train = optimizers_lib.optimize_loss(
loss, global_step, learning_rate=0.1, optimizer=optimizer)
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
self.assertEqual(var_value, 9.5)
self.assertEqual(global_step_value, 1)
def testNoLrCallable(self):
def optimizer_fn():
return gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
x, var, loss, global_step = _setup_model()
train = optimizers_lib.optimize_loss(
loss, global_step, learning_rate=None, optimizer=optimizer_fn)
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
self.assertEqual(var_value, 9.5)
self.assertEqual(global_step_value, 1)
def testWrongOptimizer(self):
optimizers = ["blah", variables.Variable, object(), lambda x: None]
for optimizer in optimizers:
with ops.Graph().as_default() as g:
with self.session(graph=g):
_, _, loss, global_step = _setup_model()
with self.assertRaises(ValueError):
optimizers_lib.optimize_loss(
loss, global_step, learning_rate=0.1, optimizer=optimizer)
def testBadSummaries(self):
with ops.Graph().as_default() as g, self.session(graph=g):
_, _, loss, global_step = _setup_model()
with self.assertRaises(ValueError):
optimizers_lib.optimize_loss(
loss, global_step, learning_rate=0.1, optimizer="SGD",
summaries=["loss", "bad_summary"])
def testInvalidLoss(self):
with ops.Graph().as_default() as g, self.session(graph=g):
_, _, _, global_step = _setup_model()
with self.assertRaises(ValueError):
optimizers_lib.optimize_loss(
None, global_step, learning_rate=0.1, optimizer="SGD")
with self.assertRaises(ValueError):
optimizers_lib.optimize_loss(
[[1.0]], global_step, learning_rate=0.1, optimizer="SGD")
def testInvalidGlobalStep(self):
with ops.Graph().as_default() as g, self.session(graph=g):
x = array_ops.placeholder(dtypes.float32, [])
var = variable_scope.get_variable(
"test", [], initializer=init_ops.constant_initializer(10))
loss = math_ops.abs(var * x)
with self.assertRaises(AttributeError):
optimizers_lib.optimize_loss(
loss,
global_step=constant_op.constant(
43, dtype=dtypes.int64),
learning_rate=0.1,
optimizer="SGD")
with self.assertRaises(TypeError):
optimizers_lib.optimize_loss(
loss,
global_step=variable_scope.get_variable(
"global_step", [],
trainable=False,
dtype=dtypes.float64,
initializer=init_ops.constant_initializer(
0.0, dtype=dtypes.float64)),
learning_rate=0.1,
optimizer="SGD")
with self.assertRaises(ValueError):
optimizers_lib.optimize_loss(
loss,
global_step=variable_scope.get_variable(
"global_step", [1],
trainable=False,
dtype=dtypes.int64,
initializer=init_ops.constant_initializer(
[0], dtype=dtypes.int64)),
learning_rate=0.1,
optimizer="SGD")
def testInvalidLearningRate(self):
with ops.Graph().as_default() as g, self.session(graph=g):
_, _, loss, global_step = _setup_model()
with self.assertRaises(ValueError):
optimizers_lib.optimize_loss(
loss, global_step, learning_rate=-0.1, optimizer="SGD")
def testGradientNoise(self):
random_seed.set_random_seed(42)
with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
gradient_noise_scale=10.0)
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
      # Due to randomness, the following number may change if the graph differs.
self.assertAlmostEqual(var_value, 9.86912, 4)
self.assertEqual(global_step_value, 1)
def testGradientNoiseWithClipping(self):
random_seed.set_random_seed(42)
with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
gradient_noise_scale=10.0,
clip_gradients=10.0)
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
self.assertAlmostEqual(var_value, 9.86912, 4)
self.assertEqual(global_step_value, 1)
def testGradientClip(self):
with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
clip_gradients=0.1)
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
self.assertAlmostEqual(var_value, 9.98999, 4)
self.assertEqual(global_step_value, 1)
def testAdaptiveGradientClip(self):
with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
clip_gradients = optimizers_lib.adaptive_clipping_fn()
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
clip_gradients=clip_gradients)
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
self.assertAlmostEqual(var_value, 9.8916, 4)
self.assertEqual(global_step_value, 1)
var_count = 0
for var in variables.global_variables():
if var.name.startswith("OptimizeLoss/AdaptiveMaxNorm"):
var_count += 1
self.assertEqual(2, var_count)
def testGradientMultiply(self):
with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
gradient_multipliers={var: 7.})
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
# var(0) = 10, x = 5, var(0)/dx = 5,
# var(1) = var(0) - learning_rate * gradient_multiplier * var(0)/dx
self.assertAlmostEqual(var_value, 6.5, 4)
self.assertEqual(global_step_value, 1)
def testGradientMultiplyInt32Tensor(self):
with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
v = array_ops.placeholder(dtypes.float32, [])
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
gradient_multipliers={var: v})
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5, v: 7.})
var_value, global_step_value = session.run([var, global_step])
# var(0) = 10, x = 5, var(0)/dx = 5,
# var(1) = var(0) - learning_rate * gradient_multiplier * var(0)/dx
self.assertAlmostEqual(var_value, 6.5, 4)
self.assertEqual(global_step_value, 1)
def testGradientMultiplyInt64Tensor(self):
with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
v = array_ops.placeholder(dtypes.float64, [])
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
gradient_multipliers={var: v})
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5, v: 7.})
var_value, global_step_value = session.run([var, global_step])
# var(0) = 10, x = 5, var(0)/dx = 5,
# var(1) = var(0) - learning_rate * gradient_multiplier * var(0)/dx
self.assertAlmostEqual(var_value, 6.5, 4)
self.assertEqual(global_step_value, 1)
def testIgnoreVariablesWithNoGradients(self):
_, _, loss, global_step = _setup_model()
unused_variable = variable_scope.get_variable("ignore_me", [])
optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
gradient_noise_scale=10.0,
gradient_multipliers={unused_variable: 1.},
clip_gradients=10.0)
def testNoGlobalStep(self):
optimizers = [
"SGD", gradient_descent.GradientDescentOptimizer,
gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
]
for optimizer in optimizers:
with ops.Graph().as_default() as g, self.session(graph=g) as session:
x = array_ops.placeholder(dtypes.float32, [])
var = variable_scope.get_variable(
"test", [], initializer=init_ops.constant_initializer(10))
loss = math_ops.abs(var * x)
update_var = variable_scope.get_variable(
"update", [], initializer=init_ops.constant_initializer(10))
update_op = state_ops.assign(update_var, 20)
train = optimizers_lib.optimize_loss(
loss,
global_step=None,
learning_rate=0.1,
optimizer=optimizer,
update_ops=[update_op])
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
self.assertEqual(9.5, var.eval())
self.assertEqual(20, update_var.eval())
def testNoGlobalStepWithDecay(self):
optimizers = [
"SGD", gradient_descent.GradientDescentOptimizer,
gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
]
for optimizer in optimizers:
with ops.Graph().as_default() as g, self.session(graph=g):
x = array_ops.placeholder(dtypes.float32, [])
var = variable_scope.get_variable(
"test", [], initializer=init_ops.constant_initializer(10))
loss = math_ops.abs(var * x)
update_var = variable_scope.get_variable(
"update", [], initializer=init_ops.constant_initializer(10))
update_op = state_ops.assign(update_var, 20)
with self.assertRaisesRegexp(
ValueError, "global_step is required for learning_rate_decay_fn"):
optimizers_lib.optimize_loss(
loss,
global_step=None,
learning_rate=0.1,
learning_rate_decay_fn=_no_op_learning_rate_decay_fn,
optimizer=optimizer,
update_ops=[update_op])
def testNoGlobalStepArg(self):
optimizers = [
"SGD", gradient_descent.GradientDescentOptimizer,
gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
]
for optimizer in optimizers:
with ops.Graph().as_default() as g, self.session(graph=g) as session:
x, var, loss, global_step = _setup_model()
update_var = variable_scope.get_variable(
"update", [], initializer=init_ops.constant_initializer(10))
update_op = state_ops.assign(update_var, 20)
train = optimizers_lib.optimize_loss(
loss,
global_step=None,
learning_rate=0.1,
optimizer=optimizer,
update_ops=[update_op])
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
self.assertEqual(9.5, var.eval())
self.assertEqual(20, update_var.eval())
self.assertEqual(1, global_step.eval())
def testUpdateOp(self):
optimizers = [
"SGD", gradient_descent.GradientDescentOptimizer,
gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
]
for optimizer in optimizers:
with ops.Graph().as_default() as g, self.session(graph=g) as session:
x, var, loss, global_step = _setup_model()
update_var = variable_scope.get_variable(
"update", [], initializer=init_ops.constant_initializer(10))
update_op = state_ops.assign(update_var, 20)
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer=optimizer,
update_ops=[update_op])
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
self.assertEqual(9.5, var.eval())
self.assertEqual(20, update_var.eval())
self.assertEqual(1, global_step.eval())
def testUpdateOpNoIncrementGlobalStep(self):
optimizers = [
"SGD", gradient_descent.GradientDescentOptimizer,
gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
]
for optimizer in optimizers:
with ops.Graph().as_default() as g, self.session(graph=g) as session:
x, var, loss, global_step = _setup_model()
update_var = variable_scope.get_variable(
"update", [], initializer=init_ops.constant_initializer(10))
update_op = state_ops.assign(update_var, 20)
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer=optimizer,
update_ops=[update_op],
increment_global_step=False)
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
self.assertEqual(9.5, var.eval())
self.assertEqual(20, update_var.eval())
self.assertEqual(0, global_step.eval())
def testUpdateOpWithNoOpDecay(self):
optimizers = [
"SGD", gradient_descent.GradientDescentOptimizer,
gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
]
for optimizer in optimizers:
with ops.Graph().as_default() as g, self.session(graph=g) as session:
x, var, loss, global_step = _setup_model()
update_var = variable_scope.get_variable(
"update", [], initializer=init_ops.constant_initializer(10))
update_op = state_ops.assign(update_var, 20)
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
learning_rate_decay_fn=_no_op_learning_rate_decay_fn,
optimizer=optimizer,
update_ops=[update_op])
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
self.assertEqual(9.5, var.eval())
self.assertEqual(20, update_var.eval())
self.assertEqual(1, global_step.eval())
def testUpdateOpFromCollection(self):
optimizers = [
"SGD", gradient_descent.GradientDescentOptimizer,
gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
]
for optimizer in optimizers:
with ops.Graph().as_default() as g, self.session(graph=g) as session:
x, var, loss, global_step = _setup_model()
update_var = variable_scope.get_variable(
"update", [], initializer=init_ops.constant_initializer(10))
update_op = state_ops.assign(update_var, 20)
ops.add_to_collection(ops.GraphKeys.UPDATE_OPS, update_op)
train = optimizers_lib.optimize_loss(
loss, global_step, learning_rate=0.1, optimizer=optimizer)
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
var_value, update_var_value, global_step_value = session.run(
[var, update_var, global_step])
self.assertEqual(var_value, 9.5)
self.assertEqual(update_var_value, 20)
self.assertEqual(global_step_value, 1)
class AdaptiveClipping(test.TestCase):
def testAverages(self):
with self.cached_session() as session:
scale = 2.
grad = array_ops.ones([3, 4]) * scale
log_norm = np.log(np.sqrt(scale**2 * grad.get_shape().num_elements()))
grads_and_vars = [(grad, grad)]
grads_and_vars = optimizers_lib.adaptive_clipping_fn(
decay=0.5)(grads_and_vars)
var_dict = {}
for var in variables.global_variables():
if var.name.startswith("AdaptiveMaxNorm"):
var_dict[var.name.split(":")[0]] = var
self.assertEqual(2, len(var_dict))
moving_mean = var_dict["AdaptiveMaxNorm/mean"]
moving_sq_mean = var_dict["AdaptiveMaxNorm/sq_mean"]
variables.global_variables_initializer().run()
mean, sq_mean = session.run([moving_mean, moving_sq_mean])
self.assertEqual([0], mean)
self.assertEqual([0], sq_mean)
for i in range(20):
mean, sq_mean, _ = session.run(
[moving_mean, moving_sq_mean, grads_and_vars[0][0]])
if i == 0:
self.assertLess(mean, 0.9 * log_norm)
self.assertLess(sq_mean, 0.9 * log_norm**2)
self.assertAlmostEqual(float(mean), log_norm, places=4)
self.assertAlmostEqual(float(sq_mean), log_norm**2, places=4)
def testClip(self):
with self.cached_session() as session:
spike = 1000.
multiplier = array_ops.placeholder(dtypes.float32, [], "multiplier")
step = array_ops.placeholder(dtypes.int32, [], "step")
grad = array_ops.ones([3, 4]) * multiplier
grads_and_vars = [(grad, grad)]
grads_and_vars = optimizers_lib.adaptive_clipping_fn(
decay=0.9, global_step=step)(grads_and_vars)
variables.global_variables_initializer().run()
def run(scale, i):
return session.run(grads_and_vars[0][0],
feed_dict={multiplier: scale,
step: i})
for i in range(20):
scale = [1., -2.][i % 2]
clipped_grad = run(scale, i)
if i > 3:
self.assertAllClose(np.ones(clipped_grad.shape) * scale, clipped_grad)
# assert that the spike will have low influence.
clipped_grad = run(spike, 20)
self.assertTrue((clipped_grad < 25.).all())
# assert that a repeated spike will converge to this new value.
for i in range(10):
clipped_grad = run(spike, i + 21)
self.assertAllClose(np.ones(clipped_grad.shape) * spike, clipped_grad)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/optimizers_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.layers.python.layers.encoders."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import encoders
from tensorflow.contrib.layers.python.ops import sparse_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _get_const_var(name, shape, value):
return variable_scope.get_variable(
name, shape, initializer=init_ops.constant_initializer(value))
class EncodersTest(test.TestCase):
def testBowEncoderSparse(self):
with self.cached_session() as sess:
docs = [[0, 1], [2, 3]]
enc = encoders.bow_encoder(docs, 4, 3)
sess.run(variables.global_variables_initializer())
self.assertAllEqual([2, 3], enc.eval().shape)
def testBowEncoderSparseTensor(self):
with self.cached_session() as sess:
docs = [[0, 1], [2, 3]]
sparse_docs = sparse_ops.dense_to_sparse_tensor(docs)
enc = encoders.bow_encoder(sparse_docs, 4, 3)
sess.run(variables.global_variables_initializer())
self.assertAllEqual([2, 3], enc.eval().shape)
def testBowEncoderSparseEmptyRow(self):
with self.cached_session() as sess:
docs = [[0, 1], [2, 3], [0, 0]]
enc = encoders.bow_encoder(docs, 4, 5)
sess.run(variables.global_variables_initializer())
self.assertAllEqual([3, 5], enc.eval().shape)
def testBowEncoderDense(self):
with self.cached_session() as sess:
docs = [[0, 1], [2, 3], [0, 0], [0, 0]]
enc = encoders.bow_encoder(docs, 4, 3, sparse_lookup=False)
sess.run(variables.global_variables_initializer())
self.assertAllEqual([4, 3], enc.eval().shape)
def testBowEncoderSparseTensorDenseLookup(self):
with self.cached_session():
docs = [[0, 1]]
sparse_docs = sparse_ops.dense_to_sparse_tensor(docs)
with self.assertRaises(TypeError):
encoders.bow_encoder(sparse_docs, 4, 3, sparse_lookup=False)
def testBowEncodersSharingEmbeddings(self):
with self.cached_session() as sess:
docs = [[0, 1], [2, 3]]
enc_1 = encoders.bow_encoder(docs, 4, 3, scope='test')
enc_2 = encoders.bow_encoder(docs, 4, 3, scope='test', reuse=True)
sess.run(variables.global_variables_initializer())
avg_1, avg_2 = sess.run([enc_1, enc_2])
self.assertAllEqual(avg_1, avg_2)
def testBowEncodersSharingEmbeddingsInheritedScopes(self):
with self.cached_session() as sess:
docs = [[0, 1], [2, 3]]
with variable_scope.variable_scope('test'):
enc_1 = encoders.bow_encoder(docs, 4, 3)
with variable_scope.variable_scope('test', reuse=True):
enc_2 = encoders.bow_encoder(docs, 4, 3)
sess.run(variables.global_variables_initializer())
avg_1, avg_2 = sess.run([enc_1, enc_2])
self.assertAllEqual(avg_1, avg_2)
def testBowEncodersSharingEmbeddingsSharedScope(self):
with self.cached_session() as sess:
docs = [[0, 1], [2, 3]]
enc_1 = encoders.bow_encoder(docs, 4, 3, scope='bow')
variable_scope.get_variable_scope().reuse_variables()
enc_2 = encoders.bow_encoder(docs, 4, 3, scope='bow')
sess.run(variables.global_variables_initializer())
avg_1, avg_2 = sess.run([enc_1, enc_2])
self.assertAllEqual(avg_1, avg_2)
def testBowEncoderReuseEmbeddingsVariable(self):
with self.cached_session() as sess:
docs = [[1, 1], [2, 3]]
with variable_scope.variable_scope('test'):
v = _get_const_var('embeddings', (4, 3),
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]])
self.assertEqual(v.name, 'test/embeddings:0')
enc = encoders.bow_encoder(docs, 4, 3, scope='test', reuse=True)
sess.run(variables.global_variables_initializer())
self.assertAllClose([[3., 4., 5.], [7.5, 8.5, 9.5]], enc.eval())
def testEmbedSequence(self):
with self.cached_session() as sess:
docs = [[1, 1], [2, 3]]
with variable_scope.variable_scope('test'):
v = _get_const_var('embeddings', (4, 3),
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]])
self.assertEqual(v.name, 'test/embeddings:0')
emb = encoders.embed_sequence(docs, 4, 3, scope='test', reuse=True)
sess.run(variables.global_variables_initializer())
self.assertAllClose(
[[[3., 4., 5.], [3., 4., 5.]], [[6., 7., 8.], [9., 10., 11.]]],
emb.eval())
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/encoders_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RevBlock."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.layers.python.layers import rev_block_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.layers import convolutional
from tensorflow.python.layers import core as core_layers
from tensorflow.python.layers import normalization as normalization_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class RevBlockTest(test.TestCase):
CHANNELS = 8
NUM_LAYERS = 4
BATCH_SIZE = 16
def testForwardBackward(self):
def f(x):
return core_layers.dense(x, self.CHANNELS // 2, use_bias=True)
def g(x):
return core_layers.dense(x, self.CHANNELS // 2, use_bias=True)
x = random_ops.random_uniform(
[self.BATCH_SIZE, self.CHANNELS], dtype=dtypes.float32)
x1, x2 = array_ops.split(x, 2, axis=-1)
block = rev_block_lib.RevBlock(f, g, num_layers=3)
y1, y2 = block.forward(x1, x2)
x1_inv, x2_inv = block.backward(y1, y2)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
x1, x2, x1_inv, x2_inv = sess.run([x1, x2, x1_inv, x2_inv])
self.assertAllClose(x1, x1_inv, atol=1e-5)
self.assertAllClose(x2, x2_inv, atol=1e-5)
def testBackwardForward(self):
def f(x):
return core_layers.dense(x, self.CHANNELS // 2, use_bias=True)
def g(x):
return core_layers.dense(x, self.CHANNELS // 2, use_bias=True)
y = random_ops.random_uniform(
[self.BATCH_SIZE, self.CHANNELS], dtype=dtypes.float32)
y1, y2 = array_ops.split(y, 2, axis=-1)
block = rev_block_lib.RevBlock(f, g, num_layers=3)
x1, x2 = block.backward(y1, y2)
y1_inv, y2_inv = block.forward(x1, x2)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
y1, y2, y1_inv, y2_inv = sess.run([y1, y2, y1_inv, y2_inv])
self.assertAllClose(y1, y1_inv, rtol=1e-5)
self.assertAllClose(y2, y2_inv, rtol=1e-5)
def _testRevBlock(self,
x=None,
f=None,
g=None,
f_side_input=None,
g_side_input=None):
random_seed.set_random_seed(1234)
if f is None:
def f(x): # pylint: disable=function-redefined
return core_layers.dense(x, self.CHANNELS // 2, use_bias=True)
if g is None:
def g(x): # pylint: disable=function-redefined
return core_layers.dense(x, self.CHANNELS // 2, use_bias=True)
if f_side_input is None:
f_side_input = []
if g_side_input is None:
g_side_input = []
if x is None:
x = random_ops.random_uniform(
[self.BATCH_SIZE, self.CHANNELS], dtype=dtypes.float32)
x1, x2 = array_ops.split(x, 2, axis=-1)
with variable_scope.variable_scope("rev_test") as vs:
y1_rev, y2_rev = rev_block_lib.rev_block(
x1,
x2,
f,
g,
f_side_input=f_side_input,
g_side_input=g_side_input,
num_layers=self.NUM_LAYERS)
y_rev = array_ops.concat([y1_rev, y2_rev], axis=1)
fg_vars = vs.trainable_variables()
num_vars = len(variables.global_variables())
with variable_scope.variable_scope(vs, reuse=True):
y1, y2 = rev_block_lib.rev_block(
x1,
x2,
f,
g,
f_side_input=f_side_input,
g_side_input=g_side_input,
num_layers=self.NUM_LAYERS,
is_training=False)
y = array_ops.concat([y1, y2], axis=1)
# Ensure no new vars were created - full reuse
assert len(variables.global_variables()) == num_vars
loss_rev = math_ops.reduce_mean(y_rev + 10.)
loss = math_ops.reduce_mean(y + 10.)
wrt = [x] + f_side_input + g_side_input + fg_vars
grads_rev = gradients_impl.gradients(loss_rev, wrt)
grads = gradients_impl.gradients(loss, wrt)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
y_val, yd_val, gd_val, g_val = sess.run([y, y_rev, grads_rev, grads])
self.assertAllClose(y_val, yd_val)
for g1, g2 in zip(gd_val, g_val):
self.assertAllClose(g1, g2, rtol=1e-5)
def testRevBlock(self):
self._testRevBlock()
def testSideInput(self):
f_side_input = random_ops.random_uniform(
[self.BATCH_SIZE, self.CHANNELS // 2])
def f(x, side_input):
return core_layers.dense(
x, self.CHANNELS // 2, use_bias=True) + side_input[0]
self._testRevBlock(f=f, f_side_input=[f_side_input])
def testMultipleFns(self):
def f1(x):
return core_layers.dense(x, self.CHANNELS // 2)
def f2(x):
return core_layers.dense(x, self.CHANNELS // 2, activation=nn_ops.relu)
self._testRevBlock(f=[f1, f2, f1, f2])
def testConvAndBatchNorm(self):
x = random_ops.random_uniform(
[self.BATCH_SIZE, 10, self.CHANNELS], dtype=dtypes.float32)
def f(x):
x = convolutional.conv1d(x, self.CHANNELS // 2, 3, padding="same")
x = layers.batch_norm(x, is_training=False)
x = convolutional.conv1d(x, self.CHANNELS // 2, 3, padding="same")
x = layers.batch_norm(x, is_training=False)
return x
self._testRevBlock(x=x, f=f)
def testReuse(self):
def f(x):
return core_layers.dense(x, self.CHANNELS // 2)
def g(x):
return core_layers.dense(x, self.CHANNELS // 2)
x = random_ops.random_uniform(
[self.BATCH_SIZE, self.CHANNELS], dtype=dtypes.float32)
x1, x2 = array_ops.split(x, 2, axis=-1)
with variable_scope.variable_scope("test"):
y1, y2 = rev_block_lib.rev_block(x1, x2, f, g, num_layers=self.NUM_LAYERS)
num_vars_before = len(variables.global_variables())
with variable_scope.variable_scope("test", reuse=True):
y1, y2 = rev_block_lib.rev_block(x1, x2, f, g, num_layers=self.NUM_LAYERS)
num_vars_after = len(variables.global_variables())
self.assertEqual(num_vars_before, num_vars_after)
loss = math_ops.reduce_mean(y1 + y2)
_ = gradients_impl.gradients(loss,
[x] + variables.trainable_variables())
with variable_scope.variable_scope("test", reuse=True):
y1, y2 = rev_block_lib.rev_block(x1, x2, f, g, num_layers=self.NUM_LAYERS)
num_vars_after = len(variables.global_variables())
self.assertEqual(num_vars_before, num_vars_after)
class RecomputeTest(test.TestCase):
def testRecompute(self):
def layer(x, name=None):
with variable_scope.variable_scope(name, default_name="layer"):
x = layers.layer_norm(x)
x = convolutional.conv1d(
x,
10,
1,
use_bias=False,
kernel_initializer=init_ops.constant_initializer(42.42))
x = nn_ops.relu(x)
return x
def fn(x):
out = x
for _ in range(3):
out = layer(out)
return out
@rev_block_lib.recompute_grad
def fn_recompute(x):
return fn(x)
@rev_block_lib.recompute_grad(use_data_dep=True)
def fn_use_data_dep(x):
return fn(x)
@rev_block_lib.recompute_grad(tupleize_grads=True)
def fn_tupleize(x):
return fn(x)
@rev_block_lib.recompute_grad(use_data_dep=True, tupleize_grads=True)
def fn_both(x):
return fn(x)
x = random_ops.random_uniform((3, 1, 3))
names_and_fns = [
("recompute", fn_recompute),
("regular", fn),
("use_data_dep", fn_use_data_dep),
("tupleize", fn_tupleize),
("tuple_and_data_dep", fn_both),
]
outputs_and_vars = []
for name, wrapped_fn in names_and_fns:
with variable_scope.variable_scope(name, use_resource=True) as vs:
out = math_ops.reduce_sum(wrapped_fn(x))
outputs_and_vars.append((out, vs.trainable_variables()))
all_grads = []
for out, scope_vars in outputs_and_vars:
all_grads.append(gradients_impl.gradients(out, scope_vars))
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
outputs = list(zip(*outputs_and_vars))[0]
outs, all_grads_val = sess.run([outputs, all_grads])
# All outputs are the same
current = outs[0]
for out in outs[1:]:
self.assertAllClose(current, out)
current = out
# All gradients are the same
      for grads in zip(*all_grads_val):
current = grads[0]
for g in grads[1:]:
self.assertAllClose(current, g)
current = g
def testDoubleCallInSameScopeFails(self):
@rev_block_lib.recompute_grad
def layer_with_recompute(inputs):
return core_layers.dense(inputs, 2)
with variable_scope.variable_scope("layer", use_resource=True):
inputs = array_ops.ones((2, 4), dtypes.float32)
out1 = layer_with_recompute(inputs)
out2 = layer_with_recompute(inputs) + out1
out = math_ops.reduce_sum(out2)
tvars = variables.trainable_variables()
assert len(tvars) == 4
with self.assertRaisesWithPredicateMatch(
ValueError, "called twice in the same enclosing scope"):
gradients_impl.gradients(out, [inputs] + tvars)
def testDoubleCallInUniqueScope(self):
@rev_block_lib.recompute_grad
def layer_with_recompute(inputs):
with variable_scope.variable_scope("inner", use_resource=True):
return core_layers.dense(inputs, 2)
with variable_scope.variable_scope("layer", use_resource=True):
inputs = array_ops.ones((2, 4), dtypes.float32)
with variable_scope.variable_scope("layer1", use_resource=True):
out1 = layer_with_recompute(inputs)
with variable_scope.variable_scope("layer2", use_resource=True):
out2 = layer_with_recompute(inputs) + out1
out = math_ops.reduce_sum(out2)
tvars = variables.trainable_variables()
assert len(tvars) == 4
grads = gradients_impl.gradients(out, [inputs] + tvars)
for grad in grads:
self.assertTrue(grad is not None)
def testWithIsRecomputeKwarg(self):
kwarg_values = []
@rev_block_lib.recompute_grad
def layer_with_recompute(inputs, is_recomputing=False):
kwarg_values.append(is_recomputing)
out = core_layers.dense(inputs, 2)
out = normalization_layers.batch_normalization(out, training=True)
if is_recomputing:
# Ensure that the updates are not duplicated by popping off the latest
# 2 additions.
update_ops = ops.get_collection_ref(ops.GraphKeys.UPDATE_OPS)
update_ops.pop()
update_ops.pop()
return out
x = array_ops.ones((2, 4), dtypes.float32)
with variable_scope.variable_scope("layer1", use_resource=True):
y = layer_with_recompute(x)
loss = math_ops.reduce_sum(y)
tvars = variables.trainable_variables()
gradients_impl.gradients(loss, [x] + tvars)
update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
self.assertEqual(2, len(update_ops))
self.assertEqual([False, True], kwarg_values)
def testWithoutVariables(self):
def concat_n(layer_list, num_inputs):
return math_ops.reduce_sum(
array_ops.concat([x for x in layer_list[-num_inputs:]], axis=-1),
axis=1, keepdims=True)
@rev_block_lib.recompute_grad
def concat_n_wrap(*args):
return concat_n(args, 3)
# DenseNet-style layers
layer_list = [random_ops.random_uniform((4, 8))]
for _ in range(5):
layer_list.append(math_ops.sqrt(concat_n_wrap(*layer_list)))
grads = gradients_impl.gradients(layer_list[-1], layer_list[0])
with self.cached_session() as sess:
sess.run(grads)
def testErrorOnClosedOverTensor(self):
x = random_ops.random_uniform((4, 8))
y = random_ops.random_uniform((4, 8))
z = x * y
with self.assertRaisesWithPredicateMatch(ValueError, "closes over"):
@rev_block_lib.recompute_grad
def fn_with_capture(a): # pylint: disable=unused-variable
return a * z
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/rev_block_lib_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import layers
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class InitializerTest(test.TestCase):
def test_xavier_wrong_dtype(self):
with self.assertRaisesRegexp(
TypeError, 'Cannot create initializer for non-floating point type.'):
initializers.xavier_initializer(dtype=dtypes.int32)
self.assertIsNone(regularizers.l1_regularizer(0.)(None))
def _test_xavier(self, initializer, shape, variance, uniform):
with session.Session() as sess:
var = variable_scope.get_variable(
name='test',
shape=shape,
dtype=dtypes.float32,
initializer=initializer(
uniform=uniform, seed=1))
sess.run(variables.global_variables_initializer())
values = var.eval()
self.assertAllClose(np.var(values), variance, 1e-3, 1e-3)
def test_xavier_uniform(self):
self._test_xavier(initializers.xavier_initializer, [100, 40],
2. / (100. + 40.), True)
def test_xavier_normal(self):
self._test_xavier(initializers.xavier_initializer, [100, 40],
2. / (100. + 40.), False)
def test_xavier_scalar(self):
self._test_xavier(initializers.xavier_initializer, [], 0.0, True)
def test_xavier_conv2d_uniform(self):
self._test_xavier(layers.xavier_initializer_conv2d, [100, 40, 5, 7],
2. / (100. * 40 * (5 + 7)), True)
def test_xavier_conv2d_normal(self):
self._test_xavier(layers.xavier_initializer_conv2d, [100, 40, 5, 7],
2. / (100. * 40 * (5 + 7)), False)
class VarianceScalingInitializerTest(test.TestCase):
def test_wrong_dtype(self):
with self.assertRaisesRegexp(
TypeError, 'Cannot create initializer for non-floating point type.'):
initializers.variance_scaling_initializer(dtype=dtypes.int32)
initializer = initializers.variance_scaling_initializer()
with self.assertRaisesRegexp(
TypeError, 'Cannot create initializer for non-floating point type.'):
initializer([], dtype=dtypes.int32)
def _test_variance(self, initializer, shape, variance, factor, mode, uniform):
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
var = variable_scope.get_variable(
name='test',
shape=shape,
dtype=dtypes.float32,
initializer=initializer(
factor=factor, mode=mode, uniform=uniform, seed=1))
sess.run(variables.global_variables_initializer())
values = var.eval()
self.assertAllClose(np.var(values), variance, 1e-3, 1e-3)
def test_fan_in(self):
for uniform in [False, True]:
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100, 40],
variance=2. / 100.,
factor=2.0,
mode='FAN_IN',
uniform=uniform)
def test_fan_out(self):
for uniform in [False, True]:
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100, 40],
variance=2. / 40.,
factor=2.0,
mode='FAN_OUT',
uniform=uniform)
def test_fan_avg(self):
for uniform in [False, True]:
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100, 40],
variance=4. / (100. + 40.),
factor=2.0,
mode='FAN_AVG',
uniform=uniform)
def test_conv2d_fan_in(self):
for uniform in [False, True]:
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100, 40, 5, 7],
variance=2. / (100. * 40. * 5.),
factor=2.0,
mode='FAN_IN',
uniform=uniform)
def test_conv2d_fan_out(self):
for uniform in [False, True]:
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100, 40, 5, 7],
variance=2. / (100. * 40. * 7.),
factor=2.0,
mode='FAN_OUT',
uniform=uniform)
def test_conv2d_fan_avg(self):
for uniform in [False, True]:
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100, 40, 5, 7],
variance=2. / (100. * 40. * (5. + 7.)),
factor=2.0,
mode='FAN_AVG',
uniform=uniform)
def test_xavier_uniform(self):
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100, 40],
variance=2. / (100. + 40.),
factor=1.0,
mode='FAN_AVG',
uniform=True)
def test_xavier_normal(self):
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100, 40],
variance=2. / (100. + 40.),
factor=1.0,
mode='FAN_AVG',
uniform=False)
def test_xavier_scalar(self):
self._test_variance(
initializers.variance_scaling_initializer,
shape=[],
variance=0.0,
factor=1.0,
mode='FAN_AVG',
uniform=False)
def test_xavier_conv2d_uniform(self):
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100, 40, 5, 7],
variance=2. / (100. * 40. * (5. + 7.)),
factor=1.0,
mode='FAN_AVG',
uniform=True)
def test_xavier_conv2d_normal(self):
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100, 40, 5, 7],
variance=2. / (100. * 40. * (5. + 7.)),
factor=1.0,
mode='FAN_AVG',
        uniform=False)
def test_1d_shape_fan_in(self):
for uniform in [False, True]:
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100],
variance=2. / 100.,
factor=2.0,
mode='FAN_IN',
uniform=uniform)
def test_1d_shape_fan_out(self):
for uniform in [False, True]:
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100],
variance=2. / 100.,
factor=2.0,
mode='FAN_OUT',
uniform=uniform)
def test_1d_shape_fan_avg(self):
for uniform in [False, True]:
self._test_variance(
initializers.variance_scaling_initializer,
shape=[100],
variance=4. / (100. + 100.),
factor=2.0,
mode='FAN_AVG',
uniform=uniform)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/initializers_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for regularizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class RegularizerTest(test.TestCase):
def test_l1(self):
with self.assertRaises(ValueError):
regularizers.l1_regularizer(-1.)
with self.assertRaises(ValueError):
regularizers.l1_regularizer(0)
self.assertIsNone(regularizers.l1_regularizer(0.)(None))
values = np.array([1., -1., 4., 2.])
weights = constant_op.constant(values)
with session.Session() as sess:
result = sess.run(regularizers.l1_regularizer(.5)(weights))
self.assertAllClose(np.abs(values).sum() * .5, result)
def test_l2(self):
with self.assertRaises(ValueError):
regularizers.l2_regularizer(-1.)
with self.assertRaises(ValueError):
regularizers.l2_regularizer(0)
self.assertIsNone(regularizers.l2_regularizer(0.)(None))
values = np.array([1., -1., 4., 2.])
weights = constant_op.constant(values)
with session.Session() as sess:
result = sess.run(regularizers.l2_regularizer(.42)(weights))
self.assertAllClose(np.power(values, 2).sum() / 2.0 * .42, result)
def test_l1_l2(self):
with self.assertRaises(ValueError):
regularizers.l1_l2_regularizer(-1., 0.5)
with self.assertRaises(ValueError):
regularizers.l1_l2_regularizer(0.5, -1.)
with self.assertRaises(ValueError):
regularizers.l1_l2_regularizer(0, 0.5)
with self.assertRaises(ValueError):
regularizers.l1_l2_regularizer(0.5, 0)
with self.cached_session():
shape = [5, 5, 5]
num_elem = 5 * 5 * 5
tensor = constant_op.constant(1.0, shape=shape)
loss = regularizers.l1_l2_regularizer(1.0, 1.0)(tensor)
self.assertEquals(loss.op.name, 'l1_l2_regularizer')
self.assertAlmostEqual(loss.eval(), num_elem + num_elem / 2, 5)
def test_l1_l2_scale_l1Zero(self):
shape = [5, 5, 5]
num_elem = 5 * 5 * 5
tensor = constant_op.constant(1.0, shape=shape)
loss = regularizers.l1_l2_regularizer(0.0, 1.0)(tensor)
with self.cached_session():
self.assertEquals(loss.op.name, 'l1_l2_regularizer')
self.assertAlmostEqual(loss.eval(), num_elem / 2, 5)
def test_l1_l2_scale_l2Zero(self):
shape = [5, 5, 5]
num_elem = 5 * 5 * 5
tensor = constant_op.constant(1.0, shape=shape)
loss = regularizers.l1_l2_regularizer(1.0, 0.0)(tensor)
with self.cached_session():
self.assertEquals(loss.op.name, 'l1_l2_regularizer')
self.assertAlmostEqual(loss.eval(), num_elem, 5)
def test_l1_l2_scales_Zero(self):
shape = [5, 5, 5]
tensor = constant_op.constant(1.0, shape=shape)
loss = regularizers.l1_l2_regularizer(0.0, 0.0)(tensor)
self.assertEquals(loss, None)
def testL1L2RegularizerWithScope(self):
with self.cached_session():
shape = [5, 5, 5]
num_elem = 5 * 5 * 5
tensor = constant_op.constant(1.0, shape=shape)
with ops.name_scope('foo'):
loss = regularizers.l1_l2_regularizer(1.0, 1.0, scope='l1_l2')(tensor)
self.assertEquals(loss.op.name, 'foo/l1_l2')
self.assertAlmostEqual(loss.eval(), num_elem + num_elem / 2, 5)
def test_sum_regularizer(self):
l1_function = regularizers.l1_regularizer(.1)
l2_function = regularizers.l2_regularizer(.2)
self.assertIsNone(regularizers.sum_regularizer([]))
self.assertIsNone(regularizers.sum_regularizer([None]))
values = np.array([-3.])
weights = constant_op.constant(values)
with session.Session() as sess:
l1_reg1 = regularizers.sum_regularizer([l1_function])
l1_result1 = sess.run(l1_reg1(weights))
l1_reg2 = regularizers.sum_regularizer([l1_function, None])
l1_result2 = sess.run(l1_reg2(weights))
l1_l2_reg = regularizers.sum_regularizer([l1_function, l2_function])
l1_l2_result = sess.run(l1_l2_reg(weights))
self.assertAllClose(.1 * np.abs(values).sum(), l1_result1)
self.assertAllClose(.1 * np.abs(values).sum(), l1_result2)
self.assertAllClose(
.1 * np.abs(values).sum() + .2 * np.power(values, 2).sum() / 2.0,
l1_l2_result)
def test_apply_regularization(self):
dummy_regularizer = lambda x: math_ops.reduce_sum(2 * x)
array_weights_list = [[1.5], [2, 3, 4.2], [10, 42, 666.6]]
tensor_weights_list = [constant_op.constant(x) for x in array_weights_list]
expected = sum(2 * x for l in array_weights_list for x in l)
with self.cached_session():
result = regularizers.apply_regularization(dummy_regularizer,
tensor_weights_list)
self.assertAllClose(expected, result.eval())
def test_apply_zero_regularization(self):
regularizer = regularizers.l2_regularizer(0.0)
array_weights_list = [[1.5], [2, 3, 4.2], [10, 42, 666.6]]
tensor_weights_list = [constant_op.constant(x) for x in array_weights_list]
with self.cached_session():
result = regularizers.apply_regularization(regularizer,
tensor_weights_list)
self.assertAllClose(0.0, result.eval())
def test_apply_regularization_invalid_regularizer(self):
non_scalar_regularizer = lambda x: array_ops.tile(x, [2])
tensor_weights_list = [
constant_op.constant(x) for x in [[1.5], [2, 3, 4.2], [10, 42, 666.6]]
]
with self.cached_session():
with self.assertRaises(ValueError):
regularizers.apply_regularization(non_scalar_regularizer,
tensor_weights_list)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/regularizers_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""layers module with higher level NN primitives."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.layers.python.layers.embedding_ops import *
from tensorflow.contrib.layers.python.layers.encoders import *
from tensorflow.contrib.layers.python.layers.feature_column import *
from tensorflow.contrib.layers.python.layers.feature_column_ops import *
from tensorflow.contrib.layers.python.layers.initializers import *
from tensorflow.contrib.layers.python.layers.layers import *
from tensorflow.contrib.layers.python.layers.normalization import *
from tensorflow.contrib.layers.python.layers.optimizers import *
from tensorflow.contrib.layers.python.layers.regularizers import *
from tensorflow.contrib.layers.python.layers.rev_block_lib import *
from tensorflow.contrib.layers.python.layers.summaries import *
from tensorflow.contrib.layers.python.layers.target_column import *
from tensorflow.contrib.layers.python.ops.bucketization_op import *
from tensorflow.contrib.layers.python.ops.sparse_feature_cross_op import *
# pylint: enable=wildcard-import
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Encoders to transform sequence of symbols into vector representation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import embedding_ops as contrib_embedding_ops
from tensorflow.contrib.layers.python.ops import sparse_ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
__all__ = ['bow_encoder', 'embed_sequence']
def bow_encoder(ids,
vocab_size,
embed_dim,
sparse_lookup=True,
initializer=None,
regularizer=None,
trainable=True,
scope=None,
reuse=None):
"""Maps a sequence of symbols to a vector per example by averaging embeddings.
Args:
ids: `[batch_size, doc_length]` `Tensor` or `SparseTensor` of type
`int32` or `int64` with symbol ids.
vocab_size: Integer number of symbols in vocabulary.
embed_dim: Integer number of dimensions for embedding matrix.
sparse_lookup: `bool`, if `True`, converts ids to a `SparseTensor`
and performs a sparse embedding lookup. This is usually faster,
but not desirable if padding tokens should have an embedding. Empty rows
are assigned a special embedding.
initializer: An initializer for the embeddings, if `None` default for
current scope is used.
regularizer: Optional regularizer for the embeddings.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional string specifying the variable scope for the op, required
if `reuse=True`.
reuse: If `True`, variables inside the op will be reused.
Returns:
Encoding `Tensor` `[batch_size, embed_dim]` produced by
averaging embeddings.
Raises:
ValueError: If `embed_dim` or `vocab_size` are not specified.
"""
if not vocab_size or not embed_dim:
raise ValueError('Must specify vocab size and embedding dimension')
with variable_scope.variable_scope(
scope, 'bow_encoder', [ids], reuse=reuse):
embeddings = variables.model_variable(
'embeddings', shape=[vocab_size, embed_dim],
initializer=initializer, regularizer=regularizer,
trainable=trainable)
if sparse_lookup:
if isinstance(ids, sparse_tensor.SparseTensor):
sparse_ids = ids
else:
sparse_ids = sparse_ops.dense_to_sparse_tensor(ids)
return contrib_embedding_ops.safe_embedding_lookup_sparse(
[embeddings], sparse_ids, combiner='mean', default_id=0)
else:
if isinstance(ids, sparse_tensor.SparseTensor):
        raise TypeError('ids are expected to be dense Tensor, got: %s' % (ids,))
return math_ops.reduce_mean(
embedding_ops.embedding_lookup(embeddings, ids), axis=1)
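# A minimal usage sketch of `bow_encoder` (hypothetical example, not part of
# this module's public API): two documents of two token ids each, drawn from
# an assumed vocabulary of 4 symbols, are averaged into 3-dim embeddings.
def _bow_encoder_usage_sketch():
  docs = [[0, 1], [2, 3]]  # [batch_size=2, doc_length=2] symbol ids.
  # Sparse lookup (the default) averages the embeddings of each document.
  return bow_encoder(docs, vocab_size=4, embed_dim=3)  # [2, 3] `Tensor`.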
def embed_sequence(ids,
vocab_size=None,
embed_dim=None,
unique=False,
initializer=None,
regularizer=None,
trainable=True,
scope=None,
reuse=None):
"""Maps a sequence of symbols to a sequence of embeddings.
Typical use case would be reusing embeddings between an encoder and decoder.
Args:
ids: `[batch_size, doc_length]` `Tensor` of type `int32` or `int64`
with symbol ids.
vocab_size: Integer number of symbols in vocabulary.
embed_dim: Integer number of dimensions for embedding matrix.
unique: If `True`, will first compute the unique set of indices, and then
      look up each embedding once, repeating them in the output as needed.
initializer: An initializer for the embeddings, if `None` default for
current scope is used.
regularizer: Optional regularizer for the embeddings.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
scope: Optional string specifying the variable scope for the op, required
if `reuse=True`.
reuse: If `True`, variables inside the op will be reused.
Returns:
`Tensor` of `[batch_size, doc_length, embed_dim]` with embedded sequences.
Raises:
ValueError: if `embed_dim` or `vocab_size` are not specified when
`reuse` is `None` or `False`.
"""
if not (reuse or (vocab_size and embed_dim)):
raise ValueError('Must specify vocab size and embedding dimension when not '
'reusing. Got vocab_size=%s and embed_dim=%s' % (
vocab_size, embed_dim))
with variable_scope.variable_scope(
scope, 'EmbedSequence', [ids], reuse=reuse):
shape = [vocab_size, embed_dim]
    if reuse and (vocab_size is None or embed_dim is None):
shape = None
embeddings = variables.model_variable(
'embeddings', shape=shape,
initializer=initializer, regularizer=regularizer,
trainable=trainable)
if unique:
return contrib_embedding_ops.embedding_lookup_unique(embeddings, ids)
return embedding_ops.embedding_lookup(embeddings, ids)
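# A minimal usage sketch of `embed_sequence` (hypothetical example): a first
# call embeds the ids under a shared scope and a second call reuses the same
# embedding matrix, as described in the docstring; the scope name
# 'shared_embed' is an assumption used only for illustration.
def _embed_sequence_usage_sketch():
  ids = [[1, 1], [2, 3]]  # [batch_size=2, doc_length=2] symbol ids.
  emb = embed_sequence(ids, vocab_size=4, embed_dim=3, scope='shared_embed')
  # `vocab_size`/`embed_dim` may be omitted when reusing an existing matrix.
  emb_reused = embed_sequence(ids, scope='shared_embed', reuse=True)
  return emb, emb_reused  # Each is a [2, 2, 3] `Tensor`.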
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/encoders.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TargetColumn abstract a single head in the model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
@deprecated(
"2016-11-12", "This file will be removed after the deprecation date."
"Please switch to "
"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def regression_target(label_name=None,
weight_column_name=None,
label_dimension=1):
"""Creates a _TargetColumn for linear regression.
Args:
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
label_dimension: dimension of the target for multilabels.
Returns:
An instance of _TargetColumn
"""
return _RegressionTargetColumn(
loss_fn=_mean_squared_loss,
label_name=label_name,
weight_column_name=weight_column_name,
label_dimension=label_dimension)
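# A minimal usage sketch (assumed example): a single-output regression head;
# the label key "price" and weight column "example_weight" are hypothetical
# names used only for illustration.
def _regression_target_usage_sketch():
  return regression_target(
      label_name="price",
      weight_column_name="example_weight",
      label_dimension=1)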
# TODO(zakaria): Add logistic_regression_target
@deprecated(
"2016-11-12", "This file will be removed after the deprecation date."
"Please switch to "
"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def multi_class_target(n_classes, label_name=None, weight_column_name=None):
"""Creates a _TargetColumn for multi class single label classification.
The target column uses softmax cross entropy loss.
Args:
n_classes: Integer, number of classes, must be >= 2
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
Returns:
An instance of _MultiClassTargetColumn.
Raises:
ValueError: if n_classes is < 2
"""
if n_classes < 2:
raise ValueError("n_classes must be > 1 for classification.")
if n_classes == 2:
loss_fn = _log_loss_with_two_classes
else:
loss_fn = _softmax_cross_entropy_loss
return _MultiClassTargetColumn(
loss_fn=loss_fn,
n_classes=n_classes,
label_name=label_name,
weight_column_name=weight_column_name)
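# A minimal usage sketch (assumed example): with n_classes=3 this head uses
# softmax cross-entropy, whereas n_classes=2 would select the sigmoid
# log-loss branch above; "weights" is a hypothetical weight column name.
def _multi_class_target_usage_sketch():
  return multi_class_target(
      n_classes=3, label_name="label", weight_column_name="weights")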
@deprecated(
"2016-11-12", "This file will be removed after the deprecation date."
"Please switch to "
"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def binary_svm_target(label_name=None, weight_column_name=None):
"""Creates a _TargetColumn for binary classification with SVMs.
The target column uses binary hinge loss.
Args:
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
Returns:
An instance of _TargetColumn.
"""
return _BinarySvmTargetColumn(
label_name=label_name, weight_column_name=weight_column_name)
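# A minimal usage sketch (assumed example): a binary SVM head trained with
# hinge loss; unlike the multi-class head it cannot emit class probabilities
# (see _BinarySvmTargetColumn.logits_to_predictions below).
def _binary_svm_target_usage_sketch():
  return binary_svm_target(label_name="label")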
@deprecated(
"2016-11-12", "This file will be removed after the deprecation date."
"Please switch to "
"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
class ProblemType(object):
UNSPECIFIED = 0
CLASSIFICATION = 1
LINEAR_REGRESSION = 2
LOGISTIC_REGRESSION = 3
class _TargetColumn(object):
"""_TargetColumn is the abstraction for a single head in a model.
Args:
loss_fn: a function that returns the loss tensor.
num_label_columns: Integer, number of label columns.
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
    problem_type: A `ProblemType` constant (e.g. `ProblemType.CLASSIFICATION`)
      describing the kind of problem this head addresses.
  Raises:
    ValueError: if loss_fn or num_label_columns are missing.
"""
def __init__(self, loss_fn, num_label_columns, label_name, weight_column_name,
problem_type):
if not loss_fn:
raise ValueError("loss_fn must be provided")
if num_label_columns is None: # n_classes can be 0
raise ValueError("num_label_columns must be provided")
self._loss_fn = loss_fn
self._num_label_columns = num_label_columns
self._label_name = label_name
self._weight_column_name = weight_column_name
self._problem_type = problem_type
def logits_to_predictions(self, logits, proba=False):
    # Abstract. Subclasses must implement.
raise NotImplementedError()
def get_eval_ops(self, features, logits, labels, metrics=None):
"""Returns eval op."""
raise NotImplementedError
@property
def label_name(self):
return self._label_name
@property
def weight_column_name(self):
return self._weight_column_name
@property
def num_label_columns(self):
return self._num_label_columns
def get_weight_tensor(self, features):
if not self._weight_column_name:
return None
else:
return array_ops.reshape(
math_ops.cast(features[self._weight_column_name], dtypes.float32),
shape=(-1,))
@property
def problem_type(self):
return self._problem_type
def _weighted_loss(self, loss, weight_tensor):
"""Returns cumulative weighted loss."""
unweighted_loss = array_ops.reshape(loss, shape=(-1,))
weighted_loss = math_ops.multiply(unweighted_loss,
array_ops.reshape(
weight_tensor, shape=(-1,)))
return weighted_loss
def training_loss(self, logits, target, features, name="training_loss"):
"""Returns training loss tensor for this head.
    Training loss is different from the loss reported on TensorBoard because
    the example weights must be respected when computing the gradient.
    L = sum_{i} w_{i} * l_{i} / B
    where B is the number of examples in the batch, and l_{i} and w_{i} are
    the loss and weight of the i-th example.
Args:
logits: logits, a float tensor.
target: either a tensor for labels or in multihead case, a dict of string
to target tensor.
features: features dict.
name: Op name.
Returns:
Loss tensor.
"""
    target = target[self.label_name] if isinstance(target, dict) else target
loss_unweighted = self._loss_fn(logits, target)
weight_tensor = self.get_weight_tensor(features)
if weight_tensor is None:
return math_ops.reduce_mean(loss_unweighted, name=name)
loss_weighted = self._weighted_loss(loss_unweighted, weight_tensor)
return math_ops.reduce_mean(loss_weighted, name=name)
def loss(self, logits, target, features):
"""Returns loss tensor for this head.
The loss returned is the weighted average.
L = sum_{i} w_{i} * l_{i} / sum_{i} w_{i}
Args:
logits: logits, a float tensor.
target: either a tensor for labels or in multihead case, a dict of string
to target tensor.
features: features dict.
Returns:
Loss tensor.
"""
    target = target[self.label_name] if isinstance(target, dict) else target
loss_unweighted = self._loss_fn(logits, target)
weight_tensor = self.get_weight_tensor(features)
if weight_tensor is None:
return math_ops.reduce_mean(loss_unweighted, name="loss")
loss_weighted = self._weighted_loss(loss_unweighted, weight_tensor)
return math_ops.div(
math_ops.reduce_sum(loss_weighted),
math_ops.cast(math_ops.reduce_sum(weight_tensor), dtypes.float32),
name="loss")
class _RegressionTargetColumn(_TargetColumn):
"""_TargetColumn for regression."""
def __init__(self, loss_fn, label_name, weight_column_name, label_dimension):
super(_RegressionTargetColumn, self).__init__(
loss_fn=loss_fn,
num_label_columns=label_dimension,
label_name=label_name,
weight_column_name=weight_column_name,
problem_type=ProblemType.LINEAR_REGRESSION)
def logits_to_predictions(self, logits, proba=False):
if self.num_label_columns == 1:
return array_ops.squeeze(logits, axis=[1])
return logits
def get_eval_ops(self, features, logits, labels, metrics=None):
loss = self.loss(logits, labels, features)
result = {"loss": metric_ops.streaming_mean(loss)}
if metrics:
predictions = self.logits_to_predictions(logits, proba=False)
result.update(
_run_metrics(predictions, labels, metrics,
self.get_weight_tensor(features)))
return result
class _MultiClassTargetColumn(_TargetColumn):
"""_TargetColumn for classification."""
# TODO(zakaria): support multilabel.
def __init__(self, loss_fn, n_classes, label_name, weight_column_name):
if n_classes < 2:
raise ValueError("n_classes must be >= 2")
super(_MultiClassTargetColumn, self).__init__(
loss_fn=loss_fn,
num_label_columns=1 if n_classes == 2 else n_classes,
label_name=label_name,
weight_column_name=weight_column_name,
problem_type=ProblemType.CLASSIFICATION)
def logits_to_predictions(self, logits, proba=False):
if self.num_label_columns == 1:
logits = array_ops.concat([array_ops.zeros_like(logits), logits], 1)
if proba:
return nn.softmax(logits)
else:
return math_ops.argmax(logits, 1)
def _default_eval_metrics(self):
if self._num_label_columns == 1:
return get_default_binary_metrics_for_eval(thresholds=[.5])
return {}
def get_eval_ops(self, features, logits, labels, metrics=None):
loss = self.loss(logits, labels, features)
result = {"loss": metric_ops.streaming_mean(loss)}
# Adds default metrics.
if metrics is None:
# TODO(b/29366811): This currently results in both an "accuracy" and an
# "accuracy/threshold_0.500000_mean" metric for binary classification.
metrics = {("accuracy", "classes"): metric_ops.streaming_accuracy}
predictions = math_ops.sigmoid(logits)
labels_float = math_ops.cast(labels, dtypes.float32)
default_metrics = self._default_eval_metrics()
for metric_name, metric_op in default_metrics.items():
result[metric_name] = metric_op(predictions, labels_float)
class_metrics = {}
proba_metrics = {}
for name, metric_op in six.iteritems(metrics):
if isinstance(name, tuple):
if len(name) != 2:
raise ValueError("Ignoring metric {}. It returned a tuple with "
"len {}, expected 2.".format(name, len(name)))
else:
if name[1] not in ["classes", "probabilities"]:
raise ValueError("Ignoring metric {}. The 2nd element of its "
"name should be either 'classes' or "
"'probabilities'.".format(name))
elif name[1] == "classes":
class_metrics[name[0]] = metric_op
else:
proba_metrics[name[0]] = metric_op
elif isinstance(name, str):
class_metrics[name] = metric_op
else:
raise ValueError("Ignoring metric {}. Its name is not in the correct "
"form.".format(name))
if class_metrics:
class_predictions = self.logits_to_predictions(logits, proba=False)
result.update(
_run_metrics(class_predictions, labels, class_metrics,
self.get_weight_tensor(features)))
if proba_metrics:
predictions = self.logits_to_predictions(logits, proba=True)
result.update(
_run_metrics(predictions, labels, proba_metrics,
self.get_weight_tensor(features)))
return result
class _BinarySvmTargetColumn(_MultiClassTargetColumn):
"""_TargetColumn for binary classification using SVMs."""
def __init__(self, label_name, weight_column_name):
def loss_fn(logits, target):
check_shape_op = control_flow_ops.Assert(
math_ops.less_equal(array_ops.rank(target), 2),
["target's shape should be either [batch_size, 1] or [batch_size]"])
with ops.control_dependencies([check_shape_op]):
target = array_ops.reshape(
target, shape=[array_ops.shape(target)[0], 1])
return loss_ops.hinge_loss(logits, target)
super(_BinarySvmTargetColumn, self).__init__(
loss_fn=loss_fn,
n_classes=2,
label_name=label_name,
weight_column_name=weight_column_name)
def logits_to_predictions(self, logits, proba=False):
if proba:
raise ValueError(
"logits to probabilities is not supported for _BinarySvmTargetColumn")
logits = array_ops.concat([array_ops.zeros_like(logits), logits], 1)
return math_ops.argmax(logits, 1)
# TODO(zakaria): use contrib losses.
def _mean_squared_loss(logits, target):
# To prevent broadcasting inside "-".
if len(target.get_shape()) == 1:
target = array_ops.expand_dims(target, axis=1)
logits.get_shape().assert_is_compatible_with(target.get_shape())
return math_ops.squared_difference(logits,
math_ops.cast(target, dtypes.float32))
def _log_loss_with_two_classes(logits, target):
# sigmoid_cross_entropy_with_logits requires [batch_size, 1] target.
if len(target.get_shape()) == 1:
target = array_ops.expand_dims(target, axis=1)
loss_vec = nn.sigmoid_cross_entropy_with_logits(
labels=math_ops.cast(target, dtypes.float32), logits=logits)
return loss_vec
def _softmax_cross_entropy_loss(logits, target):
# Check that we got integer for classification.
if not target.dtype.is_integer:
raise ValueError("Target's dtype should be integer "
"Instead got %s." % target.dtype)
# sparse_softmax_cross_entropy_with_logits requires [batch_size] target.
if len(target.get_shape()) == 2:
target = array_ops.squeeze(target, axis=[1])
loss_vec = nn.sparse_softmax_cross_entropy_with_logits(
labels=target, logits=logits)
return loss_vec
def _run_metrics(predictions, labels, metrics, weights):
result = {}
labels = math_ops.cast(labels, predictions.dtype)
for name, metric in six.iteritems(metrics or {}):
if weights is not None:
result[name] = metric(predictions, labels, weights=weights)
else:
result[name] = metric(predictions, labels)
return result
@deprecated(
"2016-11-12", "This file will be removed after the deprecation date."
"Please switch to "
"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def get_default_binary_metrics_for_eval(thresholds):
"""Returns a dictionary of basic metrics for logistic regression.
Args:
thresholds: List of floating point thresholds to use for accuracy,
precision, and recall metrics. If None, defaults to [0.5].
Returns:
Dictionary mapping metrics string names to metrics functions.
"""
metrics = {}
metrics[_MetricKeys.PREDICTION_MEAN] = _predictions_streaming_mean
metrics[_MetricKeys.TARGET_MEAN] = _labels_streaming_mean
# Also include the streaming mean of the label as an accuracy baseline, as
# a reminder to users.
metrics[_MetricKeys.ACCURACY_BASELINE] = _labels_streaming_mean
metrics[_MetricKeys.AUC] = _streaming_auc
for threshold in thresholds:
metrics[_MetricKeys.ACCURACY_MEAN %
threshold] = _accuracy_at_threshold(threshold)
# Precision for positive examples.
metrics[_MetricKeys.PRECISION_MEAN % threshold] = _streaming_at_threshold(
metric_ops.streaming_precision_at_thresholds, threshold)
# Recall for positive examples.
metrics[_MetricKeys.RECALL_MEAN % threshold] = _streaming_at_threshold(
metric_ops.streaming_recall_at_thresholds, threshold)
return metrics
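# A small illustration (assumed threshold) of the resulting metric keys: with
# thresholds=[0.5] the dictionary above contains, among others,
# "accuracy/threshold_0.500000_mean",
# "precision/positive_threshold_0.500000_mean" and
# "recall/positive_threshold_0.500000_mean" (see _MetricKeys below).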
def _float_weights_or_none(weights):
if weights is None:
return None
return math_ops.cast(weights, dtypes.float32)
def _labels_streaming_mean(unused_predictions, labels, weights=None):
return metric_ops.streaming_mean(labels, weights=weights)
def _predictions_streaming_mean(predictions, unused_labels, weights=None):
return metric_ops.streaming_mean(predictions, weights=weights)
def _streaming_auc(predictions, labels, weights=None):
return metric_ops.streaming_auc(
predictions, labels, weights=_float_weights_or_none(weights))
def _accuracy_at_threshold(threshold):
def _accuracy_metric(predictions, labels, weights=None):
threshold_predictions = math_ops.cast(
math_ops.greater_equal(predictions, threshold), dtypes.float32)
return metric_ops.streaming_accuracy(
predictions=threshold_predictions, labels=labels, weights=weights)
return _accuracy_metric
def _streaming_at_threshold(streaming_metrics_fn, threshold):
def _streaming_metrics(predictions, labels, weights=None):
precision_tensor, update_op = streaming_metrics_fn(
predictions,
labels=labels,
thresholds=[threshold],
weights=_float_weights_or_none(weights))
return array_ops.squeeze(precision_tensor), update_op
return _streaming_metrics
class _MetricKeys(object):
AUC = "auc"
PREDICTION_MEAN = "labels/prediction_mean"
TARGET_MEAN = "labels/actual_target_mean"
ACCURACY_BASELINE = "accuracy/baseline_target_mean"
ACCURACY_MEAN = "accuracy/threshold_%f_mean"
PRECISION_MEAN = "precision/positive_threshold_%f_mean"
RECALL_MEAN = "recall/positive_threshold_%f_mean"
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/target_column.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Regularizers for use with layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import standard_ops
from tensorflow.python.platform import tf_logging as logging
__all__ = ['l1_regularizer',
'l2_regularizer',
'l1_l2_regularizer',
'sum_regularizer',
'apply_regularization']
def l1_regularizer(scale, scope=None):
"""Returns a function that can be used to apply L1 regularization to weights.
L1 regularization encourages sparsity.
Args:
scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.
scope: An optional scope name.
Returns:
    A function with signature `l1(weights)` that applies L1 regularization.
Raises:
ValueError: If scale is negative or if scale is not a float.
"""
if isinstance(scale, numbers.Integral):
raise ValueError('scale cannot be an integer: %s' % scale)
if isinstance(scale, numbers.Real):
if scale < 0.:
raise ValueError('Setting a scale less than 0 on a regularizer: %g' %
scale)
if scale == 0.:
logging.info('Scale of 0 disables regularizer.')
return lambda _: None
def l1(weights, name=None):
"""Applies L1 regularization to weights."""
with ops.name_scope(scope, 'l1_regularizer', [weights]) as name:
my_scale = ops.convert_to_tensor(scale,
dtype=weights.dtype.base_dtype,
name='scale')
return standard_ops.multiply(
my_scale,
standard_ops.reduce_sum(standard_ops.abs(weights)),
name=name)
return l1
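# Editor's note: the helper below is an illustrative sketch, not part of the
# original file. It shows minimal usage of l1_regularizer on a small constant
# weight tensor, wrapped in a function so nothing runs at import time.
def _example_l1_regularizer_usage():
  weights = constant_op.constant([[1.0, -2.0], [0.5, 0.0]])
  l1 = l1_regularizer(scale=0.01)
  # Returns a scalar Tensor equal to 0.01 * (1.0 + 2.0 + 0.5) = 0.035.
  return l1(weights)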
def l2_regularizer(scale, scope=None):
"""Returns a function that can be used to apply L2 regularization to weights.
Small values of L2 can help prevent overfitting the training data.
Args:
scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.
scope: An optional scope name.
Returns:
A function with signature `l2(weights)` that applies L2 regularization.
Raises:
ValueError: If scale is negative or if scale is not a float.
"""
if isinstance(scale, numbers.Integral):
raise ValueError('scale cannot be an integer: %s' % (scale,))
if isinstance(scale, numbers.Real):
if scale < 0.:
raise ValueError('Setting a scale less than 0 on a regularizer: %g.' %
scale)
if scale == 0.:
logging.info('Scale of 0 disables regularizer.')
return lambda _: None
def l2(weights):
"""Applies l2 regularization to weights."""
with ops.name_scope(scope, 'l2_regularizer', [weights]) as name:
my_scale = ops.convert_to_tensor(scale,
dtype=weights.dtype.base_dtype,
name='scale')
return standard_ops.multiply(my_scale, nn.l2_loss(weights), name=name)
return l2
def l1_l2_regularizer(scale_l1=1.0, scale_l2=1.0, scope=None):
"""Returns a function that can be used to apply L1 L2 regularizations.
Args:
scale_l1: A scalar multiplier `Tensor` for L1 regularization.
scale_l2: A scalar multiplier `Tensor` for L2 regularization.
scope: An optional scope name.
Returns:
    A function with signature `l1_l2(weights)` that applies a weighted sum of
    L1 and L2 regularization.
Raises:
ValueError: If scale is negative or if scale is not a float.
"""
if isinstance(scale_l1, numbers.Integral):
raise ValueError('scale_l1 cannot be an integer: %s' % (scale_l1,))
if isinstance(scale_l2, numbers.Integral):
raise ValueError('scale_l2 cannot be an integer: %s' % (scale_l2,))
scope = scope or 'l1_l2_regularizer'
if scale_l1 == 0.:
return l2_regularizer(scale_l2, scope)
if scale_l2 == 0.:
return l1_regularizer(scale_l1, scope)
return sum_regularizer([l1_regularizer(scale_l1),
l2_regularizer(scale_l2)],
scope=scope)
def sum_regularizer(regularizer_list, scope=None):
"""Returns a function that applies the sum of multiple regularizers.
Args:
regularizer_list: A list of regularizers to apply.
    scope: An optional scope name.
Returns:
A function with signature `sum_reg(weights)` that applies the
sum of all the input regularizers.
"""
regularizer_list = [reg for reg in regularizer_list if reg is not None]
if not regularizer_list:
return None
def sum_reg(weights):
"""Applies the sum of all the input regularizers."""
with ops.name_scope(scope, 'sum_regularizer', [weights]) as name:
regularizer_tensors = [reg(weights) for reg in regularizer_list]
return math_ops.add_n(regularizer_tensors, name=name)
return sum_reg
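# Editor's note: the helper below is an illustrative sketch, not part of the
# original file. It composes L1 and L2 penalties with sum_regularizer, which
# is equivalent to l1_l2_regularizer with the same scales.
def _example_sum_regularizer_usage():
  weights = constant_op.constant([[3.0, -4.0]])
  elastic_net = sum_regularizer(
      [l1_regularizer(scale=0.01), l2_regularizer(scale=0.05)])
  # 0.01 * (3 + 4) + 0.05 * (3**2 + 4**2) / 2 = 0.07 + 0.625 = 0.695.
  return elastic_net(weights)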
def apply_regularization(regularizer, weights_list=None):
"""Returns the summed penalty by applying `regularizer` to the `weights_list`.
Adding a regularization penalty over the layer weights and embedding weights
can help prevent overfitting the training data. Regularization over layer
biases is less common/useful, but assuming proper data preprocessing/mean
subtraction, it usually shouldn't hurt much either.
Args:
regularizer: A function that takes a single `Tensor` argument and returns
a scalar `Tensor` output.
weights_list: List of weights `Tensors` or `Variables` to apply
`regularizer` over. Defaults to the `GraphKeys.WEIGHTS` collection if
`None`.
Returns:
A scalar representing the overall regularization penalty.
Raises:
ValueError: If `regularizer` does not return a scalar output, or if we find
no weights.
"""
if not weights_list:
weights_list = ops.get_collection(ops.GraphKeys.WEIGHTS)
if not weights_list:
raise ValueError('No weights to regularize.')
with ops.name_scope('get_regularization_penalty',
values=weights_list) as scope:
penalties = [regularizer(w) for w in weights_list]
penalties = [
p if p is not None else constant_op.constant(0.0) for p in penalties
]
for p in penalties:
if p.get_shape().ndims != 0:
raise ValueError('regularizer must return a scalar Tensor instead of a '
'Tensor with rank %d.' % p.get_shape().ndims)
summed_penalty = math_ops.add_n(penalties, name=scope)
ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, summed_penalty)
return summed_penalty
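# Editor's note: the helper below is an illustrative sketch, not part of the
# original file. It applies a single regularizer over an explicit weights
# list; apply_regularization also adds the summed penalty to the
# REGULARIZATION_LOSSES collection.
def _example_apply_regularization_usage():
  w1 = constant_op.constant([[1.0, -1.0]])
  w2 = constant_op.constant([2.0, 0.5])
  # 0.1 * (l2_loss(w1) + l2_loss(w2)) = 0.1 * (1.0 + 2.125) = 0.3125.
  return apply_regularization(l2_regularizer(scale=0.1),
                              weights_list=[w1, w2])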
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/regularizers.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizer ops for use in layers and tf.learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as vars_
from tensorflow.python.summary import summary
from tensorflow.python.training import moving_averages
from tensorflow.python.training import optimizer as optimizer_
from tensorflow.python.training import training as train
OPTIMIZER_CLS_NAMES = {
"Adagrad": train.AdagradOptimizer,
"Adam": train.AdamOptimizer,
"Ftrl": train.FtrlOptimizer,
"Momentum": lambda learning_rate: train.MomentumOptimizer(learning_rate, momentum=0.9), # pylint: disable=line-too-long
"RMSProp": train.RMSPropOptimizer,
"SGD": train.GradientDescentOptimizer,
}
OPTIMIZER_SUMMARIES = [
"learning_rate",
"loss",
"gradients",
"gradient_norm",
"global_gradient_norm",
]
def optimize_loss(loss,
global_step,
learning_rate,
optimizer,
gradient_noise_scale=None,
gradient_multipliers=None,
clip_gradients=None,
learning_rate_decay_fn=None,
update_ops=None,
variables=None,
name=None,
summaries=None,
colocate_gradients_with_ops=False,
increment_global_step=True):
"""Given loss and parameters for optimizer, returns a training op.
Various ways of passing optimizers include:
- by string specifying the name of the optimizer. See OPTIMIZER_CLS_NAMES
for full list. E.g. `optimize_loss(..., optimizer='Adam')`.
- by function taking learning rate `Tensor` as argument and returning an
`Optimizer` instance. E.g. `optimize_loss(...,
optimizer=lambda lr: tf.compat.v1.train.MomentumOptimizer(lr,
momentum=0.5))`.
Alternatively, if `learning_rate` is `None`, the function takes no
arguments. E.g. `optimize_loss(..., learning_rate=None,
optimizer=lambda: tf.compat.v1.train.MomentumOptimizer(0.5,
momentum=0.5))`.
- by a subclass of `Optimizer` having a single-argument constructor
(the argument is the learning rate), such as AdamOptimizer or
AdagradOptimizer. E.g. `optimize_loss(...,
optimizer=tf.compat.v1.train.AdagradOptimizer)`.
- by an instance of a subclass of `Optimizer`.
E.g., `optimize_loss(...,
optimizer=tf.compat.v1.train.AdagradOptimizer(0.5))`.
Args:
loss: Scalar `Tensor`.
global_step: Scalar int `Tensor`, step counter to update on each step unless
`increment_global_step` is `False`. If not supplied, it will be fetched
from the default graph (see `tf.compat.v1.train.get_global_step` for
details). If it has not been created, no step will be incremented with
each weight update. `learning_rate_decay_fn` requires `global_step`.
    learning_rate: float or `Tensor`, magnitude of the update per training
      step. Can be `None`.
    optimizer: string, class or optimizer instance, used as trainer. A string
      should be the name of an optimizer, like 'SGD', 'Adam' or 'Adagrad' (see
      the OPTIMIZER_CLS_NAMES constant for the full list). A class should be a
      subclass of `tf.Optimizer` that implements `compute_gradients` and
      `apply_gradients`. An optimizer instance should be an instantiation of a
      `tf.Optimizer` subclass with `compute_gradients` and `apply_gradients`
      methods.
gradient_noise_scale: float or None, adds 0-mean normal noise scaled by this
value.
gradient_multipliers: dict of variables or variable names to floats. If
present, gradients for specified variables will be multiplied by given
constant.
clip_gradients: float, callable or `None`. If a float is provided, a global
clipping is applied to prevent the norm of the gradient from exceeding
this value. Alternatively, a callable can be provided, e.g.,
`adaptive_clipping_fn()`. This callable takes a list of `(gradients,
variables)` tuples and returns the same thing with the gradients modified.
learning_rate_decay_fn: function, takes `learning_rate` and `global_step`
`Tensor`s, returns `Tensor`. Can be used to implement any learning rate
decay functions.
For example: `tf.compat.v1.train.exponential_decay`.
Ignored if `learning_rate` is not supplied.
update_ops: list of update `Operation`s to execute at each step. If `None`,
uses elements of UPDATE_OPS collection. The order of execution between
`update_ops` and `loss` is non-deterministic.
variables: list of variables to optimize or `None` to use all trainable
variables.
    name: The name for this operation, used to scope operations and summaries.
summaries: List of internal quantities to visualize on tensorboard. If not
set, the loss, the learning rate, and the global norm of the gradients
will be reported. The complete list of possible values is in
OPTIMIZER_SUMMARIES.
colocate_gradients_with_ops: If True, try colocating gradients with the
corresponding op.
increment_global_step: Whether to increment `global_step`. If your model
calls `optimize_loss` multiple times per training step (e.g. to optimize
different parts of the model), use this arg to avoid incrementing
`global_step` more times than necessary.
Returns:
Training op.
Raises:
ValueError: if:
* `loss` is an invalid type or shape.
* `global_step` is an invalid type or shape.
* `learning_rate` is an invalid type or value.
* `optimizer` has the wrong type.
* `clip_gradients` is neither float nor callable.
* `learning_rate` and `learning_rate_decay_fn` are supplied, but no
`global_step` is available.
* `gradients` is empty.
"""
loss = ops.convert_to_tensor(loss)
contrib_framework.assert_scalar(loss)
if global_step is None:
global_step = train.get_global_step()
else:
train.assert_global_step(global_step)
with vs.variable_scope(name, "OptimizeLoss", [loss, global_step]):
    # Update ops default to the UPDATE_OPS collection if not provided.
    if update_ops is None:
      update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
    # Make sure update ops are run before computing loss.
if update_ops:
loss = control_flow_ops.with_dependencies(list(update_ops), loss)
# Learning rate variable, with possible decay.
lr = None
if learning_rate is not None:
if (isinstance(learning_rate, ops.Tensor) and
learning_rate.get_shape().ndims == 0):
lr = learning_rate
elif isinstance(learning_rate, float):
if learning_rate < 0.0:
raise ValueError("Invalid learning_rate %s.", learning_rate)
lr = vs.get_variable(
"learning_rate", [],
trainable=False,
initializer=init_ops.constant_initializer(learning_rate))
else:
raise ValueError("Learning rate should be 0d Tensor or float. "
"Got %s of type %s" %
(str(learning_rate), str(type(learning_rate))))
if summaries is None:
summaries = ["loss", "learning_rate", "global_gradient_norm"]
else:
for summ in summaries:
if summ not in OPTIMIZER_SUMMARIES:
raise ValueError("Summaries should be one of [%s], you provided %s." %
(", ".join(OPTIMIZER_SUMMARIES), summ))
if learning_rate is not None and learning_rate_decay_fn is not None:
if global_step is None:
raise ValueError("global_step is required for learning_rate_decay_fn.")
lr = learning_rate_decay_fn(lr, global_step)
if "learning_rate" in summaries:
summary.scalar("learning_rate", lr)
# Create optimizer, given specified parameters.
if isinstance(optimizer, six.string_types):
if lr is None:
raise ValueError("Learning rate is None, but should be specified if "
"optimizer is string (%s)." % optimizer)
if optimizer not in OPTIMIZER_CLS_NAMES:
raise ValueError(
"Optimizer name should be one of [%s], you provided %s." %
(", ".join(OPTIMIZER_CLS_NAMES), optimizer))
opt = OPTIMIZER_CLS_NAMES[optimizer](learning_rate=lr)
elif (isinstance(optimizer, type) and
issubclass(optimizer, optimizer_.Optimizer)):
if lr is None:
raise ValueError("Learning rate is None, but should be specified if "
"optimizer is class (%s)." % optimizer)
opt = optimizer(learning_rate=lr)
elif isinstance(optimizer, optimizer_.Optimizer):
opt = optimizer
elif callable(optimizer):
if learning_rate is not None:
opt = optimizer(lr)
else:
opt = optimizer()
if not isinstance(opt, optimizer_.Optimizer):
raise ValueError("Unrecognized optimizer: function should return "
"subclass of Optimizer. Got %s." % str(opt))
else:
raise ValueError("Unrecognized optimizer: should be string, "
"subclass of Optimizer, instance of "
"subclass of Optimizer or function with one argument. "
"Got %s." % str(optimizer))
    # Use all trainable variables if no specific variables were given.
if variables is None:
variables = vars_.trainable_variables()
# Compute gradients.
gradients = opt.compute_gradients(
loss,
variables,
colocate_gradients_with_ops=colocate_gradients_with_ops)
# Optionally add gradient noise.
if gradient_noise_scale is not None:
gradients = _add_scaled_noise_to_gradients(gradients,
gradient_noise_scale)
# Multiply some gradients.
if gradient_multipliers is not None:
gradients = _multiply_gradients(gradients, gradient_multipliers)
if not gradients:
raise ValueError(
"Empty list of (gradient, var) pairs encountered. This is most "
"likely to be caused by an improper value of gradient_multipliers.")
if "global_gradient_norm" in summaries or "gradient_norm" in summaries:
summary.scalar("global_norm/gradient_norm",
clip_ops.global_norm(list(zip(*gradients))[0]))
# Optionally clip gradients by global norm.
if isinstance(clip_gradients, float):
gradients = _clip_gradients_by_norm(gradients, clip_gradients)
elif callable(clip_gradients):
gradients = clip_gradients(gradients)
elif clip_gradients is not None:
raise ValueError("Unknown type %s for clip_gradients" %
type(clip_gradients))
# Add scalar summary for loss.
if "loss" in summaries:
summary.scalar("loss", loss)
# Add histograms for variables, gradients and gradient norms.
for gradient, variable in gradients:
if isinstance(gradient, ops.IndexedSlices):
grad_values = gradient.values
else:
grad_values = gradient
if grad_values is not None:
var_name = variable.name.replace(":", "_")
if "gradients" in summaries:
summary.histogram("gradients/%s" % var_name, grad_values)
if "gradient_norm" in summaries:
summary.scalar("gradient_norm/%s" % var_name,
clip_ops.global_norm([grad_values]))
if clip_gradients is not None and ("global_gradient_norm" in summaries or
"gradient_norm" in summaries):
summary.scalar("global_norm/clipped_gradient_norm",
clip_ops.global_norm(list(zip(*gradients))[0]))
# Create gradient updates.
grad_updates = opt.apply_gradients(
gradients,
global_step=global_step if increment_global_step else None,
name="train")
# Ensure the train_tensor computes grad_updates.
train_tensor = control_flow_ops.with_dependencies([grad_updates], loss)
return train_tensor
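# Editor's note: the helper below is an illustrative sketch, not part of the
# original file. It shows a minimal graph-mode use of optimize_loss with a
# string optimizer name and global-norm clipping; the variable and loss are
# hypothetical.
def _example_optimize_loss_usage():
  global_step = train.get_or_create_global_step()
  weight = vs.get_variable(
      "example_weight", [],
      initializer=init_ops.constant_initializer(5.0))
  loss = math_ops.square(weight - 3.0)
  # The returned train op also evaluates to the loss.
  return optimize_loss(
      loss,
      global_step,
      learning_rate=0.1,
      optimizer="SGD",
      clip_gradients=1.0,
      summaries=["loss", "learning_rate"])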
def _clip_gradients_by_norm(grads_and_vars, clip_gradients):
"""Clips gradients by global norm."""
gradients, variables = zip(*grads_and_vars)
clipped_gradients, _ = clip_ops.clip_by_global_norm(gradients, clip_gradients)
return list(zip(clipped_gradients, variables))
def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
"""Find max_norm given norm and previous average."""
with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
log_norm = math_ops.log(norm + epsilon)
def moving_average(name, value, decay):
moving_average_variable = vs.get_variable(
name,
shape=value.get_shape(),
dtype=value.dtype,
initializer=init_ops.zeros_initializer(),
trainable=False)
return moving_averages.assign_moving_average(
moving_average_variable, value, decay, zero_debias=False)
# quicker adaptation at the beginning
if global_step is not None:
n = math_ops.cast(global_step, dtypes.float32)
decay = math_ops.minimum(decay, n / (n + 1.))
# update averages
mean = moving_average("mean", log_norm, decay)
sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)
variance = sq_mean - math_ops.square(mean)
std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
max_norms = math_ops.exp(mean + std_factor * std)
return max_norms, mean
def adaptive_clipping_fn(std_factor=2.,
decay=0.95,
static_max_norm=None,
global_step=None,
report_summary=False,
epsilon=1e-8,
name=None):
"""Adapt the clipping value using statistics on the norms.
  Implements adaptive gradient clipping as presented in section 3.2.1 of
https://arxiv.org/abs/1412.1602.
Keeps a moving average of the mean and std of the log(norm) of the gradient.
If the norm exceeds `exp(mean + std_factor*std)` then all gradients will be
rescaled such that the global norm becomes `exp(mean)`.
Args:
    std_factor: Python scalar (or tensor). `max_norm = exp(mean +
      std_factor*std)`.
decay: The smoothing factor of the moving averages.
static_max_norm: If provided, will threshold the norm to this value as an
extra safety.
global_step: Optional global_step. If provided, `decay = decay*n/(n+1)`.
This provides a quicker adaptation of the mean for the first steps.
    report_summary: If `True`, will add a scalar summary of the `max_norm`.
epsilon: Small value chosen to avoid zero variance.
    name: The name for this operation, used to scope operations and summaries.
Returns:
A function for applying gradient clipping.
"""
def gradient_clipping(grads_and_vars):
"""Internal function for adaptive clipping."""
grads, variables = zip(*grads_and_vars)
norm = clip_ops.global_norm(grads)
max_norm, log_mean = _adaptive_max_norm(norm, std_factor, decay,
global_step, epsilon, name)
# reports the max gradient norm for debugging
if report_summary:
summary.scalar("global_norm/adaptive_max_gradient_norm", max_norm)
# factor will be 1. if norm is smaller than max_norm
factor = array_ops.where(norm < max_norm, array_ops.ones_like(norm),
math_ops.exp(log_mean) / norm)
if static_max_norm is not None:
factor = math_ops.minimum(static_max_norm / norm, factor)
# apply factor
clipped_grads = []
for grad in grads:
if grad is None:
clipped_grads.append(None)
elif isinstance(grad, ops.IndexedSlices):
clipped_grads.append(
ops.IndexedSlices(grad.values * factor, grad.indices,
grad.dense_shape))
else:
clipped_grads.append(grad * factor)
return list(zip(clipped_grads, variables))
return gradient_clipping
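# Editor's note: the helper below is an illustrative sketch, not part of the
# original file. It wires adaptive_clipping_fn into optimize_loss as the
# clip_gradients callable; `loss` and `global_step` are assumed to be built
# elsewhere.
def _example_adaptive_clipping_usage(loss, global_step):
  return optimize_loss(
      loss,
      global_step,
      learning_rate=0.01,
      optimizer="Adam",
      clip_gradients=adaptive_clipping_fn(
          std_factor=2., decay=0.95, global_step=global_step))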
def _add_scaled_noise_to_gradients(grads_and_vars, gradient_noise_scale):
"""Adds scaled noise from a 0-mean normal distribution to gradients."""
gradients, variables = zip(*grads_and_vars)
noisy_gradients = []
for gradient in gradients:
if gradient is None:
noisy_gradients.append(None)
continue
if isinstance(gradient, ops.IndexedSlices):
gradient_shape = gradient.dense_shape
else:
gradient_shape = gradient.get_shape()
noise = random_ops.truncated_normal(gradient_shape) * gradient_noise_scale
noisy_gradients.append(gradient + noise)
return list(zip(noisy_gradients, variables))
def _multiply_gradients(grads_and_vars, gradient_multipliers):
"""Multiply specified gradients."""
multiplied_grads_and_vars = []
for grad, var in grads_and_vars:
if (grad is not None and
(var in gradient_multipliers or var.name in gradient_multipliers)):
key = var if var in gradient_multipliers else var.name
multiplier = gradient_multipliers[key]
if isinstance(grad, ops.IndexedSlices):
grad_values = grad.values * multiplier
grad = ops.IndexedSlices(grad_values, grad.indices, grad.dense_shape)
else:
grad *= math_ops.cast(multiplier, grad.dtype)
multiplied_grads_and_vars.append((grad, var))
return multiplied_grads_and_vars
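# Editor's note: the helper below is an illustrative sketch, not part of the
# original file. It shows gradient_multipliers (handled by _multiply_gradients
# above) scaling the gradient of one variable; `loss`, `global_step` and
# `embedding_var` are hypothetical inputs.
def _example_gradient_multipliers_usage(loss, global_step, embedding_var):
  return optimize_loss(
      loss,
      global_step,
      learning_rate=0.1,
      optimizer="SGD",
      # The gradient for `embedding_var` is scaled by 0.1 before the update;
      # keys may be variables or variable names.
      gradient_multipliers={embedding_var: 0.1})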
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/optimizers.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""embedding_ops tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import sys
import numpy as np
from tensorflow.contrib.layers.python.layers import embedding_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class SafeEmbeddingLookupSparseTest(test.TestCase):
def _random_weights(self, vocab_size=4, embed_dim=4, num_shards=1):
assert vocab_size > 0
assert embed_dim > 0
assert num_shards > 0
assert num_shards <= vocab_size
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1.0 / math.sqrt(vocab_size), dtype=dtypes.float32)
embedding_weights = list(variable_scope.get_variable(
"embedding_weights",
shape=[vocab_size, embed_dim],
partitioner=partitioned_variables.fixed_size_partitioner(num_shards),
initializer=initializer))
for w in embedding_weights:
w.initializer.run()
embedding_weights = [w.eval() for w in embedding_weights]
return embedding_weights
def _ids_and_weights_2d(self):
# Each row demonstrates a test case:
# Row 0: multiple valid ids, 1 invalid id, weighted mean
# Row 1: all ids are invalid (leaving no valid ids after pruning)
# Row 2: no ids to begin with
# Row 3: single id
# Row 4: all ids have <=0 weight
indices = [[0, 0], [0, 1], [0, 2], [1, 0], [3, 0], [4, 0], [4, 1]]
ids = [0, 1, -1, -1, 2, 0, 1]
weights = [1.0, 2.0, 1.0, 1.0, 3.0, 0.0, -0.5]
shape = [5, 4]
sparse_ids = sparse_tensor_lib.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(ids, dtypes.int64),
constant_op.constant(shape, dtypes.int64))
sparse_weights = sparse_tensor_lib.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(weights, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
return sparse_ids, sparse_weights
def _ids_and_weights_3d(self):
# Each (2-D) index demonstrates a test case:
# Index 0, 0: multiple valid ids, 1 invalid id, weighted mean
# Index 0, 1: all ids are invalid (leaving no valid ids after pruning)
# Index 0, 2: no ids to begin with
# Index 1, 0: single id
# Index 1, 1: all ids have <=0 weight
# Index 1, 2: no ids to begin with
indices = [[0, 0, 0], [0, 0, 1], [0, 0, 2], [0, 1, 0], [1, 0, 0], [1, 1, 0],
[1, 1, 1]]
ids = [0, 1, -1, -1, 2, 0, 1]
weights = [1.0, 2.0, 1.0, 1.0, 3.0, 0.0, -0.5]
shape = [2, 3, 4]
sparse_ids = sparse_tensor_lib.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(ids, dtypes.int64),
constant_op.constant(shape, dtypes.int64))
sparse_weights = sparse_tensor_lib.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(weights, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
return sparse_ids, sparse_weights
def test_safe_embedding_lookup_sparse_return_zero_vector(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_2d()
embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
embedding_weights, sparse_ids, sparse_weights).eval())
self.assertAllClose(
embedding_lookup_result,
[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
3.0, [0] * 4, [0] * 4, embedding_weights[0][2], [0] * 4])
def test_safe_embedding_lookup_sparse_return_special_vector(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_2d()
embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
embedding_weights, sparse_ids, sparse_weights, default_id=3).eval())
self.assertAllClose(
embedding_lookup_result,
[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
3.0, embedding_weights[0][3], embedding_weights[0][3],
embedding_weights[0][2], embedding_weights[0][3]])
def test_safe_embedding_lookup_sparse_no_weights(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, _ = self._ids_and_weights_2d()
embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
embedding_weights, sparse_ids, None).eval())
self.assertAllClose(
embedding_lookup_result,
[(embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4,
[0] * 4, embedding_weights[0][2], (
embedding_weights[0][0] + embedding_weights[0][1]) / 2.0])
def test_safe_embedding_lookup_sparse_partitioned(self):
with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, _ = self._ids_and_weights_2d()
embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
embedding_weights, sparse_ids, None).eval())
embedding_weights = list(itertools.chain(*embedding_weights))
self.assertAllClose(embedding_lookup_result,
[(embedding_weights[0] + embedding_weights[1]) / 2.0,
[0] * 4, [0] * 4, embedding_weights[2],
(embedding_weights[0] + embedding_weights[1]) / 2.0])
def test_safe_embedding_lookup_sparse_partitioned_inconsistent_weights(self):
with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, sparse_weights = self._ids_and_weights_2d()
embedding_weights[1] = embedding_weights[1].astype(np.float64)
self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
embedding_weights, sparse_ids)
embedding_weights = [
constant_op.constant(w, dtype=dtypes.float64)
for w in embedding_weights
]
self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
embedding_weights, sparse_ids, sparse_weights)
def test_safe_embedding_lookup_sparse_3d_return_zero_vector(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_3d()
embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
embedding_weights, sparse_ids, sparse_weights).eval())
self.assertAllClose(embedding_lookup_result, [[
(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) / 3.0,
[0] * 4, [0] * 4
], [embedding_weights[0][2], [0] * 4, [0] * 4]])
def test_safe_embedding_lookup_sparse_3d_return_special_vector(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_3d()
embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
embedding_weights, sparse_ids, sparse_weights, default_id=3).eval())
self.assertAllClose(
embedding_lookup_result,
[[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
3.0, embedding_weights[0][3], embedding_weights[0][3]], [
embedding_weights[0][2], embedding_weights[0][3],
embedding_weights[0][3]
]])
def test_safe_embedding_lookup_sparse_3d_no_weights(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, _ = self._ids_and_weights_3d()
embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
embedding_weights, sparse_ids, None).eval())
self.assertAllClose(embedding_lookup_result, [[(
embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4, [
0
] * 4], [
embedding_weights[0][2],
(embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4
]])
def test_safe_embedding_lookup_sparse_3d_partitioned(self):
with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, _ = self._ids_and_weights_3d()
embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
embedding_weights, sparse_ids, None).eval())
embedding_weights = list(itertools.chain(*embedding_weights))
self.assertAllClose(embedding_lookup_result, [[
(embedding_weights[0] + embedding_weights[1]) / 2.0, [0] * 4, [0] * 4
], [
embedding_weights[2],
(embedding_weights[0] + embedding_weights[1]) / 2.0, [0] * 4
]])
def test_safe_embedding_lookup_sparse_3d_partitioned_inconsistent_weights(
self):
with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, sparse_weights = self._ids_and_weights_3d()
embedding_weights[1] = embedding_weights[1].astype(np.float64)
self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
embedding_weights, sparse_ids)
embedding_weights = [
constant_op.constant(w, dtype=dtypes.float64)
for w in embedding_weights
]
self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
embedding_weights, sparse_ids, sparse_weights)
# pylint: disable=invalid-name
def local_variable_scope():
"""Create a variable scope named like the caller function."""
return variable_scope.variable_scope(sys._getframe(1).f_code.co_name)
# pylint: enable=invalid-name
class ScatteredEmbeddingLookupTest(test.TestCase):
def setUp(self):
random_seed.set_random_seed(1)
def _random_weights(self, size=50, num_shards=1):
assert size > 0
assert num_shards > 0
assert num_shards <= size
embedding_weights = list(variable_scope.get_variable(
"embedding_weights",
shape=[size],
partitioner=partitioned_variables.fixed_size_partitioner(num_shards),
initializer=init_ops.truncated_normal_initializer(
mean=0.0, stddev=1.0, dtype=dtypes.float32)))
for w in embedding_weights:
w.initializer.run()
return embedding_weights
def test_scattered_embedding_consistency(self):
with self.cached_session(), local_variable_scope():
embedding_weights = self._random_weights()
values = constant_op.constant(["foo", "foo"])
embedding_lookup_result = embedding_ops.scattered_embedding_lookup(
embedding_weights, values, dimension=10).eval()
self.assertAllEqual(embedding_lookup_result.shape, [2, 10])
self.assertAllEqual(embedding_lookup_result[0],
embedding_lookup_result[1])
def test_scattered_embedding_multiple_partition(self):
with self.cached_session(), local_variable_scope():
embedding_weights = self._random_weights(num_shards=7)
values = constant_op.constant([4, 4, 5])
embedding_lookup_result = embedding_ops.scattered_embedding_lookup(
embedding_weights, values, dimension=5).eval()
self.assertAllEqual(embedding_lookup_result.shape, [3, 5])
self.assertAllEqual(embedding_lookup_result[0],
embedding_lookup_result[1])
# Different embedding expected for different value.
embedding_diff = np.min(
(embedding_lookup_result[2] - embedding_lookup_result[0])**2)
self.assertGreater(embedding_diff, 0)
def test_scattered_embedding_coverage(self):
with self.cached_session(), local_variable_scope():
size = 8
embedding_weights = self._random_weights(size=size, num_shards=3)
values = constant_op.constant(["foo"])
# Large embedding dimension to cover the full range of weights.
embedding_lookup_result = embedding_ops.scattered_embedding_lookup(
embedding_weights, values, dimension=100).eval()
self.assertEqual(len(np.unique(embedding_lookup_result[0])), size)
def test_scattered_embedding_multi_dimension(self):
with self.cached_session(), local_variable_scope():
embedding_weights = self._random_weights()
values = constant_op.constant([["foo", "bar", "bar"],
["bar", "bar", "foo"]])
embedding_lookup_result = embedding_ops.scattered_embedding_lookup(
embedding_weights, values, dimension=10).eval()
self.assertAllEqual(embedding_lookup_result.shape, [2, 3, 10])
self.assertAllEqual(embedding_lookup_result[0][0],
embedding_lookup_result[1][2])
def test_scattered_embedding_lookup_sparse(self):
with self.cached_session(), local_variable_scope():
embedding_weights = self._random_weights(num_shards=3)
sparse_tensor = sparse_tensor_lib.SparseTensor(
values=["foo", "bar", "foo", "bar"],
indices=[[0, 0], [1, 0], [1, 1], [3, 0]],
dense_shape=[5, 2])
embedding_lookup_result = (
embedding_ops.scattered_embedding_lookup_sparse(
embedding_weights, sparse_tensor, dimension=5,
combiner="mean").eval())
self.assertAllEqual(embedding_lookup_result.shape, [5, 5])
# Same non-zero embedding for the empty rows filled with a default value.
self.assertAllEqual(embedding_lookup_result[2],
embedding_lookup_result[4])
embedding_norm = np.sum(embedding_lookup_result[2]**2)
self.assertGreater(embedding_norm, 0)
self.assertAllEqual(embedding_lookup_result[1], 0.5 * (
embedding_lookup_result[0] + embedding_lookup_result[3]))
def test_embedding_lookup_unique(self):
d_embed = 5
n_embed = 10
idx_shape = (2, 3, 4)
embeds = np.random.randn(n_embed, d_embed)
idx = np.random.randint(0, n_embed, idx_shape)
with self.cached_session(), local_variable_scope():
embedded_np = embeds[idx]
embedded_tf = embedding_ops.embedding_lookup_unique(embeds, idx).eval()
self.assertEqual(embedded_np.shape, embedded_tf.shape)
np.testing.assert_almost_equal(embedded_np, embedded_tf)
def test_embedding_lookup_unique_param3d(self):
embeds = np.random.randn(5, 3, 3)
idx = np.random.randint(0, 5, 10)
idx2d = np.random.randint(0, 5, (10, 2))
with self.cached_session(), local_variable_scope():
embedded_np = embeds[idx]
embedded_np2d = embeds[idx2d]
embedded_tf = embedding_ops.embedding_lookup_unique(embeds, idx).eval()
embedded_tf_lst = embedding_ops.embedding_lookup_unique([embeds],
idx).eval()
embedded_tf2d = embedding_ops.embedding_lookup_unique(embeds,
idx2d).eval()
self.assertEqual(embedded_np.shape, embedded_tf.shape)
np.testing.assert_almost_equal(embedded_np, embedded_tf)
self.assertEqual(embedded_np.shape, embedded_tf_lst.shape)
np.testing.assert_almost_equal(embedded_np, embedded_tf_lst)
self.assertEqual(embedded_np2d.shape, embedded_tf2d.shape)
np.testing.assert_almost_equal(embedded_np2d, embedded_tf2d)
class SampledScatteredEmbeddingLookupTest(test.TestCase):
def setUp(self):
random_seed.set_random_seed(1)
self._hash_key = 1
def _random_weights(self, size=50, num_shards=1):
assert size > 0
assert num_shards > 0
assert num_shards <= size
embedding_weights = list(variable_scope.get_variable(
"embedding_weights",
shape=[size],
partitioner=partitioned_variables.fixed_size_partitioner(num_shards),
initializer=init_ops.truncated_normal_initializer(
mean=0.0, stddev=1.0, dtype=dtypes.float32)))
for w in embedding_weights:
w.initializer.run()
return embedding_weights
def test_hashed_embedding_consistency(self):
with self.cached_session(), local_variable_scope():
embedding_weights = self._random_weights()
values = constant_op.constant(["foo", "foo"])
# The first three sampled_candidates are equal, so the first three
# embedding weights will be equal.
sampled_candidates = constant_op.constant([[1, 3, 4, 6], [1, 3, 4, 7]])
embedding_lookup_result = ( # pylint: disable=protected-access
embedding_ops._sampled_scattered_embedding_lookup(
embedding_weights,
values,
sampled_candidates=sampled_candidates,
hash_key=self._hash_key).eval())
self.assertAllEqual(embedding_lookup_result.shape, [2, 4])
self.assertAllEqual(embedding_lookup_result[0][:3],
embedding_lookup_result[1][:3])
self.assertNotEqual(embedding_lookup_result[0][3],
embedding_lookup_result[1][3])
def test_hashed_embedding_multi_dimension(self):
with self.cached_session(), local_variable_scope():
embedding_weights = self._random_weights()
values = constant_op.constant([["foo", "bar", "bar"],
["bar", "bar", "foo"]])
sampled_candidates = constant_op.constant(
[[[1, 3, 4, 6], [1, 7, 8, 9], [1, 7, 8, 9]],
[[1, 7, 8, 9], [1, 7, 8, 9], [1, 3, 4, 6]]])
embedding_lookup_result = ( # pylint: disable=protected-access
embedding_ops._sampled_scattered_embedding_lookup(
embedding_weights,
values,
sampled_candidates=sampled_candidates,
hash_key=self._hash_key).eval())
self.assertAllEqual(embedding_lookup_result.shape, [2, 3, 4])
self.assertAllEqual(embedding_lookup_result[0][0],
embedding_lookup_result[1][2])
invalid_indices = constant_op.constant([[[1, 3, 4, 6], [1, 7, 8, 9]],
[[1, 7, 8, 9], [1, 7, 8, 9]]])
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, (
r"\[The shape of sampled_candidates: \] \[2 2 4\] "
r"\[ does not match the shape of values: \] \[2 3\]")):
# pylint: disable=protected-access
embedding_ops._sampled_scattered_embedding_lookup(
embedding_weights, values,
sampled_candidates=invalid_indices).eval()
class SampledScatteredEmbeddingLookupSparseTest(test.TestCase):
def setUp(self):
random_seed.set_random_seed(1)
self._hash_key = 1
def test_output_shape(self):
"""Verifies the shape of the output tensor."""
with self.cached_session():
sp_values = sparse_tensor_lib.SparseTensor(
values=["a", "a", "b", "c", "d", "e", "f"],
indices=[[1, 0], [2, 0], [2, 1], [2, 2], [2, 3], [2, 4], [2, 5]],
dense_shape=[3, 6])
params = constant_op.constant([.1, .2, .3])
result = embedding_ops._sampled_scattered_embedding_lookup_sparse(
params, sp_values, dimension=4, hash_key=self._hash_key)
self.assertEqual(result.eval().shape, (3, 4))
def test_output_values(self):
"""Verifies the values in a trivial case."""
with self.cached_session():
sp_values = sparse_tensor_lib.SparseTensor(
values=["a"], indices=[[1, 0]], dense_shape=[3, 1])
params = constant_op.constant([.1, .2, .3])
result = embedding_ops._sampled_scattered_embedding_lookup_sparse(
params, sp_values, dimension=5, hash_key=self._hash_key)
self.assertAllClose(result.eval(), [[0., 0., 0., 0.,
0.], [.3, .2, .2, .3, .1],
[0., 0., 0., 0., 0.]])
def test_output_values_with_sampled_candidates(self):
"""Verifies the values for given sampled_candidates."""
with self.cached_session():
sp_values = sparse_tensor_lib.SparseTensor(
values=["a", "a", "b", "c", "d", "e", "f"],
indices=[[1, 0], [2, 0], [2, 1], [2, 2], [2, 3], [2, 4], [2, 5]],
dense_shape=[3, 6])
params = constant_op.constant([.1, .2, .3])
sampled_candidates = [[1, 0], [2, 1], [3, 2]]
sampled_result = embedding_ops._sampled_scattered_embedding_lookup_sparse(
params,
sp_values,
sampled_candidates=constant_op.constant(sampled_candidates),
hash_key=self._hash_key)
full_result = embedding_ops._sampled_scattered_embedding_lookup_sparse(
params, sp_values, dimension=4, hash_key=self._hash_key)
sampled_result_val = sampled_result.eval()
full_result_val = full_result.eval()
self.assertEqual(sampled_result_val.shape, (3, 2))
for i in range(len(sampled_candidates)):
self.assertAllClose(sampled_result_val[i],
full_result_val[i, sampled_candidates[i]])
def test_output_values_with_sign_hash(self):
"""Verifies the values in a trivial case with hash_signs=True."""
with self.cached_session():
sp_values = sparse_tensor_lib.SparseTensor(
values=["a"], indices=[[1, 0]], dense_shape=[3, 1])
params = constant_op.constant([.1, .1, .1])
result = embedding_ops._sampled_scattered_embedding_lookup_sparse(
params,
sp_values,
dimension=4,
with_sign_hash=True,
hash_key=self._hash_key)
self.assertAllClose(result.eval(), [[0., 0., 0., 0.], [-.1, -.1, -.1, .1],
[0., 0., 0., 0.]])
def test_distributive_property(self):
"""Verifies the distributive property of matrix multiplication."""
with self.cached_session():
params = constant_op.constant([.1, .2, .3])
sp_values_a = sparse_tensor_lib.SparseTensor(
values=["a"], indices=[[0, 0]], dense_shape=[3, 1])
sp_values_b = sparse_tensor_lib.SparseTensor(
values=["b"], indices=[[2, 0]], dense_shape=[3, 1])
sp_values_c = sparse_tensor_lib.SparseTensor(
values=["c"], indices=[[2, 0]], dense_shape=[3, 1])
sp_values = sparse_tensor_lib.SparseTensor(
values=["a", "b", "c"],
indices=[[0, 0], [2, 0], [2, 1]],
dense_shape=[3, 2])
result_a = embedding_ops._sampled_scattered_embedding_lookup_sparse(
params, sp_values_a, dimension=4, hash_key=self._hash_key)
result_b = embedding_ops._sampled_scattered_embedding_lookup_sparse(
params, sp_values_b, dimension=4, hash_key=self._hash_key)
result_c = embedding_ops._sampled_scattered_embedding_lookup_sparse(
params, sp_values_c, dimension=4, hash_key=self._hash_key)
result = embedding_ops._sampled_scattered_embedding_lookup_sparse(
params, sp_values, dimension=4, hash_key=self._hash_key)
result_abc = math_ops.add_n([result_a, result_b, result_c])
self.assertAllClose(result.eval(), result_abc.eval())
def _PName(param_id):
return "p" + str(param_id)
def _EmbeddingParams(num_shards,
vocab_size,
dtype=dtypes.float32,
shape=None,
use_shapeless_placeholder=False):
p = []
params = {}
feed_dict = {}
if not shape:
shape = [10]
for i in range(num_shards):
shard_shape = [vocab_size // num_shards] + shape
if i < vocab_size % num_shards: # Excess goes evenly on the first shards
shard_shape[0] += 1
param_name = _PName(i)
if use_shapeless_placeholder:
param = array_ops.placeholder(dtype, shape=None, name=param_name)
else:
param = constant_op.constant(
1.0, shape=shard_shape, dtype=dtype, name=param_name)
p.append(param)
np_type = "f" if dtype == dtypes.float32 else "d"
val = (np.random.rand(*shard_shape).astype(np_type)) + 1
params[param_name + ":0"] = val
feed_dict[param.name] = val
return p, params, feed_dict
def _EmbeddingResult(params,
id_vals,
num_shards,
vocab_size,
partition_strategy="mod",
weight_vals=None):
if weight_vals is None:
weight_vals = np.copy(id_vals)
weight_vals.fill(1)
values = []
weights = []
weights_squared = []
for ids, wts in zip(id_vals, weight_vals):
value_aggregation = None
weight_aggregation = None
squared_weight_aggregation = None
if isinstance(ids, compat.integral_types):
ids = [ids]
wts = [wts]
for i, weight_value in zip(ids, wts):
if partition_strategy == "mod":
val = np.copy(params[_PName(i % num_shards) + ":0"][
i // num_shards, :]) * weight_value
elif partition_strategy == "div":
ids_per_partition, extras = divmod(vocab_size, num_shards)
threshold = extras * (ids_per_partition + 1)
if i < threshold:
partition = i // (ids_per_partition + 1)
offset = i % (ids_per_partition + 1)
else:
partition = extras + (i - threshold) // ids_per_partition
offset = (i - threshold) % ids_per_partition
val = np.copy(
params[_PName(partition) + ":0"][offset, :]) * weight_value
else:
assert False
if value_aggregation is None:
assert weight_aggregation is None
assert squared_weight_aggregation is None
value_aggregation = val
weight_aggregation = weight_value
squared_weight_aggregation = weight_value * weight_value
else:
assert weight_aggregation is not None
assert squared_weight_aggregation is not None
value_aggregation += val
weight_aggregation += weight_value
squared_weight_aggregation += weight_value * weight_value
values.append(value_aggregation)
weights.append(weight_aggregation)
weights_squared.append(squared_weight_aggregation)
values = np.array(values).astype(np.float32)
weights = np.array(weights).astype(np.float32)
weights_squared = np.array(weights_squared).astype(np.float32)
return values, weights, weights_squared
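# Editor's note: the helper below is an illustrative sketch, not part of the
# original file. It isolates the shard/offset arithmetic used by
# _EmbeddingResult above for a single id; the default values are hypothetical.
def _example_partition_arithmetic(i=10, vocab_size=13, num_shards=5):
  # "mod" strategy: shard by remainder, offset by integer division.
  mod_partition, mod_offset = i % num_shards, i // num_shards
  # "div" strategy: the first `extras` shards each hold one extra id.
  ids_per_partition, extras = divmod(vocab_size, num_shards)
  threshold = extras * (ids_per_partition + 1)
  if i < threshold:
    div_partition = i // (ids_per_partition + 1)
    div_offset = i % (ids_per_partition + 1)
  else:
    div_partition = extras + (i - threshold) // ids_per_partition
    div_offset = (i - threshold) % ids_per_partition
  # For the defaults this returns ((0, 2), (3, 1)).
  return (mod_partition, mod_offset), (div_partition, div_offset)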
class EmbeddingLookupSparseWithDistributedAggregationTest(test.TestCase):
def _RandomIdsAndWeights(self, batch_size, vocab_size):
max_val_per_entry = 6
vals_per_batch_entry = np.random.randint(
1, max_val_per_entry, size=batch_size)
num_vals = np.sum(vals_per_batch_entry)
ids = np.random.randint(vocab_size, size=num_vals)
weights = 1 + np.random.rand(num_vals)
indices = []
for batch_entry, num_val in enumerate(vals_per_batch_entry):
for val_index in range(num_val):
indices.append([batch_entry, val_index])
shape = [batch_size, max_val_per_entry]
sp_ids = sparse_tensor_lib.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(ids, dtypes.int32),
constant_op.constant(shape, dtypes.int64))
sp_weights = sparse_tensor_lib.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(weights, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
return sp_ids, sp_weights, ids, weights, vals_per_batch_entry
def _GroupByBatchEntry(self, vals, vals_per_batch_entry):
grouped_vals = []
index = 0
for num_val in vals_per_batch_entry:
grouped_vals.append(list(vals[index:(index + num_val)]))
index += num_val
return grouped_vals
def testEmbeddingLookupSparse(self):
vocab_size = 13
batch_size = 10
param_shape = [2, 5]
expected_lookup_result_shape = param_shape
sp_ids, sp_weights, ids, weights, vals_per_batch_entry = (
self._RandomIdsAndWeights(batch_size, vocab_size))
grouped_ids = self._GroupByBatchEntry(ids, vals_per_batch_entry)
grouped_weights = self._GroupByBatchEntry(weights, vals_per_batch_entry)
grouped_ignored_weights = self._GroupByBatchEntry(
np.ones(np.sum(vals_per_batch_entry)), vals_per_batch_entry)
for num_shards, combiner, dtype, ignore_weights in itertools.product(
[1, 5], ["sum", "mean", "sqrtn"], [dtypes.float32,
dtypes.float64], [True, False]):
with self.cached_session():
p, params, feed_dict = _EmbeddingParams(
num_shards, vocab_size, shape=param_shape, dtype=dtype)
embedding_sum = \
embedding_ops.embedding_lookup_sparse_with_distributed_aggregation(
p,
sp_ids,
None if ignore_weights else sp_weights,
combiner=combiner)
self.assertEqual(embedding_sum.get_shape().as_list()[1:],
expected_lookup_result_shape)
tf_embedding_sum = embedding_sum.eval(feed_dict=feed_dict)
np_embedding_sum, np_weight_sum, np_weight_sq_sum = _EmbeddingResult(
params,
grouped_ids,
num_shards,
vocab_size,
weight_vals=grouped_ignored_weights
if ignore_weights else grouped_weights)
if combiner == "mean":
np_embedding_sum /= np.reshape(np_weight_sum, (batch_size, 1, 1))
if combiner == "sqrtn":
np_embedding_sum /= np.reshape(
np.sqrt(np_weight_sq_sum), (batch_size, 1, 1))
self.assertAllClose(np_embedding_sum, tf_embedding_sum)
def testGradientsEmbeddingLookupSparse(self):
vocab_size = 12
batch_size = 4
param_shape = [2, 3]
sp_ids, sp_weights, _, _, _ = (self._RandomIdsAndWeights(
batch_size, vocab_size))
for num_shards, combiner, dtype, ignore_weights in itertools.product(
[1, 3], ["sum", "mean", "sqrtn"], [dtypes.float32,
dtypes.float64], [True, False]):
with self.cached_session():
x, params, _ = _EmbeddingParams(
num_shards, vocab_size, shape=param_shape, dtype=dtype)
y = embedding_ops.embedding_lookup_sparse_with_distributed_aggregation(
x,
sp_ids,
None if ignore_weights else sp_weights,
combiner=combiner)
x_name = [_PName(i) for i in range(num_shards)]
x_init_value = [params[x_n + ":0"] for x_n in x_name]
x_shape = [i.shape for i in x_init_value]
y_shape = [batch_size] + list(params[_PName(0) + ":0"].shape[1:])
err = gradient_checker.compute_gradient_error(
x, x_shape, y, y_shape, x_init_value=x_init_value)
self.assertLess(err, 1e-5 if dtype == dtypes.float64 else 2e-3)
def testIncompatibleShapes(self):
with self.cached_session():
x, _, _ = _EmbeddingParams(1, 10, dtype=dtypes.float32)
sp_ids = sparse_tensor_lib.SparseTensor(
constant_op.constant([[0, 0], [0, 1], [1, 0]], dtypes.int64),
constant_op.constant([0, 1, 2], dtypes.int32),
constant_op.constant([2, 2], dtypes.int64))
sp_weights = sparse_tensor_lib.SparseTensor(
constant_op.constant([[0, 0], [0, 1]], dtypes.int64),
constant_op.constant([12.0, 5.0], dtypes.float32),
constant_op.constant([1, 2], dtypes.int64))
with self.assertRaises(ValueError):
embedding_ops.embedding_lookup_sparse_with_distributed_aggregation(
x, sp_ids, sp_weights, combiner="mean")
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/embedding_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Weight initializers for use with layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import random_ops
__all__ = ['xavier_initializer', 'xavier_initializer_conv2d',
'variance_scaling_initializer']
def xavier_initializer(uniform=True, seed=None, dtype=dtypes.float32):
"""Returns an initializer performing "Xavier" initialization for weights.
This function implements the weight initialization from:
Xavier Glorot and Yoshua Bengio (2010):
[Understanding the difficulty of training deep feedforward neural
networks. International conference on artificial intelligence and
statistics.](
http://www.jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)
This initializer is designed to keep the scale of the gradients roughly the
same in all layers. In uniform distribution this ends up being the range:
`x = sqrt(6. / (in + out)); [-x, x]` and for normal distribution a standard
deviation of `sqrt(2. / (in + out))` is used.
Args:
uniform: Whether to use uniform or normal distributed random initialization.
seed: A Python integer. Used to create random seeds. See
`tf.compat.v1.set_random_seed` for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
An initializer for a weight matrix.
"""
return variance_scaling_initializer(factor=1.0, mode='FAN_AVG',
uniform=uniform, seed=seed, dtype=dtype)
xavier_initializer_conv2d = xavier_initializer
def variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False,
seed=None, dtype=dtypes.float32):
"""Returns an initializer that generates tensors without scaling variance.
When initializing a deep network, it is in principle advantageous to keep
the scale of the input variance constant, so it does not explode or diminish
  by reaching the final layer. This initializer uses the following formula:
  ```python
    if mode == 'FAN_IN':    # Count only number of input connections.
      n = fan_in
    elif mode == 'FAN_OUT': # Count only number of output connections.
      n = fan_out
    elif mode == 'FAN_AVG': # Average number of input and output connections.
      n = (fan_in + fan_out) / 2.0
truncated_normal(shape, 0.0, stddev=sqrt(factor / n))
```
* To get [Delving Deep into Rectifiers](
      http://arxiv.org/pdf/1502.01852v1.pdf) (also known as the "MSRA
initialization"), use (Default):<br/>
`factor=2.0 mode='FAN_IN' uniform=False`
* To get [Convolutional Architecture for Fast Feature Embedding](
http://arxiv.org/abs/1408.5093), use:<br/>
`factor=1.0 mode='FAN_IN' uniform=True`
* To get [Understanding the difficulty of training deep feedforward neural
networks](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf),
use:<br/>
`factor=1.0 mode='FAN_AVG' uniform=True.`
* To get `xavier_initializer` use either:<br/>
`factor=1.0 mode='FAN_AVG' uniform=True`, or<br/>
`factor=1.0 mode='FAN_AVG' uniform=False`.
Args:
factor: Float. A multiplicative factor.
mode: String. 'FAN_IN', 'FAN_OUT', 'FAN_AVG'.
uniform: Whether to use uniform or normal distributed random initialization.
seed: A Python integer. Used to create random seeds. See
`tf.compat.v1.set_random_seed` for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
An initializer that generates tensors with unit variance.
Raises:
ValueError: if `dtype` is not a floating point type.
TypeError: if `mode` is not in ['FAN_IN', 'FAN_OUT', 'FAN_AVG'].
"""
if not dtype.is_floating:
raise TypeError('Cannot create initializer for non-floating point type.')
if mode not in ['FAN_IN', 'FAN_OUT', 'FAN_AVG']:
    raise TypeError('Unknown mode %s [FAN_IN, FAN_OUT, FAN_AVG]' % mode)
# pylint: disable=unused-argument
def _initializer(shape, dtype=dtype, partition_info=None):
"""Initializer function."""
if not dtype.is_floating:
raise TypeError('Cannot create initializer for non-floating point type.')
# Estimating fan_in and fan_out is not possible to do perfectly, but we try.
# This is the right thing for matrix multiply and convolutions.
if shape:
fan_in = float(shape[-2]) if len(shape) > 1 else float(shape[-1])
fan_out = float(shape[-1])
else:
fan_in = 1.0
fan_out = 1.0
for dim in shape[:-2]:
fan_in *= float(dim)
fan_out *= float(dim)
if mode == 'FAN_IN':
# Count only number of input connections.
n = fan_in
elif mode == 'FAN_OUT':
# Count only number of output connections.
n = fan_out
elif mode == 'FAN_AVG':
# Average number of inputs and output connections.
n = (fan_in + fan_out) / 2.0
if uniform:
      # To get stddev = math.sqrt(factor / n), the uniform limit is
      # sqrt(3) * stddev, since a uniform [-a, a] has stddev a / sqrt(3).
limit = math.sqrt(3.0 * factor / n)
return random_ops.random_uniform(shape, -limit, limit,
dtype, seed=seed)
else:
      # To get stddev = math.sqrt(factor / n), the raw stddev is scaled by
      # sqrt(1.3) to compensate for the truncation at two standard deviations.
trunc_stddev = math.sqrt(1.3 * factor / n)
return random_ops.truncated_normal(shape, 0.0, trunc_stddev, dtype,
seed=seed)
# pylint: enable=unused-argument
return _initializer
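# Editor's note: the helper below is an illustrative sketch, not part of the
# original file. It materializes tensors from the two most common
# configurations described above; the shape is hypothetical, and in practice
# the initializer is usually passed to tf.compat.v1.get_variable.
def _example_variance_scaling_usage():
  # MSRA / He initialization (the default): truncated normal with
  # stddev roughly sqrt(2 / fan_in).
  msra = variance_scaling_initializer()([256, 128], dtype=dtypes.float32)
  # Xavier / Glorot (uniform): equivalent to xavier_initializer(uniform=True).
  glorot = variance_scaling_initializer(
      factor=1.0, mode='FAN_AVG', uniform=True)([256, 128],
                                                 dtype=dtypes.float32)
  return msra, glorot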
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/initializers.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to FeatureColumn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import embedding_ops
from tensorflow.contrib.layers.python.layers import feature_column as fc
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_py
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
def _maybe_reshape_input_tensor(tensor, column_name, output_rank):
"""Reshape the input tensor by the following rule.
1. If `output_rank > input_rank + 1`, raise a `ValueError`.
2. If `output_rank == input_rank + 1`, expand the tensor by one dimension.
3. If `output_rank == input_rank`, do nothing.
4. If `output_rank < input_rank`, flatten the inner dimensions of the tensor.
Args:
tensor: A Tensor or SparseTensor to be reshaped.
column_name: A string name of the feature column for the tensor.
output_rank: the desired rank of the tensor.
Returns:
A reshaped Tensor or SparseTensor.
Raises:
ValueError: if `output_rank > input_rank + 1` for the input tensor.
"""
input_rank = tensor.get_shape().ndims
if input_rank is None and isinstance(tensor, sparse_tensor_py.SparseTensor):
# Try to get the rank of a sparse tensor by its dense_shape's shape.
input_rank = tensor.dense_shape.get_shape().as_list()[0]
if input_rank is None:
raise ValueError('Error while processing column {}. Rank of input Tensor '
'can not be None.'.format(column_name))
if output_rank > input_rank + 1:
raise ValueError('Error while processing column {}. Rank of input Tensor '
                     '({}) must be at least output_rank ({}) - 1. For '
'example, sequence data should typically be 3 '
'dimensional (rank 3) while non-sequence data is '
'typically 2 dimensional (rank 2).'.format(
column_name, input_rank, output_rank))
elif output_rank == input_rank + 1:
# Expand the tensor's shape by 1 dimension.
if isinstance(tensor, sparse_tensor_py.SparseTensor):
output_shape = array_ops.concat([tensor.dense_shape, [1]], 0)
return sparse_ops.sparse_reshape(tensor, output_shape)
else:
reshaped = array_ops.expand_dims(tensor, -1)
# Try to calculate the new shape.
static_shape = tensor.get_shape()
if static_shape is not None and static_shape.dims is not None:
reshaped.set_shape(static_shape.as_list() + [1])
return reshaped
elif output_rank < input_rank:
return layers._inner_flatten(tensor, output_rank) # pylint: disable=protected-access
else:
return tensor
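# --- Editor's illustrative sketch (not part of the original module) ----------
# A quick walkthrough of the reshape rules above for a dense tensor of static
# shape [batch, 5] belonging to a hypothetical column 'price':
#
#   _maybe_reshape_input_tensor(tensor, 'price', output_rank=3)
#       -> shape [batch, 5, 1]   (rule 2: expand by one trailing dimension)
#   _maybe_reshape_input_tensor(tensor, 'price', output_rank=2)
#       -> shape [batch, 5]      (rule 3: unchanged)
#   _maybe_reshape_input_tensor(tensor, 'price', output_rank=4)
#       -> ValueError            (rule 1: output_rank > input_rank + 1)
#
# For a tensor of shape [batch, 5, 3], output_rank=2 flattens the inner
# dimensions to [batch, 15] via layers._inner_flatten (rule 4).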
def _input_from_feature_columns(columns_to_tensors,
feature_columns,
weight_collections,
trainable,
scope,
output_rank,
default_name,
cols_to_outs=None):
"""Implementation of `input_from(_sequence)_feature_columns`."""
columns_to_tensors = columns_to_tensors.copy()
check_feature_columns(feature_columns)
if cols_to_outs is not None and not isinstance(cols_to_outs, dict):
    raise ValueError('cols_to_outs must be a dict or None.')
with variable_scope.variable_scope(scope,
default_name=default_name,
values=columns_to_tensors.values()):
output_tensors = []
transformer = _Transformer(columns_to_tensors)
if weight_collections:
weight_collections = list(set(list(weight_collections) +
[ops.GraphKeys.GLOBAL_VARIABLES]))
for column in sorted(set(feature_columns), key=lambda x: x.key):
with variable_scope.variable_scope(None,
default_name=column.name,
values=columns_to_tensors.values()):
transformed_tensor = transformer.transform(column)
if output_rank == 3:
transformed_tensor = nest.map_structure(
functools.partial(
_maybe_reshape_input_tensor,
column_name=column.name,
output_rank=output_rank), transformed_tensor)
try:
# pylint: disable=protected-access
arguments = column._deep_embedding_lookup_arguments(
transformed_tensor)
output_tensors.append(
fc._embeddings_from_arguments( # pylint: disable=protected-access
column,
arguments,
weight_collections,
trainable,
output_rank=output_rank))
except NotImplementedError as ee:
try:
# pylint: disable=protected-access
output_tensors.append(column._to_dnn_input_layer(
transformed_tensor,
weight_collections,
trainable,
output_rank=output_rank))
except ValueError as e:
raise ValueError('Error creating input layer for column: {}.\n'
'{}, {}'.format(column.name, e, ee))
if cols_to_outs is not None:
cols_to_outs[column] = output_tensors[-1]
return array_ops.concat(output_tensors, output_rank - 1)
def input_from_feature_columns(columns_to_tensors,
feature_columns,
weight_collections=None,
trainable=True,
scope=None,
cols_to_outs=None):
"""A tf.contrib.layers style input layer builder based on FeatureColumns.
Generally a single example in training data is described with feature columns.
  At the first layer of the model, this column-oriented data should be
  converted to a single tensor. Each feature column needs a different kind of
  operation during this conversion. For example, sparse features need totally
  different handling than continuous features.
Example:
```python
# Building model for training
columns_to_tensor = tf.io.parse_example(...)
first_layer = input_from_feature_columns(
columns_to_tensors=columns_to_tensor,
feature_columns=feature_columns)
second_layer = fully_connected(inputs=first_layer, ...)
...
```
where feature_columns can be defined as follows:
```python
sparse_feature = sparse_column_with_hash_bucket(
column_name="sparse_col", ...)
sparse_feature_emb = embedding_column(sparse_id_column=sparse_feature, ...)
real_valued_feature = real_valued_column(...)
real_valued_buckets = bucketized_column(
source_column=real_valued_feature, ...)
feature_columns=[sparse_feature_emb, real_valued_buckets]
```
Args:
    columns_to_tensors: A mapping from feature column to tensors. A 'string'
      key means a base (untransformed) feature. The key can also be a
      FeatureColumn, meaning that the FeatureColumn was already transformed by
      the input pipeline.
    feature_columns: A set containing all the feature columns. All items in the
      set should be instances of classes derived from FeatureColumn.
weight_collections: List of graph collections to which weights are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
    cols_to_outs: Optional dict. If provided, it is populated with a mapping
      from each feature column to its output tensor (before concatenation into
      the returned tensor).
Returns:
A Tensor which can be consumed by hidden layers in the neural network.
Raises:
ValueError: if FeatureColumn cannot be consumed by a neural network.
"""
return _input_from_feature_columns(columns_to_tensors,
feature_columns,
weight_collections,
trainable,
scope,
output_rank=2,
default_name='input_from_feature_columns',
cols_to_outs=cols_to_outs)
@experimental
def sequence_input_from_feature_columns(columns_to_tensors,
feature_columns,
weight_collections=None,
trainable=True,
scope=None):
"""Builds inputs for sequence models from `FeatureColumn`s.
  See documentation for `input_from_feature_columns`. The following types of
  `FeatureColumn` are permitted in `feature_columns`: `_OneHotColumn`,
  `_EmbeddingColumn`, `_RealValuedColumn`, `_RealValuedVarLenColumn`. In
  addition, columns in `feature_columns` may not be constructed using any of
  the following: `_ScatteredEmbeddingColumn`, `_BucketizedColumn`,
  `_CrossedColumn`.
Args:
    columns_to_tensors: A mapping from feature column to tensors. A 'string'
      key means a base (untransformed) feature. The key can also be a
      FeatureColumn, meaning that the FeatureColumn was already transformed by
      the input pipeline.
    feature_columns: A set containing all the feature columns. All items in the
      set should be instances of classes derived from FeatureColumn.
weight_collections: List of graph collections to which weights are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
A Tensor which can be consumed by hidden layers in the neural network.
Raises:
ValueError: if FeatureColumn cannot be consumed by a neural network.
"""
_check_supported_sequence_columns(feature_columns)
_check_forbidden_sequence_columns(feature_columns)
return _input_from_feature_columns(
columns_to_tensors,
feature_columns,
weight_collections,
trainable,
scope,
output_rank=3,
default_name='sequence_input_from_feature_columns')
def _create_embedding_lookup(column,
columns_to_tensors,
embedding_lookup_arguments,
num_outputs,
trainable,
weight_collections):
"""Creates variables and returns predictions for linear weights in a model.
Args:
column: the column we're working on.
columns_to_tensors: a map from column name to tensors.
embedding_lookup_arguments: arguments for embedding lookup.
num_outputs: how many outputs.
trainable: whether the variable we create is trainable.
weight_collections: weights will be placed here.
Returns:
variables: the created embeddings.
predictions: the computed predictions.
"""
with variable_scope.variable_scope(
None, default_name=column.name, values=columns_to_tensors.values()):
variable = contrib_variables.model_variable(
name='weights',
shape=[embedding_lookup_arguments.vocab_size, num_outputs],
dtype=dtypes.float32,
initializer=embedding_lookup_arguments.initializer,
trainable=trainable,
collections=weight_collections)
if fc._is_variable(variable): # pylint: disable=protected-access
variable = [variable]
else:
variable = variable._get_variable_list() # pylint: disable=protected-access
predictions = embedding_ops.safe_embedding_lookup_sparse(
variable,
embedding_lookup_arguments.input_tensor,
sparse_weights=embedding_lookup_arguments.weight_tensor,
combiner=embedding_lookup_arguments.combiner,
name=column.name + '_weights')
return variable, predictions
def _create_joint_embedding_lookup(columns_to_tensors,
embedding_lookup_arguments,
num_outputs,
trainable,
weight_collections):
"""Creates an embedding lookup for all columns sharing a single weight."""
for arg in embedding_lookup_arguments:
assert arg.weight_tensor is None, (
'Joint sums for weighted sparse columns are not supported. '
'Please use weighted_sum_from_feature_columns instead.')
assert arg.combiner == 'sum', (
'Combiners other than sum are not supported for joint sums. '
'Please use weighted_sum_from_feature_columns instead.')
assert len(embedding_lookup_arguments) >= 1, (
'At least one column must be in the model.')
prev_size = 0
sparse_tensors = []
for a in embedding_lookup_arguments:
t = a.input_tensor
values = t.values + prev_size
prev_size += a.vocab_size
sparse_tensors.append(
sparse_tensor_py.SparseTensor(t.indices,
values,
t.dense_shape))
sparse_tensor = sparse_ops.sparse_concat(1, sparse_tensors)
with variable_scope.variable_scope(
None, default_name='linear_weights', values=columns_to_tensors.values()):
variable = contrib_variables.model_variable(
name='weights',
shape=[prev_size, num_outputs],
dtype=dtypes.float32,
initializer=init_ops.zeros_initializer(),
trainable=trainable,
collections=weight_collections)
if fc._is_variable(variable): # pylint: disable=protected-access
variable = [variable]
else:
variable = variable._get_variable_list() # pylint: disable=protected-access
predictions = embedding_ops.safe_embedding_lookup_sparse(
variable,
sparse_tensor,
sparse_weights=None,
combiner='sum',
name='_weights')
return variable, predictions
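# --- Editor's illustrative sketch (not part of the original module) ----------
# How the joint lookup above packs several sparse columns into a single weight
# matrix, using hypothetical vocab sizes. Two columns with vocab_size 3 and 5
# are remapped onto disjoint id ranges before the single lookup:
#
#   column A ids [0, 2]  -> stay [0, 2]      (prev_size goes 0 -> 3)
#   column B ids [1, 4]  -> become [4, 7]    (offset by prev_size = 3)
#
# The concatenated SparseTensor then indexes one 'weights' variable of shape
# [3 + 5, num_outputs], so each column's weights occupy a contiguous block of
# rows in a single variable rather than a separate variable per column.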
def joint_weighted_sum_from_feature_columns(columns_to_tensors,
feature_columns,
num_outputs,
weight_collections=None,
trainable=True,
scope=None):
"""A restricted linear prediction builder based on FeatureColumns.
As long as all feature columns are unweighted sparse columns this computes the
prediction of a linear model which stores all weights in a single variable.
Args:
    columns_to_tensors: A mapping from feature column to tensors. A 'string'
      key means a base (untransformed) feature. The key can also be a
      FeatureColumn, meaning that the FeatureColumn was already transformed by
      the input pipeline (for example, `inflow` may have handled
      transformations).
    feature_columns: A set containing all the feature columns. All items in the
      set should be instances of classes derived from FeatureColumn.
    num_outputs: An integer specifying the number of outputs.
weight_collections: List of graph collections to which weights are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
A tuple containing:
* A Tensor which represents predictions of a linear model.
* A list of Variables storing the weights.
* A Variable which is used for bias.
Raises:
ValueError: if FeatureColumn cannot be used for linear predictions.
"""
columns_to_tensors = columns_to_tensors.copy()
check_feature_columns(feature_columns)
with variable_scope.variable_scope(
scope,
default_name='joint_weighted_sum_from_feature_columns',
values=columns_to_tensors.values()):
transformer = _Transformer(columns_to_tensors)
embedding_lookup_arguments = []
for column in sorted(set(feature_columns), key=lambda x: x.key):
transformed_tensor = transformer.transform(column)
try:
embedding_lookup_arguments.append(
column._wide_embedding_lookup_arguments(transformed_tensor)) # pylint: disable=protected-access
except NotImplementedError:
raise NotImplementedError('Real-valued columns are not supported. '
'Use weighted_sum_from_feature_columns '
'instead, or bucketize these columns.')
variable, predictions_no_bias = _create_joint_embedding_lookup(
columns_to_tensors,
embedding_lookup_arguments,
num_outputs,
trainable,
weight_collections)
bias = contrib_variables.model_variable(
'bias_weight',
shape=[num_outputs],
initializer=init_ops.zeros_initializer(),
trainable=trainable,
collections=_add_variable_collection(weight_collections))
_log_variable(bias)
predictions = nn_ops.bias_add(predictions_no_bias, bias)
return predictions, variable, bias
def weighted_sum_from_feature_columns(columns_to_tensors,
feature_columns,
num_outputs,
weight_collections=None,
trainable=True,
scope=None):
"""A tf.contrib.layers style linear prediction builder based on FeatureColumn.
Generally a single example in training data is described with feature columns.
This function generates weighted sum for each num_outputs. Weighted sum refers
to logits in classification problems. It refers to prediction itself for
linear regression problems.
Example:
```
# Building model for training
feature_columns = (
real_valued_column("my_feature1"),
...
)
columns_to_tensor = tf.io.parse_example(...)
logits = weighted_sum_from_feature_columns(
columns_to_tensors=columns_to_tensor,
feature_columns=feature_columns,
num_outputs=1)
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,
logits=logits)
```
Args:
    columns_to_tensors: A mapping from feature column to tensors. A 'string'
      key means a base (untransformed) feature. The key can also be a
      FeatureColumn, meaning that the FeatureColumn was already transformed by
      the input pipeline (for example, `inflow` may have handled
      transformations).
    feature_columns: A set containing all the feature columns. All items in the
      set should be instances of classes derived from FeatureColumn.
    num_outputs: An integer specifying the number of outputs.
weight_collections: List of graph collections to which weights are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
A tuple containing:
* A Tensor which represents predictions of a linear model.
* A dictionary which maps feature_column to corresponding Variable.
* A Variable which is used for bias.
Raises:
ValueError: if FeatureColumn cannot be used for linear predictions.
"""
columns_to_tensors = columns_to_tensors.copy()
check_feature_columns(feature_columns)
with variable_scope.variable_scope(
scope,
default_name='weighted_sum_from_feature_columns',
values=columns_to_tensors.values()):
output_tensors = []
column_to_variable = {}
transformer = _Transformer(columns_to_tensors)
# pylint: disable=protected-access
for column in sorted(set(feature_columns), key=lambda x: x.key):
transformed_tensor = transformer.transform(column)
try:
embedding_lookup_arguments = column._wide_embedding_lookup_arguments(
transformed_tensor)
variable, predictions = _create_embedding_lookup(
column,
columns_to_tensors,
embedding_lookup_arguments,
num_outputs,
trainable,
weight_collections)
except NotImplementedError:
with variable_scope.variable_scope(
None,
default_name=column.name,
values=columns_to_tensors.values()):
tensor = column._to_dense_tensor(transformed_tensor)
tensor = _maybe_reshape_input_tensor(
tensor, column.name, output_rank=2)
variable = [
contrib_variables.model_variable(
name='weight',
shape=[tensor.get_shape()[1], num_outputs],
initializer=init_ops.zeros_initializer(),
trainable=trainable,
collections=weight_collections)
]
predictions = math_ops.matmul(tensor, variable[0], name='matmul')
except ValueError as ee:
raise ValueError('Error creating weighted sum for column: {}.\n'
'{}'.format(column.name, ee))
output_tensors.append(array_ops.reshape(
predictions, shape=(-1, num_outputs)))
column_to_variable[column] = variable
_log_variable(variable)
fc._maybe_restore_from_checkpoint(column._checkpoint_path(), variable) # pylint: disable=protected-access
# pylint: enable=protected-access
predictions_no_bias = math_ops.add_n(output_tensors)
bias = contrib_variables.model_variable(
'bias_weight',
shape=[num_outputs],
initializer=init_ops.zeros_initializer(),
trainable=trainable,
collections=_add_variable_collection(weight_collections))
_log_variable(bias)
predictions = nn_ops.bias_add(predictions_no_bias, bias)
return predictions, column_to_variable, bias
def parse_feature_columns_from_examples(serialized,
feature_columns,
name=None,
example_names=None):
"""Parses tf.Examples to extract tensors for given feature_columns.
This is a wrapper of 'tf.io.parse_example'.
Example:
```python
columns_to_tensor = parse_feature_columns_from_examples(
serialized=my_data,
feature_columns=my_features)
# Where my_features are:
# Define features and transformations
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature, boundaries=[...])
my_features = [embedding_feature_b, real_feature_buckets, embedding_feature_a]
```
Args:
serialized: A vector (1-D Tensor) of strings, a batch of binary
serialized `Example` protos.
feature_columns: An iterable containing all the feature columns. All items
should be instances of classes derived from _FeatureColumn.
name: A name for this operation (optional).
example_names: A vector (1-D Tensor) of strings (optional), the names of
the serialized protos in the batch.
Returns:
A `dict` mapping FeatureColumn to `Tensor` and `SparseTensor` values.
"""
check_feature_columns(feature_columns)
columns_to_tensors = parsing_ops.parse_example(
serialized=serialized,
features=fc.create_feature_spec_for_parsing(feature_columns),
name=name,
example_names=example_names)
transformer = _Transformer(columns_to_tensors)
for column in sorted(set(feature_columns), key=lambda x: x.key):
transformer.transform(column)
return columns_to_tensors
def transform_features(features, feature_columns):
"""Returns transformed features based on features columns passed in.
Example:
```python
columns_to_tensor = transform_features(features=features,
feature_columns=feature_columns)
  # Where feature_columns are:
# Define features and transformations
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature, boundaries=[...])
feature_columns = [embedding_feature_b,
real_feature_buckets,
embedding_feature_a]
```
Args:
features: A dictionary of features.
feature_columns: An iterable containing all the feature columns. All items
should be instances of classes derived from _FeatureColumn.
Returns:
A `dict` mapping FeatureColumn to `Tensor` and `SparseTensor` values.
"""
columns_to_tensor = features.copy()
check_feature_columns(feature_columns)
transformer = _Transformer(columns_to_tensor)
for column in sorted(set(feature_columns), key=lambda x: x.key):
transformer.transform(column)
keys = list(columns_to_tensor.keys())
for k in keys:
if k not in feature_columns:
columns_to_tensor.pop(k)
return columns_to_tensor
def parse_feature_columns_from_sequence_examples(
serialized,
context_feature_columns,
sequence_feature_columns,
name=None,
example_name=None):
"""Parses tf.SequenceExamples to extract tensors for given `FeatureColumn`s.
Args:
serialized: A scalar (0-D Tensor) of type string, a single serialized
`SequenceExample` proto.
context_feature_columns: An iterable containing the feature columns for
context features. All items should be instances of classes derived from
`_FeatureColumn`. Can be `None`.
sequence_feature_columns: An iterable containing the feature columns for
sequence features. All items should be instances of classes derived from
`_FeatureColumn`. Can be `None`.
name: A name for this operation (optional).
example_name: A scalar (0-D Tensor) of type string (optional), the names of
the serialized proto.
Returns:
A tuple consisting of (context_features, sequence_features)
* context_features: a dict mapping `FeatureColumns` from
`context_feature_columns` to their parsed `Tensors`/`SparseTensor`s.
* sequence_features: a dict mapping `FeatureColumns` from
`sequence_feature_columns` to their parsed `Tensors`/`SparseTensor`s.
"""
# Sequence example parsing requires a single (scalar) example.
try:
serialized = array_ops.reshape(serialized, [])
except ValueError as e:
raise ValueError(
        'serialized must contain a single sequence example. Batching must be '
'done after parsing for sequence examples. Error: {}'.format(e))
if context_feature_columns is None:
context_feature_columns = []
if sequence_feature_columns is None:
sequence_feature_columns = []
check_feature_columns(context_feature_columns)
context_feature_spec = fc.create_feature_spec_for_parsing(
context_feature_columns)
check_feature_columns(sequence_feature_columns)
sequence_feature_spec = fc._create_sequence_feature_spec_for_parsing( # pylint: disable=protected-access
sequence_feature_columns, allow_missing_by_default=False)
return parsing_ops.parse_single_sequence_example(serialized,
context_feature_spec,
sequence_feature_spec,
example_name,
name)
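# --- Editor's illustrative sketch (not part of the original module) ----------
# Hedged usage sketch for the parser above. `context_cols`, `sequence_cols`
# and `serialized_proto` are hypothetical names; note that `serialized` must
# hold a single (scalar) SequenceExample, so batching happens after parsing.
#
#   context, sequences = parse_feature_columns_from_sequence_examples(
#       serialized=serialized_proto,
#       context_feature_columns=context_cols,
#       sequence_feature_columns=sequence_cols)
#   # context[col] / sequences[col] then hold the parsed Tensor or
#   # SparseTensor for each FeatureColumn in the corresponding iterable.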
def _log_variable(variable):
if isinstance(variable, list):
for var in variable:
      if fc._is_variable(var):  # pylint: disable=protected-access
logging.info('Created variable %s, with device=%s', var.name,
var.device)
elif fc._is_variable(variable): # pylint: disable=protected-access
logging.info('Created variable %s, with device=%s', variable.name,
variable.device)
def _infer_real_valued_column_for_tensor(name, tensor):
"""Creates a real_valued_column for given tensor and name."""
if isinstance(tensor, sparse_tensor_py.SparseTensor):
    raise ValueError(
        'SparseTensor is not supported for auto detection. Please define '
        'corresponding FeatureColumn for tensor {} {}.'.format(name, tensor))
if not (tensor.dtype.is_integer or tensor.dtype.is_floating):
    raise ValueError(
        'Non-integer or non-floating types are not supported for auto '
        'detection. Please define a corresponding FeatureColumn for tensor '
        '{} {}.'.format(name, tensor))
shape = tensor.get_shape().as_list()
dimension = 1
for i in range(1, len(shape)):
dimension *= shape[i]
return fc.real_valued_column(name, dimension=dimension, dtype=tensor.dtype)
def infer_real_valued_columns(features):
if not isinstance(features, dict):
return [_infer_real_valued_column_for_tensor('', features)]
feature_columns = []
for key, value in features.items():
feature_columns.append(_infer_real_valued_column_for_tensor(key, value))
return feature_columns
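# --- Editor's illustrative sketch (not part of the original module) ----------
# The dimension inference above multiplies all non-batch dimensions, so a
# float feature of shape [batch, 2, 3] becomes one real_valued_column with
# dimension=6. A hypothetical example (feature names are illustrative):
#
#   features = {'pixels': array_ops.ones([32, 2, 3]),   # -> dimension 6
#               'age': array_ops.ones([32, 1])}         # -> dimension 1
#   columns = infer_real_valued_columns(features)
#
# SparseTensor values raise ValueError here and need explicitly defined
# FeatureColumns instead.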
def check_feature_columns(feature_columns):
"""Checks the validity of the set of FeatureColumns.
Args:
feature_columns: An iterable of instances or subclasses of FeatureColumn.
Raises:
ValueError: If `feature_columns` is a dict.
ValueError: If there are duplicate feature column keys.
"""
if isinstance(feature_columns, dict):
raise ValueError('Expected feature_columns to be iterable, found dict.')
seen_keys = set()
for f in feature_columns:
key = f.key
if key in seen_keys:
raise ValueError('Duplicate feature column key found for column: {}. '
'This usually means that the column is almost identical '
'to another column, and one must be discarded.'.format(
f.name))
seen_keys.add(key)
class _Transformer(object):
"""Handles all the transformations defined by FeatureColumn if needed.
FeatureColumn specifies how to digest an input column to the network. Some
feature columns require data transformations. This class handles those
transformations if they are not handled already.
Some features may be used in more than one place. For example, one can use a
bucketized feature by itself and a cross with it. In that case Transformer
should create only one bucketization op instead of multiple ops for each
feature column. To handle re-use of transformed columns, Transformer keeps all
previously transformed columns.
Example:
```python
sparse_feature = sparse_column_with_hash_bucket(...)
real_valued_feature = real_valued_column(...)
real_valued_buckets = bucketized_column(source_column=real_valued_feature,
...)
sparse_x_real = crossed_column(
columns=[sparse_feature, real_valued_buckets], hash_bucket_size=10000)
columns_to_tensor = tf.io.parse_example(...)
transformer = Transformer(columns_to_tensor)
sparse_x_real_tensor = transformer.transform(sparse_x_real)
sparse_tensor = transformer.transform(sparse_feature)
real_buckets_tensor = transformer.transform(real_valued_buckets)
```
"""
def __init__(self, columns_to_tensors):
"""Initializes transformer.
Args:
      columns_to_tensors: A mapping from feature columns to tensors. A 'string'
        key means a base (untransformed) feature. The key can also be a
        FeatureColumn, meaning that the FeatureColumn was already transformed
        by the input pipeline (for example, `inflow` may have handled
        transformations). Transformed features are inserted in
        columns_to_tensors.
"""
self._columns_to_tensors = columns_to_tensors
def transform(self, feature_column):
"""Returns a Tensor which represents given feature_column.
Args:
feature_column: An instance of FeatureColumn.
Returns:
A Tensor which represents given feature_column. It may create a new Tensor
or re-use an existing one.
Raises:
ValueError: if FeatureColumn cannot be handled by this Transformer.
"""
logging.debug('Transforming feature_column %s', feature_column)
if feature_column in self._columns_to_tensors:
# Feature_column is already transformed.
return self._columns_to_tensors[feature_column]
feature_column.insert_transformed_feature(self._columns_to_tensors)
if feature_column not in self._columns_to_tensors:
raise ValueError('Column {} is not supported.'.format(
feature_column.name))
return self._columns_to_tensors[feature_column]
def _add_variable_collection(weight_collections):
if weight_collections:
weight_collections = list(
set(list(weight_collections) + [ops.GraphKeys.GLOBAL_VARIABLES]))
return weight_collections
# TODO(jamieas): remove the following logic once all FeatureColumn types are
# supported for sequences.
# pylint: disable=protected-access
_SUPPORTED_SEQUENCE_COLUMNS = (fc._OneHotColumn,
fc._EmbeddingColumn,
fc._RealValuedColumn,
fc._RealValuedVarLenColumn)
_FORBIDDEN_SEQUENCE_COLUMNS = (fc._ScatteredEmbeddingColumn,
fc._BucketizedColumn,
fc._CrossedColumn)
def _check_supported_sequence_columns(feature_columns):
"""Asserts `feature_columns` are in `_SUPPORTED_SEQUENCE_COLUMNS`."""
for col in feature_columns:
if not isinstance(col, _SUPPORTED_SEQUENCE_COLUMNS):
raise ValueError(
'FeatureColumn type {} is not currently supported for sequence data.'.
format(type(col).__name__))
def _get_parent_columns(feature_column):
"""Returns the tuple of `FeatureColumn`s that `feature_column` depends on."""
if isinstance(feature_column, (fc._WeightedSparseColumn,
fc._OneHotColumn,
fc._EmbeddingColumn,)):
return (feature_column.sparse_id_column,)
if isinstance(feature_column, (fc._BucketizedColumn,)):
return (feature_column.source_column,)
if isinstance(feature_column, (fc._CrossedColumn)):
return tuple(feature_column.columns)
return tuple()
def _gather_feature_columns(feature_columns):
"""Returns a list of all ancestor `FeatureColumns` of `feature_columns`."""
gathered = list(feature_columns)
i = 0
while i < len(gathered):
for column in _get_parent_columns(gathered[i]):
if column not in gathered:
gathered.append(column)
i += 1
return gathered
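# --- Editor's illustrative sketch (not part of the original module) ----------
# What the ancestor walk above returns for a hypothetical crossed column:
#
#   country   = fc.sparse_column_with_hash_bucket('country', 5)
#   price     = fc.real_valued_column('price')
#   price_bkt = fc.bucketized_column(price, boundaries=[0., 10.])
#   crossed   = fc.crossed_column([country, price_bkt], hash_bucket_size=100)
#
#   _gather_feature_columns([crossed]) -> [crossed, country, price_bkt, price]
#
# i.e. the cross, its two component columns, and the bucketized column's
# source, which is how _check_forbidden_sequence_columns catches a forbidden
# _BucketizedColumn even when it only appears inside a cross.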
def _check_forbidden_sequence_columns(feature_columns):
"""Recursively checks `feature_columns` for `_FORBIDDEN_SEQUENCE_COLUMNS`."""
all_feature_columns = _gather_feature_columns(feature_columns)
for feature_column in all_feature_columns:
if isinstance(feature_column, _FORBIDDEN_SEQUENCE_COLUMNS):
raise ValueError(
'Column {} is of type {}, which is not currently supported for '
'sequences.'.format(feature_column.name,
type(feature_column).__name__))
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/feature_column_ops.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common util functions used by layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from collections import OrderedDict
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
__all__ = ['collect_named_outputs',
'constant_value',
'static_cond',
'smart_cond',
'get_variable_collections',
'two_element_tuple',
'n_positive_integers',
'channel_dimension',
'last_dimension']
NamedOutputs = namedtuple('NamedOutputs', ['name', 'outputs'])
def collect_named_outputs(collections, alias, outputs):
"""Add `Tensor` outputs tagged with alias to collections.
It is useful to collect end-points or tags for summaries. Example of usage:
logits = collect_named_outputs('end_points', 'inception_v3/logits', logits)
assert 'inception_v3/logits' in logits.aliases
Args:
collections: A collection or list of collections. If None skip collection.
alias: String to append to the list of aliases of outputs, for example,
'inception_v3/conv1'.
outputs: Tensor, an output tensor to collect
Returns:
The outputs Tensor to allow inline call.
"""
if collections:
append_tensor_alias(outputs, alias)
ops.add_to_collections(collections, outputs)
return outputs
def append_tensor_alias(tensor, alias):
"""Append an alias to the list of aliases of the tensor.
Args:
tensor: A `Tensor`.
alias: String, to add to the list of aliases of the tensor.
Returns:
The tensor with a new alias appended to its list of aliases.
"""
# Remove ending '/' if present.
if alias[-1] == '/':
alias = alias[:-1]
if hasattr(tensor, 'aliases'):
tensor.aliases.append(alias)
else:
tensor.aliases = [alias]
return tensor
def gather_tensors_aliases(tensors):
"""Given a list of tensors, gather their aliases.
Args:
tensors: A list of `Tensors`.
Returns:
A list of strings with the aliases of all tensors.
"""
aliases = []
for tensor in tensors:
aliases += get_tensor_aliases(tensor)
return aliases
def get_tensor_aliases(tensor):
"""Get a list with the aliases of the input tensor.
  If the tensor does not have any alias, it defaults to its op.name or its
  name.
Args:
tensor: A `Tensor`.
Returns:
A list of strings with the aliases of the tensor.
"""
if hasattr(tensor, 'aliases'):
aliases = tensor.aliases
else:
if tensor.name[-2:] == ':0':
# Use op.name for tensor ending in :0
aliases = [tensor.op.name]
else:
aliases = [tensor.name]
return aliases
def convert_collection_to_dict(collection, clear_collection=False):
"""Returns an OrderedDict of Tensors with their aliases as keys.
Args:
collection: A collection.
clear_collection: When True, it clears the collection after converting to
OrderedDict.
Returns:
An OrderedDict of {alias: tensor}
"""
output = OrderedDict((alias, tensor)
for tensor in ops.get_collection(collection)
for alias in get_tensor_aliases(tensor))
if clear_collection:
ops.get_default_graph().clear_collection(collection)
return output
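# --- Editor's illustrative sketch (not part of the original module) ----------
# Typical round trip through the two helpers above; the collection name
# 'end_points' and the tensor aliases are hypothetical:
#
#   net = collect_named_outputs('end_points', 'vgg/conv1', net)
#   net = collect_named_outputs('end_points', 'vgg/pool1', net)
#   end_points = convert_collection_to_dict('end_points')
#   # OrderedDict([('vgg/conv1', <Tensor ...>), ('vgg/pool1', <Tensor ...>)])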
def constant_value(value_or_tensor_or_var, dtype=None):
"""Returns value if value_or_tensor_or_var has a constant value.
Args:
value_or_tensor_or_var: A value, a `Tensor` or a `Variable`.
    dtype: Optional `tf.dtype`; if set, the input is checked to have this
      dtype.
Returns:
    The constant value, or None if it is not constant.
  Raises:
    ValueError: if value_or_tensor_or_var is None, or if the tensor or variable
      has the wrong dtype.
"""
if value_or_tensor_or_var is None:
raise ValueError('value_or_tensor_or_var cannot be None')
value = value_or_tensor_or_var
if isinstance(value_or_tensor_or_var, (ops.Tensor, variables.Variable)):
if dtype and value_or_tensor_or_var.dtype != dtype:
raise ValueError('It has the wrong type %s instead of %s' % (
value_or_tensor_or_var.dtype, dtype))
if isinstance(value_or_tensor_or_var, variables.Variable):
value = None
else:
value = tensor_util.constant_value(value_or_tensor_or_var)
return value
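# --- Editor's illustrative sketch (not part of the original module) ----------
# What constant_value returns for the three accepted kinds of input:
#
#   constant_value(4.0)                         -> 4.0  (already a Python value)
#   constant_value(ops.convert_to_tensor(4.0))  -> 4.0  (constant-foldable Tensor)
#   constant_value(variables.Variable(4.0))     -> None (Variables never fold)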
def static_cond(pred, fn1, fn2):
"""Return either fn1() or fn2() based on the boolean value of `pred`.
Same signature as `control_flow_ops.cond()` but requires pred to be a bool.
Args:
pred: A value determining whether to return the result of `fn1` or `fn2`.
fn1: The callable to be performed if pred is true.
fn2: The callable to be performed if pred is false.
Returns:
Tensors returned by the call to either `fn1` or `fn2`.
Raises:
TypeError: if `fn1` or `fn2` is not callable.
"""
if not callable(fn1):
raise TypeError('fn1 must be callable.')
if not callable(fn2):
raise TypeError('fn2 must be callable.')
if pred:
return fn1()
else:
return fn2()
def smart_cond(pred, fn1, fn2, name=None):
"""Return either fn1() or fn2() based on the boolean predicate/value `pred`.
  If `pred` is a bool or has a constant value, `static_cond` is used; otherwise
  it falls back to `tf.cond`.
Args:
pred: A scalar determining whether to return the result of `fn1` or `fn2`.
fn1: The callable to be performed if pred is true.
fn2: The callable to be performed if pred is false.
name: Optional name prefix when using tf.cond
Returns:
Tensors returned by the call to either `fn1` or `fn2`.
"""
pred_value = constant_value(pred)
if pred_value is not None:
# Use static_cond if pred has a constant value.
return static_cond(pred_value, fn1, fn2)
else:
# Use dynamic cond otherwise.
return control_flow_ops.cond(pred, fn1, fn2, name)
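# --- Editor's illustrative sketch (not part of the original module) ----------
# `smart_cond` picks the branch at graph-construction time whenever the
# predicate is a Python bool or a constant-foldable tensor, so only one branch
# is ever built; otherwise it falls back to a runtime `tf.cond`. Names below
# are hypothetical:
#
#   out = smart_cond(True, lambda: x * 2.0, lambda: x)          # static: x * 2
#   out = smart_cond(is_training, lambda: x * 2.0, lambda: x)   # tf.cond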
def get_variable_collections(variables_collections, name):
if isinstance(variables_collections, dict):
variable_collections = variables_collections.get(name, None)
else:
variable_collections = variables_collections
return variable_collections
def _get_dimension(shape, dim, min_rank=1):
"""Returns the `dim` dimension of `shape`, while checking it has `min_rank`.
Args:
shape: A `TensorShape`.
dim: Integer, which dimension to return.
min_rank: Integer, minimum rank of shape.
Returns:
The value of the `dim` dimension.
Raises:
    ValueError: if inputs don't have at least `min_rank` dimensions, or if the
      `dim` dimension value is not defined.
"""
dims = shape.dims
if dims is None:
raise ValueError('dims of shape must be known but is None')
if len(dims) < min_rank:
raise ValueError('rank of shape must be at least %d not: %d' % (min_rank,
len(dims)))
value = dims[dim].value
if value is None:
raise ValueError(
'dimension %d of shape must be known but is None: %s' % (dim, shape))
return value
def channel_dimension(shape, data_format, min_rank=1):
"""Returns the channel dimension of shape, while checking it has min_rank.
Args:
shape: A `TensorShape`.
data_format: `channels_first` or `channels_last`.
min_rank: Integer, minimum rank of shape.
Returns:
    The value of the channel dimension.
  Raises:
    ValueError: if inputs don't have at least `min_rank` dimensions, or if the
      channel dimension value is not defined.
"""
return _get_dimension(shape, 1 if data_format == 'channels_first' else -1,
min_rank=min_rank)
def last_dimension(shape, min_rank=1):
"""Returns the last dimension of shape while checking it has min_rank.
Args:
shape: A `TensorShape`.
min_rank: Integer, minimum rank of shape.
Returns:
The value of the last dimension.
Raises:
ValueError: if inputs don't have at least min_rank dimensions, or if the
last dimension value is not defined.
"""
return _get_dimension(shape, -1, min_rank=min_rank)
def two_element_tuple(int_or_tuple):
"""Converts `int_or_tuple` to height, width.
Several of the functions that follow accept arguments as either
a tuple of 2 integers or a single integer. A single integer
indicates that the 2 values of the tuple are the same.
  This function normalizes the input value by always returning a tuple.
Args:
int_or_tuple: A list of 2 ints, a single int or a `TensorShape`.
Returns:
A tuple with 2 values.
Raises:
    ValueError: If `int_or_tuple` is not well formed.
"""
if isinstance(int_or_tuple, (list, tuple)):
if len(int_or_tuple) != 2:
raise ValueError('Must be a list with 2 elements: %s' % int_or_tuple)
return int(int_or_tuple[0]), int(int_or_tuple[1])
if isinstance(int_or_tuple, int):
return int(int_or_tuple), int(int_or_tuple)
if isinstance(int_or_tuple, tensor_shape.TensorShape):
if len(int_or_tuple) == 2:
return int_or_tuple[0], int_or_tuple[1]
raise ValueError('Must be an int, a list with 2 elements or a TensorShape of '
'length 2')
def n_positive_integers(n, value):
"""Converts `value` to a sequence of `n` positive integers.
  `value` may either be a sequence of values convertible to `int`, or a
single value convertible to `int`, in which case the resulting integer is
duplicated `n` times. It may also be a TensorShape of rank `n`.
Args:
n: Length of sequence to return.
value: Either a single value convertible to a positive `int` or an
`n`-element sequence of values convertible to a positive `int`.
Returns:
A tuple of `n` positive integers.
Raises:
TypeError: If `n` is not convertible to an integer.
ValueError: If `n` or `value` are invalid.
"""
n_orig = n
n = int(n)
if n < 1 or n != n_orig:
raise ValueError('n must be a positive integer')
try:
value = int(value)
except (TypeError, ValueError):
sequence_len = len(value)
if sequence_len != n:
raise ValueError(
'Expected sequence of %d positive integers, but received %r' %
(n, value))
try:
values = tuple(int(x) for x in value)
    except (TypeError, ValueError):
raise ValueError(
'Expected sequence of %d positive integers, but received %r' %
(n, value))
for x in values:
if x < 1:
raise ValueError('expected positive integer, but received %d' % x)
return values
if value < 1:
raise ValueError('expected positive integer, but received %d' % value)
return (value,) * n
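# --- Editor's illustrative sketch (not part of the original module) ----------
# Accepted input forms for n_positive_integers, with hypothetical values:
#
#   n_positive_integers(2, 3)                                  -> (3, 3)
#   n_positive_integers(3, [4, 5, 6])                          -> (4, 5, 6)
#   n_positive_integers(2, tensor_shape.TensorShape([7, 9]))   -> (7, 9)
#   n_positive_integers(2, [4, 5, 6])     -> ValueError (wrong sequence length)
#   n_positive_integers(2, 0)             -> ValueError (not a positive integer)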
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/utils.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layers.feature_column_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.contrib import layers
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.feature_column import feature_column_lib as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
class TransformerTest(test.TestCase):
def testRealValuedColumnIsIdentityTransformation(self):
real_valued = feature_column.real_valued_column("price")
features = {"price": constant_op.constant([[20.], [110], [-3]])}
output = feature_column_ops._Transformer(features).transform(real_valued)
with self.cached_session():
self.assertAllEqual(output.eval(), [[20.], [110], [-3]])
def testSparseRealValuedColumnIdentityTransformation(self):
sparse_real_valued = feature_column._real_valued_var_len_column(
"rating", is_sparse=True)
rating_tensor = sparse_tensor.SparseTensor(
values=[2.0, 5.0], indices=[[0, 0], [2, 0]], dense_shape=[3, 1])
features = {"rating": rating_tensor}
output = feature_column_ops._Transformer(features).transform(
sparse_real_valued)
with self.cached_session():
self.assertAllEqual(output.values.eval(), rating_tensor.values.eval())
self.assertAllEqual(output.indices.eval(), rating_tensor.indices.eval())
self.assertAllEqual(output.dense_shape.eval(),
rating_tensor.dense_shape.eval())
def testSparseRealValuedColumnWithTransformation(self):
def square_fn(x):
return x**2
sparse_real_valued = feature_column._real_valued_var_len_column(
"rating", normalizer=square_fn, is_sparse=True)
rating_tensor = sparse_tensor.SparseTensor(
values=[2.0, 5.0], indices=[[0, 0], [2, 0]], dense_shape=[3, 1])
features = {"rating": rating_tensor}
output_dict = feature_column_ops.transform_features(features,
[sparse_real_valued])
self.assertTrue(sparse_real_valued in output_dict)
output = output_dict[sparse_real_valued]
with self.cached_session():
self.assertArrayNear(output.values.eval(), [4.0, 25.0], 1e-5)
self.assertAllEqual(output.indices.eval(), rating_tensor.indices.eval())
self.assertAllEqual(output.dense_shape.eval(),
rating_tensor.dense_shape.eval())
def testBucketizedColumn(self):
bucket = feature_column.bucketized_column(
feature_column.real_valued_column("price"), boundaries=[0., 10., 100.])
# buckets 2, 3, 0
features = {"price": constant_op.constant([[20.], [110], [-3]])}
# Test transform features.
output = feature_column_ops.transform_features(
features=features, feature_columns=[bucket])
self.assertEqual(len(output), 1)
self.assertIn(bucket, output)
with self.cached_session():
self.assertAllEqual(output[bucket].eval(), [[2], [3], [0]])
def testBucketizedColumnWithMultiDimensions(self):
bucket = feature_column.bucketized_column(
feature_column.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
# buckets 2, 3, 0
features = {
"price": constant_op.constant([[20., 110], [110., 20], [-3, -3]])
}
output = feature_column_ops._Transformer(features).transform(bucket)
with self.cached_session():
self.assertAllEqual(output.eval(), [[2, 3], [3, 2], [0, 0]])
def testCachedTransformation(self):
bucket = feature_column.bucketized_column(
feature_column.real_valued_column("price"), boundaries=[0., 10., 100.])
# buckets 2, 3, 0
features = {"price": constant_op.constant([[20.], [110], [-3]])}
transformer = feature_column_ops._Transformer(features)
with self.cached_session() as sess:
transformer.transform(bucket)
num_of_ops = len(sess.graph.get_operations())
# Verify that the second call to transform the same feature
# doesn't increase the number of ops.
transformer.transform(bucket)
self.assertEqual(num_of_ops, len(sess.graph.get_operations()))
def testSparseColumnWithHashBucket(self):
hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"wire": wire_tensor}
# Test transform features.
output = feature_column_ops.transform_features(
features=features, feature_columns=[hashed_sparse])
self.assertEqual(len(output), 1)
self.assertIn(hashed_sparse, output)
with self.cached_session():
self.assertEqual(output[hashed_sparse].values.dtype, dtypes.int64)
self.assertTrue(
all(x < 10 and x >= 0 for x in output[hashed_sparse].values.eval()))
self.assertAllEqual(output[hashed_sparse].indices.eval(),
wire_tensor.indices.eval())
self.assertAllEqual(output[hashed_sparse].dense_shape.eval(),
wire_tensor.dense_shape.eval())
def testSparseIntColumnWithHashBucket(self):
"""Tests a sparse column with int values."""
hashed_sparse = feature_column.sparse_column_with_hash_bucket(
"wire", 10, dtype=dtypes.int64)
wire_tensor = sparse_tensor.SparseTensor(
values=[101, 201, 301],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"wire": wire_tensor}
# Test transform features.
output = feature_column_ops.transform_features(
features=features, feature_columns=[hashed_sparse])
self.assertEqual(len(output), 1)
self.assertIn(hashed_sparse, output)
with self.cached_session():
self.assertEqual(output[hashed_sparse].values.dtype, dtypes.int64)
self.assertTrue(
all(x < 10 and x >= 0 for x in output[hashed_sparse].values.eval()))
self.assertAllEqual(output[hashed_sparse].indices.eval(),
wire_tensor.indices.eval())
self.assertAllEqual(output[hashed_sparse].dense_shape.eval(),
wire_tensor.dense_shape.eval())
def testSparseColumnWithHashBucketWithDenseInputTensor(self):
hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = constant_op.constant(
[["omar", "stringer"], ["marlo", "rick"]])
features = {"wire": wire_tensor}
output = feature_column_ops._Transformer(features).transform(hashed_sparse)
with self.cached_session():
# While the input is a dense Tensor, the output should be a SparseTensor.
self.assertIsInstance(output, sparse_tensor.SparseTensor)
self.assertEqual(output.values.dtype, dtypes.int64)
self.assertTrue(all(x < 10 and x >= 0 for x in output.values.eval()))
self.assertAllEqual(output.indices.eval(),
[[0, 0], [0, 1], [1, 0], [1, 1]])
self.assertAllEqual(output.dense_shape.eval(), [2, 2])
def testEmbeddingColumn(self):
wire_tensor = sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"wire": wire_tensor}
hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
wire_embedding = feature_column.embedding_column(hashed_sparse, 10)
# Test transform features.
output = feature_column_ops.transform_features(
features=features, feature_columns=[hashed_sparse, wire_embedding])
    # Check that the features dict hasn't changed.
self.assertEqual({"wire": wire_tensor}, features)
self.assertEqual(len(output), 2)
self.assertIn(hashed_sparse, output)
self.assertIn(wire_embedding, output)
with self.cached_session():
self.assertAllEqual(output[wire_embedding].indices.eval(),
wire_tensor.indices.eval())
self.assertAllEqual(output[wire_embedding].dense_shape.eval(), [2, 2])
self.assertAllEqual(output[wire_embedding].values.eval(),
output[hashed_sparse].values.eval())
def testSparseColumnWithKeys(self):
keys_sparse = feature_column.sparse_column_with_keys(
"wire", ["marlo", "omar", "stringer"])
wire_tensor = sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"wire": wire_tensor}
# Test transform features.
output = feature_column_ops.transform_features(
features=features, feature_columns=[keys_sparse])
self.assertEqual(len(output), 1)
self.assertIn(keys_sparse, output)
with self.cached_session():
lookup_ops.tables_initializer().run()
self.assertEqual(output[keys_sparse].values.dtype, dtypes.int64)
self.assertAllEqual(output[keys_sparse].values.eval(), [1, 2, 0])
self.assertAllEqual(output[keys_sparse].indices.eval(),
wire_tensor.indices.eval())
self.assertAllEqual(output[keys_sparse].dense_shape.eval(),
wire_tensor.dense_shape.eval())
def testSparseColumnWithKeysWithDenseInputTensor(self):
keys_sparse = feature_column.sparse_column_with_keys(
"wire", ["marlo", "omar", "stringer", "rick"])
wire_tensor = constant_op.constant(
[["omar", "stringer"], ["marlo", "rick"]])
features = {"wire": wire_tensor}
output = feature_column_ops._Transformer(features).transform(keys_sparse)
with self.cached_session():
lookup_ops.tables_initializer().run()
# While the input is a dense Tensor, the output should be a SparseTensor.
self.assertIsInstance(output, sparse_tensor.SparseTensor)
self.assertEqual(output.dtype, dtypes.int64)
self.assertAllEqual(output.values.eval(), [1, 2, 0, 3])
self.assertAllEqual(output.indices.eval(),
[[0, 0], [0, 1], [1, 0], [1, 1]])
self.assertAllEqual(output.dense_shape.eval(), [2, 2])
def testSparseColumnWithHashBucket_IsIntegerized(self):
hashed_sparse = feature_column.sparse_column_with_integerized_feature(
"wire", 10)
wire_tensor = sparse_tensor.SparseTensor(
values=[100, 1, 25],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"wire": wire_tensor}
# Test transform features.
output = feature_column_ops.transform_features(
features=features, feature_columns=[hashed_sparse])
self.assertEqual(len(output), 1)
self.assertIn(hashed_sparse, output)
with self.cached_session():
self.assertEqual(output[hashed_sparse].values.dtype, dtypes.int32)
self.assertTrue(
all(x < 10 and x >= 0 for x in output[hashed_sparse].values.eval()))
self.assertAllEqual(output[hashed_sparse].indices.eval(),
wire_tensor.indices.eval())
self.assertAllEqual(output[hashed_sparse].dense_shape.eval(),
wire_tensor.dense_shape.eval())
def testSparseColumnWithHashBucketWithDenseInputTensor_IsIntegerized(self):
hashed_sparse = feature_column.sparse_column_with_integerized_feature(
"wire", 10)
# wire_tensor = tf.SparseTensor(values=[100, 1, 25],
# indices=[[0, 0], [1, 0], [1, 1]],
# dense_shape=[2, 2])
wire_tensor = constant_op.constant([[100, 0], [1, 25]])
features = {"wire": wire_tensor}
output = feature_column_ops._Transformer(features).transform(hashed_sparse)
with self.cached_session():
# While the input is a dense Tensor, the output should be a SparseTensor.
self.assertIsInstance(output, sparse_tensor.SparseTensor)
self.assertEqual(output.values.dtype, dtypes.int32)
self.assertTrue(all(x < 10 and x >= 0 for x in output.values.eval()))
self.assertAllEqual(output.indices.eval(),
[[0, 0], [0, 1], [1, 0], [1, 1]])
self.assertAllEqual(output.dense_shape.eval(), [2, 2])
def testWeightedSparseColumn(self):
ids = feature_column.sparse_column_with_keys("ids",
["marlo", "omar", "stringer"])
ids_tensor = sparse_tensor.SparseTensor(
values=["stringer", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
weighted_ids = feature_column.weighted_sparse_column(ids, "weights")
weights_tensor = sparse_tensor.SparseTensor(
values=[10.0, 20.0, 30.0],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"ids": ids_tensor, "weights": weights_tensor}
# Test transform features.
output = feature_column_ops.transform_features(
features=features, feature_columns=[weighted_ids])
self.assertEqual(len(output), 1)
self.assertIn(weighted_ids, output)
with self.cached_session():
lookup_ops.tables_initializer().run()
self.assertAllEqual(output[weighted_ids][0].dense_shape.eval(),
ids_tensor.dense_shape.eval())
self.assertAllEqual(output[weighted_ids][0].indices.eval(),
ids_tensor.indices.eval())
self.assertAllEqual(output[weighted_ids][0].values.eval(), [2, 2, 0])
self.assertAllEqual(output[weighted_ids][1].dense_shape.eval(),
weights_tensor.dense_shape.eval())
self.assertAllEqual(output[weighted_ids][1].indices.eval(),
weights_tensor.indices.eval())
self.assertEqual(output[weighted_ids][1].values.dtype, dtypes.float32)
self.assertAllEqual(output[weighted_ids][1].values.eval(),
weights_tensor.values.eval())
def testSparseColumnWithVocabulary(self):
vocabulary_file = os.path.join(self.get_temp_dir(), "movies.txt")
with open(vocabulary_file, "w") as f:
f.write("\n".join(["marlo", "omar", "stringer"]) + "\n")
vocab_sparse = feature_column.sparse_column_with_vocabulary_file(
"wire", vocabulary_file, vocab_size=3)
wire_tensor = sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"wire": wire_tensor}
output = feature_column_ops.transform_features(
features=features, feature_columns=[vocab_sparse])
self.assertEqual(len(output), 1)
self.assertIn(vocab_sparse, output)
with self.cached_session():
lookup_ops.tables_initializer().run()
self.assertEqual(output[vocab_sparse].values.dtype, dtypes.int64)
self.assertAllEqual(output[vocab_sparse].values.eval(), [1, 2, 0])
self.assertAllEqual(output[vocab_sparse].indices.eval(),
wire_tensor.indices.eval())
self.assertAllEqual(output[vocab_sparse].dense_shape.eval(),
wire_tensor.dense_shape.eval())
def testSparseColumnWithVocabularyWithDenseInputTensor(self):
vocabulary_file = os.path.join(self.get_temp_dir(), "movies.txt")
with open(vocabulary_file, "w") as f:
f.write("\n".join(["marlo", "omar", "stringer"]) + "\n")
vocab_sparse = feature_column.sparse_column_with_vocabulary_file(
"wire", vocabulary_file, vocab_size=3)
wire_tensor = constant_op.constant(
[["omar", "stringer"], ["marlo", "omar"]])
features = {"wire": wire_tensor}
output = feature_column_ops.transform_features(
features=features, feature_columns=[vocab_sparse])
self.assertEqual(len(output), 1)
self.assertIn(vocab_sparse, output)
with self.cached_session():
lookup_ops.tables_initializer().run()
self.assertEqual(output[vocab_sparse].values.dtype, dtypes.int64)
self.assertAllEqual(output[vocab_sparse].values.eval(), [1, 2, 0, 1])
self.assertAllEqual(output[vocab_sparse].indices.eval(),
[[0, 0], [0, 1], [1, 0], [1, 1]])
self.assertAllEqual(output[vocab_sparse].dense_shape.eval(), [2, 2])
def testSparseIntColumnWithVocabulary(self):
"""Tests a sparse integer column with vocabulary."""
vocabulary_file = os.path.join(self.get_temp_dir(), "courses.txt")
with open(vocabulary_file, "w") as f:
f.write("\n".join(["101", "201", "301"]) + "\n")
vocab_sparse = feature_column.sparse_column_with_vocabulary_file(
"wire", vocabulary_file, vocab_size=3, dtype=dtypes.int64)
wire_tensor = sparse_tensor.SparseTensor(
values=[201, 301, 101],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"wire": wire_tensor}
output = feature_column_ops.transform_features(
features=features, feature_columns=[vocab_sparse])
self.assertEqual(len(output), 1)
self.assertIn(vocab_sparse, output)
with self.cached_session():
lookup_ops.tables_initializer().run()
self.assertEqual(output[vocab_sparse].values.dtype, dtypes.int64)
self.assertAllEqual(output[vocab_sparse].values.eval(), [1, 2, 0])
self.assertAllEqual(output[vocab_sparse].indices.eval(),
wire_tensor.indices.eval())
self.assertAllEqual(output[vocab_sparse].dense_shape.eval(),
wire_tensor.dense_shape.eval())
def testSparseIntColumnWithVocabularyWithDenseInputTensor(self):
"""Tests a sparse integer column with vocabulary."""
vocabulary_file = os.path.join(self.get_temp_dir(), "courses.txt")
with open(vocabulary_file, "w") as f:
f.write("\n".join(["101", "201", "301"]) + "\n")
vocab_sparse = feature_column.sparse_column_with_vocabulary_file(
"wire", vocabulary_file, vocab_size=3, dtype=dtypes.int64)
wire_tensor = constant_op.constant([[201, 301], [101, 201]])
features = {"wire": wire_tensor}
output = feature_column_ops.transform_features(
features=features, feature_columns=[vocab_sparse])
self.assertEqual(len(output), 1)
self.assertIn(vocab_sparse, output)
with self.cached_session():
lookup_ops.tables_initializer().run()
self.assertEqual(output[vocab_sparse].values.dtype, dtypes.int64)
self.assertAllEqual(output[vocab_sparse].values.eval(), [1, 2, 0, 1])
self.assertAllEqual(output[vocab_sparse].indices.eval(),
[[0, 0], [0, 1], [1, 0], [1, 1]])
self.assertAllEqual(output[vocab_sparse].dense_shape.eval(), [2, 2])
def testCrossColumn(self):
language = feature_column.sparse_column_with_hash_bucket(
"language", hash_bucket_size=3)
country = feature_column.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_language = feature_column.crossed_column(
[language, country], hash_bucket_size=15)
features = {
"language":
sparse_tensor.SparseTensor(
values=["english", "spanish"],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1]),
"country":
sparse_tensor.SparseTensor(
values=["US", "SV"],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1])
}
# Test transform features.
output = feature_column_ops.transform_features(
features=features, feature_columns=[country_language])
self.assertEqual(len(output), 1)
self.assertIn(country_language, output)
with self.cached_session():
self.assertEqual(output[country_language].values.dtype, dtypes.int64)
      self.assertTrue(
          all(0 <= x < 15 for x in output[country_language].values.eval()))
def testCrossWithBucketizedColumn(self):
price_bucket = feature_column.bucketized_column(
feature_column.real_valued_column("price"), boundaries=[0., 10., 100.])
country = feature_column.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_price = feature_column.crossed_column(
[country, price_bucket], hash_bucket_size=15)
features = {
"price":
constant_op.constant([[20.]]),
"country":
sparse_tensor.SparseTensor(
values=["US", "SV"],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2])
}
# Test transform features.
output = feature_column_ops.transform_features(
features=features, feature_columns=[country_price])
self.assertEqual(len(output), 1)
self.assertIn(country_price, output)
with self.cached_session():
self.assertEqual(output[country_price].values.dtype, dtypes.int64)
      self.assertTrue(
          all(0 <= x < 15 for x in output[country_price].values.eval()))
def testCrossWithMultiDimensionBucketizedColumn(self):
country = feature_column.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
price_bucket = feature_column.bucketized_column(
feature_column.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
country_price = feature_column.crossed_column(
[country, price_bucket], hash_bucket_size=1000)
with ops.Graph().as_default():
features = {
"price":
constant_op.constant([[20., 210.], [110., 50.], [-3., -30.]]),
"country":
sparse_tensor.SparseTensor(
values=["US", "SV", "US"],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 2])
}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [country_price], num_outputs=1))
weights = column_to_variable[country_price][0]
grad = array_ops.squeeze(
gradients_impl.gradients(output, weights)[0].values)
with self.cached_session():
variables_lib.global_variables_initializer().run()
self.assertEqual(len(grad.eval()), 6)
# Test transform features.
output = feature_column_ops.transform_features(
features=features, feature_columns=[country_price])
self.assertEqual(len(output), 1)
self.assertIn(country_price, output)
def testCrossWithCrossedColumn(self):
price_bucket = feature_column.bucketized_column(
feature_column.real_valued_column("price"), boundaries=[0., 10., 100.])
country = feature_column.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_price = feature_column.crossed_column(
[country, price_bucket], hash_bucket_size=15)
wire = feature_column.sparse_column_with_hash_bucket("wire", 10)
wire_country_price = feature_column.crossed_column(
[wire, country_price], hash_bucket_size=15)
features = {
"price":
constant_op.constant([[20.]]),
"country":
sparse_tensor.SparseTensor(
values=["US", "SV"],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2]),
"wire":
sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo"],
indices=[[0, 0], [0, 1], [0, 2]],
dense_shape=[1, 3])
}
# Test transform features.
output = feature_column_ops.transform_features(
features=features, feature_columns=[wire_country_price])
self.assertEqual(len(output), 1)
self.assertIn(wire_country_price, output)
with self.cached_session():
self.assertEqual(output[wire_country_price].values.dtype, dtypes.int64)
      self.assertTrue(
          all(0 <= x < 15 for x in output[wire_country_price].values.eval()))
def testIfFeatureTableContainsTransformationReturnIt(self):
any_column = feature_column.sparse_column_with_hash_bucket("sparse", 10)
features = {any_column: "any-thing-even-not-a-tensor"}
output = feature_column_ops._Transformer(features).transform(any_column)
self.assertEqual(output, "any-thing-even-not-a-tensor")
class CreateInputLayersForDNNsTest(test.TestCase):
def testFeatureColumnDictFails(self):
real_valued = feature_column.real_valued_column("price")
features = {"price": constant_op.constant([[20.], [110], [-3]])}
with self.assertRaisesRegexp(
ValueError,
"Expected feature_columns to be iterable, found dict"):
feature_column_ops.input_from_feature_columns(
features, {"feature": real_valued})
def testSparseTensorRealValuedColumn(self):
var_len_sparse_real_valued_column = (
feature_column._real_valued_var_len_column("rating", is_sparse=True))
features = {
"ids":
sparse_tensor.SparseTensor(
values=["c", "b", "a"],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
"income":
constant_op.constant([[20.3, 10], [110.3, 0.4], [-3.0, 30.4]]),
"rating":
sparse_tensor.SparseTensor(
values=[3.5, 5.0], indices=[[0, 0], [2, 0]], dense_shape=[3, 1])
}
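    # A var-len real-valued column marked is_sparse=True cannot be used to
    # build a DNN input layer and is expected to be rejected.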
with self.assertRaisesRegexp(
ValueError,
"dd"):
feature_column_ops.input_from_feature_columns(
features, [var_len_sparse_real_valued_column])
def testAllDNNColumns(self):
sparse_column = feature_column.sparse_column_with_keys(
"ids", ["a", "b", "c", "unseen"])
real_valued_column = feature_column.real_valued_column("income", 2)
one_hot_column = feature_column.one_hot_column(sparse_column)
embedding_column = feature_column.embedding_column(sparse_column, 10)
features = {
"ids":
sparse_tensor.SparseTensor(
values=["c", "b", "a"],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
"income":
constant_op.constant([[20.3, 10], [110.3, 0.4], [-3.0, 30.4]]),
}
columns = [one_hot_column, embedding_column, real_valued_column]
output = feature_column_ops.input_from_feature_columns(features, columns)
output_core = fc_core.input_layer(features, columns)
with self.cached_session():
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
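      # Output width: 2 (real valued dims) + 4 (one-hot over 4 keys) + 10
      # (embedding dimensions).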
self.assertAllEqual(output.eval().shape, [3, 2 + 4 + 10])
# Verify cross compatibility: Core builder output should equal to contrib.
self.assertAllEqual(output.eval().shape, output_core.eval().shape)
def testAllDNNColumnsWithColumnwiseOutputs(self):
sparse_column = feature_column.sparse_column_with_keys(
"ids", ["a", "b", "c", "unseen"])
real_valued_column = feature_column.real_valued_column("income", 2)
one_hot_column = feature_column.one_hot_column(sparse_column)
embedding_column = feature_column.embedding_column(sparse_column, 10)
features = {
"ids":
sparse_tensor.SparseTensor(
values=["c", "b", "a"],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
"income":
constant_op.constant([[20.3, 10], [110.3, 0.4], [-3.0, 30.4]]),
}
columns = [one_hot_column, embedding_column, real_valued_column]
cols_to_outs = {}
feature_column_ops.input_from_feature_columns(
features, columns, cols_to_outs=cols_to_outs)
with self.cached_session():
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
for column in columns:
self.assertTrue(column in cols_to_outs)
def testRealValuedColumn(self):
real_valued = feature_column.real_valued_column("price")
features = {"price": constant_op.constant([[20.], [110], [-3]])}
output = feature_column_ops.input_from_feature_columns(features,
[real_valued])
with self.cached_session():
self.assertAllClose(output.eval(), features["price"].eval())
# Verify cross compatibility: Core builder output should equal to contrib.
self.assertAllClose(output.eval(),
fc_core.input_layer(features, [real_valued]).eval())
def testRealValuedColumnWithMultiDimensions(self):
real_valued = feature_column.real_valued_column("price", 2)
features = {
"price": constant_op.constant([[20., 10.], [110, 0.], [-3, 30]])
}
output = feature_column_ops.input_from_feature_columns(features,
[real_valued])
with self.cached_session():
self.assertAllClose(output.eval(), features["price"].eval())
# Verify cross compatibility: Core builder output should equal to contrib.
self.assertAllClose(output.eval(),
fc_core.input_layer(features, [real_valued]).eval())
def testRealValuedColumnDense(self):
var_len_real_valued = feature_column._real_valued_var_len_column(
"rating", default_value=-1)
rating = np.array([[0., 1., 2., -1.],
[3., 4., 5., 6.]])
features = {"rating": constant_op.constant(rating)}
with self.cached_session() as sess:
output = sess.run(feature_column_ops.input_from_feature_columns(
features, [var_len_real_valued]))
self.assertAllClose(rating, output)
def testRealValuedColumnTypeConversion(self):
var_len_real_valued = feature_column._real_valued_var_len_column(
"rating", default_value=-1)
rating = np.array([[0, 1, 2, -1],
[3, 4, 5, 6]])
features = {"rating": constant_op.constant(rating, dtype=dtypes.int64)}
with self.cached_session() as sess:
output = sess.run(feature_column_ops.input_from_feature_columns(
features, [var_len_real_valued]))
self.assertAllClose(rating.astype(np.float32), output)
def testRealValuedColumnWithNormalizer(self):
real_valued = feature_column.real_valued_column(
"price", normalizer=lambda x: x - 2)
features = {"price": constant_op.constant([[20.], [110], [-3]])}
output = feature_column_ops.input_from_feature_columns(features,
[real_valued])
with self.cached_session():
self.assertAllClose(output.eval(), features["price"].eval() - 2)
# Verify cross compatibility: Core builder output should equal to contrib.
self.assertAllClose(output.eval(),
fc_core.input_layer(features, [real_valued]).eval())
def testRealValuedColumnWithMultiDimensionsAndNormalizer(self):
real_valued = feature_column.real_valued_column(
"price", 2, normalizer=lambda x: x - 2)
features = {
"price": constant_op.constant([[20., 10.], [110, 0.], [-3, 30]])
}
output = feature_column_ops.input_from_feature_columns(features,
[real_valued])
with self.cached_session():
self.assertAllClose(output.eval(), features["price"].eval() - 2)
# Verify cross compatibility: Core builder output should equal to contrib.
self.assertAllClose(output.eval(),
fc_core.input_layer(features, [real_valued]).eval())
def testBucketizedColumnWithNormalizerSucceedsForDNN(self):
bucket = feature_column.bucketized_column(
feature_column.real_valued_column(
"price", normalizer=lambda x: x - 15),
boundaries=[0., 10., 100.])
    # After the normalizer (x - 15): 5, 95, -18 -> buckets 1, 2, 0
features = {"price": constant_op.constant([[20.], [110], [-3]])}
output = feature_column_ops.input_from_feature_columns(features, [bucket])
expected = [[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]]
with self.cached_session():
self.assertAllClose(output.eval(), expected)
self.assertAllClose(output.eval(),
fc_core.input_layer(features, [bucket]).eval())
def testBucketizedColumnWithMultiDimensionsSucceedsForDNN(self):
bucket = feature_column.bucketized_column(
feature_column.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
# buckets [2, 3], [3, 2], [0, 0]. dimension = 2
features = {
"price": constant_op.constant([[20., 200], [110, 50], [-3, -3]])
}
output = feature_column_ops.input_from_feature_columns(features, [bucket])
expected = [[0, 0, 1, 0, 0, 0, 0, 1], [0, 0, 0, 1, 0, 0, 1, 0],
[1, 0, 0, 0, 1, 0, 0, 0]]
with self.cached_session():
self.assertAllClose(output.eval(), expected)
self.assertAllClose(output.eval(),
fc_core.input_layer(features, [bucket]).eval())
def testOneHotColumnFromWeightedSparseColumnSucceedsForDNN(self):
ids_column = feature_column.sparse_column_with_keys(
"ids", ["a", "b", "c", "unseen"])
ids_tensor = sparse_tensor.SparseTensor(
values=["c", "b", "a", "c"],
indices=[[0, 0], [1, 0], [2, 0], [2, 1]],
dense_shape=[3, 2])
weighted_ids_column = feature_column.weighted_sparse_column(ids_column,
"weights")
weights_tensor = sparse_tensor.SparseTensor(
values=[10.0, 20.0, 30.0, 40.0],
indices=[[0, 0], [1, 0], [2, 0], [2, 1]],
dense_shape=[3, 2])
features = {"ids": ids_tensor, "weights": weights_tensor}
one_hot_column = feature_column.one_hot_column(weighted_ids_column)
output = feature_column_ops.input_from_feature_columns(features,
[one_hot_column])
output_core = fc_core.input_layer(features, [one_hot_column])
with self.cached_session():
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
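      # Each active index is scaled by its weight, e.g. row 2 contains "a"
      # with weight 30 and "c" with weight 40.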
self.assertAllEqual([[0, 0, 10., 0], [0, 20., 0, 0], [30., 0, 40., 0]],
output.eval())
# Verify cross compatibility: Core builder output should equal to contrib.
self.assertAllEqual(output.eval(), output_core.eval())
def testOneHotColumnFromSparseColumnWithKeysSucceedsForDNN(self):
ids_column = feature_column.sparse_column_with_keys(
"ids", ["a", "b", "c", "unseen"])
ids_tensor = sparse_tensor.SparseTensor(
values=["c", "b", "a"],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1])
one_hot_sparse = feature_column.one_hot_column(ids_column)
features = {"ids": ids_tensor}
output = feature_column_ops.input_from_feature_columns(features,
[one_hot_sparse])
output_core = fc_core.input_layer(features, [one_hot_sparse])
with self.cached_session():
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
self.assertAllEqual([[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0]],
output.eval())
# Verify cross compatibility: Core builder output should equal to contrib.
self.assertAllEqual(output.eval(), output_core.eval())
def testOneHotColumnFromMultivalentSparseColumnWithKeysSucceedsForDNN(self):
ids_column = feature_column.sparse_column_with_keys(
"ids", ["a", "b", "c", "unseen"])
ids_tensor = sparse_tensor.SparseTensor(
values=["c", "b", "a", "c"],
indices=[[0, 0], [1, 0], [2, 0], [2, 1]],
dense_shape=[3, 2])
one_hot_sparse = feature_column.one_hot_column(ids_column)
features = {"ids": ids_tensor}
output = feature_column_ops.input_from_feature_columns(features,
[one_hot_sparse])
output_core = fc_core.input_layer(features, [one_hot_sparse])
with self.cached_session():
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
self.assertAllEqual([[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 1, 0]],
output.eval())
# Verify cross compatibility: Core builder output should equal to contrib.
self.assertAllEqual(output.eval(), output_core.eval())
def testOneHotColumnFromSparseColumnWithIntegerizedFeaturePassesForDNN(self):
ids_column = feature_column.sparse_column_with_integerized_feature(
"ids", bucket_size=4)
one_hot_sparse = feature_column.one_hot_column(ids_column)
features = {
"ids":
sparse_tensor.SparseTensor(
values=[2, 1, 0, 2],
indices=[[0, 0], [1, 0], [2, 0], [2, 1]],
dense_shape=[3, 2])
}
output = feature_column_ops.input_from_feature_columns(features,
[one_hot_sparse])
output_core = fc_core.input_layer(features, [one_hot_sparse])
with self.cached_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual([[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 1, 0]],
output.eval())
# Verify cross compatibility: Core builder output should equal to contrib.
self.assertAllEqual(output.eval(), output_core.eval())
def testOneHotColumnFromSparseColumnWithHashBucketSucceedsForDNN(self):
hashed_sparse = feature_column.sparse_column_with_hash_bucket("feat", 10)
wire_tensor = sparse_tensor.SparseTensor(
values=["a", "b", "c1", "c2"],
indices=[[0, 0], [1, 0], [2, 0], [2, 1]],
dense_shape=[3, 2])
features = {"feat": wire_tensor}
one_hot_sparse = feature_column.one_hot_column(hashed_sparse)
output = feature_column_ops.input_from_feature_columns(features,
[one_hot_sparse])
output_core = fc_core.input_layer(features, [one_hot_sparse])
with self.cached_session():
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
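      # The hashed bucket positions are implementation-defined, so only the
      # output shape is verified.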
self.assertAllEqual([3, 10], output.eval().shape)
# Verify cross compatibility: Core builder output should equal to contrib.
self.assertAllEqual(output.eval(), output_core.eval())
def testEmbeddingColumnSucceedsForDNN(self):
hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo", "xx", "yy"],
indices=[[0, 0], [1, 0], [1, 1], [2, 0], [3, 0]],
dense_shape=[4, 2])
features = {"wire": wire_tensor}
embeded_sparse = feature_column.embedding_column(hashed_sparse, 10)
output = feature_column_ops.input_from_feature_columns(features,
[embeded_sparse])
output_core = fc_core.input_layer(features, [embeded_sparse])
with self.cached_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual(output.eval().shape, [4, 10])
# Verify cross compatibility: Core builder output should equal to contrib.
self.assertAllEqual(output.eval().shape, output_core.eval().shape)
def testScatteredEmbeddingColumnSucceedsForDNN(self):
wire_tensor = sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo", "omar"],
indices=[[0, 0], [1, 0], [1, 1], [2, 0]],
dense_shape=[3, 2])
features = {"wire": wire_tensor}
# Big enough hash space so that hopefully there is no collision
embedded_sparse = feature_column.scattered_embedding_column(
"wire", 1000, 3, layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)
output = feature_column_ops.input_from_feature_columns(
features, [embedded_sparse], weight_collections=["my_collection"])
weights = ops.get_collection("my_collection")
grad = gradients_impl.gradients(output, weights)
    # Compute the same tensors with the FC core library; their values are
    # compared with the contrib version below.
output_core = fc_core.input_layer(
features, [embedded_sparse], weight_collections=["my_collection_core"])
weights_core = ops.get_collection("my_collection_core")
grad_core = gradients_impl.gradients(output_core, weights_core)
with self.cached_session():
variables_lib.global_variables_initializer().run()
gradient_values = []
gradient_values_core = []
# Collect the gradient from the different partitions (one in this test)
for p in range(len(grad)):
gradient_values.extend(grad[p].values.eval())
gradient_values_core.extend(grad_core[p].values.eval())
gradient_values.sort()
gradient_values_core.sort()
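      # With the default mean combiner, row 1's two ids each get a gradient of
      # 0.5 per dimension (6 values); "omar" appears alone in rows 0 and 2, so
      # its 3 weights accumulate a gradient of 2.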
self.assertAllEqual(gradient_values, [0.5] * 6 + [2] * 3)
self.assertAllEqual(gradient_values, gradient_values_core)
def testEmbeddingColumnWithInitializerSucceedsForDNN(self):
hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"wire": wire_tensor}
init_value = 133.7
embeded_sparse = feature_column.embedding_column(
hashed_sparse,
10,
initializer=init_ops.constant_initializer(init_value))
output = feature_column_ops.input_from_feature_columns(features,
[embeded_sparse])
output_core = fc_core.input_layer(features, [embeded_sparse])
with self.cached_session():
variables_lib.global_variables_initializer().run()
output_eval = output.eval()
self.assertAllEqual(output_eval.shape, [2, 10])
self.assertAllClose(output_eval, np.tile(init_value, [2, 10]))
# Verify cross compatibility: Core builder output should equal to contrib.
self.assertAllEqual(output.eval(), output_core.eval())
def testEmbeddingColumnWithMultipleInitializersFails(self):
hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"wire": wire_tensor}
embedded_sparse = feature_column.embedding_column(
hashed_sparse,
10,
initializer=init_ops.truncated_normal_initializer(
mean=42, stddev=1337))
embedded_sparse_alternate = feature_column.embedding_column(
hashed_sparse,
10,
initializer=init_ops.truncated_normal_initializer(
mean=1337, stddev=42))
    # Two embedding columns that resolve to the same key ("wire_embedding")
    # but specify different initializers must fail explicitly.
with self.cached_session():
with self.assertRaisesRegexp(
ValueError,
"Duplicate feature column key found for column: wire_embedding"):
feature_column_ops.input_from_feature_columns(
features, [embedded_sparse, embedded_sparse_alternate])
def testEmbeddingColumnWithWeightedSparseColumnSucceedsForDNN(self):
"""Tests DNN input with embedded weighted sparse column."""
ids = feature_column.sparse_column_with_keys("ids",
["marlo", "omar", "stringer"])
ids_tensor = sparse_tensor.SparseTensor(
values=["stringer", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
weighted_ids = feature_column.weighted_sparse_column(ids, "weights")
weights_tensor = sparse_tensor.SparseTensor(
values=[10.0, 20.0, 30.0],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"ids": ids_tensor, "weights": weights_tensor}
embeded_sparse = feature_column.embedding_column(weighted_ids, 10)
output = feature_column_ops.input_from_feature_columns(features,
[embeded_sparse])
output_core = fc_core.input_layer(features, [embeded_sparse])
with self.cached_session():
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
self.assertAllEqual(output.eval().shape, [2, 10])
# Verify cross compatibility: Core builder output should equal to contrib.
self.assertAllEqual(output.eval().shape, output_core.eval().shape)
def testEmbeddingColumnWithIntegerWeightedSparseColumnSucceedsForDNN(self):
"""Same as the previous test, but with integer weights."""
ids = feature_column.sparse_column_with_keys("ids",
["marlo", "omar", "stringer"])
ids_tensor = sparse_tensor.SparseTensor(
values=["stringer", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
weighted_ids = feature_column.weighted_sparse_column(
ids, "weights", dtype=dtypes.int32)
weights_tensor = sparse_tensor.SparseTensor(
values=constant_op.constant([10, 20, 30], dtype=dtypes.int32),
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"ids": ids_tensor, "weights": weights_tensor}
embeded_sparse = feature_column.embedding_column(weighted_ids, 10)
output = feature_column_ops.input_from_feature_columns(features,
[embeded_sparse])
with self.cached_session():
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
self.assertAllEqual(output.eval().shape, [2, 10])
def testEmbeddingColumnWithCrossedColumnSucceedsForDNN(self):
a = feature_column.sparse_column_with_hash_bucket(
"aaa", hash_bucket_size=100)
b = feature_column.sparse_column_with_hash_bucket(
"bbb", hash_bucket_size=100)
crossed = feature_column.crossed_column(set([a, b]), hash_bucket_size=10000)
wire_tensor = sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"aaa": wire_tensor, "bbb": wire_tensor}
embeded_sparse = feature_column.embedding_column(crossed, 10)
output = feature_column_ops.input_from_feature_columns(features,
[embeded_sparse])
with self.cached_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual(output.eval().shape, [2, 10])
def testSparseColumnFailsForDNN(self):
hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"wire": wire_tensor}
with self.cached_session():
with self.assertRaisesRegexp(
ValueError, "Error creating input layer for column: wire"):
variables_lib.global_variables_initializer().run()
feature_column_ops.input_from_feature_columns(features, [hashed_sparse])
def testWeightedSparseColumnFailsForDNN(self):
ids = feature_column.sparse_column_with_keys("ids",
["marlo", "omar", "stringer"])
ids_tensor = sparse_tensor.SparseTensor(
values=["stringer", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
weighted_ids = feature_column.weighted_sparse_column(ids, "weights")
weights_tensor = sparse_tensor.SparseTensor(
values=[10.0, 20.0, 30.0],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"ids": ids_tensor, "weights": weights_tensor}
with self.cached_session():
with self.assertRaisesRegexp(
ValueError,
"Error creating input layer for column: ids_weighted_by_weights"):
lookup_ops.tables_initializer().run()
feature_column_ops.input_from_feature_columns(features, [weighted_ids])
def testCrossedColumnFailsForDNN(self):
a = feature_column.sparse_column_with_hash_bucket(
"aaa", hash_bucket_size=100)
b = feature_column.sparse_column_with_hash_bucket(
"bbb", hash_bucket_size=100)
crossed = feature_column.crossed_column(set([a, b]), hash_bucket_size=10000)
wire_tensor = sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"aaa": wire_tensor, "bbb": wire_tensor}
with self.cached_session():
with self.assertRaisesRegexp(
ValueError, "Error creating input layer for column: aaa_X_bbb"):
variables_lib.global_variables_initializer().run()
feature_column_ops.input_from_feature_columns(features, [crossed])
def testDeepColumnsSucceedForDNN(self):
real_valued = feature_column.real_valued_column("income", 3)
bucket = feature_column.bucketized_column(
feature_column.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
features = {
"income":
constant_op.constant([[20., 10, -5], [110, 0, -7], [-3, 30, 50]]),
"price":
constant_op.constant([[20., 200], [110, 2], [-20, -30]]),
"wire":
sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1])
}
embeded_sparse = feature_column.embedding_column(
hashed_sparse, 10, initializer=init_ops.constant_initializer(133.7))
output = feature_column_ops.input_from_feature_columns(
features, [real_valued, bucket, embeded_sparse])
with self.cached_session():
variables_lib.global_variables_initializer().run()
# size of output = 3 (real_valued) + 2 * 4 (bucket) + 10 (embedding) = 21
self.assertAllEqual(output.eval().shape, [3, 21])
def testEmbeddingColumnForDNN(self):
hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[3, 2])
features = {"wire": wire_tensor}
embeded_sparse = feature_column.embedding_column(
hashed_sparse,
1,
combiner="sum",
initializer=init_ops.ones_initializer())
output = feature_column_ops.input_from_feature_columns(features,
[embeded_sparse])
with self.cached_session():
variables_lib.global_variables_initializer().run()
# score: (number of values)
self.assertAllEqual(output.eval(), [[1.], [2.], [0.]])
def testEmbeddingColumnWithMaxNormForDNN(self):
hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[3, 2])
features = {"wire": wire_tensor}
embedded_sparse = feature_column.embedding_column(
hashed_sparse,
1,
combiner="sum",
initializer=init_ops.ones_initializer(),
max_norm=0.5)
output = feature_column_ops.input_from_feature_columns(features,
[embedded_sparse])
with self.cached_session():
variables_lib.global_variables_initializer().run()
# score: (number of values * 0.5)
self.assertAllClose(output.eval(), [[0.5], [1.], [0.]])
def testEmbeddingColumnWithWeightedSparseColumnForDNN(self):
ids = feature_column.sparse_column_with_keys("ids",
["marlo", "omar", "stringer"])
ids_tensor = sparse_tensor.SparseTensor(
values=["stringer", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[3, 2])
weighted_ids = feature_column.weighted_sparse_column(ids, "weights")
weights_tensor = sparse_tensor.SparseTensor(
values=[10.0, 20.0, 30.0],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[3, 2])
features = {"ids": ids_tensor, "weights": weights_tensor}
embeded_sparse = feature_column.embedding_column(
weighted_ids,
1,
combiner="sum",
initializer=init_ops.ones_initializer())
output = feature_column_ops.input_from_feature_columns(features,
[embeded_sparse])
with self.cached_session():
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
# score: (sum of weights)
self.assertAllEqual(output.eval(), [[10.], [50.], [0.]])
def testInputLayerWithCollectionsForDNN(self):
real_valued = feature_column.real_valued_column("price")
bucket = feature_column.bucketized_column(
real_valued, boundaries=[0., 10., 100.])
hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
features = {
"price":
constant_op.constant([[20.], [110], [-3]]),
"wire":
sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1])
}
embeded_sparse = feature_column.embedding_column(hashed_sparse, 10)
feature_column_ops.input_from_feature_columns(
features, [real_valued, bucket, embeded_sparse],
weight_collections=["my_collection"])
weights = ops.get_collection("my_collection")
    # Only the embedded sparse column creates a weight variable.
self.assertEqual(1, len(weights))
def testInputLayerWithTrainableArgForDNN(self):
real_valued = feature_column.real_valued_column("price")
bucket = feature_column.bucketized_column(
real_valued, boundaries=[0., 10., 100.])
hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
features = {
"price":
constant_op.constant([[20.], [110], [-3]]),
"wire":
sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1])
}
embeded_sparse = feature_column.embedding_column(hashed_sparse, 10)
feature_column_ops.input_from_feature_columns(
features, [real_valued, bucket, embeded_sparse],
weight_collections=["my_collection"],
trainable=False)
# There should not be any trainable variables
self.assertEqual(0, len(variables_lib.trainable_variables()))
feature_column_ops.input_from_feature_columns(
features, [real_valued, bucket, embeded_sparse],
weight_collections=["my_collection"],
trainable=True)
    # There should be one trainable variable for the embedded sparse column
self.assertEqual(1, len(variables_lib.trainable_variables()))
def testInputLayerWithNonTrainableEmbeddingForDNN(self):
sparse_1 = feature_column.sparse_column_with_hash_bucket("wire_1", 10)
sparse_2 = feature_column.sparse_column_with_hash_bucket("wire_2", 10)
features = {
"wire_1":
sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
"wire_2":
sparse_tensor.SparseTensor(
values=["jack", "jill"],
indices=[[0, 0], [1, 0]],
dense_shape=[4, 1])
}
dims_1 = 10
init_1 = 3.14
embeded_1 = feature_column.embedding_column(
sparse_1, dims_1, initializer=init_ops.constant_initializer(init_1),
trainable=False)
output_1 = feature_column_ops.input_from_feature_columns(
features, [embeded_1])
# There should be no trainable variables for sparse_1
self.assertEqual(0, len(variables_lib.trainable_variables()))
dims_2 = 7
init_2 = 6.14
embeded_2 = feature_column.embedding_column(
sparse_2, dims_2, initializer=init_ops.constant_initializer(init_2),
trainable=True)
output_2 = feature_column_ops.input_from_feature_columns(
features, [embeded_2])
    # There should be one trainable variable for sparse_2
self.assertEqual(1, len(variables_lib.trainable_variables()))
with self.cached_session():
variables_lib.global_variables_initializer().run()
output_1_eval = output_1.eval()
output_2_eval = output_2.eval()
self.assertAllEqual(output_1_eval.shape, [3, dims_1])
self.assertAllClose(output_1_eval, np.tile(init_1, [3, dims_1]))
self.assertAllEqual(output_2_eval.shape, [4, dims_2])
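      # wire_2 only has ids in its first two rows, so the remaining rows embed
      # to zeros.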
self.assertAllClose(output_2_eval, np.concatenate(
(np.tile(init_2, [2, dims_2]), np.tile(0, [2, dims_2]))))
class SequenceInputFromFeatureColumnTest(test.TestCase):
def testSupportedColumns(self):
measurement = feature_column.real_valued_column("measurements")
country = feature_column.sparse_column_with_hash_bucket("country", 100)
pets = feature_column.sparse_column_with_hash_bucket("pets", 100)
ids = feature_column.sparse_column_with_integerized_feature("id", 100)
country_x_pets = feature_column.crossed_column([country, pets], 100)
country_x_pets_onehot = feature_column.one_hot_column(country_x_pets)
bucketized_measurement = feature_column.bucketized_column(measurement,
[.25, .5, .75])
embedded_id = feature_column.embedding_column(ids, 100)
# `_BucketizedColumn` is not supported.
self.assertRaisesRegexp(
ValueError,
"FeatureColumn type _BucketizedColumn is not currently supported",
feature_column_ops.sequence_input_from_feature_columns, {},
[measurement, bucketized_measurement])
# `_CrossedColumn` is not supported.
self.assertRaisesRegexp(
ValueError,
"FeatureColumn type _CrossedColumn is not currently supported",
feature_column_ops.sequence_input_from_feature_columns, {},
[embedded_id, country_x_pets])
# `country_x_pets_onehot` depends on a `_CrossedColumn` which is forbidden.
self.assertRaisesRegexp(
ValueError, "Column country_X_pets .* _CrossedColumn",
feature_column_ops.sequence_input_from_feature_columns, {},
[embedded_id, country_x_pets_onehot])
def testRealValuedColumn(self):
batch_size = 4
sequence_length = 8
dimension = 3
np.random.seed(1111)
measurement_input = np.random.rand(batch_size, sequence_length, dimension)
measurement_column = feature_column.real_valued_column("measurements")
columns_to_tensors = {
"measurements": constant_op.constant(measurement_input)
}
model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
columns_to_tensors, [measurement_column])
with self.cached_session() as sess:
model_inputs = sess.run(model_input_tensor)
self.assertAllClose(measurement_input, model_inputs)
def testRealValuedVarLenColumn(self):
var_len_real_valued = feature_column._real_valued_var_len_column(
"rating", default_value=-1)
rating = np.array([[0., 1., 2., -1.],
[3., 4., 5., 6.]])
features = {"rating": constant_op.constant(rating)}
with self.cached_session() as sess:
output = sess.run(
feature_column_ops.sequence_input_from_feature_columns(
features, [var_len_real_valued]))
reshaped_rating = np.reshape(rating, [2, 4, 1])
self.assertAllClose(reshaped_rating, output)
def testRealValuedColumnWithExtraDimensions(self):
batch_size = 4
sequence_length = 8
dimensions = [3, 4, 5]
np.random.seed(2222)
measurement_input = np.random.rand(batch_size, sequence_length, *dimensions)
measurement_column = feature_column.real_valued_column("measurements")
columns_to_tensors = {
"measurements": constant_op.constant(measurement_input)
}
model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
columns_to_tensors, [measurement_column])
expected_shape = [batch_size, sequence_length, np.prod(dimensions)]
reshaped_measurements = np.reshape(measurement_input, expected_shape)
with self.cached_session() as sess:
model_inputs = sess.run(model_input_tensor)
self.assertAllClose(reshaped_measurements, model_inputs)
def testRealValuedColumnWithNormalizer(self):
batch_size = 4
sequence_length = 8
dimension = 3
normalizer = lambda x: x - 2
np.random.seed(3333)
measurement_input = np.random.rand(batch_size, sequence_length, dimension)
measurement_column = feature_column.real_valued_column(
"measurements", normalizer=normalizer)
columns_to_tensors = {
"measurements": constant_op.constant(measurement_input)
}
model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
columns_to_tensors, [measurement_column])
with self.cached_session() as sess:
model_inputs = sess.run(model_input_tensor)
self.assertAllClose(normalizer(measurement_input), model_inputs)
def testRealValuedColumnWithMultiDimensionsAndNormalizer(self):
batch_size = 4
sequence_length = 8
dimensions = [3, 4, 5]
normalizer = lambda x: x / 2.0
np.random.seed(1234)
measurement_input = np.random.rand(batch_size, sequence_length, *dimensions)
measurement_column = feature_column.real_valued_column(
"measurements", normalizer=normalizer)
columns_to_tensors = {
"measurements": constant_op.constant(measurement_input)
}
model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
columns_to_tensors, [measurement_column])
expected_shape = [batch_size, sequence_length, np.prod(dimensions)]
reshaped_measurements = np.reshape(measurement_input, expected_shape)
with self.cached_session() as sess:
model_inputs = sess.run(model_input_tensor)
self.assertAllClose(normalizer(reshaped_measurements), model_inputs)
def testOneHotColumnFromSparseColumnWithKeys(self):
ids_tensor = sparse_tensor.SparseTensor(
values=["c", "b",
"a", "c", "b",
"b"],
indices=[[0, 0, 0], [0, 1, 0],
[1, 0, 0], [1, 0, 1], [1, 1, 0],
[3, 2, 0]],
dense_shape=[4, 3, 2])
ids_column = feature_column.sparse_column_with_keys(
"ids", ["a", "b", "c", "unseen"])
one_hot_column = feature_column.one_hot_column(ids_column)
columns_to_tensors = {"ids": ids_tensor}
model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
columns_to_tensors, [one_hot_column])
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
model_input = sess.run(model_input_tensor)
expected_input_shape = np.array([4, 3, 4])
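    # Cell (1, 0) holds both "a" and "c"; batch entry 2 has no ids, so its
    # rows stay all-zero.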
expected_model_input = np.array(
[[[0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 0]],
[[1, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 0]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 1, 0, 0]]],
dtype=np.float32)
self.assertAllEqual(expected_input_shape, model_input.shape)
self.assertAllClose(expected_model_input, model_input)
def testOneHotColumnFromSparseColumnWithHashBucket(self):
hash_buckets = 10
ids_tensor = sparse_tensor.SparseTensor(
values=["c", "b",
"a", "c", "b",
"b"],
indices=[[0, 0, 0], [0, 1, 0],
[1, 0, 0], [1, 0, 1], [1, 1, 0],
[3, 2, 0]],
dense_shape=[4, 3, 2])
hashed_ids_column = feature_column.sparse_column_with_hash_bucket(
"ids", hash_buckets)
one_hot_column = feature_column.one_hot_column(hashed_ids_column)
columns_to_tensors = {"ids": ids_tensor}
model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
columns_to_tensors, [one_hot_column])
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
model_input = sess.run(model_input_tensor)
expected_input_shape = np.array([4, 3, hash_buckets])
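    # With hashed ids the exact one-hot positions depend on the hash values,
    # so only the output shape is checked.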
self.assertAllEqual(expected_input_shape, model_input.shape)
def testEmbeddingColumn(self):
hash_buckets = 10
embedding_dimension = 5
ids_tensor = sparse_tensor.SparseTensor(
values=["c", "b",
"a", "c", "b",
"b"],
indices=[[0, 0, 0], [0, 1, 0],
[1, 0, 0], [1, 0, 1], [1, 1, 0],
[3, 2, 0]],
dense_shape=[4, 3, 2])
expected_input_shape = np.array([4, 3, embedding_dimension])
hashed_ids_column = feature_column.sparse_column_with_hash_bucket(
"ids", hash_buckets)
embedded_column = feature_column.embedding_column(hashed_ids_column,
embedding_dimension)
columns_to_tensors = {"ids": ids_tensor}
model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
columns_to_tensors, [embedded_column])
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
model_input = sess.run(model_input_tensor)
self.assertAllEqual(expected_input_shape, model_input.shape)
def testEmbeddingColumnWithAutoReshape(self):
hash_buckets = 10
embedding_dimension = 5
ids_tensor = sparse_tensor.SparseTensor(
values=["c", "b",
"a", "c", "b",
"b"],
indices=[[0, 0], [0, 1],
[1, 0], [1, 1], [1, 2],
[3, 2]],
dense_shape=[4, 3])
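    # The ids here are rank 2 ([batch, sequence]); the sequence input builder
    # is expected to reshape them to rank 3 automatically (hence the test
    # name).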
expected_input_shape = np.array([4, 3, embedding_dimension])
hashed_ids_column = feature_column.sparse_column_with_hash_bucket(
"ids", hash_buckets)
embedded_column = feature_column.embedding_column(hashed_ids_column,
embedding_dimension)
columns_to_tensors = {"ids": ids_tensor}
model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
columns_to_tensors, [embedded_column])
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
model_input = sess.run(model_input_tensor)
self.assertAllEqual(expected_input_shape, model_input.shape)
def testEmbeddingColumnGradient(self):
hash_buckets = 1000
embedding_dimension = 3
ids_tensor = sparse_tensor.SparseTensor(
values=["c", "b",
"a", "c", "b",
"b"],
indices=[[0, 0, 0], [0, 1, 0],
[1, 0, 0], [1, 0, 1], [1, 1, 0],
[3, 2, 0]],
dense_shape=[4, 3, 2])
hashed_ids_column = feature_column.sparse_column_with_hash_bucket(
"ids", hash_buckets)
embedded_column = feature_column.embedding_column(
hashed_ids_column, embedding_dimension, combiner="sum")
columns_to_tensors = {"ids": ids_tensor}
model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
columns_to_tensors, [embedded_column],
weight_collections=["my_collection"])
embedding_weights = ops.get_collection("my_collection")
gradient_tensor = gradients_impl.gradients(model_input_tensor,
embedding_weights)
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
model_input, gradients = sess.run([model_input_tensor, gradient_tensor])
expected_input_shape = [4, 3, embedding_dimension]
self.assertAllEqual(expected_input_shape, model_input.shape)
# `ids_tensor` consists of 7 instances of <empty>, 3 occurrences of "b",
# 2 occurrences of "c" and 1 instance of "a".
expected_gradient_values = sorted([0., 3., 2., 1.] * embedding_dimension)
actual_gradient_values = np.sort(gradients[0].values, axis=None)
self.assertAllClose(expected_gradient_values, actual_gradient_values)
def testMultipleColumns(self):
batch_size = 4
sequence_length = 3
measurement_dimension = 5
country_hash_size = 10
max_id = 200
id_embedding_dimension = 11
normalizer = lambda x: x / 10.0
measurement_tensor = random_ops.random_uniform(
[batch_size, sequence_length, measurement_dimension])
country_tensor = sparse_tensor.SparseTensor(
values=["us", "ca",
"ru", "fr", "ca",
"mx"],
indices=[[0, 0, 0], [0, 1, 0],
[1, 0, 0], [1, 0, 1], [1, 1, 0],
[3, 2, 0]],
dense_shape=[4, 3, 2])
id_tensor = sparse_tensor.SparseTensor(
values=[2, 5,
26, 123, 1,
0],
indices=[[0, 0, 0], [0, 0, 1],
[0, 1, 1], [1, 0, 0], [1, 1, 0],
[3, 2, 0]],
dense_shape=[4, 3, 2])
columns_to_tensors = {
"measurements": measurement_tensor,
"country": country_tensor,
"id": id_tensor
}
measurement_column = feature_column.real_valued_column(
"measurements", normalizer=normalizer)
country_column = feature_column.sparse_column_with_hash_bucket(
"country", country_hash_size)
id_column = feature_column.sparse_column_with_integerized_feature("id",
max_id)
onehot_country_column = feature_column.one_hot_column(country_column)
embedded_id_column = feature_column.embedding_column(id_column,
id_embedding_dimension)
model_input_columns = [
measurement_column, onehot_country_column, embedded_id_column
]
model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
columns_to_tensors, model_input_columns)
self.assertEqual(dtypes.float32, model_input_tensor.dtype)
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
model_input = sess.run(model_input_tensor)
expected_input_shape = [
batch_size, sequence_length,
measurement_dimension + country_hash_size + id_embedding_dimension
]
self.assertAllEqual(expected_input_shape, model_input.shape)
class WeightedSumTest(test.TestCase):
def testFeatureColumnDictFails(self):
hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"wire": wire_tensor}
with self.assertRaisesRegexp(
ValueError,
"Expected feature_columns to be iterable, found dict"):
feature_column_ops.weighted_sum_from_feature_columns(
features, {"feature": hashed_sparse}, num_outputs=5)
def testSparseColumn(self):
hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"wire": wire_tensor}
logits, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
features, [hashed_sparse], num_outputs=5)
logits_core = fc_core.linear_model(features, [hashed_sparse], units=5)
with self.cached_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
# Verify cross compatibility: Core builder output should equal to contrib.
self.assertAllEqual(logits.eval(), logits_core.eval())
def testSparseIntColumn(self):
"""Tests a sparse column with int values."""
hashed_sparse = feature_column.sparse_column_with_hash_bucket(
"wire", 10, dtype=dtypes.int64)
wire_tensor = sparse_tensor.SparseTensor(
values=[101, 201, 301],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"wire": wire_tensor}
logits, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
features, [hashed_sparse], num_outputs=5)
logits_core = fc_core.linear_model(features, [hashed_sparse], units=5)
with self.cached_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
# Verify cross compatibility: Core builder output should equal to contrib.
self.assertAllEqual(logits.eval(), logits_core.eval())
def testSparseColumnWithDenseInputTensor(self):
hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = constant_op.constant(
[["omar", "stringer"], ["marlo", "rick"]])
features = {"wire": wire_tensor}
logits, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
features, [hashed_sparse], num_outputs=5)
logits_core = fc_core.linear_model(features, [hashed_sparse], units=5)
with self.cached_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
# Verify cross compatibility: Core builder output should equal to contrib.
self.assertAllEqual(logits.eval(), logits_core.eval())
def testWeightedSparseColumn(self):
ids = feature_column.sparse_column_with_keys("ids",
["marlo", "omar", "stringer"])
ids_tensor = sparse_tensor.SparseTensor(
values=["stringer", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
weighted_ids = feature_column.weighted_sparse_column(ids, "weights")
weights_tensor = sparse_tensor.SparseTensor(
values=[10.0, 20.0, 30.0],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"ids": ids_tensor, "weights": weights_tensor}
logits, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
features, [weighted_ids], num_outputs=5)
logits_core = fc_core.linear_model(features, [weighted_ids], units=5)
with self.cached_session():
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
# Verify cross compatibility: Core builder output should equal to contrib.
self.assertAllEqual(logits.eval(), logits_core.eval())
def testWeightedSparseColumnWithDenseInputTensor(self):
ids = feature_column.sparse_column_with_keys(
"ids", ["marlo", "omar", "stringer", "rick"])
ids_tensor = constant_op.constant([["omar", "stringer"], ["marlo", "rick"]])
weighted_ids = feature_column.weighted_sparse_column(ids, "weights")
weights_tensor = constant_op.constant([[10.0, 20.0], [30.0, 40.0]])
features = {"ids": ids_tensor, "weights": weights_tensor}
logits, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
features, [weighted_ids], num_outputs=5)
logits_core = fc_core.linear_model(features, [weighted_ids], units=5)
with self.cached_session():
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
# Verify cross compatibility: Core builder output should equal to contrib.
self.assertAllEqual(logits.eval(), logits_core.eval())
def testCrossedColumn(self):
a = feature_column.sparse_column_with_hash_bucket(
"aaa", hash_bucket_size=100)
b = feature_column.sparse_column_with_hash_bucket(
"bbb", hash_bucket_size=100)
crossed = feature_column.crossed_column(set([a, b]), hash_bucket_size=10000)
wire_tensor = sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"aaa": wire_tensor, "bbb": wire_tensor}
logits, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
features, [crossed], num_outputs=5)
logits_core = fc_core.linear_model(features, [crossed], units=5)
with self.cached_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
# Verify cross compatibility: Core builder output should equal to contrib.
self.assertAllEqual(logits.eval(), logits_core.eval())
def testEmbeddingColumn(self):
hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"wire": wire_tensor}
embeded_sparse = feature_column.embedding_column(hashed_sparse, 10)
with self.cached_session():
with self.assertRaisesRegexp(
ValueError, "Error creating weighted sum for column: wire_embedding"):
variables_lib.global_variables_initializer().run()
feature_column_ops.weighted_sum_from_feature_columns(
features, [embeded_sparse], num_outputs=5)
def testSparseFeatureColumnWithVocabularyFile(self):
vocabulary_file = os.path.join(self.get_temp_dir(), "movies.txt")
with open(vocabulary_file, "w") as f:
f.write("\n".join(["head-on", "matrix", "winter sleep"]) + "\n")
movies = feature_column.sparse_column_with_vocabulary_file(
column_name="movies", vocabulary_file=vocabulary_file, vocab_size=3)
with ops.Graph().as_default():
features = {
"movies":
sparse_tensor.SparseTensor(
values=["matrix", "head-on", "winter sleep"],
indices=[[0, 0], [0, 1], [1, 0]],
dense_shape=[2, 2])
}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [movies], num_outputs=1))
logits_core = fc_core.linear_model(features, [movies])
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
weights = column_to_variable[movies][0]
self.assertEqual(weights.get_shape(), (3, 1))
sess.run(weights.assign([[0.1], [0.3], [0.5]]))
# score for first example = 0.3 (matrix) + 0.1 (head-on) = 0.4
# score for second example = 0.5 (winter sleep)
self.assertAllClose(output.eval(), [[0.4], [0.5]])
# Cross compatibility: Core builder output should equal to contrib.
self.assertAllEqual(output.eval().shape, logits_core.eval().shape)
def testRealValuedColumnWithMultiDimensions(self):
real_valued = feature_column.real_valued_column("price", 2)
features = {
"price": constant_op.constant([[20., 10.], [110, 0.], [-3, 30]])
}
logits, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
features, [real_valued], num_outputs=5)
with self.cached_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual(logits.eval().shape, [3, 5])
def testBucketizedColumnWithMultiDimensions(self):
bucket = feature_column.bucketized_column(
feature_column.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
features = {
"price": constant_op.constant([[20., 10.], [110, 0.], [-3, 30]])
}
logits, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
features, [bucket], num_outputs=5)
with self.cached_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual(logits.eval().shape, [3, 5])
def testAllWideColumns(self):
real_valued = feature_column.real_valued_column("income", 2)
bucket = feature_column.bucketized_column(
feature_column.real_valued_column("price"), boundaries=[0., 10., 100.])
hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
crossed = feature_column.crossed_column([bucket, hashed_sparse], 100)
features = {
"income":
constant_op.constant([[20., 10], [110, 0], [-3, 30]]),
"price":
constant_op.constant([[20.], [110], [-3]]),
"wire":
sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1])
}
output, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
features, [real_valued, bucket, hashed_sparse, crossed], num_outputs=5)
output_core = fc_core.linear_model(
features, [real_valued, bucket, hashed_sparse, crossed], units=5)
with self.cached_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual(output.eval().shape, [3, 5])
# Verify cross compatibility: Core builder output should equal to contrib.
self.assertAllEqual(output.eval(), output_core.eval())
def testPredictions(self):
language = feature_column.sparse_column_with_keys(
column_name="language", keys=["english", "finnish", "hindi"])
age = feature_column.real_valued_column("age")
with ops.Graph().as_default():
features = {
"age":
constant_op.constant([[1], [2]]),
"language":
sparse_tensor.SparseTensor(
values=["hindi", "english"],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1]),
}
output, column_to_variable, bias = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [age, language], num_outputs=1))
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
self.assertAllClose(output.eval(), [[0.], [0.]])
sess.run(bias.assign([0.1]))
self.assertAllClose(output.eval(), [[0.1], [0.1]])
        # score: bias (0.1) + age * age_weight (0.2)
sess.run(column_to_variable[age][0].assign([[0.2]]))
self.assertAllClose(output.eval(), [[0.3], [0.5]])
        # score: bias + age * age_weight + language_weight[language_index]
sess.run(column_to_variable[language][0].assign([[0.1], [0.3], [0.2]]))
self.assertAllClose(output.eval(), [[0.5], [0.6]])
def testJointPredictions(self):
country = feature_column.sparse_column_with_keys(
column_name="country", keys=["us", "finland"])
language = feature_column.sparse_column_with_keys(
column_name="language", keys=["english", "finnish", "hindi"])
with ops.Graph().as_default():
features = {
"country":
sparse_tensor.SparseTensor(
values=["finland", "us"],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1]),
"language":
sparse_tensor.SparseTensor(
values=["hindi", "english"],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1]),
}
output, variables, bias = (
feature_column_ops.joint_weighted_sum_from_feature_columns(
features, [country, language], num_outputs=1))
      # Assert that only a single joint weight variable is created.
self.assertEqual(len(variables), 1)
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
self.assertAllClose(output.eval(), [[0.], [0.]])
sess.run(bias.assign([0.1]))
self.assertAllClose(output.eval(), [[0.1], [0.1]])
        # Shape is [5, 1]: 2 country keys + 3 language keys stacked into a
        # single joint weight, with 1 output class.
        self.assertEqual(variables[0].get_shape().as_list(), [5, 1])
# score: bias + country_weight + language_weight
sess.run(variables[0].assign([[0.1], [0.2], [0.3], [0.4], [0.5]]))
self.assertAllClose(output.eval(), [[0.8], [0.5]])
def testJointPredictionsWeightedFails(self):
language = feature_column.weighted_sparse_column(
feature_column.sparse_column_with_keys(
column_name="language", keys=["english", "finnish", "hindi"]),
"weight")
with ops.Graph().as_default():
features = {
"weight":
constant_op.constant([[1], [2]]),
"language":
sparse_tensor.SparseTensor(
values=["hindi", "english"],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1]),
}
with self.assertRaises(AssertionError):
feature_column_ops.joint_weighted_sum_from_feature_columns(
features, [language], num_outputs=1)
def testJointPredictionsRealFails(self):
age = feature_column.real_valued_column("age")
with ops.Graph().as_default():
features = {"age": constant_op.constant([[1], [2]]),}
with self.assertRaises(NotImplementedError):
feature_column_ops.joint_weighted_sum_from_feature_columns(
features, [age], num_outputs=1)
def testPredictionsWithWeightedSparseColumn(self):
language = feature_column.sparse_column_with_keys(
column_name="language", keys=["english", "finnish", "hindi"])
weighted_language = feature_column.weighted_sparse_column(
sparse_id_column=language, weight_column_name="age")
with ops.Graph().as_default():
features = {
"language":
sparse_tensor.SparseTensor(
values=["hindi", "english"],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1]),
"age":
sparse_tensor.SparseTensor(
values=[10.0, 20.0],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1])
}
output, column_to_variable, bias = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [weighted_language], num_outputs=1))
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
self.assertAllClose(output.eval(), [[0.], [0.]])
sess.run(bias.assign([0.1]))
self.assertAllClose(output.eval(), [[0.1], [0.1]])
# score: bias + age*language_weight[index]
sess.run(column_to_variable[weighted_language][0].assign([[0.1], [0.2],
[0.3]]))
self.assertAllClose(output.eval(), [[3.1], [2.1]])
def testPredictionsWithMultivalentColumnButNoCross(self):
language = feature_column.sparse_column_with_keys(
column_name="language", keys=["english", "turkish", "hindi"])
with ops.Graph().as_default():
features = {
"language":
sparse_tensor.SparseTensor(
values=["hindi", "english"],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2])
}
output, column_to_variable, bias = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [language], num_outputs=1))
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
# score: 0.1 + language_weight['hindi'] + language_weight['english']
sess.run(bias.assign([0.1]))
sess.run(column_to_variable[language][0].assign([[0.1], [0.3], [0.2]]))
self.assertAllClose(output.eval(), [[0.4]])
def testSparseFeatureColumnWithHashedBucketSize(self):
movies = feature_column.sparse_column_with_hash_bucket(
column_name="movies", hash_bucket_size=15)
with ops.Graph().as_default():
features = {
"movies":
sparse_tensor.SparseTensor(
values=["matrix", "head-on", "winter sleep"],
indices=[[0, 0], [0, 1], [1, 0]],
dense_shape=[2, 2])
}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [movies], num_outputs=1))
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
weights = column_to_variable[movies][0]
self.assertEqual(weights.get_shape(), (15, 1))
sess.run(weights.assign(weights + 0.4))
# score for first example = 0.4 (matrix) + 0.4 (head-on) = 0.8
# score for second example = 0.4 (winter sleep)
self.assertAllClose(output.eval(), [[0.8], [0.4]])
def testCrossUsageInPredictions(self):
language = feature_column.sparse_column_with_hash_bucket(
"language", hash_bucket_size=3)
country = feature_column.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_language = feature_column.crossed_column(
[language, country], hash_bucket_size=10)
with ops.Graph().as_default():
features = {
"language":
sparse_tensor.SparseTensor(
values=["english", "spanish"],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1]),
"country":
sparse_tensor.SparseTensor(
values=["US", "SV"],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1])
}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [country_language], num_outputs=1))
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
weights = column_to_variable[country_language][0]
sess.run(weights.assign(weights + 0.4))
self.assertAllClose(output.eval(), [[0.4], [0.4]])
def testCrossColumnByItself(self):
language = feature_column.sparse_column_with_hash_bucket(
"language", hash_bucket_size=3)
language_language = feature_column.crossed_column(
[language, language], hash_bucket_size=10)
with ops.Graph().as_default():
features = {
"language":
sparse_tensor.SparseTensor(
values=["english", "spanish"],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2]),
}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [language_language], num_outputs=1))
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
weights = column_to_variable[language_language][0]
sess.run(weights.assign(weights + 0.4))
# There are two features inside language. If we cross it by itself we'll
# have four crossed features.
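        # The four crosses are (english, english), (english, spanish),
        # (spanish, english) and (spanish, spanish); their weights sum to 1.6.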
self.assertAllClose(output.eval(), [[1.6]])
def testMultivalentCrossUsageInPredictions(self):
language = feature_column.sparse_column_with_hash_bucket(
"language", hash_bucket_size=3)
country = feature_column.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_language = feature_column.crossed_column(
[language, country], hash_bucket_size=10)
with ops.Graph().as_default():
features = {
"language":
sparse_tensor.SparseTensor(
values=["english", "spanish"],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2]),
"country":
sparse_tensor.SparseTensor(
values=["US", "SV"],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2])
}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [country_language], num_outputs=1))
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
weights = column_to_variable[country_language][0]
sess.run(weights.assign(weights + 0.4))
# There are four crosses each with 0.4 weight.
# score = 0.4 + 0.4 + 0.4 + 0.4
self.assertAllClose(output.eval(), [[1.6]])
def testMultivalentCrossUsageInPredictionsWithPartition(self):
# bucket size has to be big enough to allow sharding.
language = feature_column.sparse_column_with_hash_bucket(
"language", hash_bucket_size=64 << 19)
country = feature_column.sparse_column_with_hash_bucket(
"country", hash_bucket_size=64 << 18)
country_language = feature_column.crossed_column(
[language, country], hash_bucket_size=64 << 18)
with ops.Graph().as_default():
features = {
"language":
sparse_tensor.SparseTensor(
values=["english", "spanish"],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2]),
"country":
sparse_tensor.SparseTensor(
values=["US", "SV"],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2])
}
with variable_scope.variable_scope(
"weighted_sum_from_feature_columns",
features.values(),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=10, min_slice_size=((64 << 20) - 1))) as scope:
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [country, language, country_language],
num_outputs=1,
scope=scope))
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
self.assertEqual(2, len(column_to_variable[country]))
self.assertEqual(3, len(column_to_variable[language]))
self.assertEqual(2, len(column_to_variable[country_language]))
weights = column_to_variable[country_language]
for partition_variable in weights:
sess.run(partition_variable.assign(partition_variable + 0.4))
# There are four crosses each with 0.4 weight.
# score = 0.4 + 0.4 + 0.4 + 0.4
self.assertAllClose(output.eval(), [[1.6]])
def testRealValuedColumnHavingMultiDimensions(self):
country = feature_column.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
age = feature_column.real_valued_column("age")
# The following RealValuedColumn has 3 dimensions.
incomes = feature_column.real_valued_column("incomes", 3)
with ops.Graph().as_default():
features = {
"age":
constant_op.constant([[1], [1]]),
"incomes":
constant_op.constant([[100., 200., 300.], [10., 20., 30.]]),
"country":
sparse_tensor.SparseTensor(
values=["US", "SV"],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 2])
}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [country, age, incomes], num_outputs=1))
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
incomes_weights = column_to_variable[incomes][0]
sess.run(incomes_weights.assign([[0.1], [0.2], [0.3]]))
self.assertAllClose(output.eval(), [[140.], [14.]])
def testMulticlassWithRealValuedColumnHavingMultiDimensionsAndSparse(self):
country = feature_column.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
age = feature_column.real_valued_column("age")
    # The following RealValuedColumn has no predefined dimension, so the
    # feature is allowed to be missing.
height = feature_column._real_valued_var_len_column("height",
default_value=0,
is_sparse=False)
# The following RealValuedColumn has 3 dimensions.
incomes = feature_column.real_valued_column("incomes", 3)
with ops.Graph().as_default():
features = {
"age":
constant_op.constant([[1], [1]]),
"incomes":
constant_op.constant([[100., 200., 300.], [10., 20., 30.]]),
"height":
constant_op.constant([[5., 4.], [0., 6.]]),
"country":
sparse_tensor.SparseTensor(
values=["US", "SV"],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 2])
}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [country, age, height, incomes], num_outputs=5))
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
height_weights = column_to_variable[height][0]
sess.run(
height_weights.assign(
[[1., 2., 3., 5., 10.], [1., 2., 3., 5., 10.]]))
self.assertAllClose(output.eval(), [[9., 18., 27., 45., 90.],
[6., 12., 18., 30., 60.]])
incomes_weights = column_to_variable[incomes][0]
sess.run(
incomes_weights.assign([[0.01, 0.1, 1., 10., 100.],
[0.02, 0.2, 2., 20., 200.],
[0.03, 0.3, 3., 30., 300.]]))
self.assertAllClose(
output.eval(),
[[14. + 9., 140. + 18., 1400. + 27., 14000. + 45., 140000. + 90.],
[1.4 + 6., 14. + 12., 140. + 18., 1400. + 30., 14000. + 60.]])
def testBucketizedColumn(self):
bucket = feature_column.bucketized_column(
feature_column.real_valued_column("price"), boundaries=[0., 10., 100.])
with ops.Graph().as_default():
# buckets 2, 3, 0
features = {"price": constant_op.constant([[20.], [110], [-3]])}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [bucket], num_outputs=1))
output_core = fc_core.linear_model(features, [bucket])
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
        # Cross compatibility: the core builder output should equal the
        # contrib output.
self.assertAllEqual(output.eval(), output_core.eval())
sess.run(column_to_variable[bucket][0].assign([[0.1], [0.2], [0.3],
[0.4]]))
self.assertAllClose(output.eval(), [[0.3], [0.4], [0.1]])
def testBucketizedColumnHavingMultiDimensions(self):
country = feature_column.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
bucket = feature_column.bucketized_column(
feature_column.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
with ops.Graph().as_default():
# buckets 2, 3, 0
features = {
"price":
constant_op.constant([[20., 210], [110, 50], [-3, -30]]),
"country":
sparse_tensor.SparseTensor(
values=["US", "SV"],
indices=[[0, 0], [1, 0]],
dense_shape=[3, 2])
}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [bucket, country], num_outputs=1))
output_core = fc_core.linear_model(features, [bucket, country])
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
        # Cross compatibility: the core builder output should equal the
        # contrib output.
self.assertAllEqual(output.eval(), output_core.eval())
# dimension = 2, bucket_size = 4, num_classes = 1
sess.run(column_to_variable[bucket][0].assign(
[[0.1], [0.2], [0.3], [0.4], [1], [2], [3], [4]]))
self.assertAllClose(output.eval(), [[0.3 + 4], [0.4 + 3], [0.1 + 1]])
def testMulticlassWithBucketizedColumnHavingMultiDimensions(self):
country = feature_column.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
bucket = feature_column.bucketized_column(
feature_column.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
with ops.Graph().as_default():
# buckets 2, 3, 0
features = {
"price":
constant_op.constant([[20., 210], [110, 50], [-3, -30]]),
"country":
sparse_tensor.SparseTensor(
values=["US", "SV"],
indices=[[0, 0], [1, 0]],
dense_shape=[3, 2])
}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [bucket, country], num_outputs=5))
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
# dimension = 2, bucket_size = 4, num_classes = 5
sess.run(column_to_variable[bucket][0].assign(
[[0.1, 1, 10, 100, 1000], [0.2, 2, 20, 200, 2000],
[0.3, 3, 30, 300, 3000], [0.4, 4, 40, 400, 4000],
[5, 50, 500, 5000, 50000], [6, 60, 600, 6000, 60000],
[7, 70, 700, 7000, 70000], [8, 80, 800, 8000, 80000]]))
self.assertAllClose(
output.eval(),
[[0.3 + 8, 3 + 80, 30 + 800, 300 + 8000, 3000 + 80000],
[0.4 + 7, 4 + 70, 40 + 700, 400 + 7000, 4000 + 70000],
[0.1 + 5, 1 + 50, 10 + 500, 100 + 5000, 1000 + 50000]])
def testCrossWithBucketizedColumn(self):
price_bucket = feature_column.bucketized_column(
feature_column.real_valued_column("price"), boundaries=[0., 10., 100.])
country = feature_column.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_price = feature_column.crossed_column(
[country, price_bucket], hash_bucket_size=10)
with ops.Graph().as_default():
features = {
"price":
constant_op.constant([[20.]]),
"country":
sparse_tensor.SparseTensor(
values=["US", "SV"],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2])
}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [country_price], num_outputs=1))
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
weights = column_to_variable[country_price][0]
sess.run(weights.assign(weights + 0.4))
# There are two crosses each with 0.4 weight.
# score = 0.4 + 0.4
self.assertAllClose(output.eval(), [[0.8]])
def testCrossWithCrossedColumn(self):
price_bucket = feature_column.bucketized_column(
feature_column.real_valued_column("price"), boundaries=[0., 10., 100.])
language = feature_column.sparse_column_with_hash_bucket(
"language", hash_bucket_size=3)
country = feature_column.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_language = feature_column.crossed_column(
[language, country], hash_bucket_size=10)
country_language_price = feature_column.crossed_column(
set([country_language, price_bucket]), hash_bucket_size=15)
with ops.Graph().as_default():
features = {
"price":
constant_op.constant([[20.]]),
"country":
sparse_tensor.SparseTensor(
values=["US", "SV"],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2]),
"language":
sparse_tensor.SparseTensor(
values=["english", "spanish"],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2])
}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [country_language_price], num_outputs=1))
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
weights = column_to_variable[country_language_price][0]
sess.run(weights.assign(weights + 0.4))
        # There are four crosses each with 0.4 weight.
# score = 0.4 + 0.4 + 0.4 + 0.4
self.assertAllClose(output.eval(), [[1.6]])
def testIntegerizedColumn(self):
product = feature_column.sparse_column_with_integerized_feature(
"product", bucket_size=5)
with ops.Graph().as_default():
features = {
"product":
sparse_tensor.SparseTensor(
values=[0, 4, 2],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1])
}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [product], num_outputs=1))
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
product_weights = column_to_variable[product][0]
sess.run(product_weights.assign([[0.1], [0.2], [0.3], [0.4], [0.5]]))
self.assertAllClose(output.eval(), [[0.1], [0.5], [0.3]])
def testIntegerizedColumnWithDenseInputTensor(self):
product = feature_column.sparse_column_with_integerized_feature(
"product", bucket_size=5)
with ops.Graph().as_default():
features = {"product": constant_op.constant([[0], [4], [2]])}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [product], num_outputs=1))
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
product_weights = column_to_variable[product][0]
sess.run(product_weights.assign([[0.1], [0.2], [0.3], [0.4], [0.5]]))
self.assertAllClose(output.eval(), [[0.1], [0.5], [0.3]])
def testIntegerizedColumnWithDenseInputTensor2(self):
product = feature_column.sparse_column_with_integerized_feature(
"product", bucket_size=5)
with ops.Graph().as_default():
features = {"product": constant_op.constant([[0, 4], [2, 3]])}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [product], num_outputs=1))
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
product_weights = column_to_variable[product][0]
sess.run(product_weights.assign([[0.1], [0.2], [0.3], [0.4], [0.5]]))
self.assertAllClose(output.eval(), [[0.6], [0.7]])
def testIntegerizedColumnWithInvalidId(self):
product = feature_column.sparse_column_with_integerized_feature(
"product", bucket_size=5)
with ops.Graph().as_default():
features = {
"product":
sparse_tensor.SparseTensor(
values=[5, 4, 7],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1])
}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [product], num_outputs=1))
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
product_weights = column_to_variable[product][0]
sess.run(product_weights.assign([[0.1], [0.2], [0.3], [0.4], [0.5]]))
self.assertAllClose(output.eval(), [[0.1], [0.5], [0.3]])
def testMulticlassWithOnlyBias(self):
with ops.Graph().as_default():
features = {"age": constant_op.constant([[10.], [20.], [30.], [40.]])}
output, _, bias = feature_column_ops.weighted_sum_from_feature_columns(
features, [feature_column.real_valued_column("age")], num_outputs=3)
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
sess.run(bias.assign([0.1, 0.2, 0.3]))
self.assertAllClose(output.eval(), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3],
[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
def testMulticlassWithRealValuedColumn(self):
with ops.Graph().as_default():
column = feature_column.real_valued_column("age")
features = {"age": constant_op.constant([[10.], [20.], [30.], [40.]])}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [column], num_outputs=3))
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
weights = column_to_variable[column][0]
self.assertEqual(weights.get_shape(), (1, 3))
sess.run(weights.assign([[0.01, 0.03, 0.05]]))
self.assertAllClose(output.eval(), [[0.1, 0.3, 0.5], [0.2, 0.6, 1.0],
[0.3, 0.9, 1.5], [0.4, 1.2, 2.0]])
def testMulticlassWithSparseColumn(self):
with ops.Graph().as_default():
column = feature_column.sparse_column_with_keys(
column_name="language",
keys=["english", "arabic", "hindi", "russian", "swahili"])
features = {
"language":
sparse_tensor.SparseTensor(
values=["hindi", "english", "arabic", "russian"],
indices=[[0, 0], [1, 0], [2, 0], [3, 0]],
dense_shape=[4, 1])
}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [column], num_outputs=3))
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
weights = column_to_variable[column][0]
self.assertEqual(weights.get_shape(), (5, 3))
sess.run(
weights.assign([[0.1, 0.4, 0.7],
[0.2, 0.5, 0.8],
[0.3, 0.6, 0.9],
[0.4, 0.7, 1.0],
[0.5, 0.8, 1.1]]))
self.assertAllClose(output.eval(), [[0.3, 0.6, 0.9],
[0.1, 0.4, 0.7],
[0.2, 0.5, 0.8],
[0.4, 0.7, 1.0]])
def testMulticlassWithBucketizedColumn(self):
column = feature_column.bucketized_column(
feature_column.real_valued_column("price"),
boundaries=[0., 100., 500., 1000.])
with ops.Graph().as_default():
# buckets 0, 2, 1, 2
features = {"price": constant_op.constant([[-3], [110], [20.], [210]])}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [column], num_outputs=3))
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
weights = column_to_variable[column][0]
self.assertEqual(weights.get_shape(), (5, 3))
sess.run(
weights.assign([[0.1, 0.4, 0.7],
[0.2, 0.5, 0.8],
[0.3, 0.6, 0.9],
[0.4, 0.7, 1.0],
[0.5, 0.8, 1.1]]))
self.assertAllClose(output.eval(), [[0.1, 0.4, 0.7],
[0.3, 0.6, 0.9],
[0.2, 0.5, 0.8],
[0.3, 0.6, 0.9]])
def testMulticlassWithCrossedColumn(self):
language = feature_column.sparse_column_with_hash_bucket(
"language", hash_bucket_size=3)
country = feature_column.sparse_column_with_hash_bucket(
"country", hash_bucket_size=2)
column = feature_column.crossed_column(
{language, country}, hash_bucket_size=5)
with ops.Graph().as_default():
features = {
"language":
sparse_tensor.SparseTensor(
values=["english", "spanish", "russian", "swahili"],
indices=[[0, 0], [1, 0], [2, 0], [3, 0]],
dense_shape=[4, 1]),
"country":
sparse_tensor.SparseTensor(
values=["US", "SV", "RU", "KE"],
indices=[[0, 0], [1, 0], [2, 0], [3, 0]],
dense_shape=[4, 1])
}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [column], num_outputs=3))
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
weights = column_to_variable[column][0]
self.assertEqual(weights.get_shape(), (5, 3))
sess.run(
weights.assign([[0.1, 0.4, 0.7],
[0.2, 0.5, 0.8],
[0.3, 0.6, 0.9],
[0.4, 0.7, 1.0],
[0.5, 0.8, 1.1]]))
self.assertAllClose(array_ops.shape(output).eval(), [4, 3])
def testMulticlassWithMultivalentColumn(self):
column = feature_column.sparse_column_with_keys(
column_name="language",
keys=["english", "turkish", "hindi", "russian", "swahili"])
with ops.Graph().as_default():
features = {
"language":
sparse_tensor.SparseTensor(
values=["hindi", "english", "turkish", "turkish", "english"],
indices=[[0, 0], [0, 1], [1, 0], [2, 0], [3, 0]],
dense_shape=[4, 2])
}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [column], num_outputs=3))
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
weights = column_to_variable[column][0]
self.assertEqual(weights.get_shape(), (5, 3))
sess.run(
weights.assign([[0.1, 0.4, 0.7],
[0.2, 0.5, 0.8],
[0.3, 0.6, 0.9],
[0.4, 0.7, 1.0],
[0.5, 0.8, 1.1]]))
self.assertAllClose(output.eval(), [[0.4, 1.0, 1.6],
[0.2, 0.5, 0.8],
[0.2, 0.5, 0.8],
[0.1, 0.4, 0.7]])
def testVariablesAddedToCollection(self):
price_bucket = feature_column.bucketized_column(
feature_column.real_valued_column("price"), boundaries=[0., 10., 100.])
country = feature_column.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_price = feature_column.crossed_column(
[country, price_bucket], hash_bucket_size=10)
with ops.Graph().as_default():
features = {
"price":
constant_op.constant([[20.]]),
"country":
sparse_tensor.SparseTensor(
values=["US", "SV"],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2])
}
feature_column_ops.weighted_sum_from_feature_columns(
features, [country_price, price_bucket],
num_outputs=1,
weight_collections=["my_collection"])
weights = ops.get_collection("my_collection")
# 3 = bias + price_bucket + country_price
self.assertEqual(3, len(weights))
class ParseExampleTest(test.TestCase):
def testParseExample(self):
bucket = feature_column.bucketized_column(
feature_column.real_valued_column(
"price", dimension=3),
boundaries=[0., 10., 100.])
wire_cast = feature_column.sparse_column_with_keys(
"wire_cast", ["marlo", "omar", "stringer"])
# buckets 2, 3, 0
data = example_pb2.Example(features=feature_pb2.Features(feature={
"price":
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=[20., 110, -3])),
"wire_cast":
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b"stringer", b"marlo"])),
}))
output = feature_column_ops.parse_feature_columns_from_examples(
serialized=[data.SerializeToString()],
feature_columns=[bucket, wire_cast])
self.assertIn(bucket, output)
self.assertIn(wire_cast, output)
with self.cached_session():
lookup_ops.tables_initializer().run()
self.assertAllEqual(output[bucket].eval(), [[2, 3, 0]])
self.assertAllEqual(output[wire_cast].indices.eval(), [[0, 0], [0, 1]])
self.assertAllEqual(output[wire_cast].values.eval(), [2, 0])
def testParseSequenceExample(self):
location_keys = ["east_side", "west_side", "nyc"]
embedding_dimension = 10
location = feature_column.sparse_column_with_keys(
"location", keys=location_keys)
location_onehot = feature_column.one_hot_column(location)
wire_cast = feature_column.sparse_column_with_keys(
"wire_cast", ["marlo", "omar", "stringer"])
wire_cast_embedded = feature_column.embedding_column(
wire_cast, dimension=embedding_dimension)
measurements = feature_column.real_valued_column(
"measurements", dimension=2)
context_feature_columns = [location_onehot]
sequence_feature_columns = [wire_cast_embedded, measurements]
sequence_example = example_pb2.SequenceExample(
context=feature_pb2.Features(feature={
"location":
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b"west_side"])),
}),
feature_lists=feature_pb2.FeatureLists(feature_list={
"wire_cast":
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b"marlo", b"stringer"])),
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b"omar", b"stringer", b"marlo"])),
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b"marlo"])),
]),
"measurements":
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=[0.2, 0.3])),
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=[0.1, 0.8])),
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=[0.5, 0.0])),
])
}))
ctx, seq = feature_column_ops.parse_feature_columns_from_sequence_examples(
serialized=sequence_example.SerializeToString(),
context_feature_columns=context_feature_columns,
sequence_feature_columns=sequence_feature_columns)
self.assertIn("location", ctx)
self.assertIsInstance(ctx["location"], sparse_tensor.SparseTensor)
self.assertIn("wire_cast", seq)
self.assertIsInstance(seq["wire_cast"], sparse_tensor.SparseTensor)
self.assertIn("measurements", seq)
self.assertIsInstance(seq["measurements"], ops.Tensor)
with self.cached_session() as sess:
location_val, wire_cast_val, measurement_val = sess.run(
[ctx["location"], seq["wire_cast"], seq["measurements"]])
self.assertAllEqual(location_val.indices, np.array([[0]]))
self.assertAllEqual(location_val.values, np.array([b"west_side"]))
self.assertAllEqual(location_val.dense_shape, np.array([1]))
self.assertAllEqual(wire_cast_val.indices,
np.array(
[[0, 0], [0, 1], [1, 0], [1, 1], [1, 2], [2, 0]]))
self.assertAllEqual(
wire_cast_val.values,
np.array(
[b"marlo", b"stringer", b"omar", b"stringer", b"marlo", b"marlo"]))
self.assertAllEqual(wire_cast_val.dense_shape, np.array([3, 3]))
self.assertAllClose(measurement_val,
np.array([[0.2, 0.3], [0.1, 0.8], [0.5, 0.0]]))
class InferRealValuedColumnTest(test.TestCase):
def testTensorInt32(self):
self.assertEqual(
feature_column_ops.infer_real_valued_columns(
array_ops.zeros(
shape=[33, 4], dtype=dtypes.int32)), [
feature_column.real_valued_column(
"", dimension=4, dtype=dtypes.int32)
])
def testTensorInt64(self):
self.assertEqual(
feature_column_ops.infer_real_valued_columns(
array_ops.zeros(
shape=[33, 4], dtype=dtypes.int64)), [
feature_column.real_valued_column(
"", dimension=4, dtype=dtypes.int64)
])
def testTensorFloat32(self):
self.assertEqual(
feature_column_ops.infer_real_valued_columns(
array_ops.zeros(
shape=[33, 4], dtype=dtypes.float32)), [
feature_column.real_valued_column(
"", dimension=4, dtype=dtypes.float32)
])
def testTensorFloat64(self):
self.assertEqual(
feature_column_ops.infer_real_valued_columns(
array_ops.zeros(
shape=[33, 4], dtype=dtypes.float64)), [
feature_column.real_valued_column(
"", dimension=4, dtype=dtypes.float64)
])
def testDictionary(self):
self.assertItemsEqual(
feature_column_ops.infer_real_valued_columns({
"a": array_ops.zeros(
shape=[33, 4], dtype=dtypes.int32),
"b": array_ops.zeros(
shape=[3, 2], dtype=dtypes.float32)
}), [
feature_column.real_valued_column(
"a", dimension=4, dtype=dtypes.int32),
feature_column.real_valued_column(
"b", dimension=2, dtype=dtypes.float32)
])
def testNotGoodDtype(self):
with self.assertRaises(ValueError):
feature_column_ops.infer_real_valued_columns(
constant_op.constant(
[["a"]], dtype=dtypes.string))
def testSparseTensor(self):
with self.assertRaises(ValueError):
feature_column_ops.infer_real_valued_columns(
sparse_tensor.SparseTensor(
indices=[[0, 0]], values=["a"], dense_shape=[1, 1]))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/feature_column_ops_test.py
|
# -*- coding: utf-8 -*-
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Higher level ops for building layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import six
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.layers import base
from tensorflow.python.layers import convolutional as convolutional_layers
from tensorflow.python.layers import core as core_layers
from tensorflow.python.layers import normalization as normalization_layers
from tensorflow.python.layers import pooling as pooling_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.training import moving_averages
# TODO(b/28426988): Replace legacy_* fns migrated from slim.
# TODO(b/28426988): Remove legacy_* when all uses have migrated to new API.
__all__ = [
'avg_pool2d', 'avg_pool3d', 'batch_norm', 'bias_add', 'conv1d', 'conv2d',
'conv3d', 'conv2d_in_plane', 'conv2d_transpose', 'conv3d_transpose',
'convolution', 'convolution1d', 'convolution2d', 'convolution2d_in_plane',
'convolution2d_transpose', 'convolution3d', 'convolution3d_transpose',
'dense_to_sparse', 'dropout', 'elu', 'flatten', 'fully_connected', 'GDN',
'gdn', 'images_to_sequence', 'layer_norm', 'linear', 'pool', 'max_pool2d',
'max_pool3d', 'one_hot_encoding', 'relu', 'relu6', 'repeat',
'scale_gradient', 'separable_conv2d', 'separable_convolution2d',
'sequence_to_images', 'softmax', 'spatial_softmax', 'stack', 'unit_norm',
'legacy_fully_connected', 'legacy_linear', 'legacy_relu', 'maxout'
]
DATA_FORMAT_NCHW = 'NCHW'
DATA_FORMAT_NHWC = 'NHWC'
DATA_FORMAT_NCDHW = 'NCDHW'
DATA_FORMAT_NDHWC = 'NDHWC'
@add_arg_scope
def avg_pool2d(inputs,
kernel_size,
stride=2,
padding='VALID',
data_format=DATA_FORMAT_NHWC,
outputs_collections=None,
scope=None):
"""Adds a 2D average pooling op.
  It is assumed that pooling is done per image, not across batch or channels.
Args:
inputs: A 4-D tensor of shape `[batch_size, height, width, channels]` if
`data_format` is `NHWC`, and `[batch_size, channels, height, width]` if
`data_format` is `NCHW`.
kernel_size: A list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both values
are the same.
stride: A list of length 2: [stride_height, stride_width]. Can be an int if
both strides are the same. Note that presently both strides must have the
same value.
padding: The padding method, either 'VALID' or 'SAME'.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
outputs_collections: The collections to which the outputs are added.
scope: Optional scope for name_scope.
Returns:
A `Tensor` representing the results of the pooling operation.
Raises:
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
with ops.name_scope(scope, 'AvgPool2D', [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
df = ('channels_first'
if data_format and data_format.startswith('NC') else 'channels_last')
layer = pooling_layers.AveragePooling2D(
pool_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
_scope=sc)
outputs = layer.apply(inputs)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
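# A minimal, illustrative sketch of calling `avg_pool2d`; the helper name and
# the input shape below are assumptions chosen for demonstration only.
def _example_avg_pool2d():
  """Applies 2x2, stride-2 average pooling to a dummy NHWC batch."""
  images = array_ops.ones([8, 32, 32, 3])  # batch=8, 32x32 images, 3 channels.
  # With the default 'VALID' padding, height and width are halved to 16x16.
  return avg_pool2d(images, kernel_size=[2, 2], stride=2)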
@add_arg_scope
def avg_pool3d(inputs,
kernel_size,
stride=2,
padding='VALID',
data_format=DATA_FORMAT_NDHWC,
outputs_collections=None,
scope=None):
"""Adds a 3D average pooling op.
  It is assumed that pooling is done per image, not across batch or channels.
Args:
inputs: A 5-D tensor of shape `[batch_size, depth, height, width, channels]`
if `data_format` is `NDHWC`, and `[batch_size, channels, depth, height,
width]` if `data_format` is `NCDHW`.
    kernel_size: A list of length 3: [kernel_depth, kernel_height, kernel_width]
      of the pooling kernel over which the op is computed. Can be an int if all
      three values are the same.
    stride: A list of length 3: [stride_depth, stride_height, stride_width]. Can
      be an int if all strides are the same. Note that presently all three
      strides must have the same value.
padding: The padding method, either 'VALID' or 'SAME'.
data_format: A string. `NDHWC` (default) and `NCDHW` are supported.
outputs_collections: The collections to which the outputs are added.
scope: Optional scope for name_scope.
Returns:
A `Tensor` representing the results of the pooling operation.
Raises:
ValueError: If `data_format` is neither `NDHWC` nor `NCDHW`.
"""
if data_format not in (DATA_FORMAT_NCDHW, DATA_FORMAT_NDHWC):
raise ValueError('data_format has to be either NCDHW or NDHWC.')
with ops.name_scope(scope, 'AvgPool3D', [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
df = ('channels_first'
if data_format and data_format.startswith('NC') else 'channels_last')
layer = pooling_layers.AveragePooling3D(
pool_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
_scope=sc)
outputs = layer.apply(inputs)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
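# A minimal, illustrative sketch of calling `avg_pool3d`; the helper name and
# the input shape below are assumptions chosen for demonstration only.
def _example_avg_pool3d():
  """Applies 2x2x2, stride-2 average pooling to a dummy NDHWC batch."""
  volumes = array_ops.ones([4, 16, 32, 32, 3])  # batch=4, 16x32x32, 3 channels.
  # With the default 'VALID' padding, depth, height and width are all halved.
  return avg_pool3d(volumes, kernel_size=[2, 2, 2], stride=2)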
def _fused_batch_norm(inputs,
decay=0.999,
center=True,
scale=False,
epsilon=0.001,
activation_fn=None,
param_initializers=None,
param_regularizers=None,
updates_collections=ops.GraphKeys.UPDATE_OPS,
is_training=True,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
data_format=DATA_FORMAT_NHWC,
zero_debias_moving_mean=False,
scope=None):
"""Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Can be used as a normalizer function for conv2d and fully_connected.
Note: when training, the moving_mean and moving_variance need to be updated.
By default the update ops are placed in `tf.GraphKeys.UPDATE_OPS`, so they
need to be added as a dependency to the `train_op`. For example:
```python
update_ops = tf.compat.v1.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss)
```
One can set updates_collections=None to force the updates in place, but that
can have a speed penalty, especially in distributed settings.
Args:
inputs: A tensor with 2 or more dimensions, where the first dimension has
`batch_size`. The normalization is over all but the last dimension if
`data_format` is `NHWC` and the second dimension if `data_format` is
`NCHW`.
decay: Decay for the moving average. Reasonable values for `decay` are close
to 1.0, typically in the multiple-nines range: 0.999, 0.99, 0.9, etc.
      Lower the `decay` value (try `decay=0.9`) if the model experiences
reasonably good training performance but poor validation and/or test
performance.
center: If True, add offset of `beta` to normalized tensor. If False,
`beta` is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is not used. When the
      next layer is linear (this also holds for e.g. `nn.relu`), this can be
      disabled since the scaling can be done by the next layer.
epsilon: Small float added to variance to avoid dividing by zero.
activation_fn: Activation function, default set to None to skip it and
maintain a linear activation.
param_initializers: Optional initializers for beta, gamma, moving mean and
moving variance.
param_regularizers: Optional regularizer for beta and gamma.
updates_collections: Collections to collect the update ops for computation.
      The update ops need to be executed with the train_op. If None, a control
dependency would be added to make sure the updates are computed in place.
is_training: Whether or not the layer is in training mode. In training mode
it would accumulate the statistics of the moments into `moving_mean` and
`moving_variance` using an exponential moving average with the given
`decay`. When it is not in training mode then it would use the values of
the `moving_mean` and the `moving_variance`.
reuse: Whether or not the layer and its variables should be reused. To be
      able to reuse the layer, scope must be given.
variables_collections: Optional collections for the variables.
outputs_collections: Collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
data_format: A string. `NHWC` (default) and `NCHW` are supported.
zero_debias_moving_mean: Use zero_debias for moving_mean.
scope: Optional scope for `variable_scope`.
Returns:
A `Tensor` representing the output of the operation.
Raises:
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
ValueError: If the rank of `inputs` is undefined.
    ValueError: If the rank of `inputs` is neither 2 nor 4.
ValueError: If rank or `C` dimension of `inputs` is undefined.
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
with variable_scope.variable_scope(
scope, 'BatchNorm', [inputs], reuse=reuse) as sc:
inputs = ops.convert_to_tensor(inputs)
original_shape = inputs.get_shape()
original_inputs = inputs
original_rank = original_shape.ndims
if original_rank is None:
raise ValueError('Inputs %s has undefined rank' % inputs.name)
elif original_rank not in [2, 4]:
raise ValueError('Inputs %s has unsupported rank.'
' Expected 2 or 4 but got %d' %
(inputs.name, original_rank))
if original_rank == 2:
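      # Fused batch norm requires 4-D inputs, so rank-2 inputs are reshaped to
      # [-1, 1, 1, C] (NHWC) or [-1, C, 1, 1] (NCHW) and reshaped back after
      # normalization.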
channels = inputs.get_shape().dims[-1].value
if channels is None:
raise ValueError('`C` dimension must be known but is None')
new_shape = [-1, 1, 1, channels]
if data_format == DATA_FORMAT_NCHW:
new_shape = [-1, channels, 1, 1]
inputs = array_ops.reshape(inputs, new_shape)
inputs_shape = inputs.get_shape()
if data_format == DATA_FORMAT_NHWC:
params_shape = inputs_shape[-1:]
else:
params_shape = inputs_shape[1:2]
if not params_shape.is_fully_defined():
raise ValueError('Inputs %s has undefined `C` dimension %s.' %
(inputs.name, params_shape))
# Allocate parameters for the beta and gamma of the normalization.
beta_collections = utils.get_variable_collections(variables_collections,
'beta')
# Float32 required to avoid precision-loss when using fp16 input/output
variable_dtype = dtypes.float32
if not param_initializers:
param_initializers = {}
if not param_regularizers:
param_regularizers = {}
beta_regularizer = param_regularizers.get('beta')
gamma_regularizer = param_regularizers.get('gamma')
if center:
beta_initializer = param_initializers.get('beta',
init_ops.zeros_initializer())
beta = variables.model_variable(
'beta',
shape=params_shape,
dtype=variable_dtype,
initializer=beta_initializer,
regularizer=beta_regularizer,
collections=beta_collections,
trainable=trainable)
else:
beta = array_ops.constant(0.0, dtype=variable_dtype, shape=params_shape)
if scale:
gamma_collections = utils.get_variable_collections(
variables_collections, 'gamma')
gamma_initializer = param_initializers.get('gamma',
init_ops.ones_initializer())
gamma = variables.model_variable(
'gamma',
shape=params_shape,
dtype=variable_dtype,
initializer=gamma_initializer,
regularizer=gamma_regularizer,
collections=gamma_collections,
trainable=trainable)
else:
gamma = array_ops.constant(1.0, dtype=variable_dtype, shape=params_shape)
# Create moving_mean and moving_variance variables and add them to the
# appropriate collections. We disable variable partitioning while creating
# them, because assign_moving_average is not yet supported for partitioned
# variables (this needs to be handled carefully, as it may break
# the checkpoint backward compatibility).
with variable_scope.variable_scope(
variable_scope.get_variable_scope()) as local_scope:
local_scope.set_partitioner(None)
moving_mean_collections = utils.get_variable_collections(
variables_collections, 'moving_mean')
moving_mean_initializer = param_initializers.get(
'moving_mean', init_ops.zeros_initializer())
moving_mean = variables.model_variable(
'moving_mean',
shape=params_shape,
dtype=variable_dtype,
initializer=moving_mean_initializer,
trainable=False,
collections=moving_mean_collections)
moving_variance_collections = utils.get_variable_collections(
variables_collections, 'moving_variance')
moving_variance_initializer = param_initializers.get(
'moving_variance', init_ops.ones_initializer())
moving_variance = variables.model_variable(
'moving_variance',
shape=params_shape,
dtype=variable_dtype,
initializer=moving_variance_initializer,
trainable=False,
collections=moving_variance_collections)
def _fused_batch_norm_training():
return nn.fused_batch_norm(
inputs, gamma, beta, epsilon=epsilon, data_format=data_format)
def _fused_batch_norm_inference():
return nn.fused_batch_norm(
inputs,
gamma,
beta,
mean=moving_mean,
variance=moving_variance,
epsilon=epsilon,
is_training=False,
data_format=data_format)
outputs, mean, variance = utils.smart_cond(is_training,
_fused_batch_norm_training,
_fused_batch_norm_inference)
# If `is_training` doesn't have a constant value, because it is a `Tensor`,
# a `Variable` or `Placeholder` then is_training_value will be None and
# `need_updates` will be true.
is_training_value = utils.constant_value(is_training)
need_updates = is_training_value is None or is_training_value
if need_updates:
if updates_collections is None:
no_updates = lambda: outputs
def _force_updates():
"""Internal function forces updates moving_vars if is_training."""
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay, zero_debias=False)
with ops.control_dependencies(
[update_moving_mean, update_moving_variance]):
return array_ops.identity(outputs)
outputs = utils.smart_cond(is_training, _force_updates, no_updates)
else:
moving_vars_fn = lambda: (moving_mean, moving_variance)
def _delay_updates():
"""Internal function that delay updates moving_vars if is_training."""
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay, zero_debias=False)
return update_moving_mean, update_moving_variance
update_mean, update_variance = utils.smart_cond(is_training,
_delay_updates,
moving_vars_fn)
ops.add_to_collections(updates_collections, update_mean)
ops.add_to_collections(updates_collections, update_variance)
outputs.set_shape(inputs_shape)
if original_shape.ndims == 2:
outputs = array_ops.reshape(outputs, array_ops.shape(original_inputs))
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
@add_arg_scope
def batch_norm(inputs,
decay=0.999,
center=True,
scale=False,
epsilon=0.001,
activation_fn=None,
param_initializers=None,
param_regularizers=None,
updates_collections=ops.GraphKeys.UPDATE_OPS,
is_training=True,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
batch_weights=None,
fused=None,
data_format=DATA_FORMAT_NHWC,
zero_debias_moving_mean=False,
scope=None,
renorm=False,
renorm_clipping=None,
renorm_decay=0.99,
adjustment=None):
"""Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
  Can be used as a normalizer function for conv2d and fully_connected. The
  normalization is over all but the last dimension if `data_format` is `NHWC`
  and all but the second dimension if `data_format` is `NCHW`. In case of a 2D
  tensor this corresponds to the batch dimension, while in case of a 4D tensor
  this corresponds to the batch and space dimensions.
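  As an illustrative sketch (the `images` and `is_training` names below are
  assumed to be supplied by the caller rather than defined in this module), it
  can be passed as a `normalizer_fn`:
  ```python
  net = conv2d(images, 64, [3, 3],
               normalizer_fn=batch_norm,
               normalizer_params={'is_training': is_training})
  ```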
Note: when training, the moving_mean and moving_variance need to be updated.
By default the update ops are placed in `tf.GraphKeys.UPDATE_OPS`, so they
need to be added as a dependency to the `train_op`. For example:
```python
update_ops = tf.compat.v1.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss)
```
One can set updates_collections=None to force the updates in place, but that
can have a speed penalty, especially in distributed settings.
Args:
inputs: A tensor with 2 or more dimensions, where the first dimension has
`batch_size`. The normalization is over all but the last dimension if
`data_format` is `NHWC` and the second dimension if `data_format` is
`NCHW`.
decay: Decay for the moving average. Reasonable values for `decay` are close
to 1.0, typically in the multiple-nines range: 0.999, 0.99, 0.9, etc.
      Lower the `decay` value (try `decay=0.9`) if the model experiences
reasonably good training performance but poor validation and/or test
performance. Try zero_debias_moving_mean=True for improved stability.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is not used. When the
      next layer is linear (this also holds for e.g. `nn.relu`), this can be
      disabled since the scaling can be done by the next layer.
epsilon: Small float added to variance to avoid dividing by zero.
activation_fn: Activation function, default set to None to skip it and
maintain a linear activation.
param_initializers: Optional initializers for beta, gamma, moving mean and
moving variance.
param_regularizers: Optional regularizer for beta and gamma.
updates_collections: Collections to collect the update ops for computation.
      The update ops need to be executed with the train_op. If None, a control
dependency would be added to make sure the updates are computed in place.
is_training: Whether or not the layer is in training mode. In training mode
it would accumulate the statistics of the moments into `moving_mean` and
`moving_variance` using an exponential moving average with the given
`decay`. When it is not in training mode then it would use the values of
the `moving_mean` and the `moving_variance`.
reuse: Whether or not the layer and its variables should be reused. To be
      able to reuse the layer, scope must be given.
variables_collections: Optional collections for the variables.
outputs_collections: Collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
batch_weights: An optional tensor of shape `[batch_size]`, containing a
frequency weight for each batch item. If present, then the batch
normalization uses weighted mean and variance. (This can be used to
correct for bias in training example selection.)
fused: if `None` or `True`, use a faster, fused implementation if possible.
If `False`, use the system recommended implementation.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
zero_debias_moving_mean: Use zero_debias for moving_mean. It creates a new
pair of variables 'moving_mean/biased' and 'moving_mean/local_step'.
scope: Optional scope for `variable_scope`.
renorm: Whether to use Batch Renormalization
(https://arxiv.org/abs/1702.03275). This adds extra variables during
training. The inference is the same for either value of this parameter.
renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
scalar `Tensors` used to clip the renorm correction. The correction `(r,
d)` is used as `corrected_value = normalized_value * r + d`, with `r`
clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
dmax are set to inf, 0, inf, respectively.
renorm_decay: Momentum used to update the moving means and standard
deviations with renorm. Unlike `momentum`, this affects training and
should be neither too small (which would add noise) nor too large (which
would give stale estimates). Note that `decay` is still applied to get the
means and variances for inference.
adjustment: A function taking the `Tensor` containing the (dynamic) shape of
the input tensor and returning a pair (scale, bias) to apply to the
normalized values (before gamma and beta), only during training. For
example,
`adjustment = lambda shape: (
tf.random.uniform(shape[-1:], 0.93, 1.07),
tf.random.uniform(shape[-1:], -0.1, 0.1))` will scale the normalized
value by up to 7% up or down, then shift the result by up to 0.1
(with independent scaling and bias for each feature but shared
across all examples), and finally apply gamma and/or beta. If
`None`, no adjustment is applied.
Returns:
A `Tensor` representing the output of the operation.
Raises:
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
ValueError: If the rank of `inputs` is undefined.
ValueError: If rank or channels dimension of `inputs` is undefined.
"""
if fused is None:
fused = True
# Only use _fused_batch_norm if all of the following three
# conditions are true:
# (1) fused is set True;
# (2) it is possible to use (currently it doesn't support batch weights,
# renorm, and the case when rank is neither 2 nor 4);
# (3) it is used with zero_debias_moving_mean, or an input shape of rank 2,
# or non-default updates_collections (not implemented in
# normalization_layers.BatchNormalization yet); otherwise use the fused
# implementation in normalization_layers.BatchNormalization.
inputs = ops.convert_to_tensor(inputs)
rank = inputs.get_shape().ndims
possible_to_fuse = (
batch_weights is None and not renorm and rank in [2, 4] and
adjustment is None)
if fused and possible_to_fuse and (
zero_debias_moving_mean or rank == 2 or
updates_collections is not ops.GraphKeys.UPDATE_OPS):
return _fused_batch_norm(
inputs,
decay=decay,
center=center,
scale=scale,
epsilon=epsilon,
activation_fn=activation_fn,
param_initializers=param_initializers,
param_regularizers=param_regularizers,
updates_collections=updates_collections,
is_training=is_training,
reuse=reuse,
variables_collections=variables_collections,
outputs_collections=outputs_collections,
trainable=trainable,
data_format=data_format,
zero_debias_moving_mean=zero_debias_moving_mean,
scope=scope)
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
layer_variable_getter = _build_variable_getter()
with variable_scope.variable_scope(
scope,
'BatchNorm', [inputs],
reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
# Determine whether we can use the core layer class.
if (batch_weights is None and
updates_collections is ops.GraphKeys.UPDATE_OPS and
not zero_debias_moving_mean):
# Use the core layer class.
axis = 1 if data_format == DATA_FORMAT_NCHW else -1
if not param_initializers:
param_initializers = {}
beta_initializer = param_initializers.get('beta',
init_ops.zeros_initializer())
gamma_initializer = param_initializers.get('gamma',
init_ops.ones_initializer())
moving_mean_initializer = param_initializers.get(
'moving_mean', init_ops.zeros_initializer())
moving_variance_initializer = param_initializers.get(
'moving_variance', init_ops.ones_initializer())
if not param_regularizers:
param_regularizers = {}
beta_regularizer = param_regularizers.get('beta')
gamma_regularizer = param_regularizers.get('gamma')
layer = normalization_layers.BatchNormalization(
axis=axis,
momentum=decay,
epsilon=epsilon,
center=center,
scale=scale,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
moving_mean_initializer=moving_mean_initializer,
moving_variance_initializer=moving_variance_initializer,
beta_regularizer=beta_regularizer,
gamma_regularizer=gamma_regularizer,
trainable=trainable,
renorm=renorm,
renorm_clipping=renorm_clipping,
renorm_momentum=renorm_decay,
adjustment=adjustment,
name=sc.name,
_scope=sc,
_reuse=reuse,
fused=fused)
outputs = layer.apply(inputs, training=is_training)
# Add variables to collections.
_add_variable_to_collections(layer.moving_mean, variables_collections,
'moving_mean')
_add_variable_to_collections(layer.moving_variance, variables_collections,
'moving_variance')
if layer.beta is not None:
_add_variable_to_collections(layer.beta, variables_collections, 'beta')
if layer.gamma is not None:
_add_variable_to_collections(layer.gamma, variables_collections,
'gamma')
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
# Not supported by layer class: batch_weights argument,
# and custom updates_collections. In that case, use the legacy BN
# implementation.
# Custom updates collections are not supported because the update logic
# is different in this case, in particular w.r.t. "forced updates" and
# update op reuse.
if renorm:
raise ValueError('renorm is not supported with batch_weights, '
'updates_collections or zero_debias_moving_mean')
inputs_shape = inputs.get_shape()
inputs_rank = inputs_shape.ndims
if inputs_rank is None:
raise ValueError('Inputs %s has undefined rank.' % inputs.name)
dtype = inputs.dtype.base_dtype
if batch_weights is not None:
batch_weights = ops.convert_to_tensor(batch_weights)
inputs_shape[0:1].assert_is_compatible_with(batch_weights.get_shape())
# Reshape batch weight values so they broadcast across inputs.
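    # For example, a rank-4 NHWC input gives nshape == [-1, 1, 1, 1].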
nshape = [-1] + [1 for _ in range(inputs_rank - 1)]
batch_weights = array_ops.reshape(batch_weights, nshape)
if data_format == DATA_FORMAT_NCHW:
moments_axes = [0] + list(range(2, inputs_rank))
params_shape = inputs_shape[1:2]
# For NCHW format, rather than relying on implicit broadcasting, we
# explicitly reshape the params to params_shape_broadcast when computing
# the moments and the batch normalization.
params_shape_broadcast = list([1, inputs_shape.dims[1].value] +
[1 for _ in range(2, inputs_rank)])
else:
moments_axes = list(range(inputs_rank - 1))
params_shape = inputs_shape[-1:]
params_shape_broadcast = None
if not params_shape.is_fully_defined():
raise ValueError('Inputs %s has undefined channels dimension %s.' %
(inputs.name, params_shape))
# Allocate parameters for the beta and gamma of the normalization.
beta, gamma = None, None
if not param_initializers:
param_initializers = {}
if center:
beta_collections = utils.get_variable_collections(variables_collections,
'beta')
beta_initializer = param_initializers.get('beta',
init_ops.zeros_initializer())
beta = variables.model_variable(
'beta',
shape=params_shape,
dtype=dtype,
initializer=beta_initializer,
collections=beta_collections,
trainable=trainable)
if scale:
gamma_collections = utils.get_variable_collections(
variables_collections, 'gamma')
gamma_initializer = param_initializers.get('gamma',
init_ops.ones_initializer())
gamma = variables.model_variable(
'gamma',
shape=params_shape,
dtype=dtype,
initializer=gamma_initializer,
collections=gamma_collections,
trainable=trainable)
# Create moving_mean and moving_variance variables and add them to the
# appropriate collections. We disable variable partitioning while creating
# them, because assign_moving_average is not yet supported for partitioned
# variables (this needs to be handled carefully, as it may break
# the checkpoint backward compatibility).
with variable_scope.variable_scope(
variable_scope.get_variable_scope()) as local_scope:
local_scope.set_partitioner(None)
moving_mean_collections = utils.get_variable_collections(
variables_collections, 'moving_mean')
moving_mean_initializer = param_initializers.get(
'moving_mean', init_ops.zeros_initializer())
moving_mean = variables.model_variable(
'moving_mean',
shape=params_shape,
dtype=dtype,
initializer=moving_mean_initializer,
trainable=False,
collections=moving_mean_collections)
moving_variance_collections = utils.get_variable_collections(
variables_collections, 'moving_variance')
moving_variance_initializer = param_initializers.get(
'moving_variance', init_ops.ones_initializer())
moving_variance = variables.model_variable(
'moving_variance',
shape=params_shape,
dtype=dtype,
initializer=moving_variance_initializer,
trainable=False,
collections=moving_variance_collections)
    # If `is_training` doesn't have a constant value because it is a `Tensor`,
    # a `Variable` or a `Placeholder`, then `is_training_value` will be None
    # and `need_moments` will be True.
is_training_value = utils.constant_value(is_training)
need_moments = is_training_value is None or is_training_value
if need_moments:
# Calculate the moments based on the individual batch.
if batch_weights is None:
if data_format == DATA_FORMAT_NCHW:
mean, variance = nn.moments(inputs, moments_axes, keep_dims=True)
mean = array_ops.reshape(mean, [-1])
variance = array_ops.reshape(variance, [-1])
else:
mean, variance = nn.moments(inputs, moments_axes)
else:
if data_format == DATA_FORMAT_NCHW:
mean, variance = nn.weighted_moments(
inputs, moments_axes, batch_weights, keepdims=True)
mean = array_ops.reshape(mean, [-1])
variance = array_ops.reshape(variance, [-1])
else:
mean, variance = nn.weighted_moments(inputs, moments_axes,
batch_weights)
moving_vars_fn = lambda: (moving_mean, moving_variance)
if updates_collections is None:
def _force_updates():
"""Internal function forces updates moving_vars if is_training."""
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay, zero_debias=False)
with ops.control_dependencies(
[update_moving_mean, update_moving_variance]):
return array_ops.identity(mean), array_ops.identity(variance)
mean, variance = utils.smart_cond(is_training, _force_updates,
moving_vars_fn)
else:
def _delay_updates():
"""Internal function that delay updates moving_vars if is_training."""
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay, zero_debias=False)
return update_moving_mean, update_moving_variance
update_mean, update_variance = utils.smart_cond(is_training,
_delay_updates,
moving_vars_fn)
ops.add_to_collections(updates_collections, update_mean)
ops.add_to_collections(updates_collections, update_variance)
# Use computed moments during training and moving_vars otherwise.
vars_fn = lambda: (mean, variance)
mean, variance = utils.smart_cond(is_training, vars_fn, moving_vars_fn)
else:
mean, variance = moving_mean, moving_variance
if data_format == DATA_FORMAT_NCHW:
mean = array_ops.reshape(mean, params_shape_broadcast)
variance = array_ops.reshape(variance, params_shape_broadcast)
if beta is not None:
beta = array_ops.reshape(beta, params_shape_broadcast)
if gamma is not None:
gamma = array_ops.reshape(gamma, params_shape_broadcast)
# Compute batch_normalization.
outputs = nn.batch_normalization(inputs, mean, variance, beta, gamma,
epsilon)
outputs.set_shape(inputs_shape)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
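# A minimal usage sketch for `batch_norm` (editor's addition, not part of the
# original source), assuming TF 1.x with `tf.contrib` available; the tensor
# names and hyperparameters below are illustrative only.
#
#   import tensorflow as tf
#   x = tf.placeholder(tf.float32, [None, 32, 32, 3])
#   is_training = tf.placeholder(tf.bool, [])
#   # Commonly used as a `normalizer_fn` for conv2d/fully_connected:
#   net = tf.contrib.layers.conv2d(
#       x, 64, [3, 3],
#       normalizer_fn=tf.contrib.layers.batch_norm,
#       normalizer_params={'is_training': is_training, 'decay': 0.997})
#   # With the default `updates_collections`, remember to run the ops collected
#   # in tf.GraphKeys.UPDATE_OPS alongside the training step.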
@add_arg_scope
def bias_add(inputs,
activation_fn=None,
initializer=init_ops.zeros_initializer(),
regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
data_format=DATA_FORMAT_NHWC,
scope=None):
"""Adds a bias to the inputs.
Can be used as a normalizer function for conv2d and fully_connected.
Args:
    inputs: A tensor with at least rank 2 and a static value for the last
      dimension, e.g. `[batch_size, depth]`, `[None, None, None, depth]`.
activation_fn: Activation function, default set to None to skip it and
maintain a linear activation.
initializer: An initializer for the bias, defaults to 0.
regularizer: A regularizer like the result of `l1_regularizer` or
`l2_regularizer`.
reuse: Whether or not the layer and its variables should be reused. To be
      able to reuse the layer, the scope must be given.
variables_collections: Optional collections for the variables.
outputs_collections: Collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
data_format: A string. 'NHWC' and 'NCHW' are supported.
scope: Optional scope for variable_scope.
Returns:
A tensor representing the result of adding biases to the inputs.
Raises:
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
ValueError: If `data_format` is `NCHW` and rank of `inputs` is not 4.
ValueError: If the rank of `inputs` is undefined.
ValueError: If rank or `C` dimension of `inputs` is undefined.
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
with variable_scope.variable_scope(
scope, 'BiasAdd', [inputs], reuse=reuse) as sc:
inputs = ops.convert_to_tensor(inputs)
dtype = inputs.dtype.base_dtype
inputs_shape = inputs.get_shape()
inputs_rank = inputs_shape.ndims
if inputs_rank is None:
raise ValueError('Dims of shape must be known but is None')
elif inputs_rank != 4 and data_format == DATA_FORMAT_NCHW:
raise ValueError('Data format NCHW only supports 4D Tensor')
axis = 1 if data_format == DATA_FORMAT_NCHW else -1
num_features = inputs_shape.dims[axis].value
if num_features is None:
raise ValueError('`C` dimension must be known but is None')
biases_collections = utils.get_variable_collections(variables_collections,
'biases')
biases = variables.model_variable(
'biases',
shape=[
num_features,
],
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
collections=biases_collections,
trainable=trainable)
outputs = nn.bias_add(inputs, biases, data_format=data_format)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
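# A minimal usage sketch for `bias_add` (editor's addition, not part of the
# original source), assuming TF 1.x; the shape is illustrative.
#
#   import tensorflow as tf
#   feature_maps = tf.placeholder(tf.float32, [None, 32, 32, 64])
#   # Adds one learned bias per channel of the NHWC input.
#   biased = tf.contrib.layers.bias_add(feature_maps, data_format='NHWC')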
# TODO(jbms): change `rate` parameter to `dilation_rate` for consistency with
# underlying op.
@add_arg_scope
def convolution(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=None,
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None,
conv_dims=None):
"""Adds an N-D convolution followed by an optional batch_norm layer.
It is required that 1 <= N <= 3.
`convolution` creates a variable called `weights`, representing the
convolutional kernel, that is convolved (actually cross-correlated) with the
`inputs` to produce a `Tensor` of activations. If a `normalizer_fn` is
provided (such as `batch_norm`), it is then applied. Otherwise, if
`normalizer_fn` is None and a `biases_initializer` is provided then a `biases`
  variable would be created and added to the activations. Finally, if
`activation_fn` is not `None`, it is applied to the activations as well.
Performs atrous convolution with input stride/dilation rate equal to `rate`
if a value > 1 for any dimension of `rate` is specified. In this case
`stride` values != 1 are not supported.
Args:
inputs: A Tensor of rank N+2 of shape `[batch_size] + input_spatial_shape +
[in_channels]` if data_format does not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC".
num_outputs: Integer, the number of output filters.
kernel_size: A sequence of N positive integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
stride: A sequence of N positive integers specifying the stride at which to
compute output. Can be a single integer to specify the same value for all
spatial dimensions. Specifying any `stride` value != 1 is incompatible
with specifying any `rate` value != 1.
padding: One of `"VALID"` or `"SAME"`.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For
N=3, the valid values are "NDHWC" (default) and "NCDHW".
rate: A sequence of N positive integers specifying the dilation rate to use
for atrous convolution. Can be a single integer to specify the same value
for all spatial dimensions. Specifying any `rate` value != 1 is
incompatible with specifying any `stride` value != 1.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
      Default is None, meaning no normalizer function is used.
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
      able to reuse the layer, the scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collection per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_scope`.
    conv_dims: Optional convolution dimensionality. When set, the corresponding
      convolution is used (e.g. 2 for Conv 2D, 3 for Conv 3D, ...). When left
      as None, the convolution dimensionality is selected based on the input
      rank (i.e. Conv ND, with N = input_rank - 2).
Returns:
A tensor representing the output of the operation.
Raises:
ValueError: If `data_format` is invalid.
    ValueError: If both `rate` and `stride` are not uniformly 1.
"""
if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC', 'NCDHW']:
raise ValueError('Invalid data_format: %r' % (data_format,))
layer_variable_getter = _build_variable_getter({
'bias': 'biases',
'kernel': 'weights'
})
with variable_scope.variable_scope(
scope, 'Conv', [inputs], reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
input_rank = inputs.get_shape().ndims
if conv_dims is not None and conv_dims + 2 != input_rank:
raise ValueError('Convolution expects input with rank %d, got %d' %
(conv_dims + 2, input_rank))
if input_rank == 3:
layer_class = convolutional_layers.Convolution1D
elif input_rank == 4:
layer_class = convolutional_layers.Convolution2D
elif input_rank == 5:
layer_class = convolutional_layers.Convolution3D
else:
      raise ValueError('Convolution not supported for input with rank %d' %
                       input_rank)
df = ('channels_first'
if data_format and data_format.startswith('NC') else 'channels_last')
layer = layer_class(
filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
dilation_rate=rate,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
if layer.use_bias:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
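# A minimal usage sketch for `convolution`/`conv2d` (editor's addition, not
# part of the original source), assuming TF 1.x; names are illustrative.
#
#   import tensorflow as tf
#   images = tf.placeholder(tf.float32, [None, 224, 224, 3])
#   # A 2-D convolution is selected from the rank-4 input; ReLU is the default
#   # activation and a `biases` variable is created since no normalizer is set.
#   net = tf.contrib.layers.conv2d(images, 64, [3, 3], stride=1, padding='SAME')
#   # Dilated (atrous) convolution: rate > 1 requires stride == 1.
#   dilated = tf.contrib.layers.conv2d(images, 64, [3, 3], rate=2)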
@add_arg_scope
def convolution1d(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=None,
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
return convolution(
inputs,
num_outputs,
kernel_size,
stride,
padding,
data_format,
rate,
activation_fn,
normalizer_fn,
normalizer_params,
weights_initializer,
weights_regularizer,
biases_initializer,
biases_regularizer,
reuse,
variables_collections,
outputs_collections,
trainable,
scope,
conv_dims=1)
convolution1d.__doc__ = convolution.__doc__
@add_arg_scope
def convolution2d(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=None,
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
return convolution(
inputs,
num_outputs,
kernel_size,
stride,
padding,
data_format,
rate,
activation_fn,
normalizer_fn,
normalizer_params,
weights_initializer,
weights_regularizer,
biases_initializer,
biases_regularizer,
reuse,
variables_collections,
outputs_collections,
trainable,
scope,
conv_dims=2)
convolution2d.__doc__ = convolution.__doc__
@add_arg_scope
def convolution3d(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=None,
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
return convolution(
inputs,
num_outputs,
kernel_size,
stride,
padding,
data_format,
rate,
activation_fn,
normalizer_fn,
normalizer_params,
weights_initializer,
weights_regularizer,
biases_initializer,
biases_regularizer,
reuse,
variables_collections,
outputs_collections,
trainable,
scope,
conv_dims=3)
convolution3d.__doc__ = convolution.__doc__
@add_arg_scope
def convolution2d_in_plane(
inputs,
kernel_size,
stride=1,
padding='SAME',
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Performs the same in-plane convolution to each channel independently.
This is useful for performing various simple channel-independent convolution
operations such as image gradients:
    image = tf.constant(..., shape=(16, 240, 320, 3))
    gradient_init = tf.constant_initializer([1, -1])
    vert_gradients = layers.conv2d_in_plane(image,
                                            kernel_size=[2, 1],
                                            weights_initializer=gradient_init)
    horz_gradients = layers.conv2d_in_plane(image,
                                            kernel_size=[1, 2],
                                            weights_initializer=gradient_init)
Args:
inputs: A 4-D tensor with dimensions [batch_size, height, width, channels].
    kernel_size: A list of length 2 holding the [kernel_height, kernel_width]
      of the filters. Can be an int if both values are the same.
stride: A list of length 2 `[stride_height, stride_width]`. Can be an int if
both strides are the same. Note that presently both strides must have the
same value.
padding: The padding type to use, either 'SAME' or 'VALID'.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
      Default is None, meaning no normalizer function is used.
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
      able to reuse the layer, the scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collection per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_scope`.
Returns:
A `Tensor` representing the output of the operation.
"""
with variable_scope.variable_scope(
scope, 'ConvInPlane', [inputs], reuse=reuse) as sc:
dtype = inputs.dtype.base_dtype
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
weights_shape = [kernel_h, kernel_w, 1, 1]
weights_collections = utils.get_variable_collections(
variables_collections, 'weights')
weights = variables.model_variable(
'weights',
shape=weights_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
collections=weights_collections,
trainable=trainable)
depthwise_weights = array_ops.tile(weights, [1, 1, num_filters_in, 1])
outputs = nn.depthwise_conv2d(inputs, depthwise_weights,
[1, stride_h, stride_w, 1], padding)
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
if biases_initializer is not None:
biases_collections = utils.get_variable_collections(
variables_collections, 'biases')
biases = variables.model_variable(
'biases',
shape=[
num_filters_in,
],
dtype=dtype,
initializer=biases_initializer,
regularizer=biases_regularizer,
collections=biases_collections,
trainable=trainable)
outputs = nn.bias_add(outputs, biases)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
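# A minimal usage sketch for `convolution2d_in_plane` (editor's addition, not
# part of the original source), assuming TF 1.x; it mirrors the image-gradient
# example in the docstring above, and the constant initializer used to seed the
# [1, -1] kernel is an assumption about the intended weights.
#
#   import tensorflow as tf
#   image = tf.placeholder(tf.float32, [16, 240, 320, 3])
#   vert_gradients = tf.contrib.layers.conv2d_in_plane(
#       image, kernel_size=[2, 1], stride=1, padding='VALID',
#       activation_fn=None,
#       weights_initializer=tf.constant_initializer([1, -1]))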
@add_arg_scope
def convolution2d_transpose(
inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=DATA_FORMAT_NHWC,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a convolution2d_transpose with an optional batch normalization layer.
The function creates a variable called `weights`, representing the
kernel, that is convolved with the input. If `normalizer_fn` is `None`, a
second variable called 'biases' is added to the result of the operation.
Args:
inputs: A 4-D `Tensor` of type `float` and shape `[batch, height, width,
in_channels]` for `NHWC` data format or `[batch, in_channels, height,
width]` for `NCHW` data format.
num_outputs: Integer, the number of output filters.
    kernel_size: A list of length 2 holding the [kernel_height, kernel_width]
      of the filters. Can be an int if both values are the same.
stride: A list of length 2: [stride_height, stride_width]. Can be an int if
both strides are the same. Note that presently both strides must have the
same value.
padding: One of 'VALID' or 'SAME'.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
      Default is None, meaning no normalizer function is used.
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
      able to reuse the layer, the scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collection per variable.
outputs_collections: Collection to add the outputs.
trainable: Whether or not the variables should be trainable or not.
scope: Optional scope for variable_scope.
Returns:
A tensor representing the output of the operation.
Raises:
ValueError: If 'kernel_size' is not a list of length 2.
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
ValueError: If `C` dimension of `inputs` is None.
"""
layer_variable_getter = _build_variable_getter({
'bias': 'biases',
'kernel': 'weights'
})
with variable_scope.variable_scope(
scope,
'Conv2d_transpose', [inputs],
reuse=reuse,
custom_getter=layer_variable_getter) as sc:
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
inputs = ops.convert_to_tensor(inputs)
df = ('channels_first'
if data_format and data_format.startswith('NC') else 'channels_last')
layer = convolutional_layers.Convolution2DTranspose(
filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
if layer.bias is not None:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
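# A minimal usage sketch for `convolution2d_transpose` (editor's addition, not
# part of the original source), assuming TF 1.x; shapes are illustrative.
#
#   import tensorflow as tf
#   features = tf.placeholder(tf.float32, [None, 16, 16, 128])
#   # Upsamples the 16x16 feature maps to 32x32 with a learned 4x4 kernel.
#   upsampled = tf.contrib.layers.conv2d_transpose(
#       features, 64, [4, 4], stride=2, padding='SAME')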
@add_arg_scope
def convolution3d_transpose(
inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=DATA_FORMAT_NDHWC,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a convolution3d_transpose with an optional batch normalization layer.
The function creates a variable called `weights`, representing the
  kernel, that is convolved with the input. If `normalizer_fn` is `None`, a
second variable called 'biases' is added to the result of the operation.
Args:
inputs: A 5-D `Tensor` of type `float` and shape `[batch, depth, height,
width, in_channels]` for `NDHWC` data format or `[batch, in_channels,
depth, height, width]` for `NCDHW` data format.
num_outputs: Integer, the number of output filters.
    kernel_size: A list of length 3 holding the [kernel_depth, kernel_height,
      kernel_width] of the filters. Can be an int if all values are the same.
    stride: A list of length 3: [stride_depth, stride_height, stride_width]. Can
      be an int if all strides are the same. Note that presently all strides
      must have the same value.
padding: One of 'VALID' or 'SAME'.
data_format: A string. `NDHWC` (default) and `NCDHW` are supported.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
      Default is None, meaning no normalizer function is used.
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
      able to reuse the layer, the scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collection per variable.
outputs_collections: Collection to add the outputs.
trainable: Whether or not the variables should be trainable or not.
scope: Optional scope for variable_scope.
Returns:
A tensor representing the output of the operation.
Raises:
ValueError: If 'kernel_size' is not a list of length 3.
ValueError: If `data_format` is neither `NDHWC` nor `NCDHW`.
ValueError: If `C` dimension of `inputs` is None.
"""
layer_variable_getter = _build_variable_getter({
'bias': 'biases',
'kernel': 'weights'
})
with variable_scope.variable_scope(
scope,
'Conv3d_transpose', [inputs],
reuse=reuse,
custom_getter=layer_variable_getter) as sc:
if data_format not in (DATA_FORMAT_NCDHW, DATA_FORMAT_NDHWC):
raise ValueError('data_format has to be either NCDHW or NDHWC.')
inputs = ops.convert_to_tensor(inputs)
df = ('channels_first'
if data_format and data_format.startswith('NC') else 'channels_last')
layer = convolutional_layers.Convolution3DTranspose(
filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
if layer.bias is not None:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
@add_arg_scope
def dense_to_sparse(tensor, eos_token=0, outputs_collections=None, scope=None):
"""Converts a dense tensor into a sparse tensor.
An example use would be to convert dense labels to sparse ones
so that they can be fed to the ctc_loss.
Args:
    tensor: An `int` `Tensor` to be converted to a `SparseTensor`.
eos_token: An integer. It is part of the target label that signifies the
end of a sentence.
outputs_collections: Collection to add the outputs.
scope: Optional scope for name_scope.
"""
with variable_scope.variable_scope(scope, 'dense_to_sparse', [tensor]) as sc:
tensor = ops.convert_to_tensor(tensor)
indices = array_ops.where(
math_ops.not_equal(tensor, constant_op.constant(eos_token,
tensor.dtype)))
values = array_ops.gather_nd(tensor, indices)
shape = array_ops.shape(tensor, out_type=dtypes.int64)
outputs = sparse_tensor.SparseTensor(indices, values, shape)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
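# A minimal usage sketch for `dense_to_sparse` (editor's addition, not part of
# the original source), assuming TF 1.x exports it as
# `tf.contrib.layers.dense_to_sparse`; the label values are illustrative.
#
#   import tensorflow as tf
#   # Dense labels padded with the EOS token 0; the result is a SparseTensor
#   # that can be fed to tf.nn.ctc_loss as `labels`.
#   dense_labels = tf.constant([[3, 5, 2, 0, 0], [7, 1, 0, 0, 0]])
#   sparse_labels = tf.contrib.layers.dense_to_sparse(dense_labels, eos_token=0)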
@add_arg_scope
def dropout(inputs,
keep_prob=0.5,
noise_shape=None,
is_training=True,
outputs_collections=None,
scope=None,
seed=None):
"""Returns a dropout op applied to the input.
With probability `keep_prob`, outputs the input element scaled up by
`1 / keep_prob`, otherwise outputs `0`. The scaling is so that the expected
sum is unchanged.
Args:
inputs: The tensor to pass to the nn.dropout op.
keep_prob: A scalar `Tensor` with the same type as x. The probability that
each element is kept.
noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
randomly generated keep/drop flags.
is_training: A bool `Tensor` indicating whether or not the model is in
training mode. If so, dropout is applied and values scaled. Otherwise,
inputs is returned.
outputs_collections: Collection to add the outputs.
scope: Optional scope for name_scope.
seed: A Python integer. Used to create random seeds. See
`tf.compat.v1.set_random_seed` for behavior.
Returns:
A tensor representing the output of the operation.
"""
with variable_scope.variable_scope(
scope, 'Dropout', [inputs], custom_getter=_model_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
layer = core_layers.Dropout(
rate=1 - keep_prob,
noise_shape=noise_shape,
seed=seed,
name=sc.name,
_scope=sc)
outputs = layer.apply(inputs, training=is_training)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
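# A minimal usage sketch for `dropout` (editor's addition, not part of the
# original source), assuming TF 1.x; names are illustrative.
#
#   import tensorflow as tf
#   net = tf.placeholder(tf.float32, [None, 256])
#   is_training = tf.placeholder(tf.bool, [])
#   # Keeps 80% of the activations during training; acts as a no-op otherwise.
#   net = tf.contrib.layers.dropout(net, keep_prob=0.8, is_training=is_training)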
@add_arg_scope
def flatten(inputs, outputs_collections=None, scope=None):
"""Flattens the input while maintaining the batch_size.
Assumes that the first dimension represents the batch.
Args:
inputs: A tensor of size [batch_size, ...].
outputs_collections: Collection to add the outputs.
scope: Optional scope for name_scope.
Returns:
A flattened tensor with shape [batch_size, k].
Raises:
ValueError: If inputs rank is unknown or less than 2.
"""
with ops.name_scope(scope, 'Flatten', [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
outputs = core_layers.flatten(inputs)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
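# A minimal usage sketch for `flatten` (editor's addition, not part of the
# original source), assuming TF 1.x; shapes are illustrative.
#
#   import tensorflow as tf
#   conv_out = tf.placeholder(tf.float32, [None, 7, 7, 64])
#   # Collapses everything but the batch dimension: shape becomes [None, 3136].
#   flat = tf.contrib.layers.flatten(conv_out)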
def _sparse_inner_flatten(inputs, new_rank):
"""Helper function for `inner_flatten`."""
inputs_rank = inputs.dense_shape.get_shape().as_list()[0]
if inputs_rank < new_rank:
raise ValueError(
'Inputs has rank less than new_rank. {} must have rank at least'
' {}. Received rank {}, shape {}'.format(inputs, new_rank, inputs_rank,
inputs.get_shape()))
outer_dimensions = inputs.dense_shape[:new_rank - 1]
inner_dimensions = inputs.dense_shape[new_rank - 1:]
new_shape = array_ops.concat(
(outer_dimensions, [math_ops.reduce_prod(inner_dimensions)]), 0)
flattened = sparse_ops.sparse_reshape(inputs, new_shape)
return flattened
def _dense_inner_flatten(inputs, new_rank):
"""Helper function for `inner_flatten`."""
rank_assertion = check_ops.assert_rank_at_least(
inputs, new_rank, message='inputs has rank less than new_rank')
with ops.control_dependencies([rank_assertion]):
outer_dimensions = array_ops.strided_slice(
array_ops.shape(inputs), [0], [new_rank - 1])
new_shape = array_ops.concat((outer_dimensions, [-1]), 0)
reshaped = array_ops.reshape(inputs, new_shape)
# if `new_rank` is an integer, try to calculate new shape.
if isinstance(new_rank, six.integer_types):
static_shape = inputs.get_shape()
if static_shape is not None and static_shape.dims is not None:
static_shape = static_shape.as_list()
static_outer_dims = static_shape[:new_rank - 1]
static_inner_dims = static_shape[new_rank - 1:]
flattened_dimension = 1
for inner_dim in static_inner_dims:
if inner_dim is None:
flattened_dimension = None
break
flattened_dimension *= inner_dim
reshaped.set_shape(static_outer_dims + [flattened_dimension])
return reshaped
@add_arg_scope
def _inner_flatten(inputs, new_rank, output_collections=None, scope=None):
"""Flattens inner dimensions of `inputs`, returns a Tensor with `new_rank`.
For example:
'''
x = tf.random.uniform(shape=[1, 2, 3, 4, 5, 6])
y = _inner_flatten(x, 4)
assert y.get_shape().as_list() == [1, 2, 3, (4 * 5 * 6)]
'''
This layer will fail at run time if `new_rank` is greater than the current
rank of `inputs`.
Args:
inputs: A `Tensor` or `SparseTensor`.
new_rank: The desired rank of the returned `Tensor` or `SparseTensor`.
output_collections: Collection to which the outputs will be added.
scope: Optional scope for `name_scope`.
Returns:
A `Tensor` or `SparseTensor` containing the same values as `inputs`, but
with innermost dimensions flattened to obtain rank `new_rank`.
Raises:
TypeError: `inputs` is not a `Tensor` or `SparseTensor`.
"""
with ops.name_scope(scope, 'InnerFlatten', [inputs, new_rank]) as sc:
if isinstance(inputs, sparse_tensor.SparseTensor):
flattened = _sparse_inner_flatten(inputs, new_rank)
else:
inputs = ops.convert_to_tensor(inputs)
flattened = _dense_inner_flatten(inputs, new_rank)
return utils.collect_named_outputs(output_collections, sc, flattened)
def _model_variable_getter(
getter,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
rename=None,
use_resource=None,
synchronization=tf_variables.VariableSynchronization.AUTO,
aggregation=tf_variables.VariableAggregation.NONE,
**_):
"""Getter that uses model_variable for compatibility with core layers."""
short_name = name.split('/')[-1]
if rename and short_name in rename:
name_components = name.split('/')
name_components[-1] = rename[short_name]
name = '/'.join(name_components)
return variables.model_variable(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
collections=collections,
trainable=trainable,
caching_device=caching_device,
partitioner=partitioner,
custom_getter=getter,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation)
def _build_variable_getter(rename=None):
"""Build a model variable getter that respects scope getter and renames."""
# VariableScope will nest the getters
def layer_variable_getter(getter, *args, **kwargs):
kwargs['rename'] = rename
return _model_variable_getter(getter, *args, **kwargs)
return layer_variable_getter
def _add_variable_to_collections(variable, collections_set, collections_name):
"""Adds variable (or all its parts) to all collections with that name."""
collections = utils.get_variable_collections(collections_set,
collections_name) or []
variables_list = [variable]
if isinstance(variable, tf_variables.PartitionedVariable):
variables_list = [v for v in variable]
for collection in collections:
for var in variables_list:
if var not in ops.get_collection(collection):
ops.add_to_collection(collection, var)
@add_arg_scope
def fully_connected(inputs,
num_outputs,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a fully connected layer.
`fully_connected` creates a variable called `weights`, representing a fully
connected weight matrix, which is multiplied by the `inputs` to produce a
`Tensor` of hidden units. If a `normalizer_fn` is provided (such as
`batch_norm`), it is then applied. Otherwise, if `normalizer_fn` is
  None and a `biases_initializer` is provided then a `biases` variable would
  be created and added to the hidden units. Finally, if `activation_fn` is not
  `None`, it is applied to the hidden units as well.
  Note: if `inputs` has rank greater than 2, then `inputs` is flattened prior
  to the initial matrix multiply by `weights`.
Args:
inputs: A tensor of at least rank 2 and static value for the last dimension;
i.e. `[batch_size, depth]`, `[None, None, None, channels]`.
num_outputs: Integer or long, the number of output units in the layer.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
      Default is None, meaning no normalizer function is used.
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
      able to reuse the layer, the scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collections per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
The tensor variable representing the result of the series of operations.
Raises:
ValueError: If x has rank less than 2 or if its last dimension is not set.
"""
if not isinstance(num_outputs, six.integer_types):
raise ValueError('num_outputs type should be one of %s, got %s.' %
(list(six.integer_types), type(num_outputs)))
layer_variable_getter = _build_variable_getter({
'bias': 'biases',
'kernel': 'weights'
})
with variable_scope.variable_scope(
scope,
'fully_connected', [inputs],
reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
layer = core_layers.Dense(
units=num_outputs,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
if layer.bias is not None:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
# Apply normalizer function / layer.
if normalizer_fn is not None:
if not normalizer_params:
normalizer_params = {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
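# A minimal usage sketch for `fully_connected` (editor's addition, not part of
# the original source), assuming TF 1.x; names and sizes are illustrative.
#
#   import tensorflow as tf
#   flat = tf.placeholder(tf.float32, [None, 3136])
#   hidden = tf.contrib.layers.fully_connected(flat, 512)  # ReLU by default.
#   logits = tf.contrib.layers.fully_connected(
#       hidden, 10, activation_fn=None)  # Linear output layer.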
class GDN(base.Layer):
"""Generalized divisive normalization layer.
Based on the papers:
"Density Modeling of Images using a Generalized Normalization
Transformation"
Johannes Ballé, Valero Laparra, Eero P. Simoncelli
https://arxiv.org/abs/1511.06281
"End-to-end Optimized Image Compression"
Johannes Ballé, Valero Laparra, Eero P. Simoncelli
https://arxiv.org/abs/1611.01704
Implements an activation function that is essentially a multivariate
generalization of a particular sigmoid-type function:
```
y[i] = x[i] / sqrt(beta[i] + sum_j(gamma[j, i] * x[j]))
```
where `i` and `j` run over channels. This implementation never sums across
spatial dimensions. It is similar to local response normalization, but much
more flexible, as `beta` and `gamma` are trainable parameters.
Arguments:
inverse: If `False` (default), compute GDN response. If `True`, compute IGDN
response (one step of fixed point iteration to invert GDN; the division is
replaced by multiplication).
beta_min: Lower bound for beta, to prevent numerical error from causing
square root of zero or negative values.
gamma_init: The gamma matrix will be initialized as the identity matrix
multiplied with this value. If set to zero, the layer is effectively
initialized to the identity operation, since beta is initialized as one. A
good default setting is somewhere between 0 and 0.5.
reparam_offset: Offset added to the reparameterization of beta and gamma.
The reparameterization of beta and gamma as their square roots lets the
training slow down when their values are close to zero, which is desirable
as small values in the denominator can lead to a situation where gradient
noise on beta/gamma leads to extreme amounts of noise in the GDN
activations. However, without the offset, we would get zero gradients if
any elements of beta or gamma were exactly zero, and thus the training
could get stuck. To prevent this, we add this small constant. The default
      value was empirically determined as a good starting point. Making it
      bigger potentially leads to more gradient noise on the activations,
      while making it too small may lead to numerical precision issues.
data_format: Format of input tensor. Currently supports `'channels_first'`
and `'channels_last'`.
activity_regularizer: Regularizer function for the output.
trainable: Boolean, if `True`, also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: String, the name of the layer. Layers with the same name will share
weights, but to avoid mistakes we require `reuse=True` in such cases.
Properties:
    inverse: Boolean, whether IGDN is computed (`True`) or GDN (`False`).
data_format: Format of input tensor. Currently supports `'channels_first'`
and `'channels_last'`.
beta: The beta parameter as defined above (1D `Tensor`).
gamma: The gamma parameter as defined above (2D `Tensor`).
"""
def __init__(self,
inverse=False,
beta_min=1e-6,
gamma_init=.1,
reparam_offset=2**-18,
data_format='channels_last',
activity_regularizer=None,
trainable=True,
name=None,
**kwargs):
super(GDN, self).__init__(
trainable=trainable,
name=name,
activity_regularizer=activity_regularizer,
**kwargs)
self.inverse = inverse
self._beta_min = beta_min
self._gamma_init = gamma_init
self._reparam_offset = reparam_offset
self.data_format = data_format
self._channel_axis() # trigger ValueError early
self.input_spec = input_spec.InputSpec(min_ndim=3, max_ndim=5)
def _channel_axis(self):
try:
return {'channels_first': 1, 'channels_last': -1}[self.data_format]
except KeyError:
raise ValueError('Unsupported `data_format` for GDN layer: {}.'.format(
self.data_format))
@staticmethod
def _lower_bound(inputs, bound, name=None):
"""Same as tf.maximum, but with helpful gradient for inputs < bound.
The gradient is overwritten so that it is passed through if the input is not
hitting the bound. If it is, only gradients that push `inputs` higher than
the bound are passed through. No gradients are passed through to the bound.
Args:
inputs: input tensor
bound: lower bound for the input tensor
name: name for this op
Returns:
tf.maximum(inputs, bound)
"""
with ops.name_scope(name, 'GDNLowerBound', [inputs, bound]) as scope:
inputs = ops.convert_to_tensor(inputs, name='inputs')
bound = ops.convert_to_tensor(bound, name='bound')
with ops.get_default_graph().gradient_override_map(
{'Maximum': 'GDNLowerBound'}):
return math_ops.maximum(inputs, bound, name=scope)
@staticmethod
def _lower_bound_grad(op, grad):
"""Gradient for `_lower_bound`.
Args:
op: the tensorflow op for which to calculate a gradient
grad: gradient with respect to the output of the op
Returns:
gradients with respect to the inputs of the op
"""
inputs = op.inputs[0]
bound = op.inputs[1]
pass_through_if = math_ops.logical_or(inputs >= bound, grad < 0)
return [math_ops.cast(pass_through_if, grad.dtype) * grad, None]
def build(self, input_shape):
channel_axis = self._channel_axis()
input_shape = tensor_shape.TensorShape(input_shape)
num_channels = input_shape.dims[channel_axis].value
if num_channels is None:
raise ValueError('The channel dimension of the inputs to `GDN` '
'must be defined.')
self._input_rank = input_shape.ndims
self.input_spec = input_spec.InputSpec(
ndim=input_shape.ndims, axes={channel_axis: num_channels})
pedestal = array_ops.constant(self._reparam_offset**2, dtype=self.dtype)
beta_bound = array_ops.constant(
(self._beta_min + self._reparam_offset**2)**.5, dtype=self.dtype)
gamma_bound = array_ops.constant(self._reparam_offset, dtype=self.dtype)
def beta_initializer(shape, dtype=None, partition_info=None):
del partition_info # unused
pedestal = array_ops.constant(self._reparam_offset**2, dtype=self.dtype)
return math_ops.sqrt(array_ops.ones(shape, dtype=dtype) + pedestal)
def gamma_initializer(shape, dtype=None, partition_info=None):
del partition_info # unused
assert len(shape) == 2
assert shape[0] == shape[1]
eye = linalg_ops.eye(shape[0], dtype=dtype)
pedestal = array_ops.constant(self._reparam_offset**2, dtype=self.dtype)
return math_ops.sqrt(self._gamma_init * eye + pedestal)
beta = self.add_variable(
'reparam_beta',
shape=[num_channels],
initializer=beta_initializer,
dtype=self.dtype,
trainable=True)
beta = self._lower_bound(beta, beta_bound)
self.beta = math_ops.square(beta) - pedestal
gamma = self.add_variable(
'reparam_gamma',
shape=[num_channels, num_channels],
initializer=gamma_initializer,
dtype=self.dtype,
trainable=True)
gamma = self._lower_bound(gamma, gamma_bound)
self.gamma = math_ops.square(gamma) - pedestal
self.built = True
def call(self, inputs):
inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
ndim = self._input_rank
shape = self.gamma.get_shape().as_list()
gamma = array_ops.reshape(self.gamma, (ndim - 2) * [1] + shape)
# Compute normalization pool.
if self.data_format == 'channels_first':
norm_pool = nn.convolution(
math_ops.square(inputs),
gamma,
'VALID',
          data_format='NC' + 'DHW'[-(ndim - 2):])
if ndim == 3:
norm_pool = array_ops.expand_dims(norm_pool, 2)
norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NCHW')
norm_pool = array_ops.squeeze(norm_pool, [2])
elif ndim == 5:
shape = array_ops.shape(norm_pool)
        # Note: `shape` is a Tensor, so build the target shape with concat
        # (not `+`, which would add elementwise) to get [N, C, D, -1].
        norm_pool = array_ops.reshape(
            norm_pool, array_ops.concat([shape[:3], [-1]], 0))
norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NCHW')
norm_pool = array_ops.reshape(norm_pool, shape)
else: # ndim == 4
norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NCHW')
else: # channels_last
norm_pool = nn.convolution(math_ops.square(inputs), gamma, 'VALID')
norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NHWC')
norm_pool = math_ops.sqrt(norm_pool)
if self.inverse:
outputs = inputs * norm_pool
else:
outputs = inputs / norm_pool
outputs.set_shape(inputs.get_shape())
return outputs
def compute_output_shape(self, input_shape):
channel_axis = self._channel_axis()
input_shape = tensor_shape.TensorShape(input_shape)
    if not 3 <= input_shape.ndims <= 5:
raise ValueError('`input_shape` must be of rank 3 to 5, inclusive.')
if input_shape.dims[channel_axis].value is None:
raise ValueError(
'The channel dimension of `input_shape` must be defined.')
return input_shape
ops.RegisterGradient('GDNLowerBound')(GDN._lower_bound_grad) # pylint:disable=protected-access
def gdn(inputs,
inverse=False,
beta_min=1e-6,
gamma_init=.1,
reparam_offset=2**-18,
data_format='channels_last',
activity_regularizer=None,
trainable=True,
name=None,
reuse=None):
"""Functional interface for GDN layer.
Based on the papers:
"Density Modeling of Images using a Generalized Normalization
Transformation"
Johannes Ballé, Valero Laparra, Eero P. Simoncelli
https://arxiv.org/abs/1511.06281
"End-to-end Optimized Image Compression"
Johannes Ballé, Valero Laparra, Eero P. Simoncelli
https://arxiv.org/abs/1611.01704
Implements an activation function that is essentially a multivariate
generalization of a particular sigmoid-type function:
```
y[i] = x[i] / sqrt(beta[i] + sum_j(gamma[j, i] * x[j]))
```
where `i` and `j` run over channels. This implementation never sums across
spatial dimensions. It is similar to local response normalization, but much
more flexible, as `beta` and `gamma` are trainable parameters.
Args:
inputs: Tensor input.
inverse: If `False` (default), compute GDN response. If `True`, compute IGDN
response (one step of fixed point iteration to invert GDN; the division is
replaced by multiplication).
beta_min: Lower bound for beta, to prevent numerical error from causing
square root of zero or negative values.
gamma_init: The gamma matrix will be initialized as the identity matrix
multiplied with this value. If set to zero, the layer is effectively
initialized to the identity operation, since beta is initialized as one. A
good default setting is somewhere between 0 and 0.5.
reparam_offset: Offset added to the reparameterization of beta and gamma.
The reparameterization of beta and gamma as their square roots lets the
training slow down when their values are close to zero, which is desirable
as small values in the denominator can lead to a situation where gradient
noise on beta/gamma leads to extreme amounts of noise in the GDN
activations. However, without the offset, we would get zero gradients if
any elements of beta or gamma were exactly zero, and thus the training
could get stuck. To prevent this, we add this small constant. The default
      value was empirically determined as a good starting point. Making it
      bigger potentially leads to more gradient noise on the activations,
      while making it too small may lead to numerical precision issues.
data_format: Format of input tensor. Currently supports `'channels_first'`
and `'channels_last'`.
activity_regularizer: Regularizer function for the output.
trainable: Boolean, if `True`, also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: String, the name of the layer. Layers with the same name will share
weights, but to avoid mistakes we require `reuse=True` in such cases.
reuse: Boolean, whether to reuse the weights of a previous layer by the same
name.
Returns:
Output tensor.
"""
layer = GDN(
inverse=inverse,
beta_min=beta_min,
gamma_init=gamma_init,
reparam_offset=reparam_offset,
data_format=data_format,
activity_regularizer=activity_regularizer,
trainable=trainable,
name=name,
dtype=inputs.dtype.base_dtype,
_scope=name,
_reuse=reuse)
return layer.apply(inputs)
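# A minimal usage sketch for the functional `gdn` interface (editor's addition,
# not part of the original source), assuming TF 1.x exports it as
# `tf.contrib.layers.gdn`; shapes are illustrative.
#
#   import tensorflow as tf
#   x = tf.placeholder(tf.float32, [None, 64, 64, 128])
#   y = tf.contrib.layers.gdn(x)                    # GDN response.
#   x_hat = tf.contrib.layers.gdn(y, inverse=True)  # Approximate inverse (IGDN).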
@add_arg_scope
def layer_norm(inputs,
center=True,
scale=True,
activation_fn=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
begin_norm_axis=1,
begin_params_axis=-1,
scope=None):
"""Adds a Layer Normalization layer.
Based on the paper:
"Layer Normalization"
Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton
https://arxiv.org/abs/1607.06450.
Can be used as a normalizer function for conv2d and fully_connected.
Given a tensor `inputs` of rank `R`, moments are calculated and normalization
is performed over axes `begin_norm_axis ... R - 1`. Scaling and centering,
if requested, is performed over axes `begin_params_axis .. R - 1`.
By default, `begin_norm_axis = 1` and `begin_params_axis = -1`,
meaning that normalization is performed over all but the first axis
(the `HWC` if `inputs` is `NHWC`), while the `beta` and `gamma` trainable
parameters are calculated for the rightmost axis (the `C` if `inputs` is
`NHWC`). Scaling and recentering is performed via broadcast of the
`beta` and `gamma` parameters with the normalized tensor.
The shapes of `beta` and `gamma` are `inputs.shape[begin_params_axis:]`,
and this part of the inputs' shape must be fully defined.
Args:
inputs: A tensor having rank `R`. The normalization is performed over axes
`begin_norm_axis ... R - 1` and centering and scaling parameters are
calculated over `begin_params_axis ... R - 1`.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is not used. When the
next layer is linear (also e.g. `nn.relu`), this can be disabled since the
scaling can be done by the next layer.
activation_fn: Activation function, default set to None to skip it and
maintain a linear activation.
reuse: Whether or not the layer and its variables should be reused. To be
      able to reuse the layer, the scope must be given.
variables_collections: Optional collections for the variables.
outputs_collections: Collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
begin_norm_axis: The first normalization dimension: normalization will be
performed along dimensions `begin_norm_axis : rank(inputs)`
begin_params_axis: The first parameter (beta, gamma) dimension: scale and
centering parameters will have dimensions
`begin_params_axis : rank(inputs)` and will be broadcast with the
normalized inputs accordingly.
scope: Optional scope for `variable_scope`.
Returns:
A `Tensor` representing the output of the operation, having the same
shape and dtype as `inputs`.
Raises:
ValueError: If the rank of `inputs` is not known at graph build time,
or if `inputs.shape[begin_params_axis:]` is not fully defined at
graph build time.
"""
with variable_scope.variable_scope(
scope, 'LayerNorm', [inputs], reuse=reuse) as sc:
inputs = ops.convert_to_tensor(inputs)
inputs_shape = inputs.shape
inputs_rank = inputs_shape.ndims
if inputs_rank is None:
raise ValueError('Inputs %s has undefined rank.' % inputs.name)
dtype = inputs.dtype.base_dtype
if begin_norm_axis < 0:
begin_norm_axis = inputs_rank + begin_norm_axis
if begin_params_axis >= inputs_rank or begin_norm_axis >= inputs_rank:
raise ValueError('begin_params_axis (%d) and begin_norm_axis (%d) '
'must be < rank(inputs) (%d)' %
(begin_params_axis, begin_norm_axis, inputs_rank))
params_shape = inputs_shape[begin_params_axis:]
if not params_shape.is_fully_defined():
raise ValueError(
'Inputs %s: shape(inputs)[%s:] is not fully defined: %s' %
(inputs.name, begin_params_axis, inputs_shape))
# Allocate parameters for the beta and gamma of the normalization.
beta, gamma = None, None
if center:
beta_collections = utils.get_variable_collections(variables_collections,
'beta')
beta = variables.model_variable(
'beta',
shape=params_shape,
dtype=dtype,
initializer=init_ops.zeros_initializer(),
collections=beta_collections,
trainable=trainable)
if scale:
gamma_collections = utils.get_variable_collections(
variables_collections, 'gamma')
gamma = variables.model_variable(
'gamma',
shape=params_shape,
dtype=dtype,
initializer=init_ops.ones_initializer(),
collections=gamma_collections,
trainable=trainable)
    # By default, compute the moments across all the dimensions except the
    # one with index 0.
norm_axes = list(range(begin_norm_axis, inputs_rank))
mean, variance = nn.moments(inputs, norm_axes, keep_dims=True)
# Compute layer normalization using the batch_normalization function.
# Note that epsilon must be increased for float16 due to the limited
# representable range.
variance_epsilon = 1e-12 if dtype != dtypes.float16 else 1e-3
outputs = nn.batch_normalization(
inputs,
mean,
variance,
offset=beta,
scale=gamma,
variance_epsilon=variance_epsilon)
outputs.set_shape(inputs_shape)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
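# --- Illustrative usage sketch (not part of the original library code). ---
# A minimal example, with assumed shapes, of how `begin_norm_axis` and
# `begin_params_axis` interact for a [batch, time, features] input: moments
# are computed over the feature axis only, and beta/gamma get shape
# [features] so they broadcast against the normalized activations. This
# assumes the function defined above is the contrib `layer_norm`.
def _layer_norm_usage_sketch():
  x = array_ops.ones([8, 10, 64])          # [batch, time, features]
  return layer_norm(x,
                    begin_norm_axis=-1,    # normalize over the last axis only
                    begin_params_axis=-1)  # beta/gamma of shape [64]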
@add_arg_scope
def images_to_sequence(inputs,
data_format=DATA_FORMAT_NHWC,
outputs_collections=None,
scope=None):
"""Convert a batch of images into a batch of sequences.
Args:
inputs: a (num_images, height, width, depth) tensor
data_format: A string. `NHWC` (default) and `NCHW` are supported.
outputs_collections: The collections to which the outputs are added.
scope: Optional scope for name_scope.
Raises:
ValueError: If `data_format` is not either NCHW or NHWC.
Returns:
(width, num_images*height, depth) sequence tensor
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
with ops.name_scope(scope, 'ImagesToSequence', [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
df = ('channels_first'
if data_format and data_format.startswith('NC') else 'channels_last')
if df == 'channels_first':
inputs = array_ops.transpose(inputs, [0, 2, 3, 1])
_, _, width, depth = inputs.get_shape().as_list()
s = array_ops.shape(inputs)
batch_size, height = s[0], s[1]
transposed = array_ops.transpose(inputs, [2, 0, 1, 3])
outputs = array_ops.reshape(transposed, [width, batch_size * height, depth])
return utils.collect_named_outputs(outputs_collections, sc, outputs)
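# --- Illustrative shape sketch (not part of the original library code). ---
# Rough illustration of the reshaping above, with assumed shapes: an NHWC
# batch of images becomes a width-major sequence whose batch dimension folds
# the original batch and height together.
def _images_to_sequence_shape_sketch():
  images = array_ops.zeros([2, 4, 5, 3])  # [num_images, height, width, depth]
  return images_to_sequence(images)       # -> [width, num_images * height,
                                          #     depth], i.e. [5, 8, 3] here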
@add_arg_scope
def max_pool2d(inputs,
kernel_size,
stride=2,
padding='VALID',
data_format=DATA_FORMAT_NHWC,
outputs_collections=None,
scope=None):
"""Adds a 2D Max Pooling op.
  It is assumed that the pooling is done per image but not across the batch or
  channel dimensions.
Args:
inputs: A 4-D tensor of shape `[batch_size, height, width, channels]` if
`data_format` is `NHWC`, and `[batch_size, channels, height, width]` if
`data_format` is `NCHW`.
kernel_size: A list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both values
are the same.
stride: A list of length 2: [stride_height, stride_width]. Can be an int if
both strides are the same. Note that presently both strides must have the
same value.
padding: The padding method, either 'VALID' or 'SAME'.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
outputs_collections: The collections to which the outputs are added.
scope: Optional scope for name_scope.
Returns:
A `Tensor` representing the results of the pooling operation.
Raises:
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
    ValueError: If `kernel_size` is not a list of length 2.
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
with ops.name_scope(scope, 'MaxPool2D', [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
df = ('channels_first'
if data_format and data_format.startswith('NC') else 'channels_last')
layer = pooling_layers.MaxPooling2D(
pool_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
_scope=sc)
outputs = layer.apply(inputs)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
@add_arg_scope
def max_pool3d(inputs,
kernel_size,
stride=2,
padding='VALID',
data_format=DATA_FORMAT_NDHWC,
outputs_collections=None,
scope=None):
"""Adds a 3D Max Pooling op.
  It is assumed that the pooling is done per image but not across the batch or
  channel dimensions.
Args:
inputs: A 5-D tensor of shape `[batch_size, depth, height, width, channels]`
if `data_format` is `NDHWC`, and `[batch_size, channels, depth, height,
width]` if `data_format` is `NCDHW`.
kernel_size: A list of length 3: [kernel_depth, kernel_height, kernel_width]
      of the pooling kernel over which the op is computed. Can be an int if
      all three values are the same.
    stride: A list of length 3: [stride_depth, stride_height, stride_width].
      Can be an int if all strides are the same. Note that presently all
      strides must have the same value.
padding: The padding method, either 'VALID' or 'SAME'.
data_format: A string. `NDHWC` (default) and `NCDHW` are supported.
outputs_collections: The collections to which the outputs are added.
scope: Optional scope for name_scope.
Returns:
A `Tensor` representing the results of the pooling operation.
Raises:
ValueError: If `data_format` is neither `NDHWC` nor `NCDHW`.
    ValueError: If `kernel_size` is not a list of length 3.
"""
if data_format not in (DATA_FORMAT_NCDHW, DATA_FORMAT_NDHWC):
raise ValueError('data_format has to be either NCDHW or NDHWC.')
with ops.name_scope(scope, 'MaxPool3D', [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
df = ('channels_first'
if data_format and data_format.startswith('NC') else 'channels_last')
layer = pooling_layers.MaxPooling3D(
pool_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
_scope=sc)
outputs = layer.apply(inputs)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
@add_arg_scope
def pool(inputs,
kernel_size,
pooling_type,
padding='VALID',
data_format=None,
dilation_rate=1,
stride=1,
outputs_collections=None,
scope=None):
# pylint: disable=line-too-long
"""Adds a pooling op.
Args:
inputs: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape +
[num_channels]` if data_format does not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
kernel_size: Sequence of N ints >= 1. Can also be a single integer to
specify the same value for all spatial dimensions.
pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
padding: The padding algorithm, must be "SAME" or "VALID".
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For
N=3, the valid values are "NDHWC" (default) and "NCDHW".
dilation_rate: Optional. Dilation rate. Sequence of N ints >= 1. Defaults
to [1]*N. Can also be a single integer to specify the same value for all
spatial dimensions. If any value of dilation_rate is > 1, then all values
of stride must be 1.
stride: Optional. Sequence of N ints >= 1. Defaults to [1]*N. Can also be
a single integer to specify the same value for all spatial dimensions. If
any value of stride is > 1, then all values of dilation_rate must be 1.
outputs_collections: The collections to which the outputs are added.
scope: Optional scope for name_scope.
Returns:
A `Tensor` representing the results of the pooling operation.
Raises:
ValueError: If arguments are invalid.
"""
# pylint: enable=line-too-long
with ops.name_scope(scope, '%s_pool' % (pooling_type.lower()),
[inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
input_rank = inputs.get_shape().ndims
if input_rank is None:
raise ValueError('Rank of inputs must be known')
if input_rank < 3:
raise ValueError('Rank of inputs must be >= 3')
num_spatial_dims = input_rank - 2
output = nn.pool(
input=inputs,
window_shape=utils.n_positive_integers(num_spatial_dims, kernel_size),
pooling_type=pooling_type,
padding=padding,
data_format=data_format,
dilation_rate=utils.n_positive_integers(num_spatial_dims,
dilation_rate),
strides=utils.n_positive_integers(num_spatial_dims, stride),
name=sc)
return utils.collect_named_outputs(outputs_collections, sc, output)
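# --- Illustrative usage sketch (not part of the original library code). ---
# A minimal example, with assumed NHWC inputs, of the generic `pool` op:
# average pooling over a 3x3 window with dilation rate 2; per the docstring,
# a dilation rate > 1 requires all strides to stay at 1.
def _pool_usage_sketch():
  x = array_ops.ones([1, 32, 32, 16])
  return pool(x,
              kernel_size=3,
              pooling_type='AVG',
              padding='VALID',
              dilation_rate=2,  # dilation > 1 => stride must remain 1
              stride=1)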
@add_arg_scope
def one_hot_encoding(labels,
num_classes,
on_value=1.0,
off_value=0.0,
outputs_collections=None,
scope=None):
"""Transform numeric labels into onehot_labels using `tf.one_hot`.
Args:
labels: [batch_size] target labels.
num_classes: Total number of classes.
on_value: A scalar defining the on-value.
off_value: A scalar defining the off-value.
outputs_collections: Collection to add the outputs.
scope: Optional scope for name_scope.
Returns:
One-hot encoding of the labels.
"""
with ops.name_scope(scope, 'OneHotEncoding', [labels, num_classes]) as sc:
labels = ops.convert_to_tensor(labels)
if labels.dtype == dtypes.int32:
labels = standard_ops.to_int64(labels)
outputs = standard_ops.one_hot(
labels, num_classes, on_value=on_value, off_value=off_value)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
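# --- Illustrative usage sketch (not part of the original library code). ---
# Example values are assumptions: three integer labels mapped to one-hot rows.
def _one_hot_encoding_usage_sketch():
  labels = ops.convert_to_tensor([0, 2, 1], dtype=dtypes.int32)
  return one_hot_encoding(labels, num_classes=3)
  # -> [[1., 0., 0.],
  #     [0., 0., 1.],
  #     [0., 1., 0.]]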
def _apply_activation(y, activation_fn, output_collections):
if activation_fn is not None:
y = activation_fn(y)
ops.add_to_collections(
list(output_collections or []) + [ops.GraphKeys.ACTIVATIONS], y)
return y
def repeat(inputs, repetitions, layer, *args, **kwargs):
"""Applies the same layer with the same arguments repeatedly.
```python
y = repeat(x, 3, conv2d, 64, [3, 3], scope='conv1')
# It is equivalent to:
x = conv2d(x, 64, [3, 3], scope='conv1/conv1_1')
x = conv2d(x, 64, [3, 3], scope='conv1/conv1_2')
y = conv2d(x, 64, [3, 3], scope='conv1/conv1_3')
```
If the `scope` argument is not given in `kwargs`, it is set to
`layer.__name__`, or `layer.func.__name__` (for `functools.partial`
  objects). If neither `__name__` nor `func.__name__` is available, the
  layers are called with `scope='repeat'`.
Args:
inputs: A `Tensor` suitable for layer.
repetitions: Int, number of repetitions.
layer: A layer with arguments `(inputs, *args, **kwargs)`
*args: Extra args for the layer.
**kwargs: Extra kwargs for the layer.
Returns:
A tensor result of applying the layer, repetitions times.
Raises:
ValueError: If the op is unknown or wrong.
"""
scope = kwargs.pop('scope', None)
with variable_scope.variable_scope(scope, 'Repeat', [inputs]):
inputs = ops.convert_to_tensor(inputs)
if scope is None:
if hasattr(layer, '__name__'):
scope = layer.__name__
elif hasattr(layer, 'func') and hasattr(layer.func, '__name__'):
scope = layer.func.__name__ # In case layer is a functools.partial.
else:
scope = 'repeat'
outputs = inputs
for i in range(repetitions):
kwargs['scope'] = scope + '_' + str(i + 1)
outputs = layer(outputs, *args, **kwargs)
return outputs
def _scale_gradient_shape(op):
"""Shape helper function for scale_gradient function below."""
return [op.inputs[0].shape]
def _scale_gradient_grad(op, grad):
"""Python gradient helper function for scale_gradient function below."""
return [grad * op.inputs[1], None]
@function.Defun(
python_grad_func=_scale_gradient_grad, shape_func=_scale_gradient_shape)
def scale_gradient(inputs, gradient_multiplier):
"""Identity operation, but with the gradient multiplied by a tensor.
The TensorFlow gradient system will compute the gradient with respect to
`inputs` as the product of the gradient with respect to the `output`
multiplied by a specified `gradient_multiplier` tensor. If
`gradient_multiplier` is equal to 1, then this results in the true gradient.
Otherwise, it results in a scaled gradient.
  This can be useful for adjusting the relative learning rate of different
  parameter tensors when performing gradient descent. Because this rescaling
  can be inserted at arbitrary locations within a graph, it is often more
  convenient to apply than simply rescaling the final computed gradients.
Args:
inputs: Tensor to be output.
gradient_multiplier: Tensor by which to multiply the gradient with respect
to `output` to compute the gradient with respect to `inputs`. Its shape
must be broadcastable to the shape of `inputs`.
Returns:
output Tensor, equal to `inputs`.
"""
# gradient_multiplier is implicitly saved by decorator, and only used for
# gradient computation.
del gradient_multiplier
return inputs
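# --- Illustrative usage sketch (not part of the original library code). ---
# Hypothetical use of `scale_gradient`: the forward value of `features` is
# unchanged, but gradients flowing back into whatever produced `features` are
# multiplied by 0.1 (the multiplier value is an assumption), e.g. to slow a
# pretrained backbone relative to a freshly initialized head. Expects a
# floating-point `Tensor`.
def _scale_gradient_usage_sketch(features):
  multiplier = ops.convert_to_tensor(0.1, dtype=features.dtype.base_dtype)
  return scale_gradient(features, multiplier)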
@add_arg_scope
def separable_convolution2d(
inputs,
num_outputs,
kernel_size,
depth_multiplier=1,
stride=1,
padding='SAME',
data_format=DATA_FORMAT_NHWC,
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
pointwise_initializer=None,
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a depth-separable 2D convolution with optional batch_norm layer.
This op first performs a depthwise convolution that acts separately on
channels, creating a variable called `depthwise_weights`. If `num_outputs`
is not None, it adds a pointwise convolution that mixes channels, creating a
variable called `pointwise_weights`. Then, if `normalizer_fn` is None,
it adds bias to the result, creating a variable called 'biases', otherwise,
the `normalizer_fn` is applied. It finally applies an activation function
to produce the end result.
Args:
inputs: A tensor of size [batch_size, height, width, channels].
    num_outputs: The number of pointwise convolution output filters. If it is
      None, the pointwise convolution stage is skipped.
    kernel_size: A list of length 2: [kernel_height, kernel_width] of the
      filters. Can be an int if both values are the same.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
stride: A list of length 2: [stride_height, stride_width], specifying the
depthwise convolution stride. Can be an int if both strides are the same.
padding: One of 'VALID' or 'SAME'.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
rate: A list of length 2: [rate_height, rate_width], specifying the dilation
rates for atrous convolution. Can be an int if both rates are the same. If
any value is larger than one, then both stride values need to be one.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
      `biases_regularizer` are ignored and `biases` are neither created nor
      added. Default is None, which means no normalizer function is used.
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the depthwise weights.
    pointwise_initializer: An initializer for the pointwise weights. Defaults
      to None, in which case `weights_initializer` is used.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
      able to reuse the layer, `scope` must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collection per variable.
outputs_collections: Collection to add the outputs.
    trainable: Whether or not the variables should be trainable.
scope: Optional scope for variable_scope.
Returns:
A `Tensor` representing the output of the operation.
Raises:
ValueError: If `data_format` is invalid.
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
layer_variable_getter = _build_variable_getter({
'bias': 'biases',
'depthwise_kernel': 'depthwise_weights',
'pointwise_kernel': 'pointwise_weights'
})
with variable_scope.variable_scope(
scope,
'SeparableConv2d', [inputs],
reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
if pointwise_initializer is None:
pointwise_initializer = weights_initializer
df = ('channels_first'
if data_format and data_format.startswith('NC') else 'channels_last')
if num_outputs is not None:
# Apply separable conv using the SeparableConvolution2D layer.
layer = convolutional_layers.SeparableConvolution2D(
filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
dilation_rate=utils.two_element_tuple(rate),
activation=None,
depth_multiplier=depth_multiplier,
use_bias=not normalizer_fn and biases_initializer,
depthwise_initializer=weights_initializer,
pointwise_initializer=pointwise_initializer,
bias_initializer=biases_initializer,
depthwise_regularizer=weights_regularizer,
pointwise_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.depthwise_kernel,
variables_collections, 'weights')
_add_variable_to_collections(layer.pointwise_kernel,
variables_collections, 'weights')
if layer.bias is not None:
_add_variable_to_collections(layer.bias, variables_collections,
'biases')
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
# Actually apply depthwise conv instead of separable conv.
dtype = inputs.dtype.base_dtype
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
num_filters_in = utils.channel_dimension(
inputs.get_shape(), df, min_rank=4)
weights_collections = utils.get_variable_collections(
variables_collections, 'weights')
depthwise_shape = [kernel_h, kernel_w, num_filters_in, depth_multiplier]
depthwise_weights = variables.model_variable(
'depthwise_weights',
shape=depthwise_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
trainable=trainable,
collections=weights_collections)
strides = [
1, 1, stride_h, stride_w
] if data_format.startswith('NC') else [1, stride_h, stride_w, 1]
outputs = nn.depthwise_conv2d(
inputs,
depthwise_weights,
strides,
padding,
rate=utils.two_element_tuple(rate),
data_format=data_format)
num_outputs = depth_multiplier * num_filters_in
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
if biases_initializer is not None:
biases_collections = utils.get_variable_collections(
variables_collections, 'biases')
biases = variables.model_variable(
'biases',
shape=[
num_outputs,
],
dtype=dtype,
initializer=biases_initializer,
regularizer=biases_regularizer,
trainable=trainable,
collections=biases_collections)
outputs = nn.bias_add(outputs, biases, data_format=data_format)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
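# --- Illustrative usage sketch (not part of the original library code). ---
# Example shapes are assumptions: a depthwise 3x3 convolution followed by a
# 1x1 pointwise convolution to 64 output channels. Passing num_outputs=None
# instead would skip the pointwise stage and return the depthwise result.
def _separable_convolution2d_usage_sketch():
  x = array_ops.ones([1, 28, 28, 16])  # NHWC
  return separable_convolution2d(x, num_outputs=64, kernel_size=[3, 3])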
@add_arg_scope
def sequence_to_images(inputs,
height,
output_data_format='channels_last',
outputs_collections=None,
scope=None):
"""Convert a batch of sequences into a batch of images.
Args:
inputs: (num_steps, num_batches, depth) sequence tensor
height: the height of the images
output_data_format: Format of output tensor. Currently supports
`'channels_first'` and `'channels_last'`.
outputs_collections: The collections to which the outputs are added.
scope: Optional scope for name_scope.
Returns:
A tensor representing the output of the operation.
"""
with ops.name_scope(scope, 'SequenceToImages', [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
width, num_batches, depth = inputs.get_shape().as_list()
if num_batches is None:
num_batches = -1
else:
num_batches //= height
reshaped = array_ops.reshape(inputs, [width, num_batches, height, depth])
if output_data_format == 'channels_first':
outputs = array_ops.transpose(reshaped, [1, 3, 2, 0])
else:
outputs = array_ops.transpose(reshaped, [1, 2, 0, 3])
return utils.collect_named_outputs(outputs_collections, sc, outputs)
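# --- Illustrative round-trip sketch (not part of the original library code). --
# With assumed shapes, `sequence_to_images` undoes `images_to_sequence` when
# `height` matches the height that was folded into the batch dimension.
def _sequence_to_images_roundtrip_sketch():
  images = array_ops.zeros([2, 4, 5, 3])         # NHWC
  sequence = images_to_sequence(images)          # [5, 2 * 4, 3]
  return sequence_to_images(sequence, height=4)  # back to [2, 4, 5, 3]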
@add_arg_scope
def softmax(logits, scope=None):
"""Performs softmax on Nth dimension of N-dimensional logit tensor.
For two-dimensional logits this reduces to tf.nn.softmax. The N-th dimension
needs to have a specified number of elements (number of classes).
Args:
logits: N-dimensional `Tensor` with logits, where N > 1.
scope: Optional scope for variable_scope.
Returns:
A `Tensor` with same shape and type as logits.
"""
# TODO(jrru): Add axis argument which defaults to last dimension.
with variable_scope.variable_scope(scope, 'softmax', [logits]):
num_logits = utils.last_dimension(logits.get_shape(), min_rank=2)
logits_2d = array_ops.reshape(logits, [-1, num_logits])
predictions = nn.softmax(logits_2d)
predictions = array_ops.reshape(predictions, array_ops.shape(logits))
if not context.executing_eagerly():
predictions.set_shape(logits.get_shape())
return predictions
@add_arg_scope
def spatial_softmax(features,
temperature=None,
name=None,
variables_collections=None,
trainable=True,
data_format='NHWC'):
"""Computes the spatial softmax of a convolutional feature map.
First computes the softmax over the spatial extent of each channel of a
convolutional feature map. Then computes the expected 2D position of the
points of maximal activation for each channel, resulting in a set of
feature keypoints [i1, j1, ... iN, jN] for all N channels.
Read more here:
"Learning visual feature spaces for robotic manipulation with
deep spatial autoencoders." Finn et al., http://arxiv.org/abs/1509.06113.
Args:
features: A `Tensor` of size [batch_size, W, H, num_channels]; the
convolutional feature map.
temperature: Softmax temperature (optional). If None, a learnable
temperature is created.
name: A name for this operation (optional).
variables_collections: Collections for the temperature variable.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
data_format: A string. `NHWC` (default) and `NCHW` are supported.
Returns:
feature_keypoints: A `Tensor` with size [batch_size, num_channels * 2];
the expected 2D locations of each channel's feature keypoint (normalized
to the range (-1,1)). The inner dimension is arranged as
[i1, j1, ... iN, jN].
Raises:
ValueError: If unexpected data_format specified.
ValueError: If num_channels dimension is unspecified.
"""
with variable_scope.variable_scope(name, 'spatial_softmax'):
shape = array_ops.shape(features)
static_shape = features.shape
if data_format == DATA_FORMAT_NHWC:
height, width, num_channels = shape[1], shape[2], static_shape[3]
elif data_format == DATA_FORMAT_NCHW:
num_channels, height, width = static_shape[1], shape[2], shape[3]
else:
raise ValueError('data_format has to be either NCHW or NHWC.')
if tensor_shape.dimension_value(num_channels) is None:
raise ValueError('The num_channels dimension of the inputs to '
'`spatial_softmax` should be defined. Found `None`.')
with ops.name_scope('spatial_softmax_op', 'spatial_softmax_op', [features]):
# Create tensors for x and y coordinate values, scaled to range [-1, 1].
pos_x, pos_y = array_ops.meshgrid(
math_ops.lin_space(-1., 1., num=height),
math_ops.lin_space(-1., 1., num=width),
indexing='ij')
pos_x = array_ops.reshape(pos_x, [height * width])
pos_y = array_ops.reshape(pos_y, [height * width])
if temperature is None:
temp_initializer = init_ops.ones_initializer()
else:
temp_initializer = init_ops.constant_initializer(temperature)
if not trainable:
temp_collections = None
else:
temp_collections = utils.get_variable_collections(
variables_collections, 'temperature')
temperature = variables.model_variable(
'temperature',
shape=(),
dtype=dtypes.float32,
initializer=temp_initializer,
collections=temp_collections,
trainable=trainable)
if data_format == 'NCHW':
features = array_ops.reshape(features, [-1, height * width])
else:
features = array_ops.reshape(
array_ops.transpose(features, [0, 3, 1, 2]), [-1, height * width])
softmax_attention = nn.softmax(features / temperature)
expected_x = math_ops.reduce_sum(
pos_x * softmax_attention, [1], keepdims=True)
expected_y = math_ops.reduce_sum(
pos_y * softmax_attention, [1], keepdims=True)
expected_xy = array_ops.concat([expected_x, expected_y], 1)
feature_keypoints = array_ops.reshape(
expected_xy, [-1, tensor_shape.dimension_value(num_channels) * 2])
feature_keypoints.set_shape(
[None, tensor_shape.dimension_value(num_channels) * 2])
return feature_keypoints
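# --- Illustrative usage sketch (not part of the original library code). ---
# Example shapes are assumptions: for a feature map with 16 channels, the
# result has shape [batch_size, 32], one (i, j) expectation in [-1, 1] per
# channel, laid out as [i1, j1, ..., i16, j16]. A learnable temperature
# variable is created because `temperature` is left as None.
def _spatial_softmax_usage_sketch():
  feature_map = array_ops.ones([4, 24, 24, 16])  # NHWC
  return spatial_softmax(feature_map)            # shape [4, 32]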
def stack(inputs, layer, stack_args, **kwargs):
"""Builds a stack of layers by applying layer repeatedly using stack_args.
`stack` allows you to repeatedly apply the same operation with different
arguments `stack_args[i]`. For each application of the layer, `stack` creates
a new scope appended with an increasing number. For example:
```python
y = stack(x, fully_connected, [32, 64, 128], scope='fc')
# It is equivalent to:
x = fully_connected(x, 32, scope='fc/fc_1')
x = fully_connected(x, 64, scope='fc/fc_2')
y = fully_connected(x, 128, scope='fc/fc_3')
```
If the `scope` argument is not given in `kwargs`, it is set to
`layer.__name__`, or `layer.func.__name__` (for `functools.partial`
objects). If neither `__name__` nor `func.__name__` is available, the
layers are called with `scope='stack'`.
Args:
inputs: A `Tensor` suitable for layer.
layer: A layer with arguments `(inputs, *args, **kwargs)`
stack_args: A list/tuple of parameters for each call of layer.
**kwargs: Extra kwargs for the layer.
Returns:
A `Tensor` result of applying the stacked layers.
Raises:
ValueError: If the op is unknown or wrong.
"""
scope = kwargs.pop('scope', None)
if not isinstance(stack_args, (list, tuple)):
raise ValueError('stack_args need to be a list or tuple')
with variable_scope.variable_scope(scope, 'Stack', [inputs]):
inputs = ops.convert_to_tensor(inputs)
if scope is None:
if hasattr(layer, '__name__'):
scope = layer.__name__
elif hasattr(layer, 'func') and hasattr(layer.func, '__name__'):
scope = layer.func.__name__ # In case layer is a functools.partial.
else:
scope = 'stack'
outputs = inputs
for i in range(len(stack_args)):
kwargs['scope'] = scope + '_' + str(i + 1)
layer_args = stack_args[i]
if not isinstance(layer_args, (list, tuple)):
layer_args = [layer_args]
outputs = layer(outputs, *layer_args, **kwargs)
return outputs
@add_arg_scope
def unit_norm(inputs, dim, epsilon=1e-7, scope=None):
"""Normalizes the given input across the specified dimension to unit length.
Note that the rank of `input` must be known.
Args:
inputs: A `Tensor` of arbitrary size.
dim: The dimension along which the input is normalized.
epsilon: A small value to add to the inputs to avoid dividing by zero.
scope: Optional scope for variable_scope.
Returns:
The normalized `Tensor`.
Raises:
    ValueError: If `dim` is negative or not smaller than the rank of
      `inputs`.
"""
with variable_scope.variable_scope(scope, 'UnitNorm', [inputs]):
if not inputs.get_shape():
raise ValueError('The input rank must be known.')
input_rank = len(inputs.get_shape().as_list())
if dim < 0 or dim >= input_rank:
      raise ValueError(
          'dim must be nonnegative but smaller than the input rank.')
lengths = math_ops.sqrt(
epsilon + math_ops.reduce_sum(math_ops.square(inputs), dim, True))
multiples = []
if dim > 0:
multiples.append(array_ops.ones([dim], dtypes.int32))
multiples.append(
array_ops.strided_slice(array_ops.shape(inputs), [dim], [dim + 1]))
if dim < (input_rank - 1):
multiples.append(array_ops.ones([input_rank - 1 - dim], dtypes.int32))
multiples = array_ops.concat(multiples, 0)
return math_ops.div(inputs, array_ops.tile(lengths, multiples))
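# --- Illustrative usage sketch (not part of the original library code). ---
# Example values are assumptions: each row of a 2-D tensor is scaled to
# (approximately, because of the epsilon term) unit Euclidean length.
def _unit_norm_usage_sketch():
  x = ops.convert_to_tensor([[3.0, 4.0], [0.0, 2.0]])
  return unit_norm(x, dim=1)
  # -> approximately [[0.6, 0.8],
  #                   [0.0, 1.0]]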
@add_arg_scope
def maxout(inputs, num_units, axis=-1, scope=None):
"""Adds a maxout op from https://arxiv.org/abs/1302.4389
"Maxout Networks" Ian J. Goodfellow, David Warde-Farley, Mehdi Mirza, Aaron
Courville,
Yoshua Bengio
Usually the operation is performed in the filter/channel dimension. This can
also be
used after fully-connected layers to reduce number of features.
Arguments:
inputs: Tensor input
num_units: Specifies how many features will remain after maxout in the
      `axis` dimension (usually channel). This must be a factor of the number
      of features.
axis: The dimension where max pooling will be performed. Default is the last
dimension.
scope: Optional scope for variable_scope.
Returns:
A `Tensor` representing the results of the pooling operation.
Raises:
    ValueError: If the number of features is not a multiple of `num_units`.
"""
with variable_scope.variable_scope(scope, 'MaxOut', [inputs]):
inputs = ops.convert_to_tensor(inputs)
shape = inputs.get_shape().as_list()
num_channels = shape[axis]
if num_channels % num_units:
raise ValueError('number of features({}) is not '
'a multiple of num_units({})'.format(
num_channels, num_units))
shape[axis] = num_units
shape += [num_channels // num_units]
# Dealing with batches with arbitrary sizes
for i in range(len(shape)):
if shape[i] is None:
shape[i] = array_ops.shape(inputs)[i]
outputs = math_ops.reduce_max(
array_ops.reshape(inputs, shape), -1, keepdims=False)
return outputs
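# --- Illustrative shape sketch (not part of the original library code). ---
# Example shapes are assumptions: 8 channels reduced to 4 maxout units by
# taking an element-wise max over groups of 2 channels each.
def _maxout_shape_sketch():
  x = array_ops.ones([32, 10, 10, 8])  # NHWC, 8 channels
  return maxout(x, num_units=4)        # -> [32, 10, 10, 4]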
def poincare_normalize(x, axis=1, epsilon=1e-5, name=None):
"""Project into the Poincare ball with norm <= 1.0 - epsilon.
https://en.wikipedia.org/wiki/Poincare_ball_model
Used in
Poincare Embeddings for Learning Hierarchical Representations
Maximilian Nickel, Douwe Kiela
https://arxiv.org/pdf/1705.08039.pdf
  For a 1-D tensor with `axis = 0`, computes
               (x * (1 - epsilon)) / ||x||     if ||x|| > 1 - epsilon
    output =
               x                               otherwise
For `x` with more dimensions, independently normalizes each 1-D slice along
dimension `axis`.
Args:
x: A `Tensor`.
axis: Axis along which to normalize. A scalar or a vector of integers.
epsilon: A small deviation from the edge of the unit sphere for numerical
stability.
name: A name for this operation (optional).
Returns:
A `Tensor` with the same shape as `x`.
"""
with ops.name_scope(name, 'poincare_normalize', [x]) as name:
x = ops.convert_to_tensor(x, name='x')
square_sum = math_ops.reduce_sum(math_ops.square(x), axis, keepdims=True)
x_inv_norm = math_ops.rsqrt(square_sum)
x_inv_norm = math_ops.minimum((1. - epsilon) * x_inv_norm, 1.)
return math_ops.multiply(x, x_inv_norm, name=name)
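# --- Illustrative usage sketch (not part of the original library code). ---
# Example values are assumptions: a vector of norm 5 is pulled back onto the
# boundary of the (1 - epsilon) ball, while a vector already inside the ball
# is returned unchanged.
def _poincare_normalize_usage_sketch():
  x = ops.convert_to_tensor([[3.0, 4.0],   # norm 5      -> rescaled
                             [0.1, 0.1]])  # norm ~0.14  -> unchanged
  return poincare_normalize(x, axis=1)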
def legacy_fully_connected(x,
num_output_units,
activation_fn=None,
weight_init=initializers.xavier_initializer(),
bias_init=init_ops.zeros_initializer(),
name=None,
weight_collections=(ops.GraphKeys.WEIGHTS,),
bias_collections=(ops.GraphKeys.BIASES,),
output_collections=(ops.GraphKeys.ACTIVATIONS,),
trainable=True,
weight_regularizer=None,
bias_regularizer=None):
# pylint: disable=anomalous-backslash-in-string
r"""Adds the parameters for a fully connected layer and returns the output.
A fully connected layer is generally defined as a matrix multiply:
`y = f(w * x + b)` where `f` is given by `activation_fn`. If
`activation_fn` is `None`, the result of `y = w * x + b` is
returned.
If `x` has shape [\\(\text{dim}_0, \text{dim}_1, ..., \text{dim}_n\\)]
with more than 2 dimensions (\\(n > 1\\)), then we repeat the matrix
multiply along the first dimensions. The result r is a tensor of shape
[\\(\text{dim}_0, ..., \text{dim}_{n-1},\\) `num_output_units`],
where \\( r_{i_0, ..., i_{n-1}, k} =
\sum_{0 \leq j < \text{dim}_n} x_{i_0, ... i_{n-1}, j} \cdot w_{j, k}\\).
This is accomplished by reshaping `x` to 2-D
[\\(\text{dim}_0 \cdot ... \cdot \text{dim}_{n-1}, \text{dim}_n\\)]
before the matrix multiply and afterwards reshaping it to
[\\(\text{dim}_0, ..., \text{dim}_{n-1},\\) `num_output_units`].
This op creates `w` and optionally `b`. Bias (`b`) can be disabled by setting
`bias_init` to `None`.
The variable creation is compatible with `tf.compat.v1.variable_scope` and so
can be
reused with `tf.compat.v1.variable_scope` or `tf.compat.v1.make_template`.
Most of the details of variable creation can be controlled by specifying the
initializers (`weight_init` and `bias_init`) and in which collections to place
the created variables (`weight_collections` and `bias_collections`; note that
the variables are always added to the `VARIABLES` collection). The output of
the layer can be placed in custom collections using `output_collections`.
The collections arguments default to `WEIGHTS`, `BIASES` and `ACTIVATIONS`,
respectively.
A per layer regularization can be specified by setting `weight_regularizer`
and `bias_regularizer`, which are applied to the weights and biases
respectively, and whose output is added to the `REGULARIZATION_LOSSES`
collection.
Args:
x: The input `Tensor`.
num_output_units: The size of the output.
activation_fn: Activation function, default set to None to skip it and
maintain a linear activation.
weight_init: An optional weight initialization, defaults to
`xavier_initializer`.
bias_init: An initializer for the bias, defaults to 0. Set to `None` in
order to disable bias.
name: The name for this operation is used to name operations and to find
variables. If specified it must be unique for this scope, otherwise a
unique name starting with "fully_connected" will be created. See
`tf.compat.v1.variable_scope` for details.
weight_collections: List of graph collections to which weights are added.
bias_collections: List of graph collections to which biases are added.
output_collections: List of graph collections to which outputs are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
weight_regularizer: A regularizer like the result of `l1_regularizer` or
`l2_regularizer`. Used for weights.
bias_regularizer: A regularizer like the result of `l1_regularizer` or
`l2_regularizer`. Used for biases.
Returns:
The output of the fully connected layer.
Raises:
ValueError: If x has rank less than 2 or if its last dimension is not set.
"""
with variable_scope.variable_scope(name, 'fully_connected', [x]):
x = ops.convert_to_tensor(x)
dims = x.get_shape().dims
if dims is None:
raise ValueError('dims of x must be known but is None')
if len(dims) < 2:
raise ValueError('rank of x must be at least 2 not: %d' % len(dims))
num_input_units = dims[-1].value
if num_input_units is None:
raise ValueError('last dimension of x must be known but is None')
dtype = x.dtype.base_dtype
weight_collections = set(
list(weight_collections or []) + [ops.GraphKeys.GLOBAL_VARIABLES])
w = variable_scope.get_variable(
'weights',
shape=[num_input_units, num_output_units],
dtype=dtype,
initializer=weight_init,
collections=weight_collections,
regularizer=weight_regularizer,
trainable=trainable)
x_2_dim = x if len(dims) <= 2 else array_ops.reshape(
x, [-1, num_input_units])
y = standard_ops.matmul(x_2_dim, w)
if bias_init is not None:
bias_collections = set(
list(bias_collections or []) + [ops.GraphKeys.GLOBAL_VARIABLES])
b = variable_scope.get_variable(
'bias',
shape=[num_output_units],
dtype=dtype,
initializer=bias_init,
collections=bias_collections,
regularizer=bias_regularizer,
trainable=trainable)
y = nn.bias_add(y, b)
if len(dims) > 2:
out_shape = array_ops.unstack(array_ops.shape(x))
out_shape[-1] = num_output_units
y = array_ops.reshape(y, array_ops.stack(out_shape))
static_shape = x.get_shape().as_list()
static_shape[-1] = num_output_units
y.set_shape(static_shape)
return _apply_activation(y, activation_fn, output_collections)
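# --- Illustrative shape sketch (not part of the original library code). ---
# Example shapes are assumptions: a rank-3 input [batch, time, features] is
# flattened to 2-D for the matmul and reshaped back, so the same weights are
# applied independently at every (batch, time) position.
def _legacy_fully_connected_shape_sketch():
  x = array_ops.ones([4, 7, 16])  # [batch, time, features]
  return legacy_fully_connected(x, num_output_units=32)  # -> [4, 7, 32]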
# TODO(eiderm): Verify and fix autocomplete in colab (also relu6).
# Simple aliases which remove the activation_fn parameter.
elu = functools.partial(fully_connected, activation_fn=nn.elu)
legacy_relu = functools.partial(legacy_fully_connected, activation_fn=nn.relu)
legacy_linear = functools.partial(legacy_fully_connected, activation_fn=None)
relu = functools.partial(fully_connected, activation_fn=nn.relu)
relu6 = functools.partial(fully_connected, activation_fn=nn.relu6)
linear = functools.partial(fully_connected, activation_fn=None)
# Simple alias.
conv1d = convolution1d
conv2d = convolution2d
conv3d = convolution3d
conv2d_transpose = convolution2d_transpose
conv3d_transpose = convolution3d_transpose
conv2d_in_plane = convolution2d_in_plane
separable_conv2d = separable_convolution2d
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/layers.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Contains the normalization layer classes and their functional aliases."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope
__all__ = [
'group_norm',
'instance_norm',
]
DATA_FORMAT_NCHW = 'NCHW'
DATA_FORMAT_NHWC = 'NHWC'
@add_arg_scope
def instance_norm(inputs,
center=True,
scale=True,
epsilon=1e-6,
activation_fn=None,
param_initializers=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
data_format=DATA_FORMAT_NHWC,
scope=None):
"""Functional interface for the instance normalization layer.
Reference: https://arxiv.org/abs/1607.08022.
"Instance Normalization: The Missing Ingredient for Fast Stylization"
Dmitry Ulyanov, Andrea Vedaldi, Victor Lempitsky
Args:
inputs: A tensor with 2 or more dimensions, where the first dimension has
`batch_size`. The normalization is over all but the last dimension if
`data_format` is `NHWC` and the second dimension if `data_format` is
`NCHW`.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
    scale: If True, multiply by `gamma`. If False, `gamma` is
      not used. When the next layer is linear (this also applies to, e.g.,
      `nn.relu`), this can be disabled since the scaling can be done by the
      next layer.
epsilon: Small float added to variance to avoid dividing by zero.
activation_fn: Activation function, default set to None to skip it and
maintain a linear activation.
param_initializers: Optional initializers for beta, gamma, moving mean and
moving variance.
reuse: Whether or not the layer and its variables should be reused. To be
      able to reuse the layer, `scope` must be given.
variables_collections: Optional collections for the variables.
outputs_collections: Collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
data_format: A string. `NHWC` (default) and `NCHW` are supported.
scope: Optional scope for `variable_scope`.
Returns:
A `Tensor` representing the output of the operation.
Raises:
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
ValueError: If the rank of `inputs` is undefined.
ValueError: If rank or channels dimension of `inputs` is undefined.
"""
inputs = ops.convert_to_tensor(inputs)
inputs_shape = inputs.shape
inputs_rank = inputs.shape.ndims
if inputs_rank is None:
raise ValueError('Inputs %s has undefined rank.' % inputs.name)
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
with variable_scope.variable_scope(
scope, 'InstanceNorm', [inputs], reuse=reuse) as sc:
if data_format == DATA_FORMAT_NCHW:
reduction_axis = 1
# For NCHW format, rather than relying on implicit broadcasting, we
# explicitly reshape the params to params_shape_broadcast when computing
# the moments and the batch normalization.
params_shape_broadcast = list(
[1, inputs_shape[1].value] + [1 for _ in range(2, inputs_rank)])
else:
reduction_axis = inputs_rank - 1
params_shape_broadcast = None
moments_axes = list(range(inputs_rank))
del moments_axes[reduction_axis]
del moments_axes[0]
params_shape = inputs_shape[reduction_axis:reduction_axis + 1]
if not params_shape.is_fully_defined():
raise ValueError('Inputs %s has undefined channels dimension %s.' % (
inputs.name, params_shape))
# Allocate parameters for the beta and gamma of the normalization.
beta, gamma = None, None
dtype = inputs.dtype.base_dtype
if param_initializers is None:
param_initializers = {}
if center:
beta_collections = utils.get_variable_collections(
variables_collections, 'beta')
beta_initializer = param_initializers.get(
'beta', init_ops.zeros_initializer())
beta = variables.model_variable('beta',
shape=params_shape,
dtype=dtype,
initializer=beta_initializer,
collections=beta_collections,
trainable=trainable)
if params_shape_broadcast:
beta = array_ops.reshape(beta, params_shape_broadcast)
if scale:
gamma_collections = utils.get_variable_collections(
variables_collections, 'gamma')
gamma_initializer = param_initializers.get(
'gamma', init_ops.ones_initializer())
gamma = variables.model_variable('gamma',
shape=params_shape,
dtype=dtype,
initializer=gamma_initializer,
collections=gamma_collections,
trainable=trainable)
if params_shape_broadcast:
gamma = array_ops.reshape(gamma, params_shape_broadcast)
# Calculate the moments (instance activations).
mean, variance = nn.moments(inputs, moments_axes, keep_dims=True)
# Compute instance normalization.
outputs = nn.batch_normalization(
inputs, mean, variance, beta, gamma, epsilon, name='instancenorm')
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
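# --- Illustrative usage sketch (not part of the original library code). ---
# Example shapes are assumptions: with NHWC inputs, moments are computed per
# example and per channel over the spatial (H, W) axes, and beta/gamma hold
# one scalar per channel.
def _instance_norm_usage_sketch():
  x = array_ops.ones([8, 32, 32, 3])  # NHWC
  return instance_norm(x)             # same shape; beta/gamma have shape [3]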
@add_arg_scope
def group_norm(inputs,
groups=32,
channels_axis=-1,
reduction_axes=(-3, -2),
center=True,
scale=True,
epsilon=1e-6,
activation_fn=None,
param_initializers=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None,
mean_close_to_zero=False):
"""Functional interface for the group normalization layer.
Reference: https://arxiv.org/abs/1803.08494.
"Group Normalization", Yuxin Wu, Kaiming He
Args:
    inputs: A Tensor with at least 2 dimensions, one of which is channels. All
shape dimensions except for batch must be fully defined.
groups: Integer. Divide the channels into this number of groups over which
normalization statistics are computed. This number must be commensurate
with the number of channels in `inputs`.
    channels_axis: An integer. Specifies the index of the channels axis, which
      will be broken into `groups`, across each of which statistics will be
      computed. Must be mutually exclusive with `reduction_axes`. Preferred
      usage is to specify negative integers to be agnostic as to whether a
      batch dimension is included.
reduction_axes: Tuple of integers. Specifies dimensions over which
statistics will be accumulated. Must be mutually exclusive with
`channels_axis`. Statistics will not be accumulated across axes not
      specified in `reduction_axes` nor `channels_axis`. Preferred usage is to
specify negative integers to be agnostic to whether a batch dimension is
included.
Some sample usage cases:
NHWC format: channels_axis=-1, reduction_axes=[-3, -2]
NCHW format: channels_axis=-3, reduction_axes=[-2, -1]
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
    scale: If True, multiply by `gamma`. If False, `gamma` is
      not used. When the next layer is linear (this also applies to, e.g.,
      `nn.relu`), this can be disabled since the scaling can be done by the
      next layer.
epsilon: Small float added to variance to avoid dividing by zero.
activation_fn: Activation function, default set to None to skip it and
maintain a linear activation.
param_initializers: Optional initializers for beta, gamma, moving mean and
moving variance.
reuse: Whether or not the layer and its variables should be reused. To be
      able to reuse the layer, `scope` must be given.
variables_collections: Optional collections for the variables.
outputs_collections: Collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
scope: Optional scope for `variable_scope`.
mean_close_to_zero: The mean of `input` before ReLU will be close to zero
when batch size >= 4k for Resnet-50 on TPU. If `True`, use
`nn.sufficient_statistics` and `nn.normalize_moments` to calculate the
variance. This is the same behavior as `fused` equals `True` in batch
normalization. If `False`, use `nn.moments` to calculate the variance.
      When `mean` is close to zero, like 1e-4, using `mean` to calculate the
      variance may give poor results due to repeated roundoff error and
      denormalization in `mean`. When `mean` is large, like 1e2,
      sum(`input`^2) is so large that only the high-order digits of the
      elements are being accumulated. Thus, using sum((`input` - `mean`)^2)/n
      to calculate the variance gives better accuracy than
      (sum(`input`^2)/n - `mean`^2) when `mean` is large.
Returns:
A `Tensor` representing the output of the operation.
Raises:
ValueError: If the rank of `inputs` is undefined.
ValueError: If rank or channels dimension of `inputs` is undefined.
ValueError: If number of groups is not commensurate with number of channels.
ValueError: If reduction_axes or channels_axis are out of bounds.
ValueError: If reduction_axes are not mutually exclusive with channels_axis.
"""
# TODO(shlens): Support partially defined shapes for the inputs.
inputs = ops.convert_to_tensor(inputs)
if inputs.shape.ndims is None:
raise ValueError('Inputs %s has undefined rank.' % inputs.name)
if channels_axis > (inputs.shape.ndims - 1):
raise ValueError('Axis is out of bounds.')
# Use dynamic shape for not fully defined dimensions in the inputs.
  dynamic_shape = array_ops.shape(inputs)
input_shape_list = []
for i, dim in enumerate(inputs.shape):
if dim.value is None:
      input_shape_list.append(dynamic_shape[i])
else:
input_shape_list.append(dim)
# Standardize the channels_axis to be positive and identify # of channels.
if channels_axis < 0:
channels_axis = inputs.shape.ndims + channels_axis
channels = inputs.shape[channels_axis].value
if channels is None:
raise ValueError('Inputs %s has undefined channel dimension: %d.' % (
inputs.name, channels_axis))
# Standardize the reduction_axes to be positive.
reduction_axes = list(reduction_axes)
for i in range(len(reduction_axes)):
if reduction_axes[i] < 0:
reduction_axes[i] += inputs.shape.ndims
for a in reduction_axes:
if a > inputs.shape.ndims:
raise ValueError('Axis is out of bounds.')
if inputs.shape[a].value is None:
raise ValueError('Inputs %s has undefined dimensions %d.' % (
inputs.name, a))
if channels_axis == a:
raise ValueError('reduction_axis must be mutually exclusive '
'with channels_axis')
if groups > channels:
raise ValueError('Invalid groups %d for %d channels.' % (groups, channels))
if channels % groups != 0:
raise ValueError('%d channels is not commensurate with %d groups.' %
(channels, groups))
# Determine axes before channels. Some examples of common image formats:
# 'NCHW': before = [N], after = [HW]
# 'NHWC': before = [NHW], after = []
axes_before_channels = input_shape_list[:channels_axis]
axes_after_channels = input_shape_list[channels_axis+1:]
# Manually broadcast the parameters to conform to the number of groups.
params_shape_broadcast = ([1] * len(axes_before_channels) +
[groups, channels // groups] +
[1] * len(axes_after_channels))
# Reshape the input by the group within the channel dimension.
inputs_shape = (axes_before_channels + [groups, channels // groups] +
axes_after_channels)
inputs = array_ops.reshape(inputs, inputs_shape)
# Determine the dimensions across which moments are calculated.
moments_axes = [channels_axis + 1]
for a in reduction_axes:
if a > channels_axis:
moments_axes.append(a + 1)
else:
moments_axes.append(a)
with variable_scope.variable_scope(
scope, 'GroupNorm', [inputs], reuse=reuse) as sc:
# Note that the params_shape is the number of channels always.
params_shape = [channels]
# Allocate parameters for the beta and gamma of the normalization.
beta, gamma = None, None
dtype = inputs.dtype.base_dtype
if param_initializers is None:
param_initializers = {}
if center:
beta_collections = utils.get_variable_collections(
variables_collections, 'beta')
beta_initializer = param_initializers.get(
'beta', init_ops.zeros_initializer())
beta = variables.model_variable('beta',
shape=params_shape,
dtype=dtype,
initializer=beta_initializer,
collections=beta_collections,
trainable=trainable)
beta = array_ops.reshape(beta, params_shape_broadcast)
if scale:
gamma_collections = utils.get_variable_collections(
variables_collections, 'gamma')
gamma_initializer = param_initializers.get(
'gamma', init_ops.ones_initializer())
gamma = variables.model_variable('gamma',
shape=params_shape,
dtype=dtype,
initializer=gamma_initializer,
collections=gamma_collections,
trainable=trainable)
gamma = array_ops.reshape(gamma, params_shape_broadcast)
# Calculate the moments.
if mean_close_to_zero:
# One pass algorithm returns better result when mean is close to zero.
counts, means_ss, variance_ss, _ = nn.sufficient_statistics(
inputs, moments_axes, keep_dims=True)
mean, variance = nn.normalize_moments(
counts, means_ss, variance_ss, shift=None)
else:
mean, variance = nn.moments(inputs, moments_axes, keep_dims=True)
# Compute normalization.
# TODO(shlens): Fix nn.batch_normalization to handle the 5-D Tensor
# appropriately so that this operation may be faster.
gain = math_ops.rsqrt(variance + epsilon)
offset = -mean * gain
if gamma is not None:
gain *= gamma
offset *= gamma
if beta is not None:
offset += beta
outputs = inputs * gain + offset
# Collapse the groups into the channel dimension.
outputs = array_ops.reshape(outputs, input_shape_list)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
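# --- Illustrative usage sketch (not part of the original library code). ---
# Example shapes are assumptions: NHWC inputs with 32 channels split into 8
# groups of 4 channels each; moments are taken over (H, W) and within each
# group of channels.
def _group_norm_usage_sketch():
  x = array_ops.ones([2, 16, 16, 32])  # NHWC
  return group_norm(x, groups=8, channels_axis=-1, reduction_axes=(-3, -2))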
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/normalization.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for summary creation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import re
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.summary import summary
__all__ = [
'summarize_tensor',
'summarize_activation',
'summarize_tensors',
'summarize_collection',
'summarize_variables',
'summarize_weights',
'summarize_biases',
'summarize_activations',
]
# TODO(wicke): add more unit tests for summarization functions.
def _add_scalar_summary(tensor, tag=None):
"""Add a scalar summary operation for the tensor.
Args:
tensor: The tensor to summarize.
tag: The tag to use, if None then use tensor's op's name.
Returns:
The created histogram summary.
Raises:
ValueError: If the tag is already in use or the rank is not 0.
"""
tensor.get_shape().assert_has_rank(0)
tag = tag or '%s_summary' % tensor.op.name
return summary.scalar(tag, tensor)
def _add_histogram_summary(tensor, tag=None):
"""Add a summary operation for the histogram of a tensor.
Args:
tensor: The tensor to summarize.
tag: The tag to use, if None then use tensor's op's name.
Returns:
The created histogram summary.
Raises:
ValueError: If the tag is already in use.
"""
tag = tag or '%s_summary' % tensor.op.name
return summary.histogram(tag, tensor)
def summarize_activation(op):
"""Summarize an activation.
This applies the given activation and adds useful summaries specific to the
activation.
Args:
op: The tensor to summarize (assumed to be a layer activation).
Returns:
The summary op created to summarize `op`.
"""
if op.op.type in ('Relu', 'Softplus', 'Relu6'):
# Using inputs to avoid floating point equality and/or epsilons.
_add_scalar_summary(
standard_ops.reduce_mean(
standard_ops.to_float(
standard_ops.less(op.op.inputs[
0], standard_ops.cast(0.0, op.op.inputs[0].dtype)))),
'%s/zeros' % op.op.name)
if op.op.type == 'Relu6':
_add_scalar_summary(
standard_ops.reduce_mean(
standard_ops.to_float(
standard_ops.greater(op.op.inputs[
0], standard_ops.cast(6.0, op.op.inputs[0].dtype)))),
'%s/sixes' % op.op.name)
return _add_histogram_summary(op, '%s/activation' % op.op.name)
def summarize_tensor(tensor, tag=None):
"""Summarize a tensor using a suitable summary type.
This function adds a summary op for `tensor`. The type of summary depends on
the shape of `tensor`. For scalars, a `scalar_summary` is created, for all
other tensors, `histogram_summary` is used.
Args:
tensor: The tensor to summarize
tag: The tag to use, if None then use tensor's op's name.
Returns:
The summary op created or None for string tensors.
"""
# Skips string tensors and boolean tensors (not handled by the summaries).
if (tensor.dtype.is_compatible_with(dtypes.string) or
tensor.dtype.base_dtype == dtypes.bool):
return None
if tensor.get_shape().ndims == 0:
# For scalars, use a scalar summary.
return _add_scalar_summary(tensor, tag)
else:
# We may land in here if the rank is still unknown. The histogram won't
# hurt if this ends up being a scalar.
return _add_histogram_summary(tensor, tag)
def summarize_tensors(tensors, summarizer=summarize_tensor):
"""Summarize a set of tensors."""
return [summarizer(tensor) for tensor in tensors]
def summarize_collection(collection,
name_filter=None,
summarizer=summarize_tensor):
"""Summarize a graph collection of tensors, possibly filtered by name."""
tensors = []
for op in ops.get_collection(collection):
if name_filter is None or re.match(name_filter, op.op.name):
tensors.append(op)
return summarize_tensors(tensors, summarizer)
# Utility functions for commonly used collections
summarize_variables = functools.partial(summarize_collection,
ops.GraphKeys.GLOBAL_VARIABLES)
summarize_weights = functools.partial(summarize_collection,
ops.GraphKeys.WEIGHTS)
summarize_biases = functools.partial(summarize_collection, ops.GraphKeys.BIASES)
def summarize_activations(name_filter=None, summarizer=summarize_activation):
"""Summarize activations, using `summarize_activation` to summarize."""
return summarize_collection(ops.GraphKeys.ACTIVATIONS, name_filter,
summarizer)
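# --- Illustrative usage sketch (not part of the original library code). ---
# Hypothetical usage: add summaries for every weight variable whose name
# contains "conv" (the filter is an assumption for the example), then for all
# tensors recorded in the ACTIVATIONS collection.
def _summaries_usage_sketch():
  weight_summaries = summarize_weights(name_filter='.*conv.*')
  activation_summaries = summarize_activations()
  return weight_summaries + activation_summaries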
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/summaries.py
|