# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for prefetching_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import prefetching_ops
from tensorflow.python.util import deprecation
@deprecation.deprecated(None,
"Use `tf.data.experimental.prefetch_to_device(...)`.")
def prefetch_to_device(device, buffer_size=None):
"""A transformation that prefetches dataset values to the given `device`.
NOTE: Although the transformation creates a `tf.data.Dataset`, the
transformation must be the final `Dataset` in the input pipeline.
Args:
device: A string. The name of a device to which elements will be prefetched.
buffer_size: (Optional.) The number of elements to buffer on `device`.
Defaults to an automatically chosen value.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return prefetching_ops.prefetch_to_device(device, buffer_size)
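# A minimal usage sketch, assuming a TF 1.x environment with tf.data and a GPU
# available: `prefetch_to_device` is applied as the final transformation of a
# pipeline. The "/gpu:0" device string and buffer size are illustrative choices.
def _example_prefetch_to_device():
  import tensorflow as tf
  dataset = tf.data.Dataset.range(100).batch(10)
  # `prefetch_to_device` must be the last transformation in the pipeline.
  dataset = dataset.apply(
      tf.data.experimental.prefetch_to_device("/gpu:0", buffer_size=2))
  return dataset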
@deprecation.deprecated(None, "Use `tf.data.experimental.copy_to_device(...)`.")
def copy_to_device(target_device, source_device="/cpu:0"):
"""A transformation that copies dataset elements to the given `target_device`.
Args:
target_device: The name of a device to which elements will be copied.
source_device: The original device on which `input_dataset` will be placed.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return prefetching_ops.copy_to_device(target_device, source_device)
# Source: tensorflow-master / tensorflow/contrib/data/python/ops/prefetching_ops.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Grouping dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import grouping
from tensorflow.python.util import deprecation
@deprecation.deprecated(None,
"Use `tf.data.experimental.group_by_reducer(...)`.")
def group_by_reducer(key_func, reducer):
"""A transformation that groups elements and performs a reduction.
This transformation maps each element of a dataset to a key using `key_func` and
groups the elements by key. The `reducer` is used to process each group; its
`init_func` is used to initialize state for each group when it is created, the
`reduce_func` is used to update the state every time an element is mapped to
the matching group, and the `finalize_func` is used to map the final state to
an output value.
Args:
key_func: A function mapping a nested structure of tensors
(having shapes and types defined by `self.output_shapes` and
`self.output_types`) to a scalar `tf.int64` tensor.
reducer: An instance of `Reducer`, which captures the reduction logic using
the `init_func`, `reduce_func`, and `finalize_func` functions.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return grouping.group_by_reducer(key_func, reducer)
@deprecation.deprecated(None,
"Use `tf.data.experimental.group_by_window(...)`.")
def group_by_window(key_func,
reduce_func,
window_size=None,
window_size_func=None):
"""A transformation that groups windows of elements by key and reduces them.
This transformation maps each consecutive element in a dataset to a key
using `key_func` and groups the elements by key. It then applies
`reduce_func` to at most `window_size_func(key)` elements matching the same
key. All except the final window for each key will contain
`window_size_func(key)` elements; the final window may be smaller.
You may provide either a constant `window_size` or a window size determined by
the key through `window_size_func`.
Args:
key_func: A function mapping a nested structure of tensors
(having shapes and types defined by `self.output_shapes` and
`self.output_types`) to a scalar `tf.int64` tensor.
reduce_func: A function mapping a key and a dataset of up to `window_size`
consecutive elements matching that key to another dataset.
window_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements matching the same key to combine in a single
batch, which will be passed to `reduce_func`. Mutually exclusive with
`window_size_func`.
window_size_func: A function mapping a key to a `tf.int64` scalar
`tf.Tensor`, representing the number of consecutive elements matching
the same key to combine in a single batch, which will be passed to
`reduce_func`. Mutually exclusive with `window_size`.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: if neither or both of {`window_size`, `window_size_func`} are
passed.
"""
return grouping.group_by_window(key_func, reduce_func, window_size,
window_size_func)
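# A minimal usage sketch, assuming a TF 1.x environment with tf.data: integers
# are grouped by parity and emitted in batches of up to `window_size` elements
# that share a key. The input dataset and window size are illustrative choices.
def _example_group_by_window():
  import tensorflow as tf
  dataset = tf.data.Dataset.range(10)
  dataset = dataset.apply(
      tf.data.experimental.group_by_window(
          key_func=lambda x: x % 2,  # 0 for even elements, 1 for odd elements
          reduce_func=lambda key, window: window.batch(3),
          window_size=3))
  return dataset  # yields batches such as [0, 2, 4], [1, 3, 5], ...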
@deprecation.deprecated(
None, "Use `tf.data.experimental.bucket_by_sequence_length(...)`.")
def bucket_by_sequence_length(element_length_func,
bucket_boundaries,
bucket_batch_sizes,
padded_shapes=None,
padding_values=None,
pad_to_bucket_boundary=False,
no_padding=False):
"""A transformation that buckets elements in a `Dataset` by length.
Elements of the `Dataset` are grouped together by length and then are padded
and batched.
This is useful for sequence tasks in which the elements have variable length.
Grouping together elements that have similar lengths reduces the total
fraction of padding in a batch, which increases training step efficiency.
Args:
element_length_func: function from element in `Dataset` to `tf.int32`,
determines the length of the element, which will determine the bucket it
goes into.
bucket_boundaries: `list<int>`, upper length boundaries of the buckets.
bucket_batch_sizes: `list<int>`, batch size per bucket. Length should be
`len(bucket_boundaries) + 1`.
padded_shapes: Nested structure of `tf.TensorShape` to pass to
`tf.data.Dataset.padded_batch`. If not provided, will use
`dataset.output_shapes`, which will result in variable length dimensions
being padded out to the maximum length in each batch.
padding_values: Values to pad with, passed to
`tf.data.Dataset.padded_batch`. Defaults to padding with 0.
pad_to_bucket_boundary: bool, if `False`, will pad dimensions with unknown
size to maximum length in batch. If `True`, will pad dimensions with
unknown size to bucket boundary minus 1 (i.e., the maximum length in each
bucket), and caller must ensure that the source `Dataset` does not contain
any elements with length longer than `max(bucket_boundaries)`.
no_padding: `bool`, indicates whether to pad the batch features (features
need to be either of type `tf.SparseTensor` or of same shape).
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: if `len(bucket_batch_sizes) != len(bucket_boundaries) + 1`.
"""
return grouping.bucket_by_sequence_length(
element_length_func, bucket_boundaries, bucket_batch_sizes, padded_shapes,
padding_values, pad_to_bucket_boundary, no_padding)
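# A minimal usage sketch, assuming a TF 1.x environment with tf.data:
# variable-length integer sequences are bucketed by length and padded-batched.
# The example sequences, boundaries, and batch sizes are illustrative choices.
def _example_bucket_by_sequence_length():
  import tensorflow as tf
  sequences = [[1], [2, 3], [4, 5, 6, 7], [8, 9, 10, 11, 12]]
  dataset = tf.data.Dataset.from_generator(
      lambda: iter(sequences), output_types=tf.int32, output_shapes=[None])
  dataset = dataset.apply(
      tf.data.experimental.bucket_by_sequence_length(
          element_length_func=lambda elem: tf.shape(elem)[0],
          bucket_boundaries=[2, 4],       # buckets: len < 2, 2 <= len < 4, len >= 4
          bucket_batch_sizes=[2, 2, 2]))  # len(bucket_boundaries) + 1 entries
  return dataset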
class Reducer(grouping.Reducer):
"""A reducer is used for reducing a set of elements.
A reducer is represented as a tuple of the three functions:
1) initialization function: key => initial state
2) reduce function: (old state, input) => new state
3) finalization function: state => result
"""
@deprecation.deprecated(None, "Use `tf.data.experimental.Reducer(...)`.")
def __init__(self, init_func, reduce_func, finalize_func):
super(Reducer, self).__init__(init_func, reduce_func, finalize_func)
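# A minimal usage sketch, assuming a TF 1.x environment with tf.data: a
# `Reducer` is combined with `group_by_reducer` to sum elements per parity
# key. The input dataset and the sum reduction are illustrative choices.
def _example_group_by_reducer():
  import tensorflow as tf
  dataset = tf.data.Dataset.range(10)
  sum_reducer = tf.data.experimental.Reducer(
      init_func=lambda key: tf.constant(0, dtype=tf.int64),  # key => initial state
      reduce_func=lambda state, value: state + value,        # (state, input) => state
      finalize_func=lambda state: state)                     # state => result
  dataset = dataset.apply(
      tf.data.experimental.group_by_reducer(
          key_func=lambda x: x % 2, reducer=sum_reducer))
  return dataset  # yields the per-key sums: 20 (even elements), 25 (odd elements)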
# Source: tensorflow-master / tensorflow/contrib/data/python/ops/grouping.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for controlling threading in `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.data.experimental.ops.threadpool import override_threadpool
from tensorflow.python.data.experimental.ops.threadpool import PrivateThreadPool
# Source: tensorflow-master / tensorflow/contrib/data/python/ops/threadpool.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental `dataset` API for parsing example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import parsing_ops
from tensorflow.python.util import deprecation
@deprecation.deprecated(
None, "Use `tf.data.experimental.parse_example_dataset(...)`.")
def parse_example_dataset(features, num_parallel_calls=1):
"""A transformation that parses `Example` protos into a `dict` of tensors.
Parses a number of serialized `Example` protos given in `serialized`. We refer
to `serialized` as a batch with `batch_size` many entries of individual
`Example` protos.
This op parses serialized examples into a dictionary mapping keys to `Tensor`
and `SparseTensor` objects. `features` is a dict from keys to `VarLenFeature`,
`SparseFeature`, and `FixedLenFeature` objects. Each `VarLenFeature`
and `SparseFeature` is mapped to a `SparseTensor`, and each
`FixedLenFeature` is mapped to a `Tensor`. See `tf.io.parse_example` for more
details about feature dictionaries.
Args:
features: A `dict` mapping feature keys to `FixedLenFeature`,
`VarLenFeature`, and `SparseFeature` values.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number of parsing processes to call in parallel.
Returns:
A dataset transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: if features argument is None.
"""
return parsing_ops.parse_example_dataset(features, num_parallel_calls)
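# A minimal usage sketch, assuming a TF 1.x environment with tf.data: batches
# of serialized `Example` protos read from TFRecord files are parsed into
# feature tensors. The feature spec, batch size, and parallelism are
# illustrative choices.
def _example_parse_example_dataset(filenames):
  import tensorflow as tf
  feature_spec = {
      "label": tf.io.FixedLenFeature([], tf.int64),
      "tokens": tf.io.VarLenFeature(tf.int64),  # parsed into a SparseTensor
  }
  dataset = tf.data.TFRecordDataset(filenames).batch(32)
  dataset = dataset.apply(
      tf.data.experimental.parse_example_dataset(
          feature_spec, num_parallel_calls=4))
  return dataset  # elements are dicts: {"label": Tensor, "tokens": SparseTensor}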
# Source: tensorflow-master / tensorflow/contrib/data/python/ops/parsing_ops.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unique element dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import unique as experimental_unique
from tensorflow.python.util import deprecation
@deprecation.deprecated(None, "Use `tf.data.experimental.unique()`.")
def unique():
"""Creates a `Dataset` from another `Dataset`, discarding duplicates.
Use this transformation to produce a dataset that contains one instance of
each unique element in the input. For example:
```python
dataset = tf.data.Dataset.from_tensor_slices([1, 37, 2, 37, 2, 1])
# Using `unique()` will drop the duplicate elements.
dataset = dataset.apply(tf.data.experimental.unique()) # ==> { 1, 37, 2 }
```
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return experimental_unique.unique()
# Source: tensorflow-master / tensorflow/contrib/data/python/ops/unique.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for Datasets and Iterators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.ops import get_single_element as experimental_get_single_element
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.util import deprecation
@deprecation.deprecated(None,
"Use `tf.data.experimental.get_single_element(...)`.")
def get_single_element(dataset):
"""Returns the single element in `dataset` as a nested structure of tensors.
This function enables you to use a `tf.data.Dataset` in a stateless
"tensor-in tensor-out" expression, without creating a
`tf.compat.v1.data.Iterator`.
This can be useful when your preprocessing transformations are expressed
as a `Dataset`, and you want to use the transformation at serving time.
For example:
```python
input_batch = tf.compat.v1.placeholder(tf.string, shape=[BATCH_SIZE])
def preprocessing_fn(input_str):
# ...
return image, label
dataset = (tf.data.Dataset.from_tensor_slices(input_batch)
.map(preprocessing_fn, num_parallel_calls=BATCH_SIZE)
.batch(BATCH_SIZE))
image_batch, label_batch = tf.data.experimental.get_single_element(dataset)
```
Args:
dataset: A `tf.data.Dataset` object containing a single element.
Returns:
A nested structure of `tf.Tensor` objects, corresponding to the single
element of `dataset`.
Raises:
TypeError: if `dataset` is not a `tf.data.Dataset` object.
InvalidArgumentError (at runtime): if `dataset` does not contain exactly
one element.
"""
return experimental_get_single_element.get_single_element(dataset)
@deprecation.deprecated(None, "Use `tf.data.Dataset.reduce(...)`.")
def reduce_dataset(dataset, reducer):
"""Returns the result of reducing the `dataset` using `reducer`.
Args:
dataset: A `tf.data.Dataset` object.
reducer: A `tf.data.experimental.Reducer` object representing the reduce
logic.
Returns:
A nested structure of `tf.Tensor` objects, corresponding to the result
of reducing `dataset` using `reducer`.
Raises:
TypeError: if `dataset` is not a `tf.data.Dataset` object.
"""
if not isinstance(dataset, dataset_ops.Dataset):
raise TypeError("`dataset` must be a `tf.data.Dataset` object.")
return dataset.reduce(reducer.init_func(np.int64(0)), reducer.reduce_func)
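# A minimal usage sketch, assuming a TF 1.x environment with tf.data: a
# dataset is reduced to the sum of its elements, first through the deprecated
# wrapper above and then through the recommended `tf.data.Dataset.reduce`.
def _example_reduce_dataset():
  import tensorflow as tf
  dataset = tf.data.Dataset.range(10)
  sum_reducer = tf.data.experimental.Reducer(
      init_func=lambda _: tf.constant(0, dtype=tf.int64),
      reduce_func=lambda state, value: state + value,
      finalize_func=lambda state: state)
  total = reduce_dataset(dataset, sum_reducer)  # deprecated wrapper above
  total_v2 = dataset.reduce(tf.constant(0, dtype=tf.int64),
                            lambda state, value: state + value)
  return total, total_v2  # both evaluate to 45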
# Source: tensorflow-master / tensorflow/contrib/data/python/ops/get_single_element.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Scan dataset transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import scan_ops
from tensorflow.python.util import deprecation
@deprecation.deprecated(None, "Use `tf.data.experimental.scan(...)`.")
def scan(initial_state, scan_func):
"""A transformation that scans a function across an input dataset.
This transformation is a stateful relative of `tf.data.Dataset.map`.
In addition to mapping `scan_func` across the elements of the input dataset,
`scan()` accumulates one or more state tensors, whose initial values are
`initial_state`.
Args:
initial_state: A nested structure of tensors, representing the initial state
of the accumulator.
scan_func: A function that maps `(old_state, input_element)` to
`(new_state, output_element)`. It must take two arguments and return a
pair of nested structures of tensors. The `new_state` must match the
structure of `initial_state`.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return scan_ops.scan(initial_state, scan_func)
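# A minimal usage sketch, assuming a TF 1.x environment with tf.data: `scan`
# computes a running sum over the input. The input dataset is an illustrative
# choice.
def _example_scan():
  import tensorflow as tf
  dataset = tf.data.Dataset.range(5)
  # The state is a single int64 accumulator; each step emits the new running total.
  dataset = dataset.apply(
      tf.data.experimental.scan(
          initial_state=tf.constant(0, dtype=tf.int64),
          scan_func=lambda state, x: (state + x, state + x)))
  return dataset  # yields 0, 1, 3, 6, 10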
# Source: tensorflow-master / tensorflow/contrib/data/python/ops/scan_ops.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental shuffle ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import shuffle_ops
from tensorflow.python.util import deprecation
@deprecation.deprecated(None,
"Use `tf.data.experimental.shuffle_and_repeat(...)`.")
def shuffle_and_repeat(buffer_size, count=None, seed=None):
"""Shuffles and repeats a Dataset returning a new permutation for each epoch.
`dataset.apply(tf.data.experimental.shuffle_and_repeat(buffer_size, count))`
is equivalent to
`dataset.shuffle(buffer_size, reshuffle_each_iteration=True).repeat(count)`
The difference is that the latter dataset is not serializable. So,
if you need to checkpoint an input pipeline with reshuffling you must use
this implementation.
Args:
buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the
maximum number of elements that will be buffered when prefetching.
count: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
number of times the dataset should be repeated. The default behavior
(if `count` is `None` or `-1`) is for the dataset to be repeated
indefinitely.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
random seed that will be used to create the distribution. See
`tf.compat.v1.set_random_seed` for behavior.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return shuffle_ops.shuffle_and_repeat(buffer_size, count, seed)
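# A minimal usage sketch, assuming a TF 1.x environment with tf.data: the
# dataset is shuffled with a fixed-size buffer and repeated for a fixed number
# of epochs. The buffer size, epoch count, and seed are illustrative choices.
def _example_shuffle_and_repeat():
  import tensorflow as tf
  dataset = tf.data.Dataset.range(100)
  dataset = dataset.apply(
      tf.data.experimental.shuffle_and_repeat(
          buffer_size=100, count=10, seed=42))
  return dataset  # 10 epochs, each visiting 0..99 in a fresh random order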
# Source: tensorflow-master / tensorflow/contrib/data/python/ops/shuffle_ops.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for tf.data writers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import writers
from tensorflow.python.util import deprecation
class TFRecordWriter(writers.TFRecordWriter):
"""Writes data to a TFRecord file."""
@deprecation.deprecated(
None, "Use `tf.data.experimental.TFRecordWriter(...)`.")
def __init__(self, filename, compression_type=None):
super(TFRecordWriter, self).__init__(filename, compression_type)
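# A minimal usage sketch, assuming a TF 1.x environment with tf.data: a dataset
# of serialized strings is written to a TFRecord file. The file name is an
# illustrative choice, and the returned op must be run in a session under
# graph mode.
def _example_tfrecord_writer(serialized_dataset,
                             filename="/tmp/example.tfrecord"):
  import tensorflow as tf
  # `serialized_dataset` must yield scalar tf.string elements.
  writer = tf.data.experimental.TFRecordWriter(filename)
  return writer.write(serialized_dataset)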
# Source: tensorflow-master / tensorflow/contrib/data/python/ops/writers.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Iterator ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import iterator_ops
from tensorflow.python.util import deprecation
@deprecation.deprecated(
None, "Use `tf.data.experimental.make_saveable_from_iterator(...)`.")
def make_saveable_from_iterator(iterator):
"""Returns a SaveableObject for saving/restore iterator state using Saver.
Args:
iterator: Iterator.
For example:
```python
with tf.Graph().as_default():
ds = tf.data.Dataset.range(10)
iterator = ds.make_initializable_iterator()
# Build the iterator SaveableObject.
saveable_obj = tf.data.experimental.make_saveable_from_iterator(iterator)
# Add the SaveableObject to the SAVEABLE_OBJECTS collection so
# it can be automatically saved using Saver.
tf.compat.v1.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, saveable_obj)
saver = tf.compat.v1.train.Saver()
while continue_training:
... Perform training ...
if should_save_checkpoint:
saver.save()
```
Note: When restoring the iterator, the existing iterator state is completely
discarded. This means that any changes you may have made to the Dataset
graph will be discarded as well! This includes the new Dataset graph
that you may have built during validation. So, while running validation,
make sure to run the initializer for the validation input pipeline after
restoring the checkpoint.
Note: Not all iterators support checkpointing yet. Attempting to save the
state of an unsupported iterator will throw an error.
"""
return iterator_ops.make_saveable_from_iterator(iterator)
class CheckpointInputPipelineHook(iterator_ops.CheckpointInputPipelineHook):
"""Checkpoints input pipeline state every N steps or seconds.
This hook saves the state of the iterators in the `Graph` so that when
training is resumed the input pipeline continues from where it left off.
This could potentially avoid overfitting in certain pipelines where the
number of training steps per eval is small compared to the dataset
size, or if the training pipeline is pre-empted.
Differences from `CheckpointSaverHook`:
1. Saves only the input pipelines in the "iterators" collection and not the
global variables or other saveable objects.
2. Does not write the `GraphDef` and `MetaGraphDef` to the summary.
Example of checkpointing the training pipeline:
```python
est = tf.estimator.Estimator(model_fn)
while True:
est.train(
train_input_fn,
hooks=[tf.data.experimental.CheckpointInputPipelineHook(est)],
steps=train_steps_per_eval)
# Note: We do not pass the hook here.
metrics = est.evaluate(eval_input_fn)
if should_stop_the_training(metrics):
break
```
This hook should be used if the input pipeline state needs to be saved
separate from the model checkpoint. Doing so may be useful for a few reasons:
1. The input pipeline checkpoint may be large, if there are large shuffle
or prefetch buffers for instance, and may bloat the checkpoint size.
2. If the input pipeline is shared between training and validation, restoring
the checkpoint during validation may override the validation input
pipeline.
For saving the input pipeline checkpoint alongside the model weights use
`tf.data.experimental.make_saveable_from_iterator` directly to create a
`SaveableObject` and add to the `SAVEABLE_OBJECTS` collection. Note, however,
that you will need to be careful not to restore the training iterator during
eval. You can do that by not adding the iterator to the SAVEABLE_OBJECTS
collection when building the eval graph.
"""
@deprecation.deprecated(
None, "Use `tf.data.experimental.CheckpointInputPipelineHook(...)`.")
def __init__(self, estimator):
super(CheckpointInputPipelineHook, self).__init__(estimator)
# Source: tensorflow-master / tensorflow/contrib/data/python/ops/iterator_ops.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-deterministic dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.util import deprecation
@deprecation.deprecated(None,
"Use `tf.data.experimental.parallel_interleave(...)`.")
def parallel_interleave(map_func,
cycle_length,
block_length=1,
sloppy=False,
buffer_output_elements=None,
prefetch_input_elements=None):
"""A parallel version of the `Dataset.interleave()` transformation.
`parallel_interleave()` maps `map_func` across its input to produce nested
datasets, and outputs their elements interleaved. Unlike
`tf.data.Dataset.interleave`, it gets elements from `cycle_length` nested
datasets in parallel, which increases the throughput, especially in the
presence of stragglers. Furthermore, the `sloppy` argument can be used to
improve performance, by relaxing the requirement that the outputs are produced
in a deterministic order, and allowing the implementation to skip over nested
datasets whose elements are not readily available when requested.
Example usage:
```python
# Preprocess 4 files concurrently.
filenames = tf.data.Dataset.list_files("/path/to/data/train*.tfrecords")
dataset = filenames.apply(
tf.data.experimental.parallel_interleave(
lambda filename: tf.data.TFRecordDataset(filename),
cycle_length=4))
```
WARNING: If `sloppy` is `True`, the order of produced elements is not
deterministic.
Args:
map_func: A function mapping a nested structure of tensors to a `Dataset`.
cycle_length: The number of input `Dataset`s to interleave from in parallel.
block_length: The number of consecutive elements to pull from an input
`Dataset` before advancing to the next input `Dataset`.
sloppy: If false, elements are produced in deterministic order. Otherwise,
the implementation is allowed, for the sake of expediency, to produce
elements in a non-deterministic order.
buffer_output_elements: The number of elements each iterator being
interleaved should buffer (similar to the `.prefetch()` transformation for
each interleaved iterator).
prefetch_input_elements: The number of input elements to transform to
iterators before they are needed for interleaving.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return interleave_ops.parallel_interleave(
map_func, cycle_length, block_length, sloppy, buffer_output_elements,
prefetch_input_elements)
@deprecation.deprecated(
None, "Use `tf.contrib.data.parallel_interleave(..., sloppy=True)`.")
def sloppy_interleave(map_func, cycle_length, block_length=1):
"""A non-deterministic version of the `Dataset.interleave()` transformation.
`sloppy_interleave()` maps `map_func` across `dataset`, and
non-deterministically interleaves the results.
The resulting dataset is almost identical to `interleave`. The key
difference is that if retrieving a value from a given output iterator would
cause `get_next` to block, that iterator will be skipped, and consumed
when next available. If consuming from all iterators would cause the
`get_next` call to block, the `get_next` call blocks until the first value is
available.
If the underlying datasets produce elements as fast as they are consumed, the
`sloppy_interleave` transformation behaves identically to `interleave`.
However, if an underlying dataset would block the consumer,
`sloppy_interleave` can violate the round-robin order (that `interleave`
strictly obeys), producing an element from a different underlying
dataset instead.
Example usage:
```python
# Preprocess 4 files concurrently.
filenames = tf.data.Dataset.list_files("/path/to/data/train*.tfrecords")
dataset = filenames.apply(
tf.contrib.data.sloppy_interleave(
lambda filename: tf.data.TFRecordDataset(filename),
cycle_length=4))
```
WARNING: The order of elements in the resulting dataset is not
deterministic. Use `Dataset.interleave()` if you want the elements to have a
deterministic order.
Args:
map_func: A function mapping a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to a
`Dataset`.
cycle_length: The number of input `Dataset`s to interleave from in parallel.
block_length: The number of consecutive elements to pull from an input
`Dataset` before advancing to the next input `Dataset`. Note:
`sloppy_interleave` will skip the remainder of elements in the
`block_length` in order to avoid blocking.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return interleave_ops.parallel_interleave(
map_func, cycle_length, block_length, sloppy=True)
@deprecation.deprecated(None,
"Use `tf.data.experimental.sample_from_datasets(...)`.")
def sample_from_datasets(datasets, weights=None, seed=None):
"""Samples elements at random from the datasets in `datasets`.
Args:
datasets: A list of `tf.data.Dataset` objects with compatible structure.
weights: (Optional.) A list of `len(datasets)` floating-point values where
`weights[i]` represents the probability with which an element should be
sampled from `datasets[i]`, or a `tf.data.Dataset` object where each
element is such a list. Defaults to a uniform distribution across
`datasets`.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
random seed that will be used to create the distribution. See
`tf.compat.v1.set_random_seed` for behavior.
Returns:
A dataset that interleaves elements from `datasets` at random, according to
`weights` if provided, otherwise with uniform probability.
Raises:
TypeError: If the `datasets` or `weights` arguments have the wrong type.
ValueError: If the `weights` argument is specified and does not match the
length of `datasets`.
"""
return interleave_ops.sample_from_datasets(datasets, weights, seed)
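# A minimal usage sketch, assuming a TF 1.x environment with tf.data: two
# datasets are mixed with a 90/10 sampling ratio. The datasets, weights, and
# seed are illustrative choices.
def _example_sample_from_datasets():
  import tensorflow as tf
  common = tf.data.Dataset.from_tensors("common").repeat()
  rare = tf.data.Dataset.from_tensors("rare").repeat()
  mixed = tf.data.experimental.sample_from_datasets(
      [common, rare], weights=[0.9, 0.1], seed=7)
  return mixed  # roughly nine "common" elements for every "rare" element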
@deprecation.deprecated(None,
"Use `tf.data.experimental.choose_from_datasets(...)`.")
def choose_from_datasets(datasets, choice_dataset):
"""Creates a dataset that deterministically chooses elements from `datasets`.
For example, given the following datasets:
```python
datasets = [tf.data.Dataset.from_tensors("foo").repeat(),
tf.data.Dataset.from_tensors("bar").repeat(),
tf.data.Dataset.from_tensors("baz").repeat()]
# Define a dataset containing `[0, 1, 2, 0, 1, 2, 0, 1, 2]`.
choice_dataset = tf.data.Dataset.range(3).repeat(3)
result = tf.data.experimental.choose_from_datasets(datasets, choice_dataset)
```
The elements of `result` will be:
```
"foo", "bar", "baz", "foo", "bar", "baz", "foo", "bar", "baz"
```
Args:
datasets: A list of `tf.data.Dataset` objects with compatible structure.
choice_dataset: A `tf.data.Dataset` of scalar `tf.int64` tensors between
`0` and `len(datasets) - 1`.
Returns:
A dataset that interleaves elements from `datasets` according to the values
of `choice_dataset`.
Raises:
TypeError: If the `datasets` or `choice_dataset` arguments have the wrong
type.
"""
return interleave_ops.choose_from_datasets(datasets, choice_dataset)
# Source: tensorflow-master / tensorflow/contrib/data/python/ops/interleave_ops.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Enumerate dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import enumerate_ops
from tensorflow.python.util import deprecation
@deprecation.deprecated(None,
"Use `tf.data.experimental.enumerate_dataset(...)`.")
def enumerate_dataset(start=0):
"""A transformation that enumerate the elements of a dataset.
It is Similar to python's `enumerate`.
For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { 1, 2, 3 }
b = { (7, 8), (9, 10) }
# Enumerating pairs each element of the input dataset with an
# enumeration index, preserving the element's structure.
a.apply(tf.contrib.data.enumerate_dataset(start=5)) == { (5, 1), (6, 2), (7, 3) }
b.apply(tf.contrib.data.enumerate_dataset()) == { (0, (7, 8)), (1, (9, 10)) }
```
Args:
start: A `tf.int64` scalar `tf.Tensor`, representing the start
value for enumeration.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return enumerate_ops.enumerate_dataset(start)
# Source: tensorflow-master / tensorflow/contrib/data/python/ops/enumerate_ops.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ignore_errors dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import error_ops
from tensorflow.python.util import deprecation
@deprecation.deprecated(None, "Use `tf.data.experimental.ignore_errors()`.")
def ignore_errors():
"""Creates a `Dataset` from another `Dataset` and silently ignores any errors.
Use this transformation to produce a dataset that contains the same elements
as the input, but silently drops any elements that caused an error. For
example:
```python
dataset = tf.data.Dataset.from_tensor_slices([1., 2., 0., 4.])
# Computing `tf.debugging.check_numerics(1. / 0.)` will raise an
# InvalidArgumentError.
dataset = dataset.map(lambda x: tf.debugging.check_numerics(1. / x, "error"))
# Using `ignore_errors()` will drop the element that causes an error.
dataset = dataset.apply(
    tf.data.experimental.ignore_errors())  # ==> { 1., 0.5, 0.25 }
```
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return error_ops.ignore_errors()
# Source: tensorflow-master / tensorflow/contrib/data/python/ops/error_ops.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Datasets for random number generators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import random_ops
from tensorflow.python.util import deprecation
class RandomDataset(random_ops.RandomDataset):
"""A `Dataset` of pseudorandom values."""
@deprecation.deprecated(
None, "Use `tf.data.experimental.RandomDataset(...)`.")
def __init__(self, seed=None):
super(RandomDataset, self).__init__(seed)
# Source: tensorflow-master / tensorflow/contrib/data/python/ops/random_ops.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Counter Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import counter
from tensorflow.python.framework import dtypes
from tensorflow.python.util import deprecation
@deprecation.deprecated(None, "Use `tf.data.experimental.Counter(...)`.")
def Counter(start=0, step=1, dtype=dtypes.int64):
"""Creates a `Dataset` that counts from `start` in steps of size `step`.
For example:
```python
Counter() == [0, 1, 2, ...)
Counter(2) == [2, 3, ...)
Counter(2, 5) == [2, 7, 12, ...)
Counter(0, -1) == [0, -1, -2, ...)
Counter(10, -1) == [10, 9, ...)
```
Args:
start: (Optional.) The starting value for the counter. Defaults to 0.
step: (Optional.) The step size for the counter. Defaults to 1.
dtype: (Optional.) The data type for counter elements. Defaults to
`tf.int64`.
Returns:
A `Dataset` of scalar `dtype` elements.
"""
return counter.Counter(start, step, dtype)
# Source: tensorflow-master / tensorflow/contrib/data/python/ops/counter.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stateless random ops which take seed as a tensor input.
DEPRECATED: Use `tf.random.stateless_uniform` rather than
`tf.contrib.stateless.stateless_random_uniform`, and similarly for the other
routines.
Instead of taking `seed` as an attr which initializes a mutable state within
the op, these random ops take `seed` as an input, and the random numbers are
a deterministic function of `shape` and `seed`.
WARNING: These ops are in contrib, and are not stable. They should be
consistent across multiple runs on the same hardware, but only for the same
version of the code.
@@stateless_multinomial
@@stateless_random_uniform
@@stateless_random_normal
@@stateless_truncated_normal
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.stateless_random_ops import stateless_random_uniform
from tensorflow.python.ops.stateless_random_ops import stateless_random_normal
from tensorflow.python.ops.stateless_random_ops import stateless_truncated_normal
from tensorflow.python.ops.stateless_random_ops import stateless_multinomial
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
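# A minimal usage sketch, assuming a TF 1.x build that still ships contrib:
# the same (shape, seed) pair always produces the same values, so two calls
# with identical arguments are element-wise equal. The shape and seed are
# illustrative choices.
def _example_stateless_random_uniform():
  import tensorflow as tf
  seed = tf.constant([12, 34], dtype=tf.int64)  # the seed is a 2-element tensor
  a = stateless_random_uniform(shape=[2, 3], seed=seed)
  b = stateless_random_uniform(shape=[2, 3], seed=seed)
  return a, b  # `a` and `b` contain identical values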
# Source: tensorflow-master / tensorflow/contrib/stateless/__init__.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.stateless API.
The real tests are in python/kernel_tests/random/stateless_random_ops_test.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import stateless
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.platform import test
class StatelessOpsTest(test.TestCase):
def testAPI(self):
self.assertIs(stateless.stateless_random_uniform,
stateless_random_ops.stateless_random_uniform)
self.assertIs(stateless.stateless_random_normal,
stateless_random_ops.stateless_random_normal)
self.assertIs(stateless.stateless_truncated_normal,
stateless_random_ops.stateless_truncated_normal)
self.assertIs(stateless.stateless_multinomial,
stateless_random_ops.stateless_multinomial)
if __name__ == '__main__':
test.main()
# Source: tensorflow-master / tensorflow/contrib/stateless/python/kernel_tests/stateless_random_ops_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model pruning implementation in tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.model_pruning.python.layers.layers import masked_conv2d
from tensorflow.contrib.model_pruning.python.layers.layers import masked_convolution
from tensorflow.contrib.model_pruning.python.layers.layers import masked_fully_connected
from tensorflow.contrib.model_pruning.python.layers.rnn_cells import MaskedBasicLSTMCell
from tensorflow.contrib.model_pruning.python.layers.rnn_cells import MaskedLSTMCell
from tensorflow.contrib.model_pruning.python.learning import train
from tensorflow.contrib.model_pruning.python.pruning import apply_mask
from tensorflow.contrib.model_pruning.python.pruning import get_masked_weights
from tensorflow.contrib.model_pruning.python.pruning import get_masks
from tensorflow.contrib.model_pruning.python.pruning import get_pruning_hparams
from tensorflow.contrib.model_pruning.python.pruning import get_thresholds
from tensorflow.contrib.model_pruning.python.pruning import get_weight_sparsity
from tensorflow.contrib.model_pruning.python.pruning import get_weights
from tensorflow.contrib.model_pruning.python.pruning import Pruning
from tensorflow.contrib.model_pruning.python.strip_pruning_vars_lib import graph_def_from_checkpoint
from tensorflow.contrib.model_pruning.python.strip_pruning_vars_lib import strip_pruning_vars_fn
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'masked_convolution', 'masked_conv2d', 'masked_fully_connected',
'MaskedBasicLSTMCell', 'MaskedLSTMCell', 'train', 'apply_mask',
'get_masked_weights', 'get_masks', 'get_pruning_hparams', 'get_thresholds',
'get_weights', 'get_weight_sparsity', 'Pruning', 'strip_pruning_vars_fn',
'graph_def_from_checkpoint'
]
remove_undocumented(__name__, _allowed_symbols)
# Source: tensorflow-master / tensorflow/contrib/model_pruning/__init__.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for strip_pruning_vars."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.model_pruning.python import pruning
from tensorflow.contrib.model_pruning.python import strip_pruning_vars_lib
from tensorflow.contrib.model_pruning.python.layers import layers
from tensorflow.contrib.model_pruning.python.layers import rnn_cells
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell as tf_rnn_cells
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import training_util
def _get_number_pruning_vars(graph_def):
number_vars = 0
for node in graph_def.node:
if re.match(r"^.*(mask$)|(threshold$)", node.name):
number_vars += 1
return number_vars
def _get_node_names(tensor_names):
return [
strip_pruning_vars_lib._node_name(tensor_name)
for tensor_name in tensor_names
]
class StripPruningVarsTest(test.TestCase):
def setUp(self):
param_list = [
"pruning_frequency=1", "begin_pruning_step=1", "end_pruning_step=10",
"nbins=2048", "threshold_decay=0.0"
]
self.initial_graph = ops.Graph()
self.initial_graph_def = None
self.final_graph = ops.Graph()
self.final_graph_def = None
self.pruning_spec = ",".join(param_list)
with self.initial_graph.as_default():
self.sparsity = variables.Variable(0.5, name="sparsity")
self.global_step = training_util.get_or_create_global_step()
self.increment_global_step = state_ops.assign_add(self.global_step, 1)
self.mask_update_op = None
def _build_convolutional_model(self, number_of_layers):
# Create a graph with several conv2d layers
kernel_size = 3
base_depth = 4
depth_step = 7
height, width = 7, 9
with variable_scope.variable_scope("conv_model"):
input_tensor = array_ops.ones((8, height, width, base_depth))
top_layer = input_tensor
for ix in range(number_of_layers):
top_layer = layers.masked_conv2d(
top_layer,
base_depth + (ix + 1) * depth_step,
kernel_size,
scope="Conv_" + str(ix))
return top_layer
def _build_fully_connected_model(self, number_of_layers):
base_depth = 4
depth_step = 7
input_tensor = array_ops.ones((8, base_depth))
top_layer = input_tensor
with variable_scope.variable_scope("fc_model"):
for ix in range(number_of_layers):
top_layer = layers.masked_fully_connected(
top_layer, base_depth + (ix + 1) * depth_step)
return top_layer
def _build_lstm_model(self, number_of_layers):
batch_size = 8
dim = 10
inputs = variables.Variable(random_ops.random_normal([batch_size, dim]))
def lstm_cell():
return rnn_cells.MaskedBasicLSTMCell(
dim, forget_bias=0.0, state_is_tuple=True, reuse=False)
cell = tf_rnn_cells.MultiRNNCell(
[lstm_cell() for _ in range(number_of_layers)], state_is_tuple=True)
outputs = rnn.static_rnn(
cell, [inputs],
initial_state=cell.zero_state(batch_size, dtypes.float32))
return outputs
def _prune_model(self, session):
pruning_hparams = pruning.get_pruning_hparams().parse(self.pruning_spec)
p = pruning.Pruning(pruning_hparams, sparsity=self.sparsity)
self.mask_update_op = p.conditional_mask_update_op()
variables.global_variables_initializer().run()
for _ in range(20):
session.run(self.mask_update_op)
session.run(self.increment_global_step)
def _get_outputs(self, session, input_graph, tensors_list, graph_prefix=None):
outputs = []
for output_tensor in tensors_list:
if graph_prefix:
output_tensor = graph_prefix + "/" + output_tensor
outputs.append(
session.run(session.graph.get_tensor_by_name(output_tensor)))
return outputs
def _get_initial_outputs(self, output_tensor_names_list):
with self.session(graph=self.initial_graph) as sess1:
self._prune_model(sess1)
reference_outputs = self._get_outputs(sess1, self.initial_graph,
output_tensor_names_list)
self.initial_graph_def = graph_util.convert_variables_to_constants(
sess1, sess1.graph.as_graph_def(),
_get_node_names(output_tensor_names_list))
return reference_outputs
def _get_final_outputs(self, output_tensor_names_list):
self.final_graph_def = strip_pruning_vars_lib.strip_pruning_vars_fn(
self.initial_graph_def, _get_node_names(output_tensor_names_list))
_ = importer.import_graph_def(self.final_graph_def, name="final")
with self.test_session(self.final_graph) as sess2:
final_outputs = self._get_outputs(
sess2,
self.final_graph,
output_tensor_names_list,
graph_prefix="final")
return final_outputs
def _check_removal_of_pruning_vars(self, number_masked_layers):
self.assertEqual(
_get_number_pruning_vars(self.initial_graph_def), number_masked_layers)
self.assertEqual(_get_number_pruning_vars(self.final_graph_def), 0)
def _check_output_equivalence(self, initial_outputs, final_outputs):
for initial_output, final_output in zip(initial_outputs, final_outputs):
self.assertAllEqual(initial_output, final_output)
def testConvolutionalModel(self):
with self.initial_graph.as_default():
number_masked_conv_layers = 5
top_layer = self._build_convolutional_model(number_masked_conv_layers)
output_tensor_names = [top_layer.name]
initial_outputs = self._get_initial_outputs(output_tensor_names)
# Remove pruning-related nodes.
with self.final_graph.as_default():
final_outputs = self._get_final_outputs(output_tensor_names)
# Check that the final graph has no pruning-related vars
self._check_removal_of_pruning_vars(number_masked_conv_layers)
# Check that outputs remain the same after removal of pruning-related nodes
self._check_output_equivalence(initial_outputs, final_outputs)
def testFullyConnectedModel(self):
with self.initial_graph.as_default():
number_masked_fc_layers = 3
top_layer = self._build_fully_connected_model(number_masked_fc_layers)
output_tensor_names = [top_layer.name]
initial_outputs = self._get_initial_outputs(output_tensor_names)
# Remove pruning-related nodes.
with self.final_graph.as_default():
final_outputs = self._get_final_outputs(output_tensor_names)
# Check that the final graph has no pruning-related vars
self._check_removal_of_pruning_vars(number_masked_fc_layers)
# Check that outputs remain the same after removal of pruning-related nodes
self._check_output_equivalence(initial_outputs, final_outputs)
def testLSTMModel(self):
with self.initial_graph.as_default():
number_masked_lstm_layers = 2
outputs = self._build_lstm_model(number_masked_lstm_layers)
output_tensor_names = [outputs[0][0].name]
initial_outputs = self._get_initial_outputs(output_tensor_names)
# Remove pruning-related nodes.
with self.final_graph.as_default():
final_outputs = self._get_final_outputs(output_tensor_names)
# Check that the final graph has no pruning-related vars
self._check_removal_of_pruning_vars(number_masked_lstm_layers)
# Check that outputs remain the same after removal of pruning-related nodes
self._check_output_equivalence(initial_outputs, final_outputs)
if __name__ == "__main__":
test.main()
# Source: tensorflow-master / tensorflow/contrib/model_pruning/python/strip_pruning_vars_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions to add support for magnitude-based model pruning.
# Adds variables and ops to the graph to enable
# elementwise masking of weights
apply_mask(weights)
# Returns a list containing the sparsity of each of the weight tensors
get_weight_sparsity()
# Returns a list of all the masked weight tensorflow variables
get_masked_weights()
# Returns a list of all the mask tensorflow variables
get_masks()
# Returns a list of all the thresholds
get_thresholds()
# Returns a list of all the weight tensors that have been masked
get_weights()
  The Pruning class uses a tf.HParams object to set up the
  parameters for model pruning. Here's a typical usage:
# Parse pruning hyperparameters
pruning_hparams = pruning.get_pruning_hparams().parse(FLAGS.pruning_hparams)
# Create a pruning object using the pruning_hparams
p = pruning.Pruning(pruning_hparams)
# Add mask update ops to the graph
mask_update_op = p.conditional_mask_update_op()
# Add the summaries
p.add_pruning_summaries()
# Run the op
session.run(mask_update_op)
  # A Pruning object also accepts an externally defined sparsity variable:
sparsity = tf.Variable(0.5, name = "ConstantSparsity")
p = pruning.Pruning(pruning_hparams, sparsity=sparsity)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.model_pruning.python import pruning_utils
from tensorflow.contrib.model_pruning.python.layers import core_layers as core
from tensorflow.contrib.training.python.training import hparam
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import training_util
_MASK_COLLECTION = core.MASK_COLLECTION
_THRESHOLD_COLLECTION = core.THRESHOLD_COLLECTION
_MASKED_WEIGHT_COLLECTION = core.MASKED_WEIGHT_COLLECTION
_WEIGHT_COLLECTION = core.WEIGHT_COLLECTION
_MASKED_WEIGHT_NAME = core.MASKED_WEIGHT_NAME
def apply_mask(x, scope=''):
"""Apply mask to a given weight tensor.
Args:
x: Input weight tensor
scope: The current variable scope. Defaults to "".
Returns:
Tensor representing masked_weights
"""
mask = pruning_utils.weight_mask_variable(x, scope)
threshold = pruning_utils.weight_threshold_variable(x, scope)
  # Add masked_weights in the weights name scope so as to make it easier
# for the quantization library to add quant ops.
masked_weights = math_ops.multiply(mask, x, _MASKED_WEIGHT_NAME)
  # Make sure the mask for a given variable is not added multiple times to the
  # collection. This is particularly important when applying a mask to RNN
  # weight variables.
if mask not in ops.get_collection_ref(_MASK_COLLECTION):
ops.add_to_collection(_THRESHOLD_COLLECTION, threshold)
ops.add_to_collection(_MASK_COLLECTION, mask)
ops.add_to_collection(_MASKED_WEIGHT_COLLECTION, masked_weights)
ops.add_to_collection(_WEIGHT_COLLECTION, x)
return masked_weights
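# Usage sketch for apply_mask (comment only, not executed here; the variable
# name `weights` and its shape are hypothetical). This mirrors how the pruning
# tests call it:
#
#   weights = variable_scope.get_variable('weights', shape=[10, 10])
#   masked_weights = apply_mask(weights, variable_scope.get_variable_scope())
#   # The mask and threshold are now retrievable through the collections:
#   #   get_masks(), get_thresholds(), get_masked_weights(), get_weights()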
def get_masked_weights():
return ops.get_collection(_MASKED_WEIGHT_COLLECTION)
def get_masks():
return ops.get_collection(_MASK_COLLECTION)
def get_thresholds():
return ops.get_collection(_THRESHOLD_COLLECTION)
def get_weights():
return ops.get_collection(_WEIGHT_COLLECTION)
def get_weight_sparsity():
"""Get sparsity of the weights.
Args:
None
Returns:
A list containing the sparsity of each of the weight tensors
"""
masks = get_masks()
return [nn_impl.zero_fraction(mask) for mask in masks]
def get_pruning_hparams():
"""Get a tf.HParams object with the default values for the hyperparameters.
name: string
name of the pruning specification. Used for adding summaries and ops under
a common tensorflow name_scope
begin_pruning_step: integer
the global step at which to begin pruning
end_pruning_step: integer
the global step at which to terminate pruning. Defaults to -1 implying
that pruning continues till the training stops
weight_sparsity_map: list of strings
      comma separated list of weight variable name regex:target sparsity pairs.
      For layers/weights not in this list, sparsity as specified by the
      target_sparsity hyperparameter is used.
      E.g. [conv1:0.9,conv2/kernel:0.8]
threshold_decay: float
the decay factor to use for exponential decay of the thresholds
pruning_frequency: integer
How often should the masks be updated? (in # of global_steps)
nbins: integer
number of bins to use for histogram computation
block_height: integer
number of rows in a block (defaults to 1)
block_width: integer
number of cols in a block (defaults to 1)
block_pooling_function: string
Whether to perform average (AVG) or max (MAX) pooling in the block
(default: AVG)
initial_sparsity: float
initial sparsity value
target_sparsity: float
target sparsity value
sparsity_function_begin_step: integer
      the global step at which the gradual sparsity function begins to
take effect
sparsity_function_end_step: integer
the global step used as the end point for the gradual sparsity function
sparsity_function_exponent: float
exponent = 1 is linearly varying sparsity between initial and final.
exponent > 1 varies more slowly towards the end than the beginning
    use_tpu: boolean
Indicates whether to use TPU
We use the following sparsity function:
num_steps = (sparsity_function_end_step -
sparsity_function_begin_step)/pruning_frequency
sparsity(step) = (initial_sparsity - target_sparsity)*
[1-step/(num_steps -1)]**exponent + target_sparsity
Args:
None
Returns:
tf.HParams object initialized to default values
"""
return hparam.HParams(
name='model_pruning',
begin_pruning_step=0,
end_pruning_step=-1,
weight_sparsity_map=[''],
threshold_decay=0.0,
pruning_frequency=10,
nbins=256,
block_height=1,
block_width=1,
block_pooling_function='AVG',
initial_sparsity=0.0,
target_sparsity=0.5,
sparsity_function_begin_step=0,
sparsity_function_end_step=100,
sparsity_function_exponent=3.0,
use_tpu=False)
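# Worked example of the gradual sparsity schedule described above (a sketch
# using the default hyperparameters returned by get_pruning_hparams()):
#   p = clip((step - begin_step) / (end_step - begin_step), 0.0, 1.0)
#   sparsity(step) = (initial_sparsity - target_sparsity) * (1 - p)**exponent
#                    + target_sparsity
# With initial_sparsity=0.0, target_sparsity=0.5, begin_step=0, end_step=100
# and exponent=3.0, at step 50: p = 0.5 and
#   sparsity = (0.0 - 0.5) * 0.5**3 + 0.5 = 0.4375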
class Pruning(object):
def __init__(self, spec=None, global_step=None, sparsity=None):
"""Set up the specification for model pruning.
If a spec is provided, the sparsity is set up based on the sparsity_function
in the spec. The effect of sparsity_function is overridden if the sparsity
variable is passed to the constructor. This enables setting up arbitrary
    sparsity profiles externally and passing them to the pruning functions.
Args:
spec: Pruning spec as defined in pruning.proto
global_step: A tensorflow variable that is used while setting up the
sparsity function
sparsity: A tensorflow scalar variable storing the sparsity
"""
# Pruning specification
self._spec = spec if spec else get_pruning_hparams()
# Sanity check for pruning hparams
self._validate_spec()
# A tensorflow variable that tracks the sparsity function.
# If not provided as input, the graph must already contain the global_step
# variable before calling this constructor.
self._global_step = self._setup_global_step(global_step)
# Stores the tensorflow sparsity variable.
# Built using self._setup_sparsity() or provided externally
self._sparsity = (sparsity
if sparsity is not None else self._setup_sparsity())
# List of tensorflow assignments ops for new masks and thresholds
self._assign_ops = []
# Tensorflow variable keeping track of the last global step when the masks
# were updated
self._last_update_step = self._setup_last_update_step()
# Block dimensions
self._block_dim = [self._spec.block_height, self._spec.block_width]
# Block pooling function
self._block_pooling_function = self._spec.block_pooling_function
# Mapping of weight names and target sparsity
self._weight_sparsity_map = self._get_weight_sparsity_map()
def _validate_spec(self):
spec = self._spec
if spec.begin_pruning_step < 0:
raise ValueError('Illegal value for begin_pruning_step')
if spec.begin_pruning_step >= spec.end_pruning_step:
if spec.end_pruning_step != -1:
raise ValueError(
            'Pruning must begin before it can end. begin_step=%d, end_step=%d. '
            'Set end_pruning_step to -1 if pruning is required till training '
'stops' % (spec.begin_pruning_step, spec.end_pruning_step))
if spec.sparsity_function_begin_step < 0:
raise ValueError('Illegal value for sparsity_function_begin_step')
if spec.sparsity_function_begin_step >= spec.sparsity_function_end_step:
raise ValueError(
'Sparsity function requires begin_step < end_step')
if not 0.0 <= spec.threshold_decay < 1.0:
raise ValueError('threshold_decay must be in range [0,1)')
if not 0.0 <= spec.initial_sparsity < 1.0:
raise ValueError('initial_sparsity must be in range [0,1)')
if not 0.0 <= spec.target_sparsity < 1.0:
raise ValueError('target_sparsity must be in range [0,1)')
def _setup_global_step(self, global_step):
graph_global_step = global_step
if graph_global_step is None:
graph_global_step = training_util.get_global_step()
return math_ops.cast(graph_global_step, dtypes.int32)
def _setup_sparsity(self):
begin_step = self._spec.sparsity_function_begin_step
end_step = self._spec.sparsity_function_end_step
initial_sparsity = self._spec.initial_sparsity
target_sparsity = self._spec.target_sparsity
exponent = self._spec.sparsity_function_exponent
with ops.name_scope(self._spec.name):
p = math_ops.minimum(
1.0,
math_ops.maximum(
0.0,
math_ops.div(
math_ops.cast(self._global_step - begin_step, dtypes.float32),
end_step - begin_step)))
sparsity = math_ops.add(
math_ops.multiply(initial_sparsity - target_sparsity,
math_ops.pow(1 - p, exponent)),
target_sparsity,
name='sparsity')
return sparsity
def _setup_last_update_step(self):
with variable_scope.variable_scope(
self._spec.name, use_resource=self._spec.use_tpu) as scope:
try:
last_update_step = variable_scope.get_variable(
'last_mask_update_step', [],
initializer=init_ops.zeros_initializer(),
trainable=False,
dtype=dtypes.int32)
except ValueError:
scope.reuse_variables()
last_update_step = variable_scope.get_variable(
'last_mask_update_step', dtype=dtypes.int32)
return last_update_step
def _get_weight_sparsity_map(self):
"""Return the map of weight_name:sparsity parsed from the hparams."""
weight_sparsity_map = {}
val_list = self._spec.weight_sparsity_map
filtered_val_list = [l for l in val_list if l]
for val in filtered_val_list:
weight_name, sparsity = val.split(':')
if float(sparsity) >= 1.0:
raise ValueError('Weight sparsity can not exceed 1.0')
weight_sparsity_map[weight_name] = float(sparsity)
return weight_sparsity_map
def _get_sparsity(self, weight_name):
"""Return target sparsity for the given layer/weight name."""
target_sparsity = [
sparsity for regexp, sparsity in self._weight_sparsity_map.items()
if re.match(regexp, weight_name)
]
if not target_sparsity:
return self._sparsity
if len(target_sparsity) > 1:
raise ValueError(
'Multiple matches in weight_sparsity_map for weight %s' % weight_name)
# TODO(suyoggupta): This will work when initial_sparsity = 0. Generalize
# to handle other cases as well.
    return math_ops.multiply(
self._sparsity,
math_ops.div(target_sparsity[0], self._spec.target_sparsity))
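  # Worked example of the scaling above (a sketch): with target_sparsity=0.5
  # and weight_sparsity_map=[layer2/weights:0.75], a weight named
  # 'layer2/weights' is assigned self._sparsity * (0.75 / 0.5), i.e. 1.5x the
  # global schedule, so it reaches 0.75 when the schedule reaches 0.5. This is
  # what testWeightSpecificSparsity in pruning_test.py checks: final per-layer
  # sparsities of [0.5, 0.75].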
def _update_mask(self, weights, threshold):
"""Updates the mask for a given weight tensor.
    This function first computes the cdf of the weight tensor, and estimates
the threshold value such that 'desired_sparsity' fraction of weights
have magnitude less than the threshold.
Args:
weights: The weight tensor that needs to be masked.
threshold: The current threshold value. The function will compute a new
threshold and return the exponential moving average using the current
value of threshold
Returns:
new_threshold: The new value of the threshold based on weights, and
sparsity at the current global_step
new_mask: A numpy array of the same size and shape as weights containing
0 or 1 to indicate which of the values in weights falls below
the threshold
Raises:
ValueError: if sparsity is not defined
"""
if self._sparsity is None:
raise ValueError('Sparsity variable undefined')
sparsity = self._get_sparsity(weights.op.name)
with ops.name_scope(weights.op.name + '_pruning_ops'):
abs_weights = math_ops.abs(weights)
k = math_ops.cast(
math_ops.round(
math_ops.cast(array_ops.size(abs_weights), dtypes.float32) *
(1 - sparsity)), dtypes.int32)
# Sort the entire array
values, _ = nn_ops.top_k(
array_ops.reshape(abs_weights, [-1]), k=array_ops.size(abs_weights))
# Grab the (k-1) th value
current_threshold = array_ops.gather(values, k - 1)
smoothed_threshold = math_ops.add_n([
math_ops.multiply(current_threshold, 1 - self._spec.threshold_decay),
math_ops.multiply(threshold, self._spec.threshold_decay)
])
new_mask = math_ops.cast(
math_ops.greater_equal(abs_weights, smoothed_threshold),
dtypes.float32)
return smoothed_threshold, new_mask
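  # Numeric sketch of the threshold selection above (assumed inputs: 100
  # weights with magnitudes 1..100, sparsity=0.95, threshold_decay=0):
  #   k = round(100 * (1 - 0.95)) = 5, so only the 5 largest magnitudes
  #   survive. The (k-1)-th sorted value is 96, which becomes the new
  #   threshold, and the resulting mask keeps exactly 5 non-zero weights, as
  #   verified by testUpdateSingleMask in pruning_test.py.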
def _maybe_update_block_mask(self, weights, threshold):
"""Performs block-granular masking of the weights.
Block pruning occurs only if the block_height or block_width is > 1 and
if the weight tensor, when squeezed, has ndims = 2. Otherwise, elementwise
pruning occurs.
Args:
weights: The weight tensor that needs to be masked.
threshold: The current threshold value. The function will compute a new
threshold and return the exponential moving average using the current
value of threshold
Returns:
new_threshold: The new value of the threshold based on weights, and
sparsity at the current global_step
new_mask: A numpy array of the same size and shape as weights containing
0 or 1 to indicate which of the values in weights falls below
the threshold
Raises:
ValueError: if block pooling function is not AVG or MAX
"""
squeezed_weights = array_ops.squeeze(weights)
if squeezed_weights.get_shape().ndims != 2 or self._block_dim == [1, 1]:
return self._update_mask(weights, threshold)
if self._block_pooling_function not in ['AVG', 'MAX']:
raise ValueError('Unknown pooling function for block sparsity: %s' %
self._block_pooling_function)
with ops.name_scope(weights.op.name + '_pruning_ops'):
abs_weights = math_ops.abs(squeezed_weights)
pool_window = [self._block_dim[0], self._block_dim[1]]
pool_fn = pruning_utils.factorized_pool
squeeze_axis = None
if not self._spec.use_tpu:
pool_fn = nn_ops.pool
abs_weights = array_ops.reshape(
abs_weights,
[1, abs_weights.get_shape()[0],
abs_weights.get_shape()[1], 1])
squeeze_axis = [0, 3]
pooled_weights = pool_fn(
abs_weights,
window_shape=pool_window,
pooling_type=self._block_pooling_function,
strides=pool_window,
padding='SAME',
name=weights.op.name + '_pooled')
if pooled_weights.get_shape().ndims != 2:
pooled_weights = array_ops.squeeze(pooled_weights, axis=squeeze_axis)
smoothed_threshold, new_mask = self._update_mask(pooled_weights,
threshold)
updated_mask = pruning_utils.expand_tensor(new_mask, self._block_dim)
sliced_mask = array_ops.slice(
updated_mask, [0, 0],
[squeezed_weights.get_shape()[0],
squeezed_weights.get_shape()[1]])
return smoothed_threshold, array_ops.reshape(sliced_mask,
array_ops.shape(weights))
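  # Block-masking sketch grounded in testBlockMasking (pruning_test.py): for a
  # 4x4 weight tensor with block_height=block_width=2 and sparsity 0.5, the
  # absolute weights are pooled down to a 2x2 tensor, _update_mask zeroes the
  # two smallest pooled blocks, and expand_tensor grows the 2x2 mask back to
  # 4x4 so that whole 2x2 blocks of weights are kept or dropped together.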
def _get_mask_assign_ops(self):
# Make sure the assignment ops have not already been added to the list
if self._assign_ops:
raise ValueError(
'Assign op list not empty. _get_mask_assign_ops() called twice?')
masks = get_masks()
weights = get_weights()
thresholds = get_thresholds()
if len(masks) != len(thresholds):
raise ValueError(
'Number of masks %s and number of thresholds %s mismatch' %
(len(masks), len(thresholds)))
for index, mask in enumerate(masks):
threshold = thresholds[index]
weight = weights[index]
is_partitioned = isinstance(weight, variables.PartitionedVariable)
if is_partitioned:
weight = weight.as_tensor()
new_threshold, new_mask = self._maybe_update_block_mask(weight, threshold)
self._assign_ops.append(
pruning_utils.variable_assign(threshold, new_threshold))
self._assign_ops.append(
pruning_utils.partitioned_variable_assign(mask, new_mask)
if is_partitioned else pruning_utils.variable_assign(mask, new_mask))
def mask_update_op(self):
with ops.name_scope(self._spec.name):
if not self._assign_ops:
self._get_mask_assign_ops()
with ops.control_dependencies([
state_ops.assign(
self._last_update_step,
self._global_step,
name='last_mask_update_step_assign')
]):
with ops.control_dependencies(self._assign_ops):
logging.info('Updating masks.')
return control_flow_ops.no_op('mask_update')
def conditional_mask_update_op(self):
def maybe_update_masks():
with ops.name_scope(self._spec.name):
is_step_within_pruning_range = math_ops.logical_and(
math_ops.greater_equal(self._global_step,
self._spec.begin_pruning_step),
# If end_pruning_step is negative, keep pruning forever!
math_ops.logical_or(
math_ops.less_equal(self._global_step,
self._spec.end_pruning_step),
math_ops.less(self._spec.end_pruning_step, 0)))
is_pruning_step = math_ops.less_equal(
math_ops.add(self._last_update_step, self._spec.pruning_frequency),
self._global_step)
return math_ops.logical_and(is_step_within_pruning_range,
is_pruning_step)
def mask_update_op():
return self.mask_update_op()
def no_update_op():
return control_flow_ops.no_op()
return control_flow_ops.cond(maybe_update_masks(), mask_update_op,
no_update_op)
def add_pruning_summaries(self):
"""Adds summaries of weight sparsities and thresholds."""
with ops.name_scope(self._spec.name + '_summaries'):
summary.scalar('sparsity', self._sparsity)
summary.scalar('last_mask_update_step', self._last_update_step)
masks = get_masks()
thresholds = get_thresholds()
for mask, threshold in zip(masks, thresholds):
summary.scalar(mask.op.name + '/sparsity', nn_impl.zero_fraction(mask))
summary.scalar(threshold.op.name + '/threshold', threshold)
def print_hparams(self):
logging.info(self._spec.to_json())
|
tensorflow-master
|
tensorflow/contrib/model_pruning/python/pruning.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utility functions in pruning_utils.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.contrib.model_pruning.python import pruning_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@parameterized.named_parameters(
("Input_32x32_block_1x1", [32, 32], [1, 1]),
# block size 6x6
("Input_3x3_block_6x6", [3, 3], [6, 6]),
("Input_32x32_block_6x6", [32, 32], [6, 6]),
("Input_2x32_block_6x6", [2, 32], [6, 6]),
("Input_32x2_block_6x6", [32, 2], [6, 6]),
("Input_30x30_block_6x6", [30, 30], [6, 6]),
# block size 4x4
("Input_32x32_block_4x4", [32, 32], [4, 4]),
("Input_2x32_block_4x4", [2, 32], [4, 4]),
("Input_32x2_block_4x4", [32, 2], [4, 4]),
("Input_30x30_block_4x4", [30, 30], [4, 4]),
# block size 1x4
("Input_32x32_block_1x4", [32, 32], [1, 4]),
("Input_2x32_block_1x4", [2, 32], [1, 4]),
("Input_32x2_block_1x4", [32, 2], [1, 4]),
("Input_30x30_block_1x4", [30, 30], [1, 4]),
# block size 4x1
("Input_32x32_block_4x1", [32, 32], [4, 1]),
("Input_2x32_block_4x1", [2, 32], [4, 1]),
("Input_32x2_block_4x1", [32, 2], [4, 1]),
("Input_30x30_block_4x1", [30, 30], [4, 1]))
class PruningUtilsParameterizedTest(test.TestCase, parameterized.TestCase):
def _compare_pooling_methods(self, weights, pooling_kwargs):
with self.cached_session():
variables.global_variables_initializer().run()
pooled_weights_tf = array_ops.squeeze(
nn_ops.pool(
array_ops.reshape(
weights,
[1, weights.get_shape()[0],
weights.get_shape()[1], 1]), **pooling_kwargs),
axis=[0, 3])
pooled_weights_factorized_pool = pruning_utils.factorized_pool(
weights, **pooling_kwargs)
self.assertAllClose(pooled_weights_tf.eval(),
pooled_weights_factorized_pool.eval())
def _compare_expand_tensor_with_kronecker_product(self, tensor, block_dim):
with self.cached_session() as session:
variables.global_variables_initializer().run()
expanded_tensor = pruning_utils.expand_tensor(tensor, block_dim)
kronecker_product = pruning_utils.kronecker_product(
tensor, array_ops.ones(block_dim))
expanded_tensor_val, kronecker_product_val = session.run(
[expanded_tensor, kronecker_product])
self.assertAllEqual(expanded_tensor_val, kronecker_product_val)
def testFactorizedAvgPool(self, input_shape, window_shape):
weights = variable_scope.get_variable("weights", shape=input_shape)
pooling_kwargs = {
"window_shape": window_shape,
"pooling_type": "AVG",
"strides": window_shape,
"padding": "SAME"
}
self._compare_pooling_methods(weights, pooling_kwargs)
def testFactorizedMaxPool(self, input_shape, window_shape):
weights = variable_scope.get_variable("weights", shape=input_shape)
pooling_kwargs = {
"window_shape": window_shape,
"pooling_type": "MAX",
"strides": window_shape,
"padding": "SAME"
}
self._compare_pooling_methods(weights, pooling_kwargs)
def testExpandTensor(self, input_shape, block_dim):
weights = random_ops.random_normal(shape=input_shape)
self._compare_expand_tensor_with_kronecker_product(weights, block_dim)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/model_pruning/python/pruning_utils_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to remove pruning-related ops and variables from a GraphDef.
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as saver_lib
def _node_name(tensor_name):
"""Remove the trailing ':0' from the variable name."""
if ':' not in tensor_name:
return tensor_name
return tensor_name.split(':')[0]
def _tensor_name(node_name):
"""Appends the :0 in the op name to get the canonical tensor name."""
if ':' in node_name:
return node_name
return node_name + ':0'
def _get_masked_weights(input_graph_def):
"""Extracts masked_weights from the graph as a dict of {var_name:ndarray}."""
input_graph = ops.Graph()
with input_graph.as_default():
importer.import_graph_def(input_graph_def, name='')
with session.Session(graph=input_graph) as sess:
masked_weights_dict = {}
for node in input_graph_def.node:
if 'masked_weight' in node.name:
masked_weight_val = sess.run(
sess.graph.get_tensor_by_name(_tensor_name(node.name)))
logging.info(
'%s has %d values, %1.2f%% zeros \n', node.name,
np.size(masked_weight_val),
100 - float(100 * np.count_nonzero(masked_weight_val)) /
np.size(masked_weight_val))
masked_weights_dict.update({node.name: masked_weight_val})
return masked_weights_dict
def strip_pruning_vars_fn(input_graph_def, output_node_names):
"""Removes mask variable from the graph.
Replaces the masked_weight tensor with element-wise multiplication of mask
and the corresponding weight variable.
Args:
input_graph_def: A GraphDef in which the variables have been converted to
constants. This is typically the output of
      tf.graph_util.convert_variables_to_constants()
output_node_names: List of name strings for the result nodes of the graph
Returns:
A GraphDef in which pruning-related variables have been removed
"""
masked_weights_dict = _get_masked_weights(input_graph_def)
pruned_graph_def = graph_pb2.GraphDef()
# Replace masked_weight with a const op containing the
# result of tf.multiply(mask,weight)
for node in input_graph_def.node:
output_node = node_def_pb2.NodeDef()
if 'masked_weight' in node.name:
output_node.op = 'Const'
output_node.name = node.name
dtype = node.attr['T']
data = masked_weights_dict[node.name]
output_node.attr['dtype'].CopyFrom(dtype)
output_node.attr['value'].CopyFrom(
attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(data)))
else:
output_node.CopyFrom(node)
pruned_graph_def.node.extend([output_node])
# Remove stranded nodes: mask and weights
return graph_util.extract_sub_graph(pruned_graph_def, output_node_names)
def graph_def_from_checkpoint(checkpoint_dir, output_node_names):
"""Converts checkpoint data to GraphDef.
Reads the latest checkpoint data and produces a GraphDef in which the
variables have been converted to constants.
Args:
checkpoint_dir: Path to the checkpoints.
output_node_names: List of name strings for the result nodes of the graph.
Returns:
A GraphDef from the latest checkpoint
Raises:
ValueError: if no checkpoint is found
"""
checkpoint_path = saver_lib.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None:
raise ValueError('Could not find a checkpoint at: {0}.'
.format(checkpoint_dir))
saver_for_restore = saver_lib.import_meta_graph(
checkpoint_path + '.meta', clear_devices=True)
with session.Session() as sess:
saver_for_restore.restore(sess, checkpoint_path)
graph_def = ops.get_default_graph().as_graph_def()
output_graph_def = graph_util.convert_variables_to_constants(
sess, graph_def, output_node_names)
return output_graph_def
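# End-to-end sketch (mirrors strip_pruning_vars.py; the checkpoint directory
# and output node name below are placeholders):
#   graph_def = graph_def_from_checkpoint('/tmp/model_ckpts', ['softmax'])
#   pruned_graph_def = strip_pruning_vars_fn(graph_def, ['softmax'])
#   graph_io.write_graph(pruned_graph_def, '/tmp', 'pruning_stripped.pb',
#                        as_text=False)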
|
tensorflow-master
|
tensorflow/contrib/model_pruning/python/strip_pruning_vars_lib.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Removes the auxiliary variables and ops added by the pruning library.
Usage:
bazel build tensorflow/contrib/model_pruning:strip_pruning_vars && \
bazel-bin/tensorflow/contrib/model_pruning/strip_pruning_vars \
--checkpoint_dir=/tmp/model_ckpts \
--output_node_names=softmax \
--output_dir=/tmp \
--filename=pruning_stripped.pb
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
from tensorflow.contrib.model_pruning.python import strip_pruning_vars_lib
from tensorflow.python.framework import graph_io
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
FLAGS = None
def strip_pruning_vars(checkpoint_dir, output_node_names, output_dir, filename):
"""Remove pruning-related auxiliary variables and ops from the graph.
Accepts training checkpoints and produces a GraphDef in which the pruning vars
and ops have been removed.
Args:
checkpoint_dir: Path to the checkpoints.
output_node_names: The name of the output nodes, comma separated.
output_dir: Directory where to write the graph.
filename: Output GraphDef file name.
Returns:
None
Raises:
    ValueError: if output_node_names is not provided.
"""
if not output_node_names:
raise ValueError(
        'Need to specify at least 1 output node through output_node_names flag')
output_node_names = output_node_names.replace(' ', '').split(',')
initial_graph_def = strip_pruning_vars_lib.graph_def_from_checkpoint(
checkpoint_dir, output_node_names)
final_graph_def = strip_pruning_vars_lib.strip_pruning_vars_fn(
initial_graph_def, output_node_names)
graph_io.write_graph(final_graph_def, output_dir, filename, as_text=False)
logging.info('\nFinal graph written to %s', os.path.join(
output_dir, filename))
def main(unused_args):
return strip_pruning_vars(FLAGS.checkpoint_dir, FLAGS.output_node_names,
FLAGS.output_dir, FLAGS.filename)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--checkpoint_dir', type=str, default='', help='Path to the checkpoints.')
parser.add_argument(
'--output_node_names',
type=str,
default='',
help='The name of the output nodes, comma separated.')
parser.add_argument(
'--output_dir',
type=str,
default='/tmp',
help='Directory where to write the graph.')
parser.add_argument(
'--filename',
type=str,
default='pruning_stripped.pb',
help='Output \'GraphDef\' file name.')
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-master
|
tensorflow/contrib/model_pruning/python/strip_pruning_vars.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper around tf-slim's training code contrib/slim/python/slim/learning.py
to support training of pruned models
*******************************************************************
* A simple working training script with support for model pruning *
*******************************************************************
# Load data and create the model:
images, labels = LoadData(...)
predictions = MyModel(images)
# Define the loss:
slim.losses.log_loss(predictions, labels)
total_loss = slim.losses.get_total_loss()
# Define the optimizer:
optimizer = tf.compat.v1.train.MomentumOptimizer(FLAGS.learning_rate,
FLAGS.momentum)
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Parse pruning hyperparameters
pruning_hparams = pruning.get_pruning_hparams().parse(FLAGS.pruning_hparams)
# Create a pruning object using the pruning_hparams
p = pruning.Pruning(pruning_hparams)
# Add mask update ops to the graph
mask_update_op = p.conditional_mask_update_op()
# Run training.
learning.train(train_op,
my_log_dir,
mask_update_op)
see contrib/slim/python/slim/learning.py for additional examples
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import slim as _slim
_USE_DEFAULT = 0
train_step = _slim.learning.train_step
def train(train_op,
logdir,
mask_update_op,
train_step_fn=train_step,
train_step_kwargs=_USE_DEFAULT,
log_every_n_steps=1,
graph=None,
master='',
is_chief=True,
global_step=None,
number_of_steps=None,
init_op=_USE_DEFAULT,
init_feed_dict=None,
local_init_op=_USE_DEFAULT,
init_fn=None,
ready_op=_USE_DEFAULT,
summary_op=_USE_DEFAULT,
save_summaries_secs=600,
summary_writer=_USE_DEFAULT,
startup_delay_steps=0,
saver=None,
save_interval_secs=600,
sync_optimizer=None,
session_config=None,
trace_every_n_steps=None):
"""Wrapper around tf-slim's train function.
Runs a training loop using a TensorFlow supervisor.
When the sync_optimizer is supplied, gradient updates are applied
  synchronously. Otherwise, gradient updates are applied asynchronously.
Args:
train_op: A `Tensor` that, when executed, will apply the gradients and
return the loss value.
logdir: The directory where training logs are written to. If None, model
checkpoints and summaries will not be written.
mask_update_op: Operation that upon execution updates the weight masks and
thresholds.
train_step_fn: The function to call in order to execute a single gradient
      step. The function must take exactly four arguments: the current
session, the `train_op` `Tensor`, a global step `Tensor` and a
dictionary.
train_step_kwargs: A dictionary which is passed to the `train_step_fn`. By
default, two `Boolean`, scalar ops called "should_stop" and "should_log"
are provided.
log_every_n_steps: The frequency, in terms of global steps, that the loss
      and global step are logged.
graph: The graph to pass to the supervisor. If no graph is supplied the
default graph is used.
master: The address of the tensorflow master.
is_chief: Specifies whether or not the training is being run by the primary
replica during replica training.
global_step: The `Tensor` representing the global step. If left as `None`,
then slim.variables.get_or_create_global_step() is used.
number_of_steps: The max number of gradient steps to take during training,
as measured by 'global_step': training will stop if global_step is greater
than 'number_of_steps'. If the value is left as None, training proceeds
indefinitely.
init_op: The initialization operation. If left to its default value, then
the session is initialized by calling
`tf.compat.v1.global_variables_initializer()`.
init_feed_dict: A feed dictionary to use when executing the `init_op`.
local_init_op: The local initialization operation. If left to its default
value, then the session is initialized by calling
`tf.compat.v1.local_variables_initializer()` and
`tf.compat.v1.tables_initializer()`.
init_fn: An optional callable to be executed after `init_op` is called. The
callable must accept one argument, the session being initialized.
ready_op: Operation to check if the model is ready to use. If left to its
default value, then the session checks for readiness by calling
`tf.compat.v1.report_uninitialized_variables()`.
summary_op: The summary operation.
save_summaries_secs: How often, in seconds, to save summaries.
summary_writer: `SummaryWriter` to use. Can be `None` to indicate that no
summaries should be written. If unset, we create a SummaryWriter.
startup_delay_steps: The number of steps to wait for before beginning. Note
that this must be 0 if a sync_optimizer is supplied.
saver: Saver to save checkpoints. If None, a default one will be created and
used.
save_interval_secs: How often, in seconds, to save the model to `logdir`.
sync_optimizer: an instance of tf.compat.v1.train.SyncReplicasOptimizer, or
a list of them. If the argument is supplied, gradient updates will be
synchronous. If left as `None`, gradient updates will be asynchronous.
session_config: An instance of `tf.compat.v1.ConfigProto` that will be used
to configure the `Session`. If left as `None`, the default will be used.
trace_every_n_steps: produce and save a `Timeline` in Chrome trace format
and add it to the summaries every `trace_every_n_steps`. If None, no trace
information will be produced or saved.
Returns:
the value of the loss function after training.
Raises:
ValueError: if `train_op` is empty or if `startup_delay_steps` is
non-zero when `sync_optimizer` is supplied, if `number_of_steps` is
negative, or if `trace_every_n_steps` is not `None` and no `logdir` is
provided.
"""
def train_step_with_pruning_fn(sess, train_op, global_step,
train_step_kwargs):
total_loss, should_stop = train_step_fn(sess, train_op, global_step,
train_step_kwargs)
sess.run(mask_update_op)
return total_loss, should_stop
total_loss, _ = _slim.learning.train(
train_op,
logdir,
train_step_fn=train_step_with_pruning_fn,
train_step_kwargs=train_step_kwargs,
log_every_n_steps=log_every_n_steps,
graph=graph,
master=master,
is_chief=is_chief,
global_step=global_step,
number_of_steps=number_of_steps,
init_op=init_op,
init_feed_dict=init_feed_dict,
local_init_op=local_init_op,
init_fn=init_fn,
ready_op=ready_op,
summary_op=summary_op,
save_summaries_secs=save_summaries_secs,
summary_writer=summary_writer,
startup_delay_steps=startup_delay_steps,
saver=saver,
save_interval_secs=save_interval_secs,
sync_optimizer=sync_optimizer,
session_config=session_config,
trace_every_n_steps=trace_every_n_steps)
return total_loss
|
tensorflow-master
|
tensorflow/contrib/model_pruning/python/learning.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for adding pruning related ops to the graph.
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
def weight_mask_variable(var, scope):
"""Create a mask for the weights.
This function adds a variable 'mask' to the graph.
Args:
var: the weight variable that needs to be masked
scope: The variable scope of the variable var
Returns:
the mask variable of the same size and shape as var, initialized to all 1s.
"""
with variable_scope.variable_scope(scope):
mask = variable_scope.get_variable(
'mask',
var.get_shape(),
initializer=init_ops.ones_initializer(),
trainable=False,
dtype=var.dtype)
return mask
def weight_threshold_variable(var, scope):
"""Create a scalar threshold for the weights.
  This function adds a variable 'threshold' to the graph.
Args:
var: The weight variable that needs to be masked
scope: The variable scope of the variable var
Returns:
A scalar threshold variable initialized to 0.
"""
with variable_scope.variable_scope(scope):
threshold = variable_scope.get_variable(
'threshold', [],
initializer=init_ops.zeros_initializer(),
trainable=False,
dtype=var.dtype)
return threshold
def kronecker_product(mat1, mat2):
"""Computes the Kronecker product of two matrices mat1 and mat2.
Args:
mat1: A matrix of size m x n
mat2: A matrix of size p x q
Returns:
Kronecker product of matrices mat1 and mat2 of size mp x nq
"""
m1, n1 = mat1.get_shape().as_list()
mat1_rsh = array_ops.reshape(mat1, [m1, 1, n1, 1])
m2, n2 = mat2.get_shape().as_list()
mat2_rsh = array_ops.reshape(mat2, [1, m2, 1, n2])
return array_ops.reshape(mat1_rsh * mat2_rsh, [m1 * m2, n1 * n2])
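# Small worked example (a sketch): for mat1 = [[1, 2], [3, 4]] and
# mat2 = ones([2, 2]), kronecker_product(mat1, mat2) returns
#   [[1, 1, 2, 2],
#    [1, 1, 2, 2],
#    [3, 3, 4, 4],
#    [3, 3, 4, 4]]
# which is exactly what expand_tensor below produces for block_dims = [2, 2].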
def expand_tensor(tensor, block_dims):
"""Expands a 2D tensor by replicating the tensor values.
This is equivalent to the kronecker product of the tensor and a matrix of
ones of size block_dims.
Example:
tensor = [[1,2]
[3,4]]
block_dims = [2,2]
result = [[1 1 2 2]
[1 1 2 2]
[3 3 4 4]
[3 3 4 4]]
Args:
tensor: A 2D tensor that needs to be expanded.
block_dims: List of integers specifying the expansion factor.
Returns:
The expanded tensor
Raises:
    ValueError: if tensor is not rank 2 or block_dims does not have 2
elements.
"""
if tensor.get_shape().ndims != 2:
raise ValueError('Input tensor must be rank 2')
if len(block_dims) != 2:
raise ValueError('block_dims must have 2 elements')
block_height, block_width = block_dims
def _tile_rows(tensor, multiple):
"""Create a new tensor by tiling the tensor along rows."""
return array_ops.tile(tensor, [multiple, 1])
def _generate_indices(num_rows, block_dim):
indices = np.zeros(shape=[num_rows * block_dim, 1], dtype=np.int32)
for k in range(block_dim):
for r in range(num_rows):
indices[k * num_rows + r] = r * block_dim + k
return indices
def _replicate_rows(tensor, multiple):
tensor_shape = tensor.shape.as_list()
expanded_shape = [tensor_shape[0] * multiple, tensor_shape[1]]
indices = constant_op.constant(_generate_indices(tensor_shape[0], multiple))
return array_ops.scatter_nd(indices, _tile_rows(tensor, multiple),
expanded_shape)
expanded_tensor = tensor
# Expand rows by factor block_height.
if block_height > 1:
expanded_tensor = _replicate_rows(tensor, block_height)
# Transpose and expand by factor block_width. Transpose the result.
if block_width > 1:
expanded_tensor = array_ops.transpose(
_replicate_rows(array_ops.transpose(expanded_tensor), block_width))
return expanded_tensor
def factorized_pool(input_tensor,
window_shape,
pooling_type,
strides,
padding,
name=None):
"""Performs m x n pooling through a combination of 1xm and 1xn pooling.
Args:
input_tensor: Input tensor. Must be rank 2
window_shape: Pooling window shape
pooling_type: Either 'MAX' or 'AVG'
strides: The stride of the pooling window
padding: 'SAME' or 'VALID'.
name: Name of the op
Returns:
A rank 2 tensor containing the pooled output
Raises:
ValueError: if the input tensor is not rank 2
"""
if input_tensor.get_shape().ndims != 2:
raise ValueError('factorized_pool() accepts tensors of rank 2 only')
[height, width] = input_tensor.get_shape()
with ops.name_scope(name, 'factorized_pool'):
input_tensor_aligned = array_ops.reshape(
input_tensor, [1, 1, height, width],
name=input_tensor.op.name + '_aligned')
height_pooling = nn_ops.pool(
input_tensor_aligned,
window_shape=[1, window_shape[0]],
pooling_type=pooling_type,
strides=[1, strides[0]],
padding=padding)
swap_height_width = array_ops.transpose(height_pooling, perm=[0, 1, 3, 2])
width_pooling = nn_ops.pool(
swap_height_width,
window_shape=[1, window_shape[1]],
pooling_type=pooling_type,
strides=[1, strides[1]],
padding=padding)
return array_ops.squeeze(
array_ops.transpose(width_pooling, perm=[0, 1, 3, 2]), axis=[0, 1])
def determine_partitioned_axis(partitioned_variable):
partitioned_axis = 0
concatenated_variable_shape = partitioned_variable.get_shape()
for partition in partitioned_variable:
partition_shape = partition.get_shape()
maybe_partitioned_axis = np.less(partition_shape,
concatenated_variable_shape)
# Sanity check: make sure number of partitioned axis == 1
if np.count_nonzero(maybe_partitioned_axis) != 1:
raise ValueError('Number of partitioned axes %s not equal to 1' %
np.count_nonzero(maybe_partitioned_axis))
partitioned_axis = np.where(maybe_partitioned_axis)[0][0]
return partitioned_axis
def variable_assign(var, new_value):
return state_ops.assign(var, new_value, name=var.op.name + '_assign')
def partitioned_variable_assign(partitioned_var, new_value):
"""Assign op for partitioned variables.
Args:
partitioned_var: A partitioned tensorflow variable
new_value: Value to be assigned to the variable var
Returns:
A tensorflow op that groups the assign ops for each of the variable slices
"""
# Determine which axis was used to partition the variable. Currently
  # tensorflow allows partitioning a variable only along 1 axis.
axis = 0 if len(partitioned_var) == 1 else determine_partitioned_axis(
partitioned_var)
partition_sizes = np.array(
[partition.get_shape()[axis] for partition in partitioned_var])
new_partitioned_values = array_ops.split(
new_value,
ops.convert_to_tensor(partition_sizes, dtype=dtypes.int32),
axis=axis)
op_list = []
for partition in partitioned_var:
op_list.append(
variable_assign(partition, new_partitioned_values[len(op_list)]))
return control_flow_ops.group(
*op_list, name=partitioned_var.name + '_group_assign')
|
tensorflow-master
|
tensorflow/contrib/model_pruning/python/pruning_utils.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the key functions in pruning library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.model_pruning.python import pruning
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import training_util
class PruningHParamsTest(test.TestCase):
PARAM_LIST = [
"name=test", "threshold_decay=0.9", "pruning_frequency=10",
"sparsity_function_end_step=100", "target_sparsity=0.9",
"weight_sparsity_map=[conv1:0.8,conv2/kernel:0.8]"
]
TEST_HPARAMS = ",".join(PARAM_LIST)
def setUp(self):
super(PruningHParamsTest, self).setUp()
# Add global step variable to the graph
self.global_step = training_util.get_or_create_global_step()
# Add sparsity
self.sparsity = variables.VariableV1(0.5, name="sparsity")
# Parse hparams
self.pruning_hparams = pruning.get_pruning_hparams().parse(
self.TEST_HPARAMS)
def testInit(self):
p = pruning.Pruning(self.pruning_hparams)
self.assertEqual(p._spec.name, "test")
self.assertAlmostEqual(p._spec.threshold_decay, 0.9)
self.assertEqual(p._spec.pruning_frequency, 10)
self.assertEqual(p._spec.sparsity_function_end_step, 100)
self.assertAlmostEqual(p._spec.target_sparsity, 0.9)
self.assertEqual(p._weight_sparsity_map["conv1"], 0.8)
self.assertEqual(p._weight_sparsity_map["conv2/kernel"], 0.8)
def testInitWithExternalSparsity(self):
with self.cached_session():
p = pruning.Pruning(spec=self.pruning_hparams, sparsity=self.sparsity)
variables.global_variables_initializer().run()
sparsity = p._sparsity.eval()
self.assertAlmostEqual(sparsity, 0.5)
def testInitWithVariableReuse(self):
with self.cached_session():
p = pruning.Pruning(spec=self.pruning_hparams, sparsity=self.sparsity)
p_copy = pruning.Pruning(
spec=self.pruning_hparams, sparsity=self.sparsity)
variables.global_variables_initializer().run()
sparsity = p._sparsity.eval()
self.assertAlmostEqual(sparsity, 0.5)
self.assertEqual(p._sparsity.eval(), p_copy._sparsity.eval())
class PruningTest(test.TestCase):
def setUp(self):
super(PruningTest, self).setUp()
self.global_step = training_util.get_or_create_global_step()
def testCreateMask2D(self):
width = 10
height = 20
with self.cached_session():
weights = variables.VariableV1(
random_ops.random_normal([width, height], stddev=1), name="weights")
masked_weights = pruning.apply_mask(weights,
variable_scope.get_variable_scope())
variables.global_variables_initializer().run()
weights_val = weights.eval()
masked_weights_val = masked_weights.eval()
self.assertAllEqual(weights_val, masked_weights_val)
def testUpdateSingleMask(self):
with self.cached_session() as session:
weights = variables.VariableV1(
math_ops.linspace(1.0, 100.0, 100), name="weights")
masked_weights = pruning.apply_mask(weights)
sparsity = variables.VariableV1(0.95, name="sparsity")
p = pruning.Pruning(sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.mask_update_op()
variables.global_variables_initializer().run()
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 100)
session.run(mask_update_op)
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 5)
def _blockMasking(self, hparams, weights, expected_mask):
threshold = variables.VariableV1(0.0, name="threshold")
sparsity = variables.VariableV1(0.5, name="sparsity")
test_spec = ",".join(hparams)
pruning_hparams = pruning.get_pruning_hparams().parse(test_spec)
# Set up pruning
p = pruning.Pruning(pruning_hparams, sparsity=sparsity)
with self.cached_session():
variables.global_variables_initializer().run()
_, new_mask = p._maybe_update_block_mask(weights, threshold)
# Check if the mask is the same size as the weights
self.assertAllEqual(new_mask.get_shape(), weights.get_shape())
mask_val = new_mask.eval()
self.assertAllEqual(mask_val, expected_mask)
def testBlockMasking(self):
param_list = ["block_height=2", "block_width=2", "threshold_decay=0"]
weights_avg = constant_op.constant(
[[0.1, 0.1, 0.2, 0.2], [0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4],
[0.3, 0.3, 0.4, 0.4]])
weights_max = constant_op.constant(
[[0.1, 0.0, 0.2, 0.0], [0.0, -0.1, 0.0, -0.2], [0.3, 0.0, 0.4, 0.0],
[0.0, -0.3, 0.0, -0.4]])
expected_mask = [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0],
[1., 1., 1., 1.], [1., 1., 1., 1.]]
self._blockMasking(param_list + ["block_pooling_function=MAX"], weights_max,
expected_mask)
self._blockMasking(param_list + ["block_pooling_function=AVG"], weights_avg,
expected_mask)
def testBlockMaskingWithHigherDimensions(self):
param_list = ["block_height=2", "block_width=2", "threshold_decay=0"]
# Weights as in testBlockMasking, but with one extra dimension.
weights_avg = constant_op.constant(
[[[0.1, 0.1, 0.2, 0.2], [0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4],
[0.3, 0.3, 0.4, 0.4]]])
weights_max = constant_op.constant(
[[[0.1, 0.0, 0.2, 0.0], [0.0, -0.1, 0.0, -0.2], [0.3, 0.0, 0.4, 0.0],
[0.0, -0.3, 0.0, -0.4]]])
expected_mask = [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0],
[1., 1., 1., 1.], [1., 1., 1., 1.]]]
self._blockMasking(param_list + ["block_pooling_function=MAX"], weights_max,
expected_mask)
self._blockMasking(param_list + ["block_pooling_function=AVG"],
weights_avg, expected_mask)
def testPartitionedVariableMasking(self):
partitioner = partitioned_variables.variable_axis_size_partitioner(40)
with self.cached_session() as session:
with variable_scope.variable_scope("", partitioner=partitioner):
sparsity = variables.VariableV1(0.5, name="Sparsity")
weights = variable_scope.get_variable(
"weights", initializer=math_ops.linspace(1.0, 100.0, 100))
masked_weights = pruning.apply_mask(
weights, scope=variable_scope.get_variable_scope())
p = pruning.Pruning(sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.mask_update_op()
variables.global_variables_initializer().run()
masked_weights_val = masked_weights.eval()
session.run(mask_update_op)
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 50)
def testConditionalMaskUpdate(self):
param_list = [
"pruning_frequency=2", "begin_pruning_step=1", "end_pruning_step=6",
"nbins=100"
]
test_spec = ",".join(param_list)
pruning_hparams = pruning.get_pruning_hparams().parse(test_spec)
weights = variables.VariableV1(
math_ops.linspace(1.0, 100.0, 100), name="weights")
masked_weights = pruning.apply_mask(weights)
sparsity = variables.VariableV1(0.00, name="sparsity")
# Set up pruning
p = pruning.Pruning(pruning_hparams, sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.conditional_mask_update_op()
sparsity_val = math_ops.linspace(0.0, 0.9, 10)
increment_global_step = state_ops.assign_add(self.global_step, 1)
non_zero_count = []
with self.cached_session() as session:
variables.global_variables_initializer().run()
for i in range(10):
session.run(state_ops.assign(sparsity, sparsity_val[i]))
session.run(mask_update_op)
session.run(increment_global_step)
non_zero_count.append(np.count_nonzero(masked_weights.eval()))
      # Weights pruned at steps 0, 2, 4, and 6
expected_non_zero_count = [100, 100, 80, 80, 60, 60, 40, 40, 40, 40]
self.assertAllEqual(expected_non_zero_count, non_zero_count)
def testWeightSpecificSparsity(self):
param_list = [
"begin_pruning_step=1", "pruning_frequency=1", "end_pruning_step=100",
"target_sparsity=0.5", "weight_sparsity_map=[layer2/weights:0.75]",
"threshold_decay=0.0"
]
test_spec = ",".join(param_list)
pruning_hparams = pruning.get_pruning_hparams().parse(test_spec)
with variable_scope.variable_scope("layer1"):
w1 = variables.VariableV1(
math_ops.linspace(1.0, 100.0, 100), name="weights")
_ = pruning.apply_mask(w1)
with variable_scope.variable_scope("layer2"):
w2 = variables.VariableV1(
math_ops.linspace(1.0, 100.0, 100), name="weights")
_ = pruning.apply_mask(w2)
p = pruning.Pruning(pruning_hparams)
mask_update_op = p.conditional_mask_update_op()
increment_global_step = state_ops.assign_add(self.global_step, 1)
with self.cached_session() as session:
variables.global_variables_initializer().run()
for _ in range(110):
session.run(mask_update_op)
session.run(increment_global_step)
self.assertAllEqual(
session.run(pruning.get_weight_sparsity()), [0.5, 0.75])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/model_pruning/python/pruning_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for imagingvision.intelligence.tensorflow.model_pruning.layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.model_pruning.python.layers import core_layers
from tensorflow.contrib.model_pruning.python.layers import layers
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class MaskedConvolutionLayerTest(test.TestCase):
def setUp(self):
super(MaskedConvolutionLayerTest, self).setUp()
self.height, self.width = 7, 9
def testInvalidRank3(self):
input_tensor = array_ops.ones((self.height, self.width, 3))
with self.assertRaisesRegexp(ValueError, 'rank'):
layers.masked_conv2d(input_tensor, 32, 3)
def testInvalidRank5(self):
input_tensor = array_ops.ones((8, 8, self.height, self.width, 3))
with self.assertRaisesRegexp(ValueError, 'rank'):
layers.masked_conv2d(input_tensor, 32, 3)
def testSingleConvMaskAdded(self):
kernel_size = 3
input_depth, output_depth = 8, 32
input_tensor = array_ops.ones((8, self.height, self.width, input_depth))
layers.masked_conv2d(input_tensor, output_depth, kernel_size)
masks = ops.get_collection(core_layers.MASK_COLLECTION)
self.assertEqual(len(masks), 1)
self.assertListEqual(masks[0].get_shape().as_list(),
[kernel_size, kernel_size, input_depth, output_depth])
masked_weight = ops.get_collection(core_layers.MASKED_WEIGHT_COLLECTION)
self.assertEqual(len(masked_weight), 1)
self.assertListEqual(masked_weight[0].get_shape().as_list(),
[kernel_size, kernel_size, input_depth, output_depth])
def testMultipleConvMaskAdded(self):
number_of_layers = 5
kernel_size = 3
base_depth = 4
depth_step = 7
input_tensor = array_ops.ones((8, self.height, self.width, base_depth))
top_layer = input_tensor
for ix in range(number_of_layers):
top_layer = layers.masked_conv2d(top_layer, base_depth +
(ix + 1) * depth_step, kernel_size)
masks = ops.get_collection(core_layers.MASK_COLLECTION)
self.assertEqual(len(masks), number_of_layers)
for ix in range(number_of_layers):
self.assertListEqual(masks[ix].get_shape().as_list(), [
kernel_size, kernel_size, base_depth + ix * depth_step,
base_depth + (ix + 1) * depth_step
])
masked_weight = ops.get_collection(core_layers.MASKED_WEIGHT_COLLECTION)
self.assertEqual(len(masked_weight), number_of_layers)
for ix in range(number_of_layers):
self.assertListEqual(masked_weight[ix].get_shape().as_list(), [
kernel_size, kernel_size, base_depth + ix * depth_step,
base_depth + (ix + 1) * depth_step
])
class MaskedFullyConnectedLayerTest(test.TestCase):
def testSingleFCMaskAdded(self):
input_depth, output_depth = 8, 32
input_tensor = array_ops.ones((5, input_depth))
layers.masked_fully_connected(input_tensor, output_depth)
masks = ops.get_collection(core_layers.MASK_COLLECTION)
self.assertEqual(len(masks), 1)
self.assertListEqual(masks[0].get_shape().as_list(),
[input_depth, output_depth])
masked_weight = ops.get_collection(core_layers.MASKED_WEIGHT_COLLECTION)
self.assertEqual(len(masked_weight), 1)
self.assertListEqual(masked_weight[0].get_shape().as_list(),
[input_depth, output_depth])
def testMultipleConvMaskAdded(self):
number_of_layers = 5
base_depth = 4
depth_step = 7
input_tensor = array_ops.ones((8, base_depth))
top_layer = input_tensor
for ix in range(number_of_layers):
top_layer = layers.masked_fully_connected(top_layer, base_depth +
(ix + 1) * depth_step)
masks = ops.get_collection(core_layers.MASK_COLLECTION)
self.assertEqual(len(masks), number_of_layers)
for ix in range(number_of_layers):
self.assertListEqual(masks[ix].get_shape().as_list(), [
base_depth + ix * depth_step, base_depth + (ix + 1) * depth_step
])
masked_weight = ops.get_collection(core_layers.MASKED_WEIGHT_COLLECTION)
self.assertEqual(len(masked_weight), number_of_layers)
for ix in range(number_of_layers):
self.assertListEqual(masked_weight[ix].get_shape().as_list(), [
base_depth + ix * depth_step, base_depth + (ix + 1) * depth_step
])
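# A minimal sketch (assumed usage, not part of the original tests) showing how
# the functional wrappers compose: every masked layer registers its mask and
# masked weight in the collections defined in core_layers, so the number of
# collection entries matches the number of masked layers built so far.
def _example_masked_model():
  images = array_ops.ones((8, 32, 32, 3))
  net = layers.masked_conv2d(images, 16, 3)
  net = layers.masked_conv2d(net, 32, 3)
  net = layers.masked_fully_connected(array_ops.reshape(net, [8, -1]), 10)
  masks = ops.get_collection(core_layers.MASK_COLLECTION)
  assert len(masks) == 3  # one mask per masked layer
  return net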
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/model_pruning/python/layers/layers_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for creating different number of masks in rnn_cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.model_pruning.python import pruning
from tensorflow.contrib.model_pruning.python.layers import rnn_cells
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn_cell as tf_rnn_cells
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class RnnCellsTest(test.TestCase):
def setUp(self):
super(RnnCellsTest, self).setUp()
self.batch_size = 8
self.dim = 10
def testMaskedBasicLSTMCell(self):
expected_num_masks = 1
expected_num_rows = 2 * self.dim
expected_num_cols = 4 * self.dim
with self.cached_session():
inputs = variables.Variable(
random_ops.random_normal([self.batch_size, self.dim]))
c = variables.Variable(
random_ops.random_normal([self.batch_size, self.dim]))
h = variables.Variable(
random_ops.random_normal([self.batch_size, self.dim]))
state = tf_rnn_cells.LSTMStateTuple(c, h)
lstm_cell = rnn_cells.MaskedBasicLSTMCell(self.dim)
lstm_cell(inputs, state)
self.assertEqual(len(pruning.get_masks()), expected_num_masks)
self.assertEqual(len(pruning.get_masked_weights()), expected_num_masks)
self.assertEqual(len(pruning.get_thresholds()), expected_num_masks)
self.assertEqual(len(pruning.get_weights()), expected_num_masks)
for mask in pruning.get_masks():
self.assertEqual(mask.shape, (expected_num_rows, expected_num_cols))
for weight in pruning.get_weights():
self.assertEqual(weight.shape, (expected_num_rows, expected_num_cols))
def testMaskedLSTMCell(self):
expected_num_masks = 1
expected_num_rows = 2 * self.dim
expected_num_cols = 4 * self.dim
with self.cached_session():
inputs = variables.Variable(
random_ops.random_normal([self.batch_size, self.dim]))
c = variables.Variable(
random_ops.random_normal([self.batch_size, self.dim]))
h = variables.Variable(
random_ops.random_normal([self.batch_size, self.dim]))
state = tf_rnn_cells.LSTMStateTuple(c, h)
lstm_cell = rnn_cells.MaskedLSTMCell(self.dim)
lstm_cell(inputs, state)
self.assertEqual(len(pruning.get_masks()), expected_num_masks)
self.assertEqual(len(pruning.get_masked_weights()), expected_num_masks)
self.assertEqual(len(pruning.get_thresholds()), expected_num_masks)
self.assertEqual(len(pruning.get_weights()), expected_num_masks)
for mask in pruning.get_masks():
self.assertEqual(mask.shape, (expected_num_rows, expected_num_cols))
for weight in pruning.get_weights():
self.assertEqual(weight.shape, (expected_num_rows, expected_num_cols))
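# A minimal sketch (assumed usage, not part of the original tests): calling a
# masked LSTM cell once builds its variables, after which its single
# [2 * dim, 4 * dim] mask is visible through pruning.get_masks().
def _example_masked_lstm_cell(batch_size=4, dim=6):
  inputs = random_ops.random_normal([batch_size, dim])
  c = random_ops.random_normal([batch_size, dim])
  h = random_ops.random_normal([batch_size, dim])
  cell = rnn_cells.MaskedBasicLSTMCell(dim)
  outputs, new_state = cell(inputs, tf_rnn_cells.LSTMStateTuple(c, h))
  return outputs, new_state, pruning.get_masks()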
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/model_pruning/python/layers/rnn_cells_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the core layer classes for model pruning and its functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import standard_ops
MASK_COLLECTION = 'masks'
THRESHOLD_COLLECTION = 'thresholds'
MASKED_WEIGHT_COLLECTION = 'masked_weights'
WEIGHT_COLLECTION = 'kernel'
# The 'weights' part of the name is needed for the quantization library
# to recognize that the kernel should be quantized.
MASKED_WEIGHT_NAME = 'weights/masked_weight'
class _MaskedConv(base.Layer):
"""Abstract nD convolution layer (private, used as implementation base).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. The weight tensor of this layer is masked.
If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Arguments:
rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
length of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, ...)`.
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Regularizer function for the output.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
"""
def __init__(self,
rank,
filters,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
trainable=True,
name=None,
**kwargs):
super(_MaskedConv, self).__init__(
trainable=trainable,
name=name,
activity_regularizer=activity_regularizer,
**kwargs)
self.rank = rank
self.filters = filters
self.kernel_size = utils.normalize_tuple(kernel_size, rank, 'kernel_size')
self.strides = utils.normalize_tuple(strides, rank, 'strides')
self.padding = utils.normalize_padding(padding)
self.data_format = utils.normalize_data_format(data_format)
self.dilation_rate = utils.normalize_tuple(dilation_rate, rank,
'dilation_rate')
self.activation = activation
self.use_bias = use_bias
self.kernel_initializer = kernel_initializer
self.bias_initializer = bias_initializer
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
self.input_spec = input_spec.InputSpec(ndim=self.rank + 2)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
channel_axis = 1 if self.data_format == 'channels_first' else -1
if tensor_shape.dimension_value(input_shape[channel_axis]) is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = tensor_shape.dimension_value(input_shape[channel_axis])
kernel_shape = self.kernel_size + (input_dim, self.filters)
self.mask = self.add_variable(
name='mask',
shape=kernel_shape,
initializer=init_ops.ones_initializer(),
trainable=False,
dtype=self.dtype)
self.kernel = self.add_variable(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
trainable=True,
dtype=self.dtype)
self.threshold = self.add_variable(
name='threshold',
shape=[],
initializer=init_ops.zeros_initializer(),
trainable=False,
dtype=self.dtype)
# Add masked_weights in the weights namescope so as to make it easier
# for the quantization library to add quant ops.
self.masked_kernel = math_ops.multiply(self.mask, self.kernel,
MASKED_WEIGHT_NAME)
ops.add_to_collection(MASK_COLLECTION, self.mask)
ops.add_to_collection(MASKED_WEIGHT_COLLECTION, self.masked_kernel)
ops.add_to_collection(THRESHOLD_COLLECTION, self.threshold)
ops.add_to_collection(WEIGHT_COLLECTION, self.kernel)
if self.use_bias:
self.bias = self.add_variable(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.input_spec = input_spec.InputSpec(
ndim=self.rank + 2, axes={channel_axis: input_dim})
self.built = True
def call(self, inputs):
outputs = nn.convolution(
input=inputs,
filter=self.masked_kernel,
dilation_rate=self.dilation_rate,
strides=self.strides,
padding=self.padding.upper(),
data_format=utils.convert_data_format(self.data_format, self.rank + 2))
if self.bias is not None:
if self.data_format == 'channels_first':
if self.rank == 1:
# nn.bias_add does not accept a 1D input tensor.
bias = array_ops.reshape(self.bias, (1, self.filters, 1))
outputs += bias
if self.rank == 2:
outputs = nn.bias_add(outputs, self.bias, data_format='NCHW')
if self.rank == 3:
# As of Mar 2017, direct addition is significantly slower than
# bias_add when computing gradients. To use bias_add, we collapse Z
# and Y into a single dimension to obtain a 4D input tensor.
outputs_shape = outputs.shape.as_list()
outputs_4d = array_ops.reshape(outputs, [
outputs_shape[0], outputs_shape[1],
outputs_shape[2] * outputs_shape[3], outputs_shape[4]
])
outputs_4d = nn.bias_add(outputs_4d, self.bias, data_format='NCHW')
outputs = array_ops.reshape(outputs_4d, outputs_shape)
else:
outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_last':
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return tensor_shape.TensorShape([input_shape[0]] + new_space +
[self.filters])
else:
space = input_shape[2:]
new_space = []
for i in range(len(space)):
new_dim = utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return tensor_shape.TensorShape([input_shape[0], self.filters] +
new_space)
class MaskedConv2D(_MaskedConv):
"""2D convolution layer (e.g. spatial convolution over images).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Arguments:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Regularizer function for the output.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format='channels_last',
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
trainable=True,
name=None,
**kwargs):
super(MaskedConv2D, self).__init__(
rank=2,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
trainable=trainable,
name=name,
**kwargs)
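# A minimal sketch (assumed usage, not part of the original module): building a
# MaskedConv2D layer creates `mask`, `threshold` and `kernel` variables, adds
# them to the collections defined above, and convolves the input with
# mask * kernel rather than with the raw kernel.
def _example_masked_conv2d_layer():
  images = array_ops.ones((1, 8, 8, 3))
  layer = MaskedConv2D(filters=4, kernel_size=3, padding='same')
  outputs = layer.apply(images)
  masks = ops.get_collection(MASK_COLLECTION)
  return outputs, masks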
class MaskedFullyConnected(base.Layer):
"""Fully-connected layer class with masked weights.
  This layer implements the operation:
  `outputs = activation(matmul(inputs, mask * kernel) + bias)`
  Where `activation` is the activation function passed as the `activation`
  argument (if not `None`), `kernel` is a weights matrix created by the layer,
  `mask` is a non-trainable masking matrix of the same shape as `kernel`,
  and `bias` is a bias vector created by the layer
  (only if `use_bias` is `True`).
Note: if the input to the layer has a rank greater than 2, then it is
flattened prior to the initial matrix multiply by `kernel`.
Arguments:
units: Integer or Long, dimensionality of the output space.
activation: Activation function (callable). Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer function for the weight matrix.
bias_initializer: Initializer function for the bias.
kernel_regularizer: Regularizer function for the weight matrix.
bias_regularizer: Regularizer function for the bias.
activity_regularizer: Regularizer function for the output.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such cases.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Properties:
units: Python integer, dimensionality of the output space.
activation: Activation function (callable).
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer instance (or name) for the weight matrix.
bias_initializer: Initializer instance (or name) for the bias.
kernel_regularizer: Regularizer instance for the weight matrix (callable)
bias_regularizer: Regularizer instance for the bias (callable).
activity_regularizer: Regularizer instance for the output (callable)
kernel: Weight matrix (TensorFlow variable or tensor).
bias: Bias vector, if applicable (TensorFlow variable or tensor).
"""
def __init__(self,
units,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
trainable=True,
name=None,
**kwargs):
super(MaskedFullyConnected, self).__init__(
trainable=trainable,
name=name,
activity_regularizer=activity_regularizer,
**kwargs)
self.units = units
self.activation = activation
self.use_bias = use_bias
self.kernel_initializer = kernel_initializer
self.bias_initializer = bias_initializer
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
self.input_spec = input_spec.InputSpec(min_ndim=2)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if tensor_shape.dimension_value(input_shape[-1]) is None:
raise ValueError('The last dimension of the inputs to `Dense` '
'should be defined. Found `None`.')
self.input_spec = input_spec.InputSpec(
min_ndim=2, axes={-1: tensor_shape.dimension_value(input_shape[-1])})
self.kernel = self.add_variable(
'kernel',
shape=[tensor_shape.dimension_value(input_shape[-1]), self.units],
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
dtype=self.dtype,
trainable=True)
self.mask = self.add_variable(
name='mask',
shape=[tensor_shape.dimension_value(input_shape[-1]), self.units],
initializer=init_ops.ones_initializer(),
trainable=False,
dtype=self.dtype)
self.threshold = self.add_variable(
name='threshold',
shape=[],
initializer=init_ops.zeros_initializer(),
trainable=False,
dtype=self.dtype)
# Add masked_weights in the weights namescope so as to make it easier
# for the quantization library to add quant ops.
self.masked_kernel = math_ops.multiply(self.mask, self.kernel,
MASKED_WEIGHT_NAME)
ops.add_to_collection(MASK_COLLECTION, self.mask)
ops.add_to_collection(MASKED_WEIGHT_COLLECTION, self.masked_kernel)
ops.add_to_collection(THRESHOLD_COLLECTION, self.threshold)
ops.add_to_collection(WEIGHT_COLLECTION, self.kernel)
if self.use_bias:
self.bias = self.add_variable(
'bias',
shape=[
self.units,
],
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
dtype=self.dtype,
trainable=True)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
shape = inputs.get_shape().as_list()
output_shape = shape[:-1] + [self.units]
if len(output_shape) > 2:
# Broadcasting is required for the inputs.
outputs = standard_ops.tensordot(inputs, self.masked_kernel,
[[len(shape) - 1], [0]])
# Reshape the output back to the original ndim of the input.
outputs.set_shape(output_shape)
else:
outputs = standard_ops.matmul(inputs, self.masked_kernel)
if self.use_bias:
outputs = nn.bias_add(outputs, self.bias)
if self.activation is not None:
return self.activation(outputs) # pylint: disable=not-callable
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
input_shape = input_shape.with_rank_at_least(2)
if tensor_shape.dimension_value(input_shape[-1]) is None:
raise ValueError(
'The innermost dimension of input_shape must be defined, but saw: %s'
% input_shape)
return input_shape[:-1].concatenate(self.units)
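# A minimal sketch (assumed usage, not part of the original module) mirroring
# the convolutional example for the fully-connected case: the layer multiplies
# its inputs by mask * kernel, so zeroing entries of `mask` prunes the
# corresponding connections without touching `kernel` itself.
def _example_masked_fully_connected_layer():
  inputs = array_ops.ones((2, 16))
  layer = MaskedFullyConnected(units=4)
  outputs = layer.apply(inputs)
  masked_weights = ops.get_collection(MASKED_WEIGHT_COLLECTION)
  return outputs, masked_weights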
|
tensorflow-master
|
tensorflow/contrib/model_pruning/python/layers/core_layers.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow layers with added variables for parameter masking.
Branched from tensorflow/contrib/layers/python/layers/layers.py
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.contrib.model_pruning.python.layers import core_layers as core
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
def _model_variable_getter(getter,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
rename=None,
use_resource=None,
**_):
"""Getter that uses model_variable for compatibility with core layers."""
short_name = name.split('/')[-1]
if rename and short_name in rename:
name_components = name.split('/')
name_components[-1] = rename[short_name]
name = '/'.join(name_components)
return variables.model_variable(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
collections=collections,
trainable=trainable,
caching_device=caching_device,
partitioner=partitioner,
custom_getter=getter,
use_resource=use_resource)
def _build_variable_getter(rename=None):
"""Build a model variable getter that respects scope getter and renames."""
# VariableScope will nest the getters
def layer_variable_getter(getter, *args, **kwargs):
kwargs['rename'] = rename
return _model_variable_getter(getter, *args, **kwargs)
return layer_variable_getter
def _add_variable_to_collections(variable, collections_set, collections_name):
"""Adds variable (or all its parts) to all collections with that name."""
collections = utils.get_variable_collections(collections_set,
collections_name) or []
variables_list = [variable]
if isinstance(variable, tf_variables.PartitionedVariable):
variables_list = [v for v in variable]
for collection in collections:
for var in variables_list:
if var not in ops.get_collection(collection):
ops.add_to_collection(collection, var)
@add_arg_scope
def masked_convolution(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=None,
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds an 2D convolution followed by an optional batch_norm layer.
The layer creates a mask variable on top of the weight variable. The input to
the convolution operation is the elementwise multiplication of the mask
variable and the weigh
It is required that 1 <= N <= 3.
`convolution` creates a variable called `weights`, representing the
convolutional kernel, that is convolved (actually cross-correlated) with the
`inputs` to produce a `Tensor` of activations. If a `normalizer_fn` is
provided (such as `batch_norm`), it is then applied. Otherwise, if
`normalizer_fn` is None and a `biases_initializer` is provided then a `biases`
  variable would be created and added to the activations. Finally, if
`activation_fn` is not `None`, it is applied to the activations as well.
Performs atrous convolution with input stride/dilation rate equal to `rate`
if a value > 1 for any dimension of `rate` is specified. In this case
`stride` values != 1 are not supported.
Args:
inputs: A Tensor of rank N+2 of shape
`[batch_size] + input_spatial_shape + [in_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC".
num_outputs: Integer, the number of output filters.
kernel_size: A sequence of N positive integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
stride: A sequence of N positive integers specifying the stride at which to
compute output. Can be a single integer to specify the same value for all
spatial dimensions. Specifying any `stride` value != 1 is incompatible
with specifying any `rate` value != 1.
padding: One of `"VALID"` or `"SAME"`.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
rate: A sequence of N positive integers specifying the dilation rate to use
for atrous convolution. Can be a single integer to specify the same
value for all spatial dimensions. Specifying any `rate` value != 1 is
incompatible with specifying any `stride` value != 1.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
      Defaults to None, in which case no normalizer function is applied.
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collection per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_scope`.
Returns:
A tensor representing the output of the operation.
Raises:
ValueError: If `data_format` is invalid.
ValueError: Both 'rate' and `stride` are not uniformly 1.
"""
if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC', 'NCDHW']:
raise ValueError('Invalid data_format: %r' % (data_format,))
layer_variable_getter = _build_variable_getter({
'bias': 'biases',
'kernel': 'weights'
})
with variable_scope.variable_scope(
scope, 'Conv', [inputs], reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
input_rank = inputs.get_shape().ndims
if input_rank == 3:
raise ValueError('Sparse Convolution not supported for input with rank',
input_rank)
elif input_rank == 4:
layer_class = core.MaskedConv2D
elif input_rank == 5:
raise ValueError('Sparse Convolution not supported for input with rank',
input_rank)
else:
raise ValueError('Sparse Convolution not supported for input with rank',
input_rank)
if data_format is None or data_format == 'NHWC':
df = 'channels_last'
elif data_format == 'NCHW':
df = 'channels_first'
else:
raise ValueError('Unsupported data format', data_format)
layer = layer_class(
filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
dilation_rate=rate,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
if layer.use_bias:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
masked_conv2d = masked_convolution
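# A minimal sketch (assumed usage, not part of the original module): the
# functional wrapper mirrors contrib.layers.conv2d, but the underlying kernel
# is multiplied by a non-trainable mask before the convolution. `images` is
# assumed to be a rank-4 NHWC tensor supplied by the caller.
def _example_masked_conv2d(images):
  net = masked_conv2d(images, num_outputs=16, kernel_size=3, scope='conv1')
  net = masked_conv2d(net, num_outputs=32, kernel_size=3, scope='conv2')
  masks = ops.get_collection(core.MASK_COLLECTION)
  return net, masks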
@add_arg_scope
def masked_fully_connected(
inputs,
num_outputs,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a sparse fully connected layer. The weight matrix is masked.
`fully_connected` creates a variable called `weights`, representing a fully
connected weight matrix, which is multiplied by the `inputs` to produce a
`Tensor` of hidden units. If a `normalizer_fn` is provided (such as
`batch_norm`), it is then applied. Otherwise, if `normalizer_fn` is
None and a `biases_initializer` is provided then a `biases` variable would be
  created and added to the hidden units. Finally, if `activation_fn` is not `None`,
it is applied to the hidden units as well.
  Note: if `inputs` has a rank greater than 2, then `inputs` is flattened
prior to the initial matrix multiply by `weights`.
Args:
inputs: A tensor of at least rank 2 and static value for the last dimension;
i.e. `[batch_size, depth]`, `[None, None, None, channels]`.
num_outputs: Integer or long, the number of output units in the layer.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
      Defaults to None, in which case no normalizer function is applied.
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collections per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
The tensor variable representing the result of the series of operations.
Raises:
ValueError: If x has rank less than 2 or if its last dimension is not set.
"""
if not isinstance(num_outputs, six.integer_types):
raise ValueError('num_outputs should be int or long, got %s.' %
(num_outputs,))
layer_variable_getter = _build_variable_getter({
'bias': 'biases',
'kernel': 'weights'
})
with variable_scope.variable_scope(
scope,
'fully_connected', [inputs],
reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
layer = core.MaskedFullyConnected(
units=num_outputs,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
if layer.bias is not None:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
# Apply normalizer function / layer.
if normalizer_fn is not None:
if not normalizer_params:
normalizer_params = {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
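# A minimal sketch (assumed usage, not part of the original module) of the
# fully-connected wrapper: each call adds one mask/threshold pair to the
# pruning collections and performs the matmul with the masked weight matrix.
# `features` is assumed to be a rank-2 [batch, depth] tensor from the caller.
def _example_masked_fully_connected(features, num_classes=10):
  net = masked_fully_connected(features, 64, scope='fc1')
  logits = masked_fully_connected(
      net, num_classes, activation_fn=None, scope='logits')
  thresholds = ops.get_collection(core.THRESHOLD_COLLECTION)
  return logits, thresholds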
|
tensorflow-master
|
tensorflow/contrib/model_pruning/python/layers/layers.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module implementing RNN Cells with pruning.
This module implements BasicLSTMCell and LSTMCell with pruning.
Code adapted from third_party/tensorflow/python/ops/rnn_cell_impl.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.model_pruning.python.layers import core_layers
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell as tf_rnn
class MaskedBasicLSTMCell(tf_rnn.BasicLSTMCell):
"""Basic LSTM recurrent network cell with pruning.
Overrides the call method of tensorflow BasicLSTMCell and injects the weight
  masks.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
It does not allow cell clipping, a projection layer, and does not
use peep-hole connections: it is the basic baseline.
For advanced models, please use the full `tf.compat.v1.nn.rnn_cell.LSTMCell`
that follows.
"""
def __init__(self,
num_units,
forget_bias=1.0,
state_is_tuple=True,
activation=None,
reuse=None,
name=None):
"""Initialize the basic LSTM cell with pruning.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
Must set to `0.0` manually when restoring from CudnnLSTM-trained
checkpoints.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. If False, they are concatenated
along the column axis. The latter behavior will soon be deprecated.
activation: Activation function of the inner states. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases.
When restoring from CudnnLSTM-trained checkpoints, must use
CudnnCompatibleLSTMCell instead.
"""
super(MaskedBasicLSTMCell, self).__init__(
num_units,
forget_bias=forget_bias,
state_is_tuple=state_is_tuple,
activation=activation,
reuse=reuse,
name=name)
def build(self, inputs_shape):
# Call the build method of the parent class.
super(MaskedBasicLSTMCell, self).build(inputs_shape)
self.built = False
input_depth = inputs_shape.dims[1].value
h_depth = self._num_units
self._mask = self.add_variable(
name="mask",
shape=[input_depth + h_depth, 4 * h_depth],
initializer=init_ops.ones_initializer(),
trainable=False,
dtype=self.dtype)
self._threshold = self.add_variable(
name="threshold",
shape=[],
initializer=init_ops.zeros_initializer(),
trainable=False,
dtype=self.dtype)
# Add masked_weights in the weights namescope so as to make it easier
# for the quantization library to add quant ops.
self._masked_kernel = math_ops.multiply(self._mask, self._kernel,
core_layers.MASKED_WEIGHT_NAME)
if self._mask not in ops.get_collection_ref(core_layers.MASK_COLLECTION):
ops.add_to_collection(core_layers.MASK_COLLECTION, self._mask)
ops.add_to_collection(core_layers.MASKED_WEIGHT_COLLECTION,
self._masked_kernel)
ops.add_to_collection(core_layers.THRESHOLD_COLLECTION, self._threshold)
ops.add_to_collection(core_layers.WEIGHT_COLLECTION, self._kernel)
self.built = True
def call(self, inputs, state):
"""Long short-term memory cell (LSTM) with masks for pruning.
Args:
inputs: `2-D` tensor with shape `[batch_size, input_size]`.
state: An `LSTMStateTuple` of state tensors, each shaped
`[batch_size, self.state_size]`, if `state_is_tuple` has been set to
`True`. Otherwise, a `Tensor` shaped
`[batch_size, 2 * self.state_size]`.
Returns:
A pair containing the new hidden state, and the new state (either a
`LSTMStateTuple` or a concatenated state, depending on
`state_is_tuple`).
"""
sigmoid = math_ops.sigmoid
one = constant_op.constant(1, dtype=dtypes.int32)
# Parameters of gates are concatenated into one multiply for efficiency.
if self._state_is_tuple:
c, h = state
else:
c, h = array_ops.split(value=state, num_or_size_splits=2, axis=one)
gate_inputs = math_ops.matmul(
array_ops.concat([inputs, h], 1), self._masked_kernel)
gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = array_ops.split(
value=gate_inputs, num_or_size_splits=4, axis=one)
forget_bias_tensor = constant_op.constant(self._forget_bias, dtype=f.dtype)
# Note that using `add` and `multiply` instead of `+` and `*` gives a
# performance improvement. So using those at the cost of readability.
add = math_ops.add
multiply = math_ops.multiply
new_c = add(
multiply(c, sigmoid(add(f, forget_bias_tensor))),
multiply(sigmoid(i), self._activation(j)))
new_h = multiply(self._activation(new_c), sigmoid(o))
if self._state_is_tuple:
new_state = tf_rnn.LSTMStateTuple(new_c, new_h)
else:
new_state = array_ops.concat([new_c, new_h], 1)
return new_h, new_state
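# A minimal sketch (assumed usage, not part of the original module): the cell
# builds its mask on the first call, and the gate matmul above uses the masked
# kernel, so zeroed mask entries remove the corresponding LSTM connections.
def _example_masked_basic_lstm(batch_size=2, num_units=3):
  cell = MaskedBasicLSTMCell(num_units)
  inputs = array_ops.ones([batch_size, num_units])
  state = cell.zero_state(batch_size, dtypes.float32)
  output, new_state = cell(inputs, state)
  return output, new_state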
class MaskedLSTMCell(tf_rnn.LSTMCell):
"""LSTMCell with pruning.
Overrides the call method of tensorflow LSTMCell and injects the weight masks.
Masks are applied to only the weight matrix of the LSTM and not the
projection matrix.
"""
def __init__(self,
num_units,
use_peepholes=False,
cell_clip=None,
initializer=None,
num_proj=None,
proj_clip=None,
num_unit_shards=None,
num_proj_shards=None,
forget_bias=1.0,
state_is_tuple=True,
activation=None,
reuse=None):
"""Initialize the parameters for an LSTM cell with masks for pruning.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
num_unit_shards: Deprecated, will be removed by Jan. 2017.
Use a variable_scope partitioner instead.
num_proj_shards: Deprecated, will be removed by Jan. 2017.
Use a variable_scope partitioner instead.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training. Must set it manually to `0.0` when restoring from
CudnnLSTM trained checkpoints.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. If False, they are concatenated
along the column axis. This latter behavior will soon be deprecated.
activation: Activation function of the inner states. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
When restoring from CudnnLSTM-trained checkpoints, must use
CudnnCompatibleLSTMCell instead.
"""
super(MaskedLSTMCell, self).__init__(
num_units,
use_peepholes=use_peepholes,
cell_clip=cell_clip,
initializer=initializer,
num_proj=num_proj,
proj_clip=proj_clip,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
forget_bias=forget_bias,
state_is_tuple=state_is_tuple,
activation=activation,
reuse=reuse)
def build(self, inputs_shape):
# Call the build method of the parent class.
super(MaskedLSTMCell, self).build(inputs_shape)
self.built = False
input_depth = inputs_shape.dims[1].value
h_depth = self._num_units
self._mask = self.add_variable(
name="mask",
shape=[input_depth + h_depth, 4 * h_depth],
initializer=init_ops.ones_initializer(),
trainable=False,
dtype=self.dtype)
self._threshold = self.add_variable(
name="threshold",
shape=[],
initializer=init_ops.zeros_initializer(),
trainable=False,
dtype=self.dtype)
# Add masked_weights in the weights namescope so as to make it easier
# for the quantization library to add quant ops.
self._masked_kernel = math_ops.multiply(self._mask, self._kernel,
core_layers.MASKED_WEIGHT_NAME)
if self._mask not in ops.get_collection_ref(core_layers.MASK_COLLECTION):
ops.add_to_collection(core_layers.MASK_COLLECTION, self._mask)
ops.add_to_collection(core_layers.MASKED_WEIGHT_COLLECTION,
self._masked_kernel)
ops.add_to_collection(core_layers.THRESHOLD_COLLECTION, self._threshold)
ops.add_to_collection(core_layers.WEIGHT_COLLECTION, self._kernel)
self.built = True
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
      inputs: input Tensor, 2D, `[batch, num_units]`.
state: if `state_is_tuple` is False, this must be a state Tensor,
`2-D, [batch, state_size]`. If `state_is_tuple` is True, this must be a
tuple of state Tensors, both `2-D`, with column sizes `c_state` and
`m_state`.
Returns:
A tuple containing:
- A `2-D, [batch, output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
num_proj = self._num_units if self._num_proj is None else self._num_proj
sigmoid = math_ops.sigmoid
if self._state_is_tuple:
(c_prev, m_prev) = state
else:
c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
input_size = inputs.get_shape().with_rank(2).dims[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
lstm_matrix = math_ops.matmul(
array_ops.concat([inputs, m_prev], 1), self._masked_kernel)
lstm_matrix = nn_ops.bias_add(lstm_matrix, self._bias)
i, j, f, o = array_ops.split(
value=lstm_matrix, num_or_size_splits=4, axis=1)
# Diagonal connections
if self._use_peepholes:
c = (
sigmoid(f + self._forget_bias + self._w_f_diag * c_prev) * c_prev +
sigmoid(i + self._w_i_diag * c_prev) * self._activation(j))
else:
c = (
sigmoid(f + self._forget_bias) * c_prev +
sigmoid(i) * self._activation(j))
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
# pylint: enable=invalid-unary-operand-type
if self._use_peepholes:
m = sigmoid(o + self._w_o_diag * c) * self._activation(c)
else:
m = sigmoid(o) * self._activation(c)
if self._num_proj is not None:
m = math_ops.matmul(m, self._proj_kernel)
if self._proj_clip is not None:
# pylint: disable=invalid-unary-operand-type
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
# pylint: enable=invalid-unary-operand-type
new_state = (
tf_rnn.LSTMStateTuple(c, m)
if self._state_is_tuple else array_ops.concat([c, m], 1))
return m, new_state
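# A minimal sketch (assumed usage, not part of the original module) for the
# full LSTM cell: the mask covers only the [input_depth + num_units,
# 4 * num_units] gate kernel, while the optional projection matrix is left
# unmasked, as noted in the class docstring.
def _example_masked_lstm(batch_size=2, num_units=3, num_proj=2):
  cell = MaskedLSTMCell(num_units, num_proj=num_proj)
  inputs = array_ops.ones([batch_size, num_units])
  state = cell.zero_state(batch_size, dtypes.float32)
  output, new_state = cell(inputs, state)
  return output, new_state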
|
tensorflow-master
|
tensorflow/contrib/model_pruning/python/layers/rnn_cells.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network with additional variables to support pruning.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib.model_pruning.examples.cifar10 import cifar10_input
from tensorflow.contrib.model_pruning.python import pruning
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN # pylint: disable=line-too-long
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
BATCH_SIZE = 128
DATA_DIR = '/tmp/cifar10_data'
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight decay
is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float32
var = _variable_on_cpu(
name, shape, tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
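# A minimal sketch (assumed usage, not part of the original example) of the
# helper above: the variable lives on the CPU and, when `wd` is set, an L2
# penalty scaled by `wd` is pushed into the 'losses' collection that loss()
# later sums with tf.add_n.
def _example_weight_decay_variable():
  weights = _variable_with_weight_decay(
      'example_weights', shape=[16, 8], stddev=0.04, wd=0.004)
  decay_terms = tf.get_collection('losses')
  return weights, decay_terms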
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not DATA_DIR:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(DATA_DIR, 'cifar-10-batches-bin')
images, labels = cifar10_input.distorted_inputs(
data_dir=data_dir, batch_size=BATCH_SIZE)
return images, labels
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not DATA_DIR:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(DATA_DIR, 'cifar-10-batches-bin')
images, labels = cifar10_input.inputs(
eval_data=eval_data, data_dir=data_dir, batch_size=BATCH_SIZE)
return images, labels
def inference(images):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.compat.v1.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.compat.v1.get_variable() with tf.Variable().
#
# While instantiating conv and local layers, we add mask and threshold
# variables to the layer by calling the pruning.apply_mask() function.
# Note that the masks are applied only to the weight tensors
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay(
'weights', shape=[5, 5, 3, 64], stddev=5e-2, wd=0.0)
conv = tf.nn.conv2d(
images, pruning.apply_mask(kernel, scope), [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(
conv1,
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding='SAME',
name='pool1')
# norm1
norm1 = tf.nn.lrn(
pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
# conv2
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay(
'weights', shape=[5, 5, 64, 64], stddev=5e-2, wd=0.0)
conv = tf.nn.conv2d(
norm1, pruning.apply_mask(kernel, scope), [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(
conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
# pool2
pool2 = tf.nn.max_pool(
norm2,
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding='SAME',
name='pool2')
# local3
with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
reshape = tf.reshape(pool2, [BATCH_SIZE, -1])
dim = reshape.get_shape()[1].value
weights = _variable_with_weight_decay(
'weights', shape=[dim, 384], stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(
tf.matmul(reshape, pruning.apply_mask(weights, scope)) + biases,
name=scope.name)
_activation_summary(local3)
# local4
with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay(
'weights', shape=[384, 192], stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(
tf.matmul(local3, pruning.apply_mask(weights, scope)) + biases,
name=scope.name)
_activation_summary(local4)
# linear layer(WX + b),
# We don't apply softmax here because
# tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
# and performs the softmax internally for efficiency.
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay(
'weights', [192, NUM_CLASSES], stddev=1 / 192.0, wd=0.0)
biases = _variable_on_cpu('biases', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(
tf.matmul(local4, pruning.apply_mask(weights, scope)),
biases,
name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor of shape
[batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
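  # Each apply() update moves a shadow value toward the current loss:
  # shadow = 0.9 * shadow + (1 - 0.9) * loss.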
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
"""Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / BATCH_SIZE
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(
INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
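  # With staircase=True the decayed rate is
  # INITIAL_LEARNING_RATE * LEARNING_RATE_DECAY_FACTOR ** floor(global_step / decay_steps).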
tf.summary.scalar('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,
global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = DATA_DIR
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write(
'\r>> Downloading %s %.1f%%' %
(filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
|
tensorflow-master
|
tensorflow/contrib/model_pruning/examples/cifar10/cifar10_pruning.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Routine for decoding the CIFAR-10 binary file format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Process images of this size. Note that this differs from the original CIFAR
# image size of 32 x 32. If one alters this number, then the entire model
# architecture will change and any model would need to be retrained.
IMAGE_SIZE = 24
# Global constants describing the CIFAR-10 data set.
NUM_CLASSES = 10
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000
def read_cifar10(filename_queue):
"""Reads and parses examples from CIFAR10 data files.
Recommendation: if you want N-way read parallelism, call this function
N times. This will give you N independent Readers reading different
files & positions within those files, which will give better mixing of
examples.
Args:
filename_queue: A queue of strings with the filenames to read from.
Returns:
An object representing a single example, with the following fields:
height: number of rows in the result (32)
width: number of columns in the result (32)
depth: number of color channels in the result (3)
key: a scalar string Tensor describing the filename & record number
for this example.
label: an int32 Tensor with the label in the range 0..9.
uint8image: a [height, width, depth] uint8 Tensor with the image data
"""
class CIFAR10Record(object):
pass
result = CIFAR10Record()
# Dimensions of the images in the CIFAR-10 dataset.
# See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
# input format.
label_bytes = 1 # 2 for CIFAR-100
result.height = 32
result.width = 32
result.depth = 3
image_bytes = result.height * result.width * result.depth
# Every record consists of a label followed by the image, with a
# fixed number of bytes for each.
record_bytes = label_bytes + image_bytes
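  # For CIFAR-10 this is 1 + 32 * 32 * 3 = 3073 bytes per record.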
# Read a record, getting filenames from the filename_queue. No
# header or footer in the CIFAR-10 format, so we leave header_bytes
# and footer_bytes at their default of 0.
reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
result.key, value = reader.read(filename_queue)
# Convert from a string to a vector of uint8 that is record_bytes long.
record_bytes = tf.decode_raw(value, tf.uint8)
# The first bytes represent the label, which we convert from uint8->int32.
result.label = tf.cast(
tf.strided_slice(record_bytes, [0], [label_bytes]), tf.int32)
# The remaining bytes after the label represent the image, which we reshape
# from [depth * height * width] to [depth, height, width].
depth_major = tf.reshape(
tf.strided_slice(record_bytes, [label_bytes],
[label_bytes + image_bytes]),
[result.depth, result.height, result.width])
# Convert from [depth, height, width] to [height, width, depth].
result.uint8image = tf.transpose(depth_major, [1, 2, 0])
return result
def _generate_image_and_label_batch(image, label, min_queue_examples,
batch_size, shuffle):
"""Construct a queued batch of images and labels.
Args:
    image: 3-D Tensor of [height, width, 3] of type float32.
    label: 1-D Tensor of type int32.
min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides batches of examples.
batch_size: Number of images per batch.
shuffle: boolean indicating whether to use a shuffling queue.
Returns:
images: Images. 4D tensor of [batch_size, height, width, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
"""
# Create a queue that shuffles the examples, and then
# read 'batch_size' images + labels from the example queue.
num_preprocess_threads = 16
if shuffle:
images, label_batch = tf.train.shuffle_batch(
[image, label],
batch_size=batch_size,
num_threads=num_preprocess_threads,
capacity=min_queue_examples + 3 * batch_size,
min_after_dequeue=min_queue_examples)
else:
images, label_batch = tf.train.batch(
[image, label],
batch_size=batch_size,
num_threads=num_preprocess_threads,
capacity=min_queue_examples + 3 * batch_size)
# Display the training images in the visualizer.
tf.summary.image('images', images)
return images, tf.reshape(label_batch, [batch_size])
def distorted_inputs(data_dir, batch_size):
"""Construct distorted input for CIFAR training using the Reader ops.
Args:
data_dir: Path to the CIFAR-10 data directory.
batch_size: Number of images per batch.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
"""
filenames = [
os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in xrange(1, 6)
]
for f in filenames:
if not tf.gfile.Exists(f):
raise ValueError('Failed to find file: ' + f)
# Create a queue that produces the filenames to read.
filename_queue = tf.train.string_input_producer(filenames)
# Read examples from files in the filename queue.
read_input = read_cifar10(filename_queue)
reshaped_image = tf.cast(read_input.uint8image, tf.float32)
height = IMAGE_SIZE
width = IMAGE_SIZE
# Image processing for training the network. Note the many random
# distortions applied to the image.
# Randomly crop a [height, width] section of the image.
distorted_image = tf.random_crop(reshaped_image, [height, width, 3])
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
  # Because these operations are not commutative, consider randomizing
  # the order in which they are applied.
distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)
distorted_image = tf.image.random_contrast(
distorted_image, lower=0.2, upper=1.8)
# Subtract off the mean and divide by the variance of the pixels.
float_image = tf.image.per_image_standardization(distorted_image)
# Set the shapes of tensors.
float_image.set_shape([height, width, 3])
read_input.label.set_shape([1])
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * min_fraction_of_examples_in_queue)
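  # With the constants above this keeps int(50000 * 0.4) = 20000 examples queued.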
print('Filling queue with %d CIFAR images before starting to train. '
'This will take a few minutes.' % min_queue_examples)
# Generate a batch of images and labels by building up a queue of examples.
return _generate_image_and_label_batch(
float_image,
read_input.label,
min_queue_examples,
batch_size,
shuffle=True)
def inputs(eval_data, data_dir, batch_size):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
data_dir: Path to the CIFAR-10 data directory.
batch_size: Number of images per batch.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
"""
if not eval_data:
filenames = [
os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in xrange(1, 6)
]
num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
else:
filenames = [os.path.join(data_dir, 'test_batch.bin')]
num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
for f in filenames:
if not tf.gfile.Exists(f):
raise ValueError('Failed to find file: ' + f)
# Create a queue that produces the filenames to read.
filename_queue = tf.train.string_input_producer(filenames)
# Read examples from files in the filename queue.
read_input = read_cifar10(filename_queue)
reshaped_image = tf.cast(read_input.uint8image, tf.float32)
height = IMAGE_SIZE
width = IMAGE_SIZE
# Image processing for evaluation.
# Crop the central [height, width] of the image.
resized_image = tf.image.resize_image_with_crop_or_pad(
reshaped_image, width, height)
# Subtract off the mean and divide by the variance of the pixels.
float_image = tf.image.per_image_standardization(resized_image)
# Set the shapes of tensors.
float_image.set_shape([height, width, 3])
read_input.label.set_shape([1])
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(
num_examples_per_epoch * min_fraction_of_examples_in_queue)
# Generate a batch of images and labels by building up a queue of examples.
return _generate_image_and_label_batch(
float_image,
read_input.label,
min_queue_examples,
batch_size,
shuffle=False)
|
tensorflow-master
|
tensorflow/contrib/model_pruning/examples/cifar10/cifar10_input.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train pruned CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py when target sparsity in
cifar10_pruning_spec.pbtxt is set to zero
Results:
Sparsity | Accuracy after 150K steps
-------- | -------------------------
0% | 86%
50% | 86%
75% | TODO(suyoggupta)
90% | TODO(suyoggupta)
95% | 77%
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
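Example invocation (flag values below are illustrative, not prescriptive):
  python cifar10_train.py --train_dir=/tmp/cifar10_train --pruning_hparams=begin_pruning_step=10000,end_pruning_step=100000,target_sparsity=0.9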
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import datetime
import sys
import time
import tensorflow as tf
from tensorflow.contrib.model_pruning.examples.cifar10 import cifar10_pruning as cifar10
from tensorflow.contrib.model_pruning.python import pruning
FLAGS = None
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
# Parse pruning hyperparameters
pruning_hparams = pruning.get_pruning_hparams().parse(FLAGS.pruning_hparams)
# Create a pruning object using the pruning hyperparameters
pruning_obj = pruning.Pruning(pruning_hparams, global_step=global_step)
# Use the pruning_obj to add ops to the training graph to update the masks
# The conditional_mask_update_op will update the masks only when the
# training step is in [begin_pruning_step, end_pruning_step] specified in
# the pruning spec proto
mask_update_op = pruning_obj.conditional_mask_update_op()
# Use the pruning_obj to add summaries to the graph to track the sparsity
# of each of the layers
pruning_obj.add_pruning_summaries()
class _LoggerHook(tf.train.SessionRunHook):
"""Logs loss and runtime."""
def begin(self):
self._step = -1
def before_run(self, run_context):
self._step += 1
self._start_time = time.time()
return tf.train.SessionRunArgs(loss) # Asks for loss value.
def after_run(self, run_context, run_values):
duration = time.time() - self._start_time
loss_value = run_values.results
if self._step % 10 == 0:
num_examples_per_step = 128
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print(format_str % (datetime.datetime.now(), self._step, loss_value,
examples_per_sec, sec_per_batch))
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
tf.train.NanTensorHook(loss),
_LoggerHook()],
config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(train_op)
# Update the masks
mon_sess.run(mask_update_op)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--train_dir',
type=str,
default='/tmp/cifar10_train',
help='Directory where to write event logs and checkpoint.')
parser.add_argument(
'--pruning_hparams',
type=str,
default='',
help="""Comma separated list of pruning-related hyperparameters""")
parser.add_argument(
'--max_steps',
type=int,
default=1000000,
help='Number of batches to run.')
parser.add_argument(
'--log_device_placement',
type=bool,
default=False,
help='Whether to log device placement.')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-master
|
tensorflow/contrib/model_pruning/examples/cifar10/cifar10_train.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluation for CIFAR-10.
Accuracy:
cifar10_train.py achieves 83.0% accuracy after 100K steps (256 epochs
of data) as judged by cifar10_eval.py.
Speed:
On a single Tesla K40, cifar10_train.py processes a single batch of 128 images
in 0.25-0.35 sec (i.e., 350-600 images/sec). The model reaches ~86%
accuracy after 100K steps in 8 hours of training time.
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import datetime
import math
import sys
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib.model_pruning.examples.cifar10 import cifar10_pruning as cifar10
FLAGS = None
def eval_once(saver, summary_writer, top_k_op, summary_op):
"""Run Eval once.
Args:
saver: Saver.
summary_writer: Summary writer.
top_k_op: Top K op.
summary_op: Summary op.
"""
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
saver.restore(sess, ckpt.model_checkpoint_path)
# Assuming model_checkpoint_path looks something like:
# /my-favorite-path/cifar10_train/model.ckpt-0,
# extract global_step from it.
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
else:
print('No checkpoint file found')
return
# Start the queue runners.
coord = tf.train.Coordinator()
try:
threads = []
for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
start=True))
num_iter = int(math.ceil(FLAGS.num_examples / 128))
true_count = 0 # Counts the number of correct predictions.
total_sample_count = num_iter * 128
step = 0
while step < num_iter and not coord.should_stop():
predictions = sess.run([top_k_op])
true_count += np.sum(predictions)
step += 1
# Compute precision @ 1.
precision = true_count / total_sample_count
print('%s: precision @ 1 = %.3f' % (datetime.datetime.now(), precision))
summary = tf.Summary()
summary.ParseFromString(sess.run(summary_op))
summary.value.add(tag='Precision @ 1', simple_value=precision)
summary_writer.add_summary(summary, global_step)
except Exception as e: # pylint: disable=broad-except
coord.request_stop(e)
coord.request_stop()
coord.join(threads, stop_grace_period_secs=10)
def evaluate():
"""Eval CIFAR-10 for a number of steps."""
with tf.Graph().as_default() as g:
# Get images and labels for CIFAR-10.
eval_data = FLAGS.eval_data == 'test'
images, labels = cifar10.inputs(eval_data=eval_data)
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate predictions.
top_k_op = tf.nn.in_top_k(logits, labels, 1)
# Restore the moving average version of the learned variables for eval.
variable_averages = tf.train.ExponentialMovingAverage(
cifar10.MOVING_AVERAGE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)
while True:
eval_once(saver, summary_writer, top_k_op, summary_op)
if FLAGS.run_once:
break
time.sleep(FLAGS.eval_interval_secs)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.eval_dir):
tf.gfile.DeleteRecursively(FLAGS.eval_dir)
tf.gfile.MakeDirs(FLAGS.eval_dir)
evaluate()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--eval_dir',
type=str,
default='/tmp/cifar10_eval',
help='Directory where to write event logs.')
parser.add_argument(
'--eval_data',
type=str,
default='test',
help="""Either 'test' or 'train_eval'.""")
parser.add_argument(
'--checkpoint_dir',
type=str,
default='/tmp/cifar10_train',
help="""Directory where to read model checkpoints.""")
parser.add_argument(
'--eval_interval_secs',
type=int,
default=60 * 5,
help='How often to run the eval.')
parser.add_argument(
'--num_examples',
type=int,
default=10000,
help='Number of examples to run.')
parser.add_argument(
'--run_once',
type=bool,
default=False,
help='Whether to run eval only once.')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-master
|
tensorflow/contrib/model_pruning/examples/cifar10/cifar10_eval.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Eager execution prototype.
EXPERIMENTAL: APIs here are unstable and likely to change without notice.
To use, at program startup, call `tf.compat.v1.enable_eager_execution()`.
@@metrics
@@list_devices
@@num_gpus
@@py_func
@@defun
@@function
@@make_template
@@implicit_gradients
@@implicit_value_and_gradients
@@gradients_function
@@value_and_gradients_function
@@GradientTape
@@run
@@enable_eager_execution
@@enable_remote_eager_execution
@@custom_gradient
@@add_execution_callback
@@clear_execution_callbacks
@@errstate
@@ExecutionCallback
@@inf_callback
@@inf_nan_callback
@@nan_callback
@@seterr
@@Iterator
@@Saver
@@restore_variables_on_create
@@Variable
@@get_optimizer_variables
@@EagerVariableStore
@@Network
@@Sequential
@@save_network_checkpoint
@@restore_network_checkpoint
@@Checkpoint
@@Checkpointable
@@executing_eagerly
@@in_eager_mode
@@set_execution_mode
@@execution_mode
@@async_wait
@@async_clear_error
@@set_server_def
@@run_test_in_graph_and_eager_modes
@@run_all_tests_in_graph_and_eager_modes
@@TensorSpec
@@connect_to_remote_host
@@DEVICE_PLACEMENT_EXPLICIT
@@DEVICE_PLACEMENT_WARN
@@DEVICE_PLACEMENT_SILENT
@@SYNC
@@ASYNC
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint:disable=g-bad-import-order,g-import-not-at-top,unused-import
#
from tensorflow.contrib.eager.python import metrics
from tensorflow.contrib.eager.python.datasets import Iterator
from tensorflow.contrib.eager.python.network import Network
from tensorflow.contrib.eager.python.network import Sequential
from tensorflow.contrib.eager.python.network import save_network_checkpoint
from tensorflow.contrib.eager.python.network import restore_network_checkpoint
from tensorflow.contrib.eager.python.saver import get_optimizer_variables
from tensorflow.contrib.eager.python.saver import restore_variables_on_create
from tensorflow.contrib.eager.python.saver import Saver
from tensorflow.python.eager import backprop
from tensorflow.python.eager import function as _function_lib
from tensorflow.python.eager.context import DEVICE_PLACEMENT_EXPLICIT
from tensorflow.python.eager.context import DEVICE_PLACEMENT_WARN
from tensorflow.python.eager.context import DEVICE_PLACEMENT_SILENT
from tensorflow.python.eager.context import executing_eagerly
from tensorflow.python.eager.context import list_devices
from tensorflow.python.eager.context import set_execution_mode
from tensorflow.python.eager.context import execution_mode
from tensorflow.python.eager.context import async_wait
from tensorflow.python.eager.context import async_clear_error
from tensorflow.python.eager.context import SYNC
from tensorflow.python.eager.context import ASYNC
from tensorflow.python.eager.context import num_gpus
from tensorflow.python.eager.context import set_server_def
from tensorflow.python.eager.def_function import function
from tensorflow.python.eager.execution_callbacks import add_execution_callback
from tensorflow.python.eager.execution_callbacks import clear_execution_callbacks
from tensorflow.python.eager.execution_callbacks import errstate
from tensorflow.python.eager.execution_callbacks import ExecutionCallback
from tensorflow.python.eager.execution_callbacks import inf_callback
from tensorflow.python.eager.execution_callbacks import inf_nan_callback
from tensorflow.python.eager.execution_callbacks import nan_callback
from tensorflow.python.eager.execution_callbacks import seterr
from tensorflow.python.eager.remote import connect_to_remote_host
from tensorflow.python.framework.tensor_spec import TensorSpec
from tensorflow.python.framework.ops import enable_eager_execution
from tensorflow.python.framework.ops import enable_eager_execution_internal as enable_remote_eager_execution
from tensorflow.python.framework.ops import eager_run as run
from tensorflow.python.framework.test_util import run_in_graph_and_eager_modes as run_test_in_graph_and_eager_modes
from tensorflow.python.framework.test_util import run_all_in_graph_and_eager_modes as run_all_tests_in_graph_and_eager_modes
from tensorflow.python.ops.custom_gradient import custom_gradient
from tensorflow.python.ops.resource_variable_ops import ResourceVariable as Variable
from tensorflow.python.ops.variable_scope import EagerVariableStore
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import template
from tensorflow.python.training.tracking.tracking import AutoTrackable as Checkpointable
from tensorflow.python.training.tracking.util import CheckpointV1 as Checkpoint
from tensorflow.python.util.all_util import remove_undocumented
py_func = script_ops.eager_py_func
defun = _function_lib.defun
make_template = template.make_template_internal
implicit_gradients = backprop.implicit_grad
implicit_value_and_gradients = backprop.implicit_val_and_grad
gradients_function = backprop.gradients_function
value_and_gradients_function = backprop.val_and_grad_function
GradientTape = backprop.GradientTape # pylint: disable=invalid-name
in_eager_mode = executing_eagerly
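# A minimal usage sketch of the gradient helpers aliased above (assumes eager
# execution has been enabled at program startup):
#   grad_fn = gradients_function(lambda x: x * x)
#   grad_fn(3.0)  # ==> [6.0]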
remove_undocumented(__name__)
|
tensorflow-master
|
tensorflow/contrib/eager/python/tfe.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Metrics namespace."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint:disable=wildcard-import
from tensorflow.contrib.eager.python.metrics_impl import *
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['Accuracy', 'Mean', 'Metric', 'CategoricalAccuracy',
'BinaryAccuracy', 'SparseAccuracy']
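# A minimal usage sketch (assumes eager execution; mirrors the metric tests
# elsewhere in this package):
#   m = Mean()
#   m([1.0, 2.0, 3.0])
#   m.result()  # ==> 2.0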
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/eager/python/metrics.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for class Evaluator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
from tensorflow.contrib.eager.python import evaluator
from tensorflow.contrib.eager.python import metrics
from tensorflow.contrib.summary import summary_test_util
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.training import training_util
class IdentityModel(object):
def eval_data(self, d):
return d
class PrefixLModel(object):
def eval_data(self, d):
return {"l_" + key: d[key] for key in d}
class SimpleEvaluator(evaluator.Evaluator):
def __init__(self, model):
super(SimpleEvaluator, self).__init__(model)
self.mean = self.track_metric(metrics.Mean("mean"))
def call(self, eval_data):
self.mean(eval_data)
class DelegatingEvaluator(evaluator.Evaluator):
def __init__(self, model):
super(DelegatingEvaluator, self).__init__(model)
self.sub = self.track_evaluator("inner", SimpleEvaluator(model))
self.mean = self.track_metric(metrics.Mean("outer-mean"))
def call(self, eval_data):
# Keys here come from PrefixLModel, which adds "l_".
self.mean(eval_data["l_outer"])
self.sub.call(eval_data["l_inner"])
# pylint: disable=not-callable
class EvaluatorTest(test.TestCase):
def testSimple(self):
e = SimpleEvaluator(IdentityModel())
e(3.0)
e([5.0, 7.0, 9.0])
results = e.all_metric_results()
self.assertEqual(set(["mean"]), set(results.keys()))
self.assertEqual(6.0, results["mean"].numpy())
def testWriteSummaries(self):
e = SimpleEvaluator(IdentityModel())
e(3.0)
e([5.0, 7.0, 9.0])
training_util.get_or_create_global_step()
logdir = tempfile.mkdtemp()
e.all_metric_results(logdir)
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].simple_value, 6.0)
def testComposition(self):
e = DelegatingEvaluator(PrefixLModel())
e({"inner": 2.0, "outer": 100.0})
e({"inner": 4.0, "outer": 1000.0})
results = e.all_metric_results()
self.assertEqual(set(["inner/mean", "outer-mean"]), set(results.keys()))
self.assertEqual(3.0, results["inner/mean"].numpy())
self.assertEqual(550.0, results["outer-mean"].numpy())
def testMetricVariables(self):
e = DelegatingEvaluator(PrefixLModel())
e({"inner": 2.0, "outer": 100.0})
prefix_count = {}
for v in e.metric_variables:
p = v.name.split("/")[0]
prefix_count[p] = prefix_count.get(p, 0) + 1
self.assertEqual({"outer_mean": 2, "mean": 2}, prefix_count)
def testDatasetEager(self):
e = SimpleEvaluator(IdentityModel())
ds = dataset_ops.Dataset.from_tensor_slices([3.0, 5.0, 7.0, 9.0])
results = e.evaluate_on_dataset(ds)
self.assertEqual(set(["mean"]), set(results.keys()))
self.assertEqual(6.0, results["mean"].numpy())
def testDatasetGraph(self):
with context.graph_mode(), ops.Graph().as_default(), self.cached_session():
e = SimpleEvaluator(IdentityModel())
ds = dataset_ops.Dataset.from_tensor_slices([3.0, 5.0, 7.0, 9.0])
init_op, call_op, results_op = e.evaluate_on_dataset(ds)
results = e.run_evaluation(init_op, call_op, results_op)
self.assertEqual(set(["mean"]), set(results.keys()))
self.assertEqual(6.0, results["mean"])
def testWriteSummariesGraph(self):
with context.graph_mode(), ops.Graph().as_default(), self.cached_session():
e = SimpleEvaluator(IdentityModel())
ds = dataset_ops.Dataset.from_tensor_slices([3.0, 5.0, 7.0, 9.0])
training_util.get_or_create_global_step()
logdir = tempfile.mkdtemp()
init_op, call_op, results_op = e.evaluate_on_dataset(
ds, summary_logdir=logdir)
variables.global_variables_initializer().run()
e.run_evaluation(init_op, call_op, results_op)
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].simple_value, 6.0)
def testModelProperty(self):
m = IdentityModel()
e = SimpleEvaluator(m)
self.assertIs(m, e.model)
def testMetricsProperty(self):
e = DelegatingEvaluator(PrefixLModel())
names = set([(p, m.name) for p, m in e.metrics])
self.assertEqual(set([("", "outer-mean"), ("inner/", "mean")]), names)
def testSharedMetric(self):
class MetricArgEvaluator(evaluator.Evaluator):
def __init__(self, model, m):
super(MetricArgEvaluator, self).__init__(model)
self.m = self.track_metric(m)
metric = metrics.Mean("mean")
model = IdentityModel()
e = MetricArgEvaluator(model, metric)
with self.assertRaisesRegexp(ValueError, "already added"):
MetricArgEvaluator(model, metric)
del e
def testMetricTrackedTwice(self):
class MetricTwiceEvaluator(evaluator.Evaluator):
def __init__(self, model):
super(MetricTwiceEvaluator, self).__init__(model)
self.m = self.track_metric(metrics.Mean("mean"))
self.track_metric(self.m) # okay to track same metric again
MetricTwiceEvaluator(IdentityModel())
class SparseSoftmaxEvaluatorTest(test.TestCase):
def testSimple(self):
e = evaluator.SparseSoftmaxEvaluator(IdentityModel())
e({e.loss_key: 1.0, e.label_key: 5, e.predicted_class_key: 5})
e({e.loss_key: [0.0, 3.0, 4.0],
e.label_key: [1, 2, 3],
e.predicted_class_key: [1, 1, 3]})
results = e.all_metric_results()
self.assertEqual(set(["Avg Loss", "Accuracy"]), set(results.keys()))
self.assertEqual(2.0, results["Avg Loss"].numpy())
self.assertEqual(0.75, results["Accuracy"].numpy())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/eager/python/evaluator_test.py
|
"""Tests for eager mode Saver."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.eager.python import saver as _saver
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import momentum
from tensorflow.python.training import rmsprop
class SaverTest(test.TestCase):
def _dev(self):
return '/device:GPU:0' if context.num_gpus() else '/device:CPU:0'
def testBasics(self):
with ops.device(self._dev()):
v1 = resource_variable_ops.ResourceVariable(1.0, name='v1')
def model():
return array_ops.constant(2.0) * v1
ckpt_prefix = os.path.join(test.get_temp_dir(), 'ckpt')
_ = model()
saver = _saver.Saver([v1])
saver.save(ckpt_prefix)
v1.assign(2.0)
self.assertEqual(v1.read_value().numpy(), 2.0)
saver.restore(ckpt_prefix)
self.assertEqual(v1.read_value().numpy(), 1.0)
def testSameNameNoClobbering(self):
with ops.device(self._dev()):
v1 = resource_variable_ops.ResourceVariable(1.0, name='v1')
v2 = resource_variable_ops.ResourceVariable(2.0, name='v1')
saver = _saver.Saver([v1, v2])
ckpt_prefix = os.path.join(test.get_temp_dir(), 'ckpt')
with self.assertRaisesRegexp(ValueError, 'v1'):
saver.save(ckpt_prefix)
def testSameObjectOK(self):
with ops.device(self._dev()):
v1 = resource_variable_ops.ResourceVariable(1.0, name='v1')
# While different objects with the same shared_name are not good, passing
# in the same object multiple times is fine.
saver = _saver.Saver([v1, v1])
ckpt_prefix = os.path.join(test.get_temp_dir(), 'ckpt')
saver.save(ckpt_prefix)
def testSaveByDict(self):
with ops.device(self._dev()):
v1 = resource_variable_ops.ResourceVariable(1.0, name='v1')
v2 = resource_variable_ops.ResourceVariable(1.0, name='v2')
def model():
return array_ops.constant(2.0) * v1 * v2
ckpt_prefix = os.path.join(test.get_temp_dir(), 'ckpt')
# Save the variables under different names.
_ = model()
saver = _saver.Saver({'ckpt/v1': v1, 'ckpt/v2': v2})
saver.save(ckpt_prefix)
v1.assign(2.0)
v2.assign(2.0)
self.assertEqual(v1.read_value().numpy(), 2.0)
self.assertEqual(v2.read_value().numpy(), 2.0)
# Can still restore it.
saver.restore(ckpt_prefix)
self.assertEqual(v1.read_value().numpy(), 1.0)
# However, cannot restore it with default name.
with self.assertRaisesOpError('not found in checkpoint'):
saver = _saver.Saver([v1, v2]).restore(ckpt_prefix)
# Can specify which variable in ckpt to restore to which variable.
def map_func(x):
return {'v3': 'ckpt/v1', 'v4': 'ckpt/v2'}.get(x, x)
with _saver.restore_variables_on_create(ckpt_prefix, map_func):
v3 = resource_variable_ops.ResourceVariable(2.0, name='v3')
v4 = resource_variable_ops.ResourceVariable(2.0, name='v4')
self.assertEqual(v3.read_value().numpy(), 1.0)
self.assertEqual(v4.read_value().numpy(), 1.0)
def testRestoreOnCreate(self):
with ops.device(self._dev()):
def model(init_val):
v1 = resource_variable_ops.ResourceVariable(init_val, name='v1')
return array_ops.constant(1.0) * v1, v1
ckpt_prefix = os.path.join(test.get_temp_dir(), 'ckpt')
_, v1 = model(1.0)
saver = _saver.Saver([v1])
saver.save(ckpt_prefix)
saver = _saver.Saver([v1])
with _saver.restore_variables_on_create(ckpt_prefix):
# Value is from checkpoint, but not from argument.
ret, _ = model(2.0)
self.assertEqual(ret.numpy(), 1.0)
def testRestoreNotFound(self):
with ops.device(self._dev()):
def model(v):
return array_ops.constant(1.0) * v
ckpt_prefix = os.path.join(test.get_temp_dir(), 'ckpt')
v = resource_variable_ops.ResourceVariable(1.0, name='v1')
_ = model(v)
saver = _saver.Saver([v])
saver.save(ckpt_prefix)
with self.assertRaisesRegexp(errors.NotFoundError,
'v2 not found in checkpoint'):
with _saver.restore_variables_on_create(ckpt_prefix):
_ = model(resource_variable_ops.ResourceVariable(1.0, name='v2'))
class GetOptimizerTests(test.TestCase):
def _optimizer_test_template(self, optimizer):
"""Checks save and restore. Returns the optimizer variables."""
v = resource_variable_ops.ResourceVariable([[2., 3.]], name='v')
loss_fn = lambda: v[0, 0] ** 2 + v[0, 1] ** 2
optimizer.minimize(loss_fn)
optimizer_variables = _saver.get_optimizer_variables(optimizer)
saver = _saver.Saver(optimizer_variables + [v])
checkpoint_path = saver.save(self.get_temp_dir())
optimizer.minimize(loss_fn)
after_first_minimize = v.numpy()
# After we restore, the next step should be exactly the same as the one we
# just did.
saver.restore(checkpoint_path)
optimizer.minimize(loss_fn)
self.assertAllEqual(after_first_minimize, v.numpy())
return optimizer_variables
def testAdam(self):
optimizer = adam.AdamOptimizer(0.1)
self._optimizer_test_template(optimizer)
def testGradientDescent(self):
optimizer = gradient_descent.GradientDescentOptimizer(0.02)
self.assertEqual(0, len(self._optimizer_test_template(optimizer)))
def testMomentum(self):
optimizer = momentum.MomentumOptimizer(
learning_rate=0.03,
momentum=0.5)
self._optimizer_test_template(optimizer)
def testRMSProp(self):
optimizer = rmsprop.RMSPropOptimizer(0.01)
self._optimizer_test_template(optimizer)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/eager/python/saver_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Iteration over tf.data.Datasets when eager execution is enabled."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import prefetching_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
class Iterator(iterator_ops.IteratorV2):
"""An iterator producing tf.Tensor objects from a tf.data.Dataset.
NOTE: Unlike the iterator created by the
`tf.data.Dataset.make_one_shot_iterator` method, this class enables
additional experimental functionality, such as prefetching to the GPU.
"""
def __init__(self, dataset):
"""Creates a new iterator over the given dataset.
For example:
```python
dataset = tf.data.Dataset.range(4)
for x in Iterator(dataset):
print(x)
```
Tensors produced will be placed on the device on which this iterator object
was created.
Args:
dataset: A `tf.data.Dataset` object.
Raises:
TypeError: If `dataset` is an unsupported type.
RuntimeError: When invoked without eager execution enabled.
"""
if not context.context().device_spec.device_type:
is_remote_device = False
else:
is_remote_device = context.context().device_spec.device_type != "CPU"
if is_remote_device:
with ops.device(None):
# Let the placer figure out where to place the various functions etc.
# created by the CopyToDeviceDataset.
dataset = dataset.apply(prefetching_ops.copy_to_device(
context.context().device_name))
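        # Prefetch one element so the next value is already resident on the
        # target device when it is requested.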
dataset = dataset.prefetch(1)
super(Iterator, self).__init__(dataset)
def _next_internal(self):
"""Returns a nested structure of `tf.Tensor`s containing the next element.
"""
# This runs in sync mode as iterators use an error status to communicate
# that there is no more data to iterate over.
# TODO(b/77291417): Fix
with context.execution_mode(context.SYNC):
return super(Iterator, self)._next_internal()
|
tensorflow-master
|
tensorflow/contrib/eager/python/datasets.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from tensorflow.contrib.eager.python import metrics
from tensorflow.contrib.summary import summary_test_util
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import summary_ops_v2 as summary_ops
from tensorflow.python.training import training_util
from tensorflow.python.training.tracking import util as trackable_utils
class MetricsTest(test.TestCase):
def testMean(self):
m = metrics.Mean()
m([1, 10, 100])
m(1000)
m([10000.0, 100000.0])
self.assertEqual(111111.0/6, m.result().numpy())
self.assertEqual(dtypes.float64, m.dtype)
self.assertEqual(dtypes.float64, m.result().dtype)
def testVariableCollections(self):
with context.graph_mode(), ops.Graph().as_default():
m = metrics.Mean()
m(1000)
self.assertEqual(
set(m.variables),
set(ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)))
self.assertEqual(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES), [])
self.assertEqual(
set(m.variables),
set(ops.get_collection(ops.GraphKeys.METRIC_VARIABLES)))
def testUseGlobalVariablesCollections(self):
with context.graph_mode(), ops.Graph().as_default():
m = metrics.Mean(use_global_variables=True)
m(1000)
self.assertEqual(
set(m.variables),
set(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
self.assertEqual(ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES), [])
self.assertEqual(
set(m.variables),
set(ops.get_collection(ops.GraphKeys.METRIC_VARIABLES)))
def testInitVariables(self):
m = metrics.Mean()
m([1, 10, 100, 1000])
m([10000.0, 100000.0])
self.assertEqual(111111.0/6, m.result().numpy())
m.init_variables()
m(7)
self.assertEqual(7.0, m.result().numpy())
def testWriteSummaries(self):
m = metrics.Mean()
m([1, 10, 100])
training_util.get_or_create_global_step()
logdir = tempfile.mkdtemp()
with summary_ops.create_file_writer(
logdir, max_queue=0,
name="t0").as_default(), summary_ops.always_record_summaries():
m.result() # As a side-effect will write summaries.
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].simple_value, 37.0)
# Get result without saving the summary.
logdir = tempfile.mkdtemp()
with summary_ops.create_file_writer(
logdir, max_queue=0,
name="t0").as_default(), summary_ops.always_record_summaries():
      m.result(write_summary=False)  # Does not write a summary.
# events_from_logdir(_) asserts the directory exists.
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 1)
def testWeightedMean(self):
m = metrics.Mean()
m([1, 100, 100000], weights=[1, 0.2, 0.3])
m([500000, 5000, 500]) # weights of 1 each
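    # Weighted sum: 1*1 + 100*0.2 + 100000*0.3 + 500000 + 5000 + 500 = 535521;
    # total weight: 1 + 0.2 + 0.3 + 3 = 4.5.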
self.assertNear(535521/4.5, m.result().numpy(), 0.001)
def testMeanDtype(self):
# Can override default dtype of float64.
m = metrics.Mean(dtype=dtypes.float32)
m([0, 2])
self.assertEqual(1, m.result().numpy())
self.assertEqual(dtypes.float32, m.dtype)
self.assertEqual(dtypes.float32, m.result().dtype)
def testAccuracy(self):
m = metrics.Accuracy()
m([0, 1, 2, 3], [0, 0, 0, 0]) # 1 correct
m([4], [4]) # 1 correct
m([5], [0]) # 0 correct
m([6], [6]) # 1 correct
m([7], [2]) # 0 correct
self.assertEqual(3.0/8, m.result().numpy())
self.assertEqual(dtypes.float64, m.dtype)
self.assertEqual(dtypes.float64, m.result().dtype)
def testCategoricalAccuracy(self):
m = metrics.CategoricalAccuracy()
m([[1, 0, 0, 0], [0, 1, 0, 0]],
[[0.6, 0.1, 0.25, 0.05], [0.4, 0.05, 0.45, 0.0]]) # 1/2 correct
m([[0, 0, 0, 1]], [[0.25, 0.95, 0.25, 0.0]]) # 0/1 correct
m([[1, 0, 0, 0], [0, 1, 0, 0]],
[[0.99, 0.01, 0.0, 0.0], [0.35, 0.35, 0.3, 0.0]]) # 1/2 correct
self.assertEqual(2.0/5, m.result().numpy())
self.assertEqual(dtypes.float64, m.dtype)
self.assertEqual(dtypes.float64, m.result().dtype)
def testBinaryAccuracy(self):
m = metrics.BinaryAccuracy(threshold=0)
    # Since the threshold is 0, the predictions are interpreted as logits.
m([[0, 0, 0, 0]],
[[-4.2, 4.5, 1.2, -1.1]]) # 2/4 correct
m([[0, 1]], [[-5.3, 11.65]]) # 2/2 correct
m([[0, 1], [1, 1]],
[[-5.3, 11.65], [-10.32, 56.38]]) # 3/4 correct
self.assertEqual(7.0/10, m.result().numpy())
self.assertEqual(dtypes.float64, m.dtype)
self.assertEqual(dtypes.float64, m.result().dtype)
def testSparseAccuracy(self):
m = metrics.SparseAccuracy()
m([0, 2],
[[0.6, 0.1, 0.25, 0.05], [0.4, 0.05, 0.45, 0.0]]) # 2/2 correct
m([1], [[0.25, 0.95, 0.25, 0.0]]) # 1/1 correct
m([0, 3], [[0.99, 0.01, 0.0, 0.0], [0.35, 0.35, 0.3, 0.0]]) # 1/2 correct
self.assertEqual(4.0/5, m.result().numpy())
self.assertEqual(dtypes.float64, m.dtype)
self.assertEqual(dtypes.float64, m.result().dtype)
def testAccuracyDifferentShapes(self):
m = metrics.Accuracy()
with self.assertRaises(errors.InvalidArgumentError):
m([[0], [0]], [0, 1])
def testWeightedAccuracy(self):
m = metrics.Accuracy()
# 1 correct, total weight of 2
m([0, 1, 2, 3], [0, 0, 0, 0], weights=[1, 1, 0, 0])
m([4], [4], weights=[0.5]) # 1 correct with a weight of 0.5
m([5], [0], weights=[0.5]) # 0 correct, weight 0.5
m([6], [6]) # 1 correct, weight 1
m([7], [2]) # 0 correct, weight 1
self.assertEqual(2.5/5, m.result().numpy())
def testAccuracyDtype(self):
# Can override default dtype of float64.
m = metrics.Accuracy(dtype=dtypes.float32)
m([0, 0], [0, 1])
self.assertEqual(0.5, m.result().numpy())
self.assertEqual(dtypes.float32, m.dtype)
self.assertEqual(dtypes.float32, m.result().dtype)
def testTwoMeans(self):
# Verify two metrics with the same class and name don't
# accidentally share state.
m1 = metrics.Mean()
m1(0)
m2 = metrics.Mean()
m2(2)
self.assertAllEqual(0.0, m1.result())
self.assertAllEqual(2.0, m2.result())
def testNamesWithSpaces(self):
m1 = metrics.Mean("has space")
m1(0)
self.assertEqual(m1.name, "has space")
self.assertEqual(m1.numer.name, "has_space/numer:0")
def testGraphWithPlaceholder(self):
with context.graph_mode(), self.cached_session() as sess:
m = metrics.Mean()
p = array_ops.placeholder(dtypes.float32)
accumulate = m(p)
init_op = m.init_variables()
init_op.run()
sess.run(accumulate, feed_dict={p: [1, 10, 100]})
sess.run(accumulate, feed_dict={p: 1000})
sess.run(accumulate, feed_dict={p: [10000, 100000]})
self.assertAllEqual(m.result().eval(), 111111.0/6)
# Second init resets all the variables.
init_op.run()
sess.run(accumulate, feed_dict={p: 7})
self.assertAllEqual(m.result().eval(), 7)
@test_util.run_in_graph_and_eager_modes
def testGraphAndEagerTensor(self):
m = metrics.Mean()
inputs = ops.convert_to_tensor([1.0, 2.0])
accumulate = m(inputs)
result = m.result()
self.evaluate(m.init_variables())
self.evaluate(accumulate)
self.assertEqual(self.evaluate(result), 1.5)
# Second init resets all the variables.
self.evaluate(m.init_variables())
inputs = ops.convert_to_tensor([2.0, 3.0])
self.evaluate(m(inputs))
value = m.value()
self.assertEqual(self.evaluate(value), 2.5)
@test_util.run_in_graph_and_eager_modes
def testGraphAndEagerTensorGlobalVariables(self):
m = metrics.Mean(use_global_variables=True)
inputs = ops.convert_to_tensor([1.0, 2.0])
accumulate = m(inputs)
result = m.result()
self.evaluate(m.init_variables())
self.evaluate(accumulate)
self.assertEqual(self.evaluate(result), 1.5)
# Second init resets all the variables.
self.evaluate(m.init_variables())
inputs = ops.convert_to_tensor([2.0, 3.0])
self.evaluate(m(inputs))
value = m.value()
self.assertEqual(self.evaluate(value), 2.5)
@test_util.run_in_graph_and_eager_modes
def testGraphAndEagerTensorWhileLoopDoubleCall(self):
m = metrics.Mean()
init_value = constant_op.constant(1)
cond = lambda i: math_ops.less(i, 3)
def body(x):
with ops.control_dependencies([m(x)]):
return math_ops.add(x, 1)
accumulate = control_flow_ops.while_loop(cond, body, [init_value])
result = m.result()
self.evaluate(m.init_variables())
self.evaluate(accumulate)
self.assertEqual(self.evaluate(result), 1.5)
# Second init resets all the variables.
self.evaluate(m.init_variables())
inputs = ops.convert_to_tensor([2.0, 3.0])
self.evaluate(m(inputs))
if ops.context.executing_eagerly():
self.evaluate(control_flow_ops.while_loop(cond, body, [init_value]))
else:
# Reuse the loop operators in graph mode
self.evaluate(accumulate)
value = m.value()
self.assertEqual(self.evaluate(value), 2.0)
def testTwoMeansGraph(self):
# Verify two metrics with the same name in the same graph raises a
# ValueError.
with context.graph_mode():
m1 = metrics.Mean()
m1(0)
with self.assertRaises(ValueError):
m2 = metrics.Mean()
m2(2)
def testBuildMean(self):
# Verify that calling build() on Mean and then calling it won't recreate
# variables.
m = metrics.Mean()
m.build()
old_numer = m.numer
m(0.0)
self.assertTrue(old_numer is m.numer)
def testMetricsChain(self):
with context.graph_mode(), self.cached_session():
m1 = metrics.Mean()
m2 = metrics.Mean(name="m2")
update_m2 = m2(3.0)
update_m2_2 = m2(m1(1.0))
m1.init_variables().run()
m2.init_variables().run()
update_m2.eval()
update_m2_2.eval()
self.assertAllEqual(m2.result().eval(), 2.0)
self.assertAllEqual(m1.result().eval(), 1.0)
@test_util.run_in_graph_and_eager_modes
def testSaveRestore(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
mean = metrics.Mean()
checkpoint = trackable_utils.Checkpoint(mean=mean)
mean.build()
mean._built = True
self.evaluate(mean.init_variables())
self.evaluate(mean(100.))
self.evaluate(mean(200.))
save_path = checkpoint.save(checkpoint_prefix)
self.evaluate(mean(1000.))
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.evaluate(mean(300.))
self.assertAllEqual(200., self.evaluate(mean.value()))
restore_mean = metrics.Mean()
restore_checkpoint = trackable_utils.Checkpoint(mean=restore_mean)
status = restore_checkpoint.restore(save_path)
restore_update = restore_mean(300.)
status.assert_consumed().run_restore_ops()
self.evaluate(restore_update)
self.assertAllEqual(200., self.evaluate(restore_mean.value()))
self.assertEqual(3, self.evaluate(restore_mean.denom))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/eager/python/metrics_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import time
import numpy as np
from tensorflow.contrib import lookup
from tensorflow.contrib.eager.python import datasets
from tensorflow.python.data import Dataset
from tensorflow.python.data.experimental.ops import threadpool
from tensorflow.python.data.experimental.ops import unique
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training.tracking import util as trackable_utils
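# Eager-mode coverage for iterating over tf.data datasets: explicit and
# implicit iterators, sparse elements, py_func maps, GPU placement, custom
# thread pools, and checkpointing of iterator state.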
class IteratorTest(test.TestCase):
def testBasic(self):
got = []
for t in datasets.Iterator(Dataset.range(4)):
got.append(t.numpy())
self.assertAllEqual([0, 1, 2, 3], got)
def testBasicOneShotIterator(self):
got = []
for t in Dataset.range(4).make_one_shot_iterator():
got.append(t.numpy())
self.assertAllEqual([0, 1, 2, 3], got)
def testBasicImplicitIterator(self):
got = []
for t in Dataset.range(4):
got.append(t.numpy())
self.assertAllEqual([0, 1, 2, 3], got)
def testGetNext(self):
iterator = datasets.Iterator(Dataset.range(4))
self.assertEqual(0, iterator.get_next().numpy())
self.assertEqual(1, iterator.get_next().numpy())
self.assertEqual(2, iterator.get_next().numpy())
self.assertEqual(3, iterator.get_next().numpy())
with self.assertRaises(errors.OutOfRangeError):
iterator.get_next()
def testGetNextOneShotIterator(self):
iterator = Dataset.range(4).make_one_shot_iterator()
self.assertEqual(0, iterator.get_next().numpy())
self.assertEqual(1, iterator.get_next().numpy())
self.assertEqual(2, iterator.get_next().numpy())
self.assertEqual(3, iterator.get_next().numpy())
with self.assertRaises(errors.OutOfRangeError):
iterator.get_next()
def testMultipleIteratorsOnTheSameDataset(self):
ds = Dataset.range(4)
it1 = datasets.Iterator(ds)
it2 = datasets.Iterator(ds)
got = [x.numpy() for x in it1]
self.assertAllEqual([0, 1, 2, 3], got)
got = [x.numpy() for x in it2]
self.assertAllEqual([0, 1, 2, 3], got)
def testNestedOutputs(self):
ds = Dataset.zip((Dataset.range(4), Dataset.zip((Dataset.range(4),
Dataset.range(4)))))
total = 0
# The Iterator will return a nested structure of Tensor objects.
    # Unpack them so they can be compared against plain Python integers.
for (i, x) in enumerate(datasets.Iterator(ds)):
want = (i, (i, i))
got = (x[0].numpy(), (x[1][0].numpy(), x[1][1].numpy()))
self.assertEqual(got, want)
total += 1
self.assertEqual(4, total)
def testMapAndFilter(self):
def even(x):
return math_ops.equal(math_ops.mod(x, 2), 0)
it = datasets.Iterator(Dataset.range(8).map(math_ops.square).filter(even))
got = [x.numpy() for x in it]
self.assertAllEqual([0, 4, 16, 36], got)
def testMapCaptureLookupTable(self):
default_val = -1
keys = constant_op.constant(['brain', 'salad', 'surgery'])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
dataset = Dataset.from_tensor_slices(['brain', 'salad', 'surgery'])
dataset = dataset.map(table.lookup)
it = datasets.Iterator(dataset)
got = [x.numpy() for x in it]
self.assertAllEqual([0, 1, 2], got)
def testMultipleIteratorsOnADatasetThatUsesFunctions(self):
ds = Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6]).map(math_ops.square)
got1 = [x.numpy() for x in datasets.Iterator(ds)]
self.assertAllEqual([1, 4, 9, 16, 25, 36], got1)
got2 = [x.numpy() for x in datasets.Iterator(ds)]
self.assertAllEqual(got1, got2)
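  # SparseTensorValue fields are numpy arrays, so compare them field by field
  # rather than asserting equality on the namedtuples directly.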
def assertSparseValuesEqual(self, a, b):
self.assertAllEqual(a.indices, b.indices)
self.assertAllEqual(a.values, b.values)
self.assertAllEqual(a.dense_shape, b.dense_shape)
def testSparseTensorElements(self):
components = (sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 0], [2, 0]]),
values=np.array([0, 0, 0]),
dense_shape=np.array([3, 1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1], [2, 2]]),
values=np.array([1, 2, 3]),
dense_shape=np.array([3, 3])))
expected = [
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([1]),
dense_shape=np.array([3]))),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[1]]),
values=np.array([2]),
dense_shape=np.array([3]))),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[2]]),
values=np.array([3]),
dense_shape=np.array([3]))),
]
for i, result in enumerate(
datasets.Iterator(Dataset.from_tensor_slices(components))):
self.assertSparseValuesEqual(expected[i][0], result[0])
self.assertSparseValuesEqual(expected[i][1], result[1])
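  # Datasets that map through script_ops.py_func also work with eager
  # iteration.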
def testPyFunc(self):
def my_map(inp):
return [[x + 1 for x in inp]]
ds = Dataset.range(4).map(
lambda x: script_ops.py_func(my_map, [[x]], dtypes.int64))
got = [x.numpy() for x in datasets.Iterator(ds)]
self.assertAllEqual([[1], [2], [3], [4]], got)
def testTensorsPlacedOnDevice(self):
ds = Dataset.from_tensors([0., 1.])
with ops.device(test.gpu_device_name()):
x = datasets.Iterator(ds).next()
x = math_ops.add(x, x)
self.assertAllEqual([0., 2.], x.numpy())
def testGpuTensor(self):
ds = Dataset.from_tensors([0., 1.])
with ops.device(test.gpu_device_name()):
for x in ds:
y = math_ops.add(x, x)
self.assertAllEqual([0., 2.], y.numpy())
def testOverrideThreadPool(self):
def get_thread_id(_):
# Python creates a dummy thread object to represent the current
# thread when called from an "alien" thread (such as a
# `PrivateThreadPool` thread in this case). It does not include
# the TensorFlow-given display name, but it has a unique
# identifier that maps one-to-one with the underlying OS thread.
return np.array(threading.current_thread().ident).astype(np.int64)
for num_threads in [1, 2, 4, 8, 16]:
dataset = (
Dataset.range(1000).map(
lambda x: script_ops.py_func(get_thread_id, [x], dtypes.int64),
num_parallel_calls=32).apply(unique.unique()))
dataset = threadpool.override_threadpool(
dataset,
threadpool.PrivateThreadPool(
num_threads, display_name='private_thread_pool_%d' % num_threads))
thread_ids = []
for next_element in datasets.Iterator(dataset):
thread_ids.append(next_element)
self.assertEqual(len(thread_ids), len(set(thread_ids)))
self.assertGreater(len(thread_ids), 0)
# NOTE(mrry): We don't control the thread pool scheduling, and
# so cannot guarantee that all of the threads in the pool will
# perform work.
self.assertLessEqual(len(thread_ids), num_threads)
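  # The iterator's position is saved with the checkpoint: restoring rewinds it
  # to just after the first batch, so [9, 16] and [25, 36] are produced again.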
def testSaveRestore(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')
dataset = Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
dataset = dataset.map(math_ops.square).batch(2)
iterator = datasets.Iterator(dataset)
checkpoint = trackable_utils.Checkpoint(iterator=iterator)
self.assertAllEqual([1, 4], iterator.get_next().numpy())
save_path = checkpoint.save(checkpoint_prefix)
self.assertAllEqual([9, 16], iterator.get_next().numpy())
self.assertAllEqual([25, 36], iterator.get_next().numpy())
checkpoint.restore(save_path)
self.assertAllEqual([9, 16], iterator.get_next().numpy())
self.assertAllEqual([25, 36], iterator.get_next().numpy())
def testSaveRestoreMultipleIterator(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')
dataset = Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
dataset = dataset.map(math_ops.square).batch(2)
iterator_1 = datasets.Iterator(dataset)
iterator_2 = datasets.Iterator(dataset)
dataset_2 = Dataset.range(10)
iterator_3 = datasets.Iterator(dataset_2)
checkpoint = trackable_utils.Checkpoint(
iterator_1=iterator_1, iterator_2=iterator_2, iterator_3=iterator_3)
self.assertAllEqual([1, 4], iterator_1.get_next().numpy())
self.assertEqual(0, iterator_3.get_next().numpy())
self.assertEqual(1, iterator_3.get_next().numpy())
self.assertEqual(2, iterator_3.get_next().numpy())
save_path = checkpoint.save(checkpoint_prefix)
self.assertAllEqual([1, 4], iterator_2.get_next().numpy())
self.assertAllEqual([9, 16], iterator_2.get_next().numpy())
self.assertEqual(3, iterator_3.get_next().numpy())
checkpoint.restore(save_path)
self.assertAllEqual([9, 16], iterator_1.get_next().numpy())
self.assertAllEqual([1, 4], iterator_2.get_next().numpy())
self.assertEqual(3, iterator_3.get_next().numpy())
def testRestoreExhaustedIterator(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')
dataset = Dataset.range(3)
iterator = datasets.Iterator(dataset)
checkpoint = trackable_utils.Checkpoint(iterator=iterator)
self.assertEqual(0, iterator.get_next().numpy())
self.assertEqual(1, iterator.get_next().numpy())
save_path = checkpoint.save(checkpoint_prefix)
self.assertEqual(2, iterator.get_next().numpy())
checkpoint.restore(save_path)
self.assertEqual(2, iterator.get_next().numpy())
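  # A newly constructed Iterator can resume from where a previous one left off
  # by restoring the latest checkpoint; each pass below consumes two elements
  # and saves, covering the ten-element range over five reconstructions.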
def testRestoreInReconstructedIterator(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')
dataset = Dataset.range(10)
for i in range(5):
iterator = datasets.Iterator(dataset)
checkpoint = trackable_utils.Checkpoint(iterator=iterator)
checkpoint.restore(checkpoint_management.latest_checkpoint(
checkpoint_directory))
for j in range(2):
self.assertEqual(i * 2 + j, iterator.get_next().numpy())
checkpoint.save(file_prefix=checkpoint_prefix)
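# The benchmarks below report the median wall time per element when iterating
# eagerly over datasets built with different slice/repeat/batch/cache
# orderings.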
class DatasetConstructorBenchmark(test.Benchmark):
def benchmarkSliceRepeatBatchEager(self):
input_size = 10000
batch_size = 100
num_epochs = 100
input_data = np.random.randn(input_size)
dataset = (
Dataset.from_tensor_slices(input_data).repeat(num_epochs)
.batch(batch_size))
iterator = datasets.Iterator(dataset)
ends = [time.time()]
for _ in iterator:
ends.append(time.time())
deltas = np.ediff1d(ends)
median_wall_time = np.median(deltas)
print(
'Slice/repeat/batch eager input size: %d batch size: %d Median wall '
'time per element: %f'
% (input_size, batch_size, median_wall_time))
self.report_benchmark(
iters=len(deltas),
wall_time=median_wall_time,
name='benchmark_slice_repeat_batch_eager_input_%d_batch_%d' %
(input_size, batch_size))
def benchmarkSliceBatchCacheRepeatCallable(self):
input_size = 10000
batch_size = 100
num_epochs = 100
input_data = np.random.randn(input_size)
dataset = (
Dataset.from_tensor_slices(input_data).batch(batch_size).cache()
.repeat(num_epochs))
iterator = datasets.Iterator(dataset)
ends = [time.time()]
for _ in iterator:
ends.append(time.time())
deltas = np.ediff1d(ends)
median_wall_time = np.median(deltas)
print(
'Slice/batch/cache/repeat eager input size: %d batch size: %d Median '
'wall time per element: %f'
% (input_size, batch_size, median_wall_time))
self.report_benchmark(
iters=len(deltas),
wall_time=median_wall_time,
name='benchmark_slice_batch_cache_repeat_eager_input_%d_batch_%d' %
(input_size, batch_size))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/eager/python/datasets_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
from tensorflow.contrib.eager.python import network
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import training_util
from tensorflow.python.training.tracking import util as trackable_utils
# pylint: disable=not-callable
class MyNetwork(network.Network):
def __init__(self, name=None):
super(MyNetwork, self).__init__(name=name)
self.l1 = self.track_layer(core.Dense(1, use_bias=False))
def call(self, x):
return self.l1(x)
class RegularizedNetwork(network.Network):
def __init__(self):
super(RegularizedNetwork, self).__init__()
self.l1 = self.track_layer(core.Dense(
1,
bias_regularizer=regularizers.l1_regularizer(2.0),
kernel_regularizer=regularizers.l1_regularizer(2.0)))
self.l2 = self.track_layer(core.Dense(
1,
bias_regularizer=regularizers.l1_regularizer(2.0)))
def call(self, values):
return self.l2(self.l1(values))
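# Covers tfe.Network behavior: checkpoint save/restore, naming and
# variable_scope interactions, layer tracking, and variable sharing.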
class NetworkTest(test.TestCase):
def test_checkpointing_not_implemented(self):
checkpoint_directory = self.get_temp_dir()
checkpoint = trackable_utils.Checkpoint(net=MyNetwork())
with self.assertRaises(NotImplementedError):
checkpoint.save(checkpoint_directory)
def _save_modify_load_network_built(self, net, global_step=None):
checkpoint_directory = self.get_temp_dir()
checkpoint_path = network.save_network_checkpoint(
network=net, save_path=checkpoint_directory, global_step=global_step)
input_value = constant_op.constant([[42.0]])
original_output = self.evaluate(net(input_value))
for var in net.variables:
self.evaluate(var.assign(var + 1.))
self.assertGreater(
self.evaluate(net(input_value)),
original_output)
# Either the returned explicit checkpoint path or the directory should work.
network.restore_network_checkpoint(net, save_path=checkpoint_directory)
self.assertAllEqual(
original_output,
self.evaluate(net(input_value)))
for var in net.variables:
self.evaluate(var.assign(var + 2.))
network.restore_network_checkpoint(net, save_path=checkpoint_path)
self.assertAllEqual(
original_output,
self.evaluate(net(input_value)))
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testTrainableAttribute(self):
net = network.Network()
self.assertTrue(net.trainable)
with self.assertRaises(AttributeError):
net.trainable = False
self.assertTrue(net.trainable)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNetworkCall(self):
net = MyNetwork(name="abcd")
net(constant_op.constant([[2.0]])) # Force variables to be created.
self.assertEqual(1, len(net.trainable_variables))
self.evaluate(net.trainable_variables[0].assign([[17.0]]))
# TODO(josh11b): Support passing Python values to networks.
result = net(constant_op.constant([[2.0]]))
self.assertEqual(34.0, self.evaluate(result))
def testReplacingNetworkCallWithDefun(self):
net = MyNetwork(name="abcd")
net.call = function.defun(net.call)
x = constant_op.constant([[2.0]])
net(x) # Force variables to be created.
self.evaluate(net.trainable_variables[0].assign([[17.0]]))
result = net(x) # Build and execute the TensorFlow function
self.assertEqual(34.0, self.evaluate(result))
# Force the creation of another TensorFlow function by changing input shape
y = constant_op.constant([[1.0], [2.0]])
result = net(y)
self.assertAllEqual([[17.0], [34.0]], self.evaluate(result))
# TODO(allenl): This test creates garbage in some Python versions
@test_util.run_in_graph_and_eager_modes
def testNetworkSaveRestoreAlreadyBuilt(self):
net = MyNetwork(name="abcd")
with self.assertRaisesRegexp(
ValueError, "Attempt to save the Network before it was first called"):
network.save_network_checkpoint(net, self.get_temp_dir())
net(constant_op.constant([[2.0]]))
self.evaluate(net.trainable_variables[0].assign([[17.0]]))
self._save_modify_load_network_built(net, global_step=None)
self._save_modify_load_network_built(net, global_step=10)
# TODO(allenl): This test creates garbage in some Python versions
@test_util.run_in_graph_and_eager_modes
def testSaveRestoreDefaultGlobalStep(self):
net = MyNetwork(name="abcd")
net(constant_op.constant([[2.0]]))
self.evaluate(net.variables[0].assign([[3.]]))
default_global_step = training_util.get_or_create_global_step()
self.evaluate(default_global_step.assign(4242))
save_path = network.save_network_checkpoint(net, self.get_temp_dir())
self.assertIn("abcd-4242", save_path)
# TODO(allenl): This test creates garbage in some Python versions
@test_util.run_in_graph_and_eager_modes
def testNetworkSaveAndRestoreIntoUnbuilt(self):
save_dir = self.get_temp_dir()
net1 = MyNetwork()
test_input = constant_op.constant([[2.0]])
net1(test_input)
self.evaluate(net1.trainable_variables[0].assign([[17.0]]))
save_path = network.save_network_checkpoint(net1, save_dir)
# With a pre-build restore we should have the same value.
net2 = MyNetwork()
network.restore_network_checkpoint(net2, save_path)
self.assertAllEqual(self.evaluate(net1(test_input)),
self.evaluate(net2(test_input)))
self.assertIsNot(net1.variables[0], net2.variables[0])
self.assertAllEqual(self.evaluate(net1.variables[0]),
self.evaluate(net2.variables[0]))
@test_util.run_in_graph_and_eager_modes
def testNetworkMatchesLayerVariableNames(self):
zero = constant_op.constant([[0.]])
layer_one = core.Dense(1, use_bias=False)
layer_one(zero)
layer_two = core.Dense(1, use_bias=False)
layer_two(zero)
class TwoLayerNet(network.Network):
def __init__(self, name=None):
super(TwoLayerNet, self).__init__(name=name)
self.first = self.track_layer(core.Dense(
1, use_bias=False))
self.second = self.track_layer(core.Dense(
1, use_bias=False))
def call(self, x):
return self.second(self.first(x))
net = TwoLayerNet()
net(zero)
self.assertEqual("two_layer_net/" + layer_one.variables[0].name,
net.first.variables[0].name)
self.assertEqual("two_layer_net/" + layer_two.variables[0].name,
net.second.variables[0].name)
@test_util.run_in_graph_and_eager_modes
def testLoadIntoUnbuiltSharedLayer(self):
class Owner(network.Network):
def __init__(self, name=None):
super(Owner, self).__init__(name=name)
self.first = self.track_layer(core.Dense(
1, name="first_layer", use_bias=False))
def call(self, x):
return self.first(x)
first_owner = Owner()
class User(network.Network):
def __init__(self, use_layer, name=None):
super(User, self).__init__(name=name)
self.first = self.track_layer(use_layer)
self.second = self.track_layer(core.Dense(
1, name="second_layer", use_bias=False))
def call(self, x):
return self.second(self.first(x))
class LikeUserButNotSharing(network.Network):
def __init__(self, name=None):
super(LikeUserButNotSharing, self).__init__(name=name)
self.first = self.track_layer(core.Dense(
1, name="first_layer", use_bias=False))
self.second = self.track_layer(core.Dense(
1, name="second_layer", use_bias=False))
def call(self, x):
return self.second(self.first(x))
checkpoint_creator = LikeUserButNotSharing(name="checkpoint_creator")
one = constant_op.constant([[1.0]])
checkpoint_creator(one)
self.assertEqual(2, len(checkpoint_creator.variables))
self.evaluate(checkpoint_creator.variables[0].assign([[5.]]))
self.evaluate(checkpoint_creator.variables[1].assign([[6.]]))
# Re-map the variable names so that with default restore mapping we'll
# attempt to restore into the unbuilt Layer.
name_mapping = {
"checkpoint_creator/first_layer/kernel": "owner/first_layer/kernel",
"checkpoint_creator/second_layer/kernel": "second_layer/kernel",
}
save_path = network.save_network_checkpoint(
checkpoint_creator,
self.get_temp_dir(),
map_func=lambda full_name: name_mapping[full_name])
load_into = User(use_layer=first_owner.first)
network.restore_network_checkpoint(load_into, save_path)
self.assertEqual(0, len(first_owner.variables))
self.assertAllEqual(self.evaluate(checkpoint_creator(one)),
self.evaluate(load_into(one)))
self.assertEqual(1, len(first_owner.variables))
self.assertAllEqual([[5.]], self.evaluate(load_into.variables[0]))
self.assertAllEqual([[6.]], self.evaluate(load_into.variables[1]))
first_owner(one)
self.assertAllEqual([[5.]], self.evaluate(first_owner.variables[0]))
# Try again with a garbage collected parent.
first_owner = Owner()
load_into = User(use_layer=first_owner.first)
del first_owner
gc.collect()
def _restore_map_func(original_name):
if original_name.startswith("owner/"):
return original_name.replace("owner/", "owner_1/")
else:
return "user_1/" + original_name
with self.assertRaisesRegexp(ValueError, "garbage collected"):
network.restore_network_checkpoint(
load_into, save_path, map_func=_restore_map_func)
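  # Checkpoints can be restored into a whole Network or into one of its
  # sub-Networks; restoration may be deferred until the variables exist, and
  # later restores overwrite earlier ones.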
@test_util.run_in_graph_and_eager_modes
def testRestoreIntoSubNetwork(self):
class Parent(network.Network):
def __init__(self, name=None):
super(Parent, self).__init__(name=name)
self.first = self.track_layer(MyNetwork())
self.second = self.track_layer(MyNetwork())
def call(self, x):
return self.first(self.second(x))
one = constant_op.constant([[3.]])
whole_model_saver = Parent()
whole_model_saver(one)
self.evaluate(whole_model_saver.variables[0].assign([[15.]]))
self.evaluate(whole_model_saver.variables[1].assign([[16.]]))
whole_model_checkpoint = network.save_network_checkpoint(
whole_model_saver, self.get_temp_dir())
save_from = MyNetwork()
save_from(one)
self.evaluate(save_from.variables[0].assign([[5.]]))
checkpoint = network.save_network_checkpoint(save_from, self.get_temp_dir())
save_into_parent = Parent()
network.restore_network_checkpoint(save_into_parent, whole_model_checkpoint)
network.restore_network_checkpoint(save_into_parent.first, checkpoint)
# deferred loading multiple times is fine
network.restore_network_checkpoint(save_into_parent.first, checkpoint)
save_into_parent(one) # deferred loading
self.assertAllEqual([[5.]], self.evaluate(save_into_parent.variables[0]))
self.assertAllEqual([[16.]], self.evaluate(save_into_parent.variables[1]))
# Try again with the opposite ordering, and we should get different results
# (deferred restoration should happen the same way non-deferred happens,
# with later restorations overwriting older ones).
save_into_parent = Parent()
# deferred loading multiple times is fine
network.restore_network_checkpoint(save_into_parent.first, checkpoint)
network.restore_network_checkpoint(save_into_parent, whole_model_checkpoint)
save_into_parent(one) # deferred loading
# We've overwritten the sub-Network restore.
self.assertAllEqual([[15.]], self.evaluate(save_into_parent.variables[0]))
self.assertAllEqual([[16.]], self.evaluate(save_into_parent.variables[1]))
self.evaluate(save_into_parent.variables[0].assign([[3.]]))
self.evaluate(save_into_parent.variables[1].assign([[4.]]))
network.restore_network_checkpoint(save_into_parent.second, checkpoint)
self.assertAllEqual([[5.]], self.evaluate(save_into_parent.variables[1]))
with self.assertRaisesRegexp(errors_impl.NotFoundError,
"not found in checkpoint"):
# The checkpoint is incompatible.
network.restore_network_checkpoint(save_into_parent, checkpoint)
@test_util.run_in_graph_and_eager_modes
def testCustomMapCollisionErrors(self):
class Parent(network.Network):
def __init__(self, name=None):
super(Parent, self).__init__(name=name)
self.first = self.track_layer(MyNetwork())
self.second = self.track_layer(MyNetwork())
def call(self, x):
return self.first(self.second(x))
make_checkpoint = Parent()
one = constant_op.constant([[1.]])
make_checkpoint(one)
self.evaluate(make_checkpoint.variables[0].assign([[2.]]))
self.evaluate(make_checkpoint.variables[1].assign([[3.]]))
with self.assertRaisesRegexp(
ValueError,
"The map_func passed to save_network_checkpoint for the Network "
"'parent' resulted in two variables named 'foo'"):
network.save_network_checkpoint(
make_checkpoint, self.get_temp_dir(), map_func=lambda n: "foo")
checkpoint = network.save_network_checkpoint(
network=make_checkpoint.first,
save_path=self.get_temp_dir(),
map_func=lambda n: "foo")
loader = Parent()
network.restore_network_checkpoint(
loader, checkpoint, map_func=lambda n: "foo")
with self.assertRaisesRegexp(
ValueError,
("The map_func passed to restore_network_checkpoint for the Network"
" 'parent_1' resulted in two variables named 'foo'")):
loader(one)
loader = Parent()
loader(one)
with self.assertRaisesRegexp(
ValueError,
("The map_func passed to restore_network_checkpoint for the Network"
" 'parent_2' resulted in two variables named 'foo'")):
network.restore_network_checkpoint(
loader, checkpoint, map_func=lambda n: "foo")
@test_util.run_in_graph_and_eager_modes
def testDefaultMapCollisionErrors(self):
one = constant_op.constant([[1.]])
first = core.Dense(1, name="dense", use_bias=False)
first(one)
class Parent(network.Network):
def __init__(self, name=None):
super(Parent, self).__init__(name=name)
self.first = self.track_layer(first)
self.second = self.track_layer(core.Dense(1, use_bias=False))
def call(self, x):
return self.first(self.second(x))
make_checkpoint = Parent()
one = constant_op.constant([[1.]])
make_checkpoint(one)
self.evaluate(make_checkpoint.variables[0].assign([[2.]]))
self.evaluate(make_checkpoint.variables[1].assign([[3.]]))
with self.assertRaisesRegexp(
ValueError,
("The default checkpoint variable name mapping strategy for Network "
"'parent' resulted in a naming conflict.")):
network.save_network_checkpoint(make_checkpoint, self.get_temp_dir())
class Compatible(network.Network):
def __init__(self, name=None):
super(Compatible, self).__init__(name=name)
self.first = self.track_layer(core.Dense(1, use_bias=False))
def call(self, x):
return self.first(x)
successful_checkpoint = Compatible()
successful_checkpoint(one)
self.evaluate(successful_checkpoint.variables[0].assign([[-1.]]))
checkpoint_path = network.save_network_checkpoint(
successful_checkpoint, self.get_temp_dir())
load_checkpoint = Parent()
load_checkpoint(one)
with self.assertRaisesRegexp(
ValueError,
("The default checkpoint variable name mapping strategy for Network "
"'parent_1' resulted in a naming conflict.")):
network.restore_network_checkpoint(load_checkpoint, checkpoint_path)
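  # Constructing and calling a nested Network should leave no reference
  # cycles behind; gc.DEBUG_SAVEALL keeps everything the collector would have
  # freed so it can be counted in gc.garbage.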
def testNoReferenceCyclesAfterCall(self):
class ChildNetwork(network.Network):
def __init__(self, name=None):
super(ChildNetwork, self).__init__(name=name)
def call(self, x):
return x * 2.
class ParentNetwork(network.Network):
def __init__(self, name=None):
super(ParentNetwork, self).__init__(name=name)
self.l1 = self.track_layer(ChildNetwork())
def call(self, x):
return self.l1(x)
one = constant_op.constant([[1.0]])
gc.disable()
gc.collect()
previous_gc_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
preexisting = len(gc.garbage)
net = ParentNetwork()
net(one)
del net
gc.collect()
# There should be no additional garbage requiring collection.
self.assertEqual(preexisting, len(gc.garbage))
gc.set_debug(previous_gc_debug_flags)
gc.enable()
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testAnonymousNoNameInitially(self):
net = MyNetwork()
with self.assertRaisesRegexp(ValueError, "does not yet have a final name"):
net.name # pylint: disable=pointless-statement
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testExplicitHasNameInitially(self):
net = MyNetwork(name="abcd")
self.assertEqual("abcd", net.name)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testUsingResourceVariables(self):
net = MyNetwork()
net(constant_op.constant([[0.]]))
self.assertIsInstance(net.trainable_weights[0],
resource_variable_ops.ResourceVariable)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testVariableRegularizers(self):
net = RegularizedNetwork()
net(constant_op.constant([[1.]]))
self.evaluate(net.variables[0].assign([[2.]]))
self.evaluate(net.variables[1].assign([3.]))
self.evaluate(net.variables[2].assign([[-2.]]))
self.evaluate(net.variables[3].assign([4.]))
self.assertAllEqual([4., 6., 8.], self.evaluate(net.losses))
self.evaluate(net.variables[3].assign([5.]))
self.assertAllEqual([4., 6., 10.], self.evaluate(net.losses))
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testDuplicateNameError(self):
one = constant_op.constant([[1.]])
net = MyNetwork(name="foo")
net(one)
with self.assertRaisesRegexp(
ValueError, "named 'foo' already exists"):
net1 = MyNetwork(name="foo")
net1(one)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testWrappingInVariableScope(self):
one = constant_op.constant([[1.]])
    # Naming happens in the order of first build rather than the order of
    # construction; for clarity the two orders coincide here, and the expected
    # names are annotated at the construction sites.
outside_net_before = MyNetwork() # name=my_network
outside_net_before(one)
captured_scope = variable_scope.get_variable_scope()
with variable_scope.variable_scope("outside_scope"):
net1 = MyNetwork() # name=outside_scope/my_network
net1(one)
name_conflict1 = MyNetwork(name="name_conflict") # fine, unique so far
name_conflict2 = MyNetwork(name="name_conflict") # error on build
with variable_scope.variable_scope("inside_scope"):
# No issue here since the name is unique within its scope.
name_conflict3 = MyNetwork(name="name_conflict")
net2 = MyNetwork() # name=outside_scope/my_network_2 to avoid the
# variable_scope my_network_1 below.
vs_name_conflict = MyNetwork(name="vs_name_conflict") # conflict below
with variable_scope.variable_scope("intervening_scope"):
with variable_scope.variable_scope(captured_scope):
with variable_scope.variable_scope("outside_scope"):
name_conflict4 = MyNetwork(name="name_conflict") # error on build
with variable_scope.variable_scope("my_network_1"):
pass
with variable_scope.variable_scope("vs_name_conflict"):
pass
            net3 = MyNetwork()  # name=outside_scope/my_network_3
name_conflict1(one)
with self.assertRaisesRegexp(
ValueError, "named 'name_conflict' already exists"):
name_conflict2(one)
name_conflict3(one)
net2(one)
with self.assertRaisesRegexp(
ValueError, "or a variable_scope was created with this name"):
vs_name_conflict(one)
with self.assertRaisesRegexp(
ValueError, "named 'name_conflict' already exists"):
name_conflict4(one)
self.assertEqual("outside_scope/name_conflict",
name_conflict1.name)
self.assertStartsWith(
expected_start="outside_scope/name_conflict/dense/",
actual=name_conflict1.variables[0].name)
self.assertEqual("outside_scope/inside_scope/name_conflict",
name_conflict3.name)
self.assertStartsWith(
expected_start="outside_scope/inside_scope/name_conflict/dense/",
actual=name_conflict3.variables[0].name)
self.assertEqual("outside_scope/my_network", net1.name)
self.assertStartsWith(
expected_start="outside_scope/my_network/dense/",
actual=net1.trainable_weights[0].name)
self.assertEqual("outside_scope/my_network_2", net2.name)
self.assertStartsWith(
expected_start="outside_scope/my_network_2/dense/",
actual=net2.trainable_weights[0].name)
net3(one)
self.assertEqual("outside_scope/my_network_3", net3.name)
self.assertStartsWith(
expected_start="outside_scope/my_network_3/dense/",
actual=net3.trainable_weights[0].name)
outside_net_after = MyNetwork()
outside_net_after(one)
self.assertEqual("my_network", outside_net_before.name)
self.assertStartsWith(
expected_start="my_network/dense/",
actual=outside_net_before.trainable_weights[0].name)
self.assertEqual("my_network_1", outside_net_after.name)
self.assertStartsWith(
expected_start="my_network_1/dense/",
actual=outside_net_after.trainable_weights[0].name)
@test_util.run_in_graph_and_eager_modes
def testVariableScopeStripping(self):
with variable_scope.variable_scope("scope1"):
with variable_scope.variable_scope("scope2"):
net = MyNetwork()
net(constant_op.constant([[2.0]]))
self.evaluate(net.variables[0].assign([[42.]]))
self.assertEqual(net.name, "scope1/scope2/my_network")
self.assertStartsWith(
expected_start="scope1/scope2/my_network/dense/",
actual=net.trainable_weights[0].name)
save_path = network.save_network_checkpoint(net, self.get_temp_dir())
self.assertIn("scope1_scope2_my_network", save_path)
restore_net = MyNetwork()
# Delayed restoration
network.restore_network_checkpoint(restore_net, save_path)
restore_net(constant_op.constant([[1.0]]))
self.assertAllEqual([[42.]],
self.evaluate(restore_net.variables[0]))
self.evaluate(restore_net.variables[0].assign([[-1.]]))
# Immediate restoration
network.restore_network_checkpoint(restore_net, save_path)
self.assertAllEqual([[42.]],
self.evaluate(restore_net.variables[0]))
@test_util.run_in_graph_and_eager_modes
def testLayerNamesRespected(self):
class ParentNetwork(network.Network):
def __init__(self):
super(ParentNetwork, self).__init__()
self.first = self.track_layer(
core.Dense(1, use_bias=False, name="explicit_name"))
def call(self, x):
return self.first(x)
one = constant_op.constant([[1.]])
net = ParentNetwork()
net(one)
self.assertStartsWith(expected_start="parent_network/explicit_name/",
actual=net.trainable_weights[0].name)
self.assertEqual("explicit_name", net.first.name)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testWrappingInAnonymousVariableScope(self):
    # Wrapping a Network in a named outer variable_scope is not supported at
    # the moment. However, a blank-named top-level variable scope does not
    # change variable names, and so can be used to set the properties of
    # Network variables (here, via a custom getter).
was_called = [False]
def _custom_getter(getter, *args, **kwargs):
was_called[0] = True
return getter(*args, **kwargs)
with variable_scope.variable_scope("", custom_getter=_custom_getter):
net = MyNetwork()
one = constant_op.constant([[1.]])
net(one)
self.assertTrue(was_called[0])
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testReasonableSlashError(self):
with self.assertRaisesRegexp(
ValueError, "not allowed in Network names"):
MyNetwork(name="slash/slash")
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNoVariableScopeNames(self):
with self.assertRaisesRegexp(
ValueError, "VariableScopes are not valid Network names"):
with variable_scope.variable_scope("some_scope") as vs:
MyNetwork(name=vs)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testVariableScopeNameCollision(self):
with variable_scope.variable_scope("abcd"):
pass
with self.assertRaisesRegexp(
ValueError, "or a variable_scope was created with this name"):
net = MyNetwork(name="abcd")
one = constant_op.constant([[1.]])
net(one)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNetworkVariablesDoNotInterfere(self):
core.Dense(1, use_bias=True) # Should not interfere with naming.
net1 = MyNetwork()
net2 = MyNetwork()
one = constant_op.constant([[1.]])
net1(one)
net2(one)
    # Layer names are typically globally unique rather than unique within the
    # scope of their first use. Within a Network, however, they must be named
    # locally so that earlier Layer construction does not interfere with
    # variable naming (otherwise constructing an extra Layer before the Network
    # would make a previously saved checkpoint incompatible).
self.assertEqual("dense", net1.l1.name)
self.assertEqual("dense", net2.l1.name)
self.evaluate(net1.trainable_weights[0].assign([[1.]]))
self.evaluate(net2.trainable_weights[0].assign([[2.]]))
self.assertEqual(2., self.evaluate(net2.trainable_weights[0]))
self.assertEqual(1., self.evaluate(net1.trainable_weights[0]))
self.assertStartsWith(expected_start="my_network/dense/",
actual=net1.trainable_weights[0].name)
self.assertStartsWith(expected_start="my_network_1/dense/",
actual=net2.trainable_weights[0].name)
@test_util.run_in_graph_and_eager_modes
def testNestableAnonymous(self):
# The case where no explicit names are specified. We make up unique names,
# and these should match the variable names.
class ParentNetwork(network.Network):
def __init__(self):
super(ParentNetwork, self).__init__()
self.first = self.track_layer(MyNetwork())
self.second = self.track_layer(MyNetwork())
def call(self, x):
return self.second(self.first(x))
one = constant_op.constant([[1.]])
net = ParentNetwork()
net(one)
self.assertStartsWith(expected_start="parent_network/my_network/dense",
actual=net.trainable_weights[0].name)
self.assertStartsWith(expected_start="parent_network/my_network/dense",
actual=net.first.trainable_weights[0].name)
self.assertStartsWith(expected_start="parent_network/my_network_1/dense",
actual=net.trainable_weights[1].name)
self.assertStartsWith(expected_start="parent_network/my_network_1/dense",
actual=net.second.trainable_weights[0].name)
self.assertEqual("parent_network", net.name)
self.assertEqual("my_network", net.first.name)
self.assertEqual("my_network_1", net.second.name)
net2 = ParentNetwork()
net2(one)
self.assertStartsWith(expected_start="parent_network_1/my_network/dense",
actual=net2.trainable_weights[0].name)
self.assertStartsWith(expected_start="parent_network_1/my_network/dense",
actual=net2.first.trainable_weights[0].name)
self.assertStartsWith(expected_start="parent_network_1/my_network_1/dense",
actual=net2.trainable_weights[1].name)
self.assertStartsWith(expected_start="parent_network_1/my_network_1/dense",
actual=net2.second.trainable_weights[0].name)
self.assertEqual("parent_network_1", net2.name)
self.assertEqual("my_network", net2.first.name)
self.assertEqual("my_network_1", net2.second.name)
@test_util.run_in_graph_and_eager_modes
def testNestableExplicit(self):
# We have explicit network names and everything is globally unique.
class ParentNetwork(network.Network):
def __init__(self):
super(ParentNetwork, self).__init__(name="unique_parent_name")
self.first = self.track_layer(
MyNetwork(name="first_unique_child_name"))
self.second = self.track_layer(
MyNetwork(name="second_unique_child_name"))
def call(self, x):
return self.second(self.first(x))
one = constant_op.constant([[1.]])
net = ParentNetwork()
net(one)
self.assertStartsWith(
expected_start="unique_parent_name/first_unique_child_name/dense",
actual=net.trainable_weights[0].name)
self.assertStartsWith(
expected_start="unique_parent_name/second_unique_child_name/dense",
actual=net.trainable_weights[1].name)
self.assertEqual("unique_parent_name", net.name)
self.assertEqual("first_unique_child_name", net.first.name)
self.assertEqual("second_unique_child_name", net.second.name)
@test_util.run_in_graph_and_eager_modes
def testLayerNetworkNameInteractions(self):
# Same base name as core.Dense; Networks and non-Network Layers with the
# same base name should use the same numbering system.
class Dense(network.Network):
def __init__(self):
super(Dense, self).__init__()
self.first = self.track_layer(core.Dense(1, use_bias=False))
def call(self, x):
return self.first(x)
class MixedLayerNetwork(network.Network):
def __init__(self):
super(MixedLayerNetwork, self).__init__()
self.first = self.track_layer(core.Dense(1, use_bias=False))
self.second = self.track_layer(core.Dense(1, use_bias=False))
self.third = self.track_layer(Dense())
self.fourth = self.track_layer(core.Dense(1, use_bias=False))
self.fifth = self.track_layer(core.Dense(1, use_bias=False))
def call(self, x):
return self.fifth(self.fourth(self.third(self.second(self.first(x)))))
one = constant_op.constant([[1.]])
net = MixedLayerNetwork()
net(one)
self.assertEqual("dense", net.first.name)
self.assertEqual("dense_1", net.second.name)
self.assertEqual("dense_2", net.third.name)
self.assertEqual("dense_3", net.fourth.name)
self.assertEqual("dense_4", net.fifth.name)
    # Note that this is _not_ the default naming behavior for Layers. Layers
    # which are added to Networks follow Network variable naming conventions
    # (i.e. variable names are prefixed with the Network's name unless the
    # variable is shared). Nested Layers revert to Layer behavior.
self.assertStartsWith(expected_start="mixed_layer_network/dense/",
actual=net.trainable_weights[0].name)
self.assertStartsWith(expected_start="mixed_layer_network/dense_1/",
actual=net.trainable_weights[1].name)
self.assertStartsWith(expected_start="mixed_layer_network/dense_2/",
actual=net.trainable_weights[2].name)
self.assertStartsWith(expected_start="mixed_layer_network/dense_3/",
actual=net.trainable_weights[3].name)
self.assertStartsWith(expected_start="mixed_layer_network/dense_4/",
actual=net.trainable_weights[4].name)
self.assertEqual("mixed_layer_network", net.name)
@test_util.run_in_graph_and_eager_modes
def testNestableExplicitCollisions(self):
# We have explicit network names and they are unique within the layer
# they're added to.
class ParentNetwork(network.Network):
def __init__(self):
super(ParentNetwork, self).__init__(name="nonunique_name")
self.first = self.track_layer(
MyNetwork(name="nonunique_name"))
self.second = self.track_layer(
MyNetwork(name="second_unique_child_name"))
def call(self, x):
return self.second(self.first(x))
one = constant_op.constant([[1.]])
net = ParentNetwork()
net(one)
self.assertStartsWith(
expected_start="nonunique_name/nonunique_name/dense",
actual=net.trainable_weights[0].name)
self.assertStartsWith(
expected_start="nonunique_name/second_unique_child_name/dense",
actual=net.trainable_weights[1].name)
self.assertEqual("nonunique_name", net.name)
self.assertEqual("nonunique_name", net.first.name)
self.assertEqual("second_unique_child_name", net.second.name)
@test_util.run_in_graph_and_eager_modes
def testNestableExplicitWithAnonymousParent(self):
# A parent network is instantiated multiple times with explicitly named
# children. We shouldn't throw any name errors.
class ParentNetwork(network.Network):
def __init__(self):
super(ParentNetwork, self).__init__()
self.first = self.track_layer(
MyNetwork(name="first_unique_child_name"))
self.second = self.track_layer(
MyNetwork(name="second_unique_child_name"))
def call(self, x):
return self.second(self.first(x))
one = constant_op.constant([[1.]])
net = ParentNetwork()
net(one)
self.assertStartsWith(
expected_start="parent_network/first_unique_child_name/dense/",
actual=net.trainable_weights[0].name)
self.assertStartsWith(
expected_start="parent_network/second_unique_child_name/dense/",
actual=net.trainable_weights[1].name)
self.assertEqual("parent_network", net.name)
self.assertEqual("first_unique_child_name", net.first.name)
self.assertEqual("second_unique_child_name", net.second.name)
net2 = ParentNetwork()
net2(one)
self.assertStartsWith(
expected_start="parent_network_1/first_unique_child_name/dense",
actual=net2.trainable_weights[0].name)
self.assertStartsWith(
expected_start="parent_network_1/second_unique_child_name/dense",
actual=net2.trainable_weights[1].name)
self.assertEqual("parent_network_1", net2.name)
self.assertEqual("first_unique_child_name", net2.first.name)
self.assertEqual("second_unique_child_name", net2.second.name)
@test_util.run_in_graph_and_eager_modes
def testNestableExplicitSameLayerCollisions(self):
# We have explicit network names and they are _not_ unique within the layer
# they're added to. Error.
class ParentNetwork(network.Network):
def __init__(self):
super(ParentNetwork, self).__init__(name="unique_parent_name")
self.first = self.track_layer(MyNetwork(name="nonunique_name"))
self.second = self.track_layer(MyNetwork(name="nonunique_name"))
def call(self, x):
return self.second(self.first(x))
with self.assertRaisesRegexp(ValueError, "nonunique_name"):
ParentNetwork()
@test_util.run_in_graph_and_eager_modes
def testAnonymousVariableSharing(self):
# Two "owned" Networks
class FirstParentNetwork(network.Network):
def __init__(self):
super(FirstParentNetwork, self).__init__()
self.first = self.track_layer(MyNetwork())
self.second = self.track_layer(MyNetwork())
def call(self, x):
return self.second(self.first(x))
one = constant_op.constant([[1.]])
net = FirstParentNetwork()
net(one)
# One Network shared with FirstParentNetwork, one owned Network. Same name,
# but this is OK because only one is owned. This name collision is
# avoidable; we could have looked at the base_name of the non-owned Network
# and incremented our naming based on that.
class SecondParentNetwork(network.Network):
def __init__(self):
super(SecondParentNetwork, self).__init__()
self.first = self.track_layer(net.first)
self.second = self.track_layer(MyNetwork())
def call(self, x):
return self.second(self.first(x))
net2 = SecondParentNetwork()
net2(one)
self.assertStartsWith(
expected_start="first_parent_network/my_network/dense/",
actual=net2.trainable_weights[0].name)
self.assertStartsWith(
expected_start="second_parent_network/my_network/dense/",
actual=net2.trainable_weights[1].name)
self.assertEqual("second_parent_network", net2.name)
self.assertTrue(net2.first is net.first)
self.assertEqual("my_network", net2.first.name)
self.assertEqual("my_network", net2.second.name)
# No name collision; the owned Network is added first and has a different
# name than the shared Network.
class ThirdParentNetwork(network.Network):
def __init__(self):
super(ThirdParentNetwork, self).__init__()
self.first = self.track_layer(MyNetwork())
self.second = self.track_layer(net.second)
def call(self, x):
return self.second(self.first(x))
net3 = ThirdParentNetwork()
net3(one)
self.assertStartsWith(
expected_start="third_parent_network/my_network/dense",
actual=net3.trainable_weights[0].name)
self.assertStartsWith(
expected_start="first_parent_network/my_network_1/dense",
actual=net3.trainable_weights[1].name)
self.assertEqual("third_parent_network", net3.name)
self.assertTrue(net3.second is net.second)
self.assertEqual("my_network", net3.first.name)
self.assertEqual("my_network_1", net3.second.name)
# "Unavoidable" same-name Layer. The owned name is added first (fixed), then
# a shared Network is added with the same name.
class FourthParentNetwork(network.Network):
def __init__(self):
super(FourthParentNetwork, self).__init__()
self.first = self.track_layer(MyNetwork())
self.second = self.track_layer(net.first)
def call(self, x):
return self.second(self.first(x))
net4 = FourthParentNetwork()
net4(one)
self.assertStartsWith(
expected_start="fourth_parent_network/my_network/dense/",
actual=net4.trainable_weights[0].name)
self.assertStartsWith(
expected_start="first_parent_network/my_network/dense/",
actual=net4.trainable_weights[1].name)
self.assertEqual("fourth_parent_network", net4.name)
self.assertTrue(net4.second is net.first)
self.assertEqual("my_network", net4.first.name)
self.assertEqual("my_network", net4.second.name)
@test_util.run_in_graph_and_eager_modes
def testRecursiveLayerRenaming(self):
core.Dense(1) # Under default Layer naming, would change subsequent names.
class NetworkWithLayerChildren(network.Network):
def __init__(self):
super(NetworkWithLayerChildren, self).__init__()
self.first = self.track_layer(core.Dense(1, use_bias=False))
self.second = self.track_layer(core.Dense(1, use_bias=False))
def call(self, x):
return self.second(self.first(x))
class ParentNetwork(network.Network):
def __init__(self):
super(ParentNetwork, self).__init__()
self.first = self.track_layer(NetworkWithLayerChildren())
self.second = self.track_layer(NetworkWithLayerChildren())
def call(self, x):
return self.second(self.first(x))
net = ParentNetwork()
one = constant_op.constant([[1.]])
net(one)
self.assertStartsWith(
expected_start=("parent_network/network_with_layer_children/"
"dense/"),
actual=net.trainable_weights[0].name)
self.assertStartsWith(
expected_start=("parent_network/network_with_layer_children/"
"dense_1/"),
actual=net.trainable_weights[1].name)
self.assertStartsWith(
expected_start=("parent_network/network_with_layer_children_1/"
"dense/"),
actual=net.trainable_weights[2].name)
self.assertStartsWith(
expected_start=("parent_network/network_with_layer_children_1/"
"dense_1/"),
actual=net.trainable_weights[3].name)
self.assertEqual("parent_network", net.name)
self.assertEqual("network_with_layer_children", net.first.name)
self.assertEqual("network_with_layer_children_1", net.second.name)
self.assertEqual("dense", net.first.first.name)
self.assertEqual("dense_1", net.first.second.name)
self.assertEqual("dense", net.second.first.name)
self.assertEqual("dense_1", net.second.second.name)
@test_util.run_in_graph_and_eager_modes
def testCallInDifferentOrderThanConstruct(self):
shared_network = MyNetwork()
class FirstNetwork(network.Network):
def __init__(self):
super(FirstNetwork, self).__init__()
self.first = self.track_layer(shared_network)
self.second = self.track_layer(MyNetwork())
def call(self, x):
return self.second(self.first(x))
class SecondNetwork(network.Network):
def __init__(self):
super(SecondNetwork, self).__init__()
self.first = self.track_layer(shared_network)
self.second = self.track_layer(MyNetwork())
def call(self, x):
return self.second(self.first(x))
net1 = FirstNetwork()
net2 = SecondNetwork()
one = constant_op.constant([[1.]])
net2(one)
net1(one)
self.assertStartsWith(
expected_start="first_network/my_network/dense/",
actual=net1.trainable_weights[0].name)
self.assertStartsWith(
expected_start="first_network/my_network_1/dense/",
actual=net1.trainable_weights[1].name)
self.assertStartsWith(
expected_start="first_network/my_network/dense/",
actual=net2.trainable_weights[0].name)
self.assertStartsWith(
expected_start="second_network/my_network/dense/",
actual=net2.trainable_weights[1].name)
self.assertTrue(net1.trainable_weights[0] is net2.trainable_weights[0])
self.assertEqual("first_network", net1.name)
self.assertEqual("my_network", net1.first.name)
self.assertEqual("my_network_1", net1.second.name)
self.assertTrue(net2.first is net1.first)
self.assertEqual("my_network", net2.second.name)
@test_util.run_in_graph_and_eager_modes
def testLayerCallInDifferentOrderThanConstruct(self):
# Same idea as testCallInDifferentOrderThanConstruct, but this time with a
# non-Network Layer shared between two Networks rather than a
# Network. Naming should follow the same rules.
shared_layer = core.Dense(1, use_bias=False)
class FirstNetwork(network.Network):
def __init__(self):
super(FirstNetwork, self).__init__()
self.first = self.track_layer(shared_layer)
self.second = self.track_layer(core.Dense(1, use_bias=False))
def call(self, x):
return self.second(self.first(x))
class SecondNetwork(network.Network):
def __init__(self):
super(SecondNetwork, self).__init__()
self.first = self.track_layer(shared_layer)
self.second = self.track_layer(core.Dense(1, use_bias=False))
def call(self, x):
return self.second(self.first(x))
net1 = FirstNetwork()
net2 = SecondNetwork()
one = constant_op.constant([[1.]])
net2(one)
net1(one)
self.assertStartsWith(
expected_start="first_network/dense/",
actual=net1.trainable_weights[0].name)
self.assertStartsWith(
expected_start="first_network/dense_1/",
actual=net1.trainable_weights[1].name)
self.assertStartsWith(
expected_start="first_network/dense/",
actual=net2.trainable_weights[0].name)
self.assertStartsWith(
expected_start="second_network/dense/",
actual=net2.trainable_weights[1].name)
self.assertTrue(net1.trainable_weights[0] is net2.trainable_weights[0])
self.assertEqual("first_network", net1.name)
self.assertEqual("dense", net1.first.name)
self.assertEqual("dense_1", net1.second.name)
self.assertTrue(net2.first is net1.first)
self.assertEqual("dense", net2.second.name)
@test_util.run_in_graph_and_eager_modes
def testLayerAlreadyBuilt(self):
one = constant_op.constant([[1.]])
core.Dense(1, use_bias=False) # pre-built layers use global naming
one = constant_op.constant([[1.]])
core.Dense(1, use_bias=False)(one)
shared_layer = core.Dense(1, use_bias=False)
shared_layer(one)
class FirstNetwork(network.Network):
def __init__(self):
super(FirstNetwork, self).__init__()
self.first = self.track_layer(shared_layer)
self.second = self.track_layer(core.Dense(1, use_bias=False))
def call(self, x):
return self.second(self.first(x))
net = FirstNetwork()
net(one)
self.assertStartsWith(
expected_start="dense_1/", # Pre-built layers have variable names which
# do not match their layer names.
actual=net.trainable_weights[0].name)
self.assertStartsWith(
expected_start="first_network/dense/",
actual=net.trainable_weights[1].name)
self.assertTrue(
net.trainable_weights[0] is shared_layer.trainable_weights[0])
self.assertEqual("first_network", net.name)
self.assertEqual("dense_3", net.first.name)
self.assertEqual("dense", net.second.name)
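# Sequential chains Layers and plain callables, forwarding the optional
# `training` argument only to callables that accept it.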
class SequentialTest(test.TestCase):
@test_util.assert_no_garbage_created
def testTwoLayers(self):
# Create a sequential network with one layer.
net = network.Sequential([core.Dense(1, use_bias=False)])
# Set that layer's weights so it multiplies by 3
l1 = net.get_layer(index=0)
net(constant_op.constant([[2.0]])) # Create l1's variables
self.assertEqual(1, len(l1.trainable_variables))
l1.trainable_variables[0].assign([[3.0]])
self.assertEqual(21.0, net(constant_op.constant([[7.0]])).numpy())
# Add a second layer to the network.
l2 = core.Dense(1, use_bias=False)
net.add(l2)
# Set the second layer's weights so it multiplies by 11
net(constant_op.constant([[2.0]])) # Create l2's variables
self.assertEqual(1, len(l2.trainable_variables))
l2.trainable_variables[0].assign([[11.0]])
self.assertEqual(231.0, net(constant_op.constant([[7.0]])).numpy())
@test_util.assert_no_garbage_created
def testFunctions(self):
# Create a sequential network with one function.
net = network.Sequential([nn_ops.relu])
two = constant_op.constant(2.0)
self.assertEqual(2.0, net(two).numpy())
self.assertEqual(0.0, net(-two).numpy())
# Add a second function.
net.add(math_ops.negative)
self.assertEqual(-2.0, net(two).numpy())
@test_util.assert_no_garbage_created
def testTrainingLayer(self):
net = network.Sequential([core.Dropout(0.99999)])
two = constant_op.constant(2.0)
self.assertEqual(2.0, net(two).numpy())
self.assertEqual(2.0, net(two, training=False).numpy())
for _ in range(20):
with_dropout = net(two, training=True).numpy()
self.assertIn(with_dropout, [0.0, 2.0])
if with_dropout == 0.0:
return
# Should only fail spuriously 1 in 10^100 runs.
self.fail("Didn't see dropout happen after 20 tries.")
@test_util.assert_no_garbage_created
def testTrainingFunction(self):
# Output depends on value of "training".
def add_training(input_value, training=None):
if training is None:
return input_value
elif training:
return input_value + 1
return input_value - 1
# Passing a "training" argument to double would cause an error.
def double(input_value):
return 2 * input_value
net = network.Sequential([add_training, double])
two = constant_op.constant(2)
self.assertEqual(4, net(two).numpy())
self.assertEqual(2, net(two, training=False).numpy())
self.assertEqual(6, net(two, training=True).numpy())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/eager/python/network_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for remote eager execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import numpy as np
from tensorflow.python import pywrap_tensorflow
from tensorflow.contrib.eager.python import parameter_server
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import remote
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
JOB_NAME = "remote_device"
ALT_JOB_NAME = "alt_remote_device"
def run_sync_and_async(f):
"""Execute all test methods in the given class in sync and async modes."""
@functools.wraps(f)
def decorator(self, *args, **kwargs):
# TODO(b/117110239): Re-enable.
# with context.execution_mode(context.ASYNC):
# f(self, *args, **kwargs)
with context.execution_mode(context.SYNC):
f(self, *args, **kwargs)
return decorator
def get_server_def(job_name, local_server_port, remote_server_addresses,
task_index):
"""Returns a server def with a single job + multiple tasks."""
cluster_def = cluster_pb2.ClusterDef()
job_def = cluster_def.job.add()
job_def.name = job_name
job_def.tasks[0] = "localhost:%d" % local_server_port
for i, remote_server_address in enumerate(remote_server_addresses, start=1):
job_def.tasks[i] = remote_server_address
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_def,
job_name=job_name,
task_index=task_index,
protocol="grpc")
return server_def
class RemoteExecutionTest(test.TestCase):
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(RemoteExecutionTest, self).__init__(methodName)
self._cached_server1 = server_lib.Server.create_local_server()
self._cached_server2 = server_lib.Server.create_local_server()
os.environ["TF_EAGER_REMOTE_USE_SEND_TENSOR_RPC"] = "1"
self._cached_server1_target = self._cached_server1.target[len("grpc://"):]
self._cached_server2_target = self._cached_server2.target[len("grpc://"):]
def setUp(self):
# Start the local server.
local_port = pywrap_tensorflow.TF_PickUnusedPortOrDie()
context.set_server_def(
server_def=get_server_def(
JOB_NAME,
local_server_port=local_port,
remote_server_addresses=[
self._cached_server1_target, self._cached_server2_target
],
task_index=0))
@run_sync_and_async
def testDefunMatmul(self):
"""Basic remote eager execution with defun."""
mm_defun = function.defun(math_ops.matmul)
with ops.device("job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME):
x1 = array_ops.ones([2, 2])
with ops.device("job:%s/replica:0/task:2/device:CPU:0" % JOB_NAME):
x2 = array_ops.ones([2, 2])
y = mm_defun(x1, x2)
np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
@run_sync_and_async
def testSimpleMatmul(self):
"""Basic remote eager execution."""
with ops.device("job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME):
x1 = array_ops.ones([2, 2])
with ops.device("job:%s/replica:0/task:2/device:CPU:0" % JOB_NAME):
x2 = array_ops.ones([2, 2])
y = math_ops.matmul(x1, x2)
np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
def testParameterServer(self):
with parameter_server.parameter_server_scope(
is_chief=True, ps_job_name=JOB_NAME, num_ps_tasks=3):
v0 = variables.Variable([1.0], name="v0")
v1 = variables.Variable([2.0], name="v1")
v0.assign(v0 * v1)
self.assertAllEqual(v0.read_value(), [2.0])
self.assertAllEqual(v0.device,
"/job:%s/replica:0/task:0/device:CPU:0" % JOB_NAME)
self.assertAllEqual(v1.device,
"/job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME)
v1.assign_add(v1)
# Simulate aliasing another variable of the same name as v1
with ops.device("/job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME):
v1_replica = parameter_server.SharedVariable(
[1.0], name="v1", initialize=False)
self.assertAllEqual(v1_replica.read_value(), [4.0])
@run_sync_and_async
def testSimpleWeightRead(self):
"""Basic remote eager weight read."""
with ops.device("job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME):
w = resource_variable_ops.ResourceVariable([[2.0]])
loss = w * w
np.testing.assert_array_equal([[4.0]], loss.numpy())
@run_sync_and_async
def testTapeWeightRead(self):
"""Remote eager weight read in a tape."""
with ops.device("job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME):
w = resource_variable_ops.ResourceVariable([[3.0]])
with backprop.GradientTape() as tape:
loss = w * w
grad = tape.gradient(loss, w)
np.testing.assert_array_equal([[9.0]], loss.numpy())
np.testing.assert_array_equal([[6.0]], grad.numpy())
@run_sync_and_async
def testServerDefChanged(self):
"""Update server def, and run ops on new cluster."""
context.set_server_def(
server_def=get_server_def(
ALT_JOB_NAME,
local_server_port=0,
remote_server_addresses=[
self._cached_server1_target, self._cached_server2_target
],
task_index=0))
with ops.device("job:%s/replica:0/task:1/device:CPU:0" % ALT_JOB_NAME):
x1 = array_ops.ones([2, 2])
y = math_ops.matmul(x1, x1)
np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
# Set the server def back to JOB_NAME
context.set_server_def(
server_def=get_server_def(
JOB_NAME,
local_server_port=0,
remote_server_addresses=[
self._cached_server1_target, self._cached_server2_target
],
task_index=0))
with ops.device("job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME):
x1 = array_ops.ones([2, 2])
y = math_ops.matmul(x1, x1)
np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
@run_sync_and_async
def testConnectToRemoteServer(self):
"""Basic server connection."""
remote.connect_to_remote_host(self._cached_server1_target)
with ops.device("job:worker/replica:0/task:0/device:CPU:0"):
x1 = array_ops.ones([2, 2])
x2 = array_ops.ones([2, 2])
y = math_ops.matmul(x1, x2)
np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
@run_sync_and_async
def testContextDeviceUpdated(self):
"""Tests that the context device is correctly updated."""
with ops.device("cpu:0"):
x1 = array_ops.ones([2, 2])
x2 = array_ops.ones([2, 2])
y = math_ops.matmul(x1, x2)
np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
# `y` is placed on the local CPU as expected.
self.assertEqual(y.device,
"/job:%s/replica:0/task:0/device:CPU:0" % JOB_NAME)
@test_util.run_gpu_only
@run_sync_and_async
def testGPUToRemoteCopy(self):
"""Tests that the remote copy happens satisfactorily."""
x1 = array_ops.ones([2, 2]).gpu()
with ops.device("/job:remote_device/replica:0/task:1/device:CPU:0"):
x2 = x1._copy() # pylint: disable=protected-access
np.testing.assert_array_equal(x1.numpy(), x2.numpy())
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
|
tensorflow-master
|
tensorflow/contrib/eager/python/remote_test.py
|
"""Saver for eager mode TensorFlow."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Saver for eager mode TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import saver as _saver
def _init_from_checkpoint(self, *args, **kwargs):
"""Overrides default init by loading value from checkpoint."""
# pylint: disable=protected-access
self._old_init(*args, **kwargs)
ckpt_name = self._map_func(self._shared_name)
if ckpt_name not in self._ckpt_var_cache:
raise errors.NotFoundError(None, None,
"%s not found in checkpoint" % ckpt_name)
val = self._ckpt_var_cache.get(ckpt_name, None)
if val is not None:
self.assign(val)
# Avoid assigning for the second time.
self._ckpt_var_cache[ckpt_name] = None
# pylint: enable=protected-access
@contextlib.contextmanager
def restore_variables_on_create(save_path, map_func=None):
"""ContextManager that restores variables on creation.
  When save_path is None (e.g. there is no checkpoint), this does nothing.
  Otherwise, it preloads all values from the checkpoint. When the
  corresponding variable is first created, it assigns the checkpoint
  value to the variable.
  ```python
  with restore_variables_on_create(
      tf.train.latest_checkpoint(checkpoint_dir)):
    my_variable = tf.Variable(...)  # Takes its value from the checkpoint.
  ```
Args:
save_path: The checkpoint file prefix.
map_func: A function that given the variable name as argument
and returns a variable name in checkpoint for restore. If
None, use the variable with the same name in checkpoint to restore.
      It is an error if the mapped variable name does not exist in the
      checkpoint.
Yields:
Nothing.
Raises:
NotFoundError: If the variable is not found in checkpoint.
ValueError: If not used in eager mode or map_func is not callable.
"""
if not context.executing_eagerly():
raise ValueError(
"Currently, restore_variables_on_create can only be used with "
"eager execution enabled.")
if save_path:
if map_func is None:
map_func_wrapper = lambda self, x: x
else:
if not callable(map_func):
raise ValueError("map_func must be callable.")
map_func_wrapper = lambda self, x: map_func(x)
ckpt_var_cache = {}
reader = checkpoint_utils.load_checkpoint(save_path)
for k, _ in checkpoint_utils.list_variables(save_path):
ckpt_var_cache[k] = reader.get_tensor(k)
old_init = getattr(resource_variable_ops.ResourceVariable,
"_init_from_args", None)
    assert old_init, "ResourceVariable is missing the _init_from_args method."
setattr(resource_variable_ops.ResourceVariable, "_init_from_args",
_init_from_checkpoint)
setattr(resource_variable_ops.ResourceVariable, "_old_init", old_init)
setattr(resource_variable_ops.ResourceVariable, "_map_func",
map_func_wrapper)
setattr(resource_variable_ops.ResourceVariable, "_ckpt_var_cache",
ckpt_var_cache)
try:
yield
finally:
if save_path:
setattr(resource_variable_ops.ResourceVariable, "_init_from_args",
old_init)
setattr(resource_variable_ops.ResourceVariable, "_old_init", None)
setattr(resource_variable_ops.ResourceVariable, "_map_func", None)
setattr(resource_variable_ops.ResourceVariable, "_ckpt_var_cache", None)
class Saver(object):
"""A tf.compat.v1.train.Saver adapter for use when eager execution is enabled.
`Saver`'s name-based checkpointing strategy is fragile. Please switch to
`tf.train.Checkpoint` or `tf.keras.Model.save_weights`, which perform a more
robust object-based saving. These APIs will load checkpoints written by
`Saver`.
"""
def __init__(self, var_list):
"""A tf.compat.v1.train.Saver adapter for use when eager execution is enabled.
The API, and on-disk format, mimic tf.compat.v1.train.Saver except that no
Session is needed.
Args:
var_list: The list of variables that will be saved and restored. Either a
list of `tf.Variable` objects, or a dictionary mapping names to
`tf.Variable` objects.
Raises:
RuntimeError: if invoked when eager execution has not been enabled.
"""
if not context.executing_eagerly():
raise RuntimeError("tfe.Saver can only be used when eager "
"execution is enabled. Use tf.train.Saver when "
"building graphs.")
self._saver = _saver.Saver(var_list=var_list)
def save(self, file_prefix, global_step=None):
"""Saves variables.
Args:
file_prefix: Path prefix of files created for the checkpoint.
      global_step: If provided, the global step number is appended to file_prefix
to create the checkpoint filename. The optional argument can be a
Tensor, a Variable, or an integer.
Returns:
A string: prefix of filenames created for the checkpoint. This may be
an extension of file_prefix that is suitable to pass as an argument
to a subsequent call to `restore()`.
"""
with ops.device("/device:CPU:0"):
return self._saver.save(
None, file_prefix, write_meta_graph=False, global_step=global_step)
def restore(self, file_prefix):
"""Restores previously saved variables.
Args:
file_prefix: Path prefix where parameters were previously saved.
Typically obtained from a previous `save()` call, or from
`tf.train.latest_checkpoint`.
"""
with ops.device("/device:CPU:0"):
self._saver.restore(None, file_prefix)
def get_optimizer_variables(optimizer):
"""Returns a list of variables for the given `tf.compat.v1.train.Optimizer`.
Equivalent to `optimizer.variables()`.
Args:
optimizer: An instance of `tf.compat.v1.train.Optimizer` which has created
variables (typically after a call to `Optimizer.minimize`).
Returns:
A list of variables which have been created by the `Optimizer`.
"""
return optimizer.variables()
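# Illustrative usage sketch (not part of the original module): round-trips a
# single variable through `Saver.save` and `Saver.restore`. The function name,
# the variable name, and the `checkpoint_prefix` argument are assumptions made
# for this example; eager execution must already be enabled for `Saver`.
def _example_saver_round_trip(checkpoint_prefix):
  """Saves a variable, overwrites it, then restores the saved value."""
  v = resource_variable_ops.ResourceVariable(1.0, name="v")
  saver = Saver(var_list=[v])
  prefix = saver.save(checkpoint_prefix)
  v.assign(2.0)          # Change the in-memory value.
  saver.restore(prefix)  # `v` is back to 1.0, read from the checkpoint.
  return v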
|
tensorflow-master
|
tensorflow/contrib/eager/python/saver.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Eager Execution: Sanity tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
from tensorflow.contrib.eager.python import tfe
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.summary.writer import writer
class TFETest(test_util.TensorFlowTestCase):
def testMatmul(self):
x = [[2.]]
y = math_ops.matmul(x, x) # tf.matmul
self.assertAllEqual([[4.]], y.numpy())
def testInstantError(self):
if test_util.is_gpu_available():
# TODO(nareshmodi): make this test better
self.skipTest("Gather doesn't do index checking on GPUs")
with self.assertRaisesRegexp(errors.InvalidArgumentError,
r'indices = 7 is not in \[0, 3\)'):
array_ops.gather([0, 1, 2], 7)
def testGradients(self):
def square(x):
return math_ops.multiply(x, x)
grad = tfe.gradients_function(square)
    self.assertEqual([6], [x.numpy() for x in grad(3.)])
def testGradOfGrad(self):
def square(x):
return math_ops.multiply(x, x)
grad = tfe.gradients_function(square)
gradgrad = tfe.gradients_function(lambda x: grad(x)[0])
    self.assertEqual([2], [x.numpy() for x in gradgrad(3.)])
def testCustomGrad(self):
@tfe.custom_gradient
def f(x):
y = math_ops.multiply(x, x)
def grad_fn(_):
return [x + y]
return y, grad_fn
grad = tfe.gradients_function(f)
    self.assertEqual([12], [x.numpy() for x in grad(3.)])
@test_util.run_gpu_only
def testGPU(self):
    # tf.Tensor.gpu() moves a tensor to GPU.
x = constant_op.constant([[1., 2.], [3., 4.]]).gpu()
# Alternatively, tf.device() as a context manager places tensors and
# operations.
with ops.device('gpu:0'):
x += 1.
# Without a device context, heuristics are used to place ops.
    # In this case, math_ops.reduce_mean runs on the GPU.
axis = range(x.shape.ndims)
m = math_ops.reduce_mean(x, axis)
# m is on GPU, bring it back to CPU and compare.
self.assertEqual(3.5, m.cpu().numpy())
def testListDevices(self):
# Expect at least one device.
self.assertTrue(tfe.list_devices())
def testAddCheckNumericsOpsRaisesError(self):
with self.assertRaisesRegexp(
RuntimeError,
r'add_check_numerics_ops\(\) is not compatible with eager execution'):
numerics.add_check_numerics_ops()
def testClassicSummaryOpsErrorOut(self):
x = constant_op.constant(42)
x_summary = summary.scalar('x', x)
y = constant_op.constant([1, 3, 3, 7])
y_summary = summary.histogram('hist', y)
with self.assertRaisesRegexp(
RuntimeError,
r'Merging tf\.summary\.\* ops is not compatible with eager execution'):
summary.merge([x_summary, y_summary])
with self.assertRaisesRegexp(
RuntimeError,
r'Merging tf\.summary\.\* ops is not compatible with eager execution'):
summary.merge_all()
def testClassicSummaryFileWriterErrorsOut(self):
with self.assertRaisesRegexp(
RuntimeError,
r'tf\.summary\.FileWriter is not compatible with eager execution'):
writer.FileWriter(tempfile.mkdtemp())
if __name__ == '__main__':
tfe.enable_eager_execution()
test.main()
|
tensorflow-master
|
tensorflow/contrib/eager/python/tfe_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Network is a composition of Layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import weakref
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend
from tensorflow.python.layers import base
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import training_util
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
# pylint: disable=protected-access
# Explanation for protected-access disable: Network has lots of same-class and
# parent-class references across different objects, and some to private
# functions in base.py which should be reused.
def _network_name_scope_naming(current_variable_scope):
"""Name scope naming to match operation names to variable names.
Used in Networks and also applied to non-Network Layers which are added to
Networks before being built.
Args:
current_variable_scope: A VariableScope object.
Returns:
A name scope name.
"""
return current_variable_scope.name + "/"
_NETWORK_DEPRECATION_MESSAGE = (
"Please inherit from `tf.keras.Model`, and see its documentation for "
"details. `tf.keras.Model` should be a drop-in replacement for "
"`tfe.Network` in most cases, but note that `track_layer` is no longer "
"necessary or supported. Instead, `Layer` instances are tracked on "
"attribute assignment (see the section of `tf.keras.Model`'s documentation "
"on subclassing). Since the output of `track_layer` is often assigned to "
"an attribute anyway, most code can be ported by simply removing the "
"`track_layer` calls.\n\n`tf.keras.Model` works with all TensorFlow "
"`Layer` instances, including those from `tf.layers`, but switching to "
"the `tf.keras.layers` versions along with the migration to "
"`tf.keras.Model` is recommended, since it will preserve variable names. "
"Feel free to import it with an alias to avoid excess typing :).")
class Network(base.Layer):
"""Represents the composition of a set of Layers.
*Deprecated*. Please inherit from `tf.keras.Model`, and see its documentation
for details. `tf.keras.Model` should be a drop-in replacement for
`tfe.Network` in most cases, but note that `track_layer` is no longer
necessary or supported. Instead, `Layer` instances are tracked on attribute
assignment (see the section of `tf.keras.Model`'s documentation on
subclassing). Since the output of `track_layer` is often assigned to an
attribute anyway, most code can be ported by simply removing the `track_layer`
calls.
`tf.keras.Model` works with all TensorFlow `Layer` instances, including those
from `tf.layers`, but switching to the `tf.keras.layers` versions along with
the migration to `tf.keras.Model` is recommended, since it will preserve
variable names. Feel free to import it with an alias to avoid excess typing
:).
`Network` implements the `Layer` interface and adds convenience methods for
managing sub-`Layer`s, such as listing variables.
`Layer`s (including other `Network`s) should be added via `track_layer`. They
can then be used when overriding the `Network.call` method:
```python
class TwoLayerNetwork(tfe.Network):
def __init__(self, name):
super(TwoLayerNetwork, self).__init__(name=name)
self.layer_one = self.track_layer(tf.compat.v1.layers.Dense(16,
input_shape=(8,)))
self.layer_two = self.track_layer(tf.compat.v1.layers.Dense(1,
input_shape=(16,)))
def call(self, inputs):
return self.layer_two(self.layer_one(inputs))
```
After constructing an object and calling the `Network`, a list of variables
created by tracked `Layer`s is available via `Network.variables`:
```python
net = TwoLayerNetwork(name="net")
output = net(tf.ones([1, 8]))
print([v.name for v in net.variables])
```
This example prints variable names, one kernel and one bias per
`tf.compat.v1.layers.Dense` layer:
```
['net/dense/kernel:0',
'net/dense/bias:0',
'net/dense_1/kernel:0',
'net/dense_1/bias:0']
```
These variables can be passed to a `Saver` (`tf.compat.v1.train.Saver`, or
`tf.contrib.eager.Saver` when executing eagerly) to save or restore the
`Network`, typically alongside a global step and
`tf.compat.v1.train.Optimizer`
variables when checkpointing during training.
Note that the semantics of calling a `Network` with graph execution (i.e. not
executing eagerly) may change slightly in the future. Currently stateful ops
are pruned from the graph unless they or something that depends on them is
executed in a session, but this behavior is not consistent with eager
execution (where stateful ops are executed eagerly). `Layer`s from `tf.layers`
do not depend on this pruning and so will not be affected, but `Network`s
which rely on stateful ops being added to the graph but not executed (e.g. via
custom `Layer`s which manage stateful ops) may break with this change.
"""
# TODO(josh11b,ashankar,allenl):
# - Should 'trainable' be changeable on the Network object?
# - Do we allow add_variable in Network?
# - Detect layers used in __call__ that weren't registered with track_layer.
# - Convert inputs to __call__ to tensors.
@deprecation.deprecated(date=None, instructions=_NETWORK_DEPRECATION_MESSAGE)
def __init__(self, name=None):
"""Configure the `Network`.
Args:
name: The name to use for this `Network`. If specified, it must be unique
in the context where this `Network` is first (1) added to another
`Network` (in which case it must not share a name with other `Layers`
added to that `Network`), or (2) built/called (in which case no other
'top-level' `Network`s may share this name). If unspecified or None, the
`Network` will be named using its class name, with a number appended if
necessary for uniqueness (e.g. MyNetwork -> 'my_network_1').
Raises:
ValueError: If `name` is not valid. Note that some naming errors will
instead be raised when the `Network` is called.
"""
if context.executing_eagerly():
logging.warning(
("** tfe.Network is deprecated and will be removed in a future "
"version.\n\n%s"), _NETWORK_DEPRECATION_MESSAGE)
if isinstance(name, variable_scope.VariableScope):
raise ValueError("VariableScopes are not valid Network names.")
if name is not None and "/" in name:
raise ValueError(
"Forward slashes ('/') are not allowed in Network names.")
super(Network, self).__init__(name=name)
self._layers = []
self._sub_layer_name_uids = collections.defaultdict(int)
# Initially None, but set to False for networks which are first built as
# top-level.
self._first_parent = None # A weak reference to our first parent.
self._non_network_sublayers = []
self._owned_layers = {}
# The scope to use if we end up without a parent.
self._default_parent_variable_scope = variable_scope.get_variable_scope()
# Hold on to the variable scope counts from init to check whether a scope
# with the name we want was ever created in our parent scope. Without this
# check we might have name collisions if the parent scope on init gets
# closed before build is called.
self._variable_scope_counts_on_init = (
variable_scope.get_variable_scope_store().variable_scopes_count)
def _gather_saveables_for_checkpoint(self):
raise NotImplementedError(
"tfe.Network does not support object-based checkpointing.\n\n%s" %
_NETWORK_DEPRECATION_MESSAGE)
def _name_scope_name(self, current_variable_scope):
"""Overrides Layer op naming to match variable naming."""
return _network_name_scope_naming(
current_variable_scope=current_variable_scope)
def _init_set_name(self, name):
# Anonymous Networks (name=None) defer setting a final name until they are
# (1) added to another Network, or (2) built/called (where (2) is only used
# for a "top level" network).
#
# However, if we were provided an explicit name (name is not None), that
# will always be the final name of the Network; if it turns out not to be
# unique or if variable names can't be prefixed by it we will throw an
# error.
self._name = name
self._base_name = None
def _finalize_name(self, parent_network):
if not self._name:
      # We were not passed a name explicitly (or it was blank), so this is an
# anonymous Network. We make up a unique name.
if parent_network:
avoid_names = parent_network._owned_layers
name_uid_map = parent_network._sub_layer_name_uids
else:
name_uid_map = backend.get_default_graph_uid_map()
# Figure out which names we have to avoid based on which variable scope
# we're nested in.
strip_name = self._default_parent_variable_scope.name
if strip_name:
strip_name += "/"
def _strip_on_init_scope(name):
if name.startswith(strip_name):
return name[len(strip_name):]
else:
return None
avoid_names = set(
_strip_on_init_scope(name)
for name in self._variable_scope_counts_on_init.keys()
if name)
self._name, self._base_name = self._make_unique_name(
name_uid_map=name_uid_map,
avoid_names=avoid_names,
namespace=self._default_parent_variable_scope.name,
zero_based=True)
if self._first_parent is None or (self._first_parent # False = no parent
and self._first_parent() is None):
# Save a pointer to the parent Network so that we can later check that the
# scope name we get is correct.
if not parent_network:
self._first_parent = parent_network
else:
self._first_parent = weakref.ref(parent_network)
def _set_scope(self, scope=None):
if self._scope is None:
if not self._first_parent:
first_parent = self._first_parent
else:
first_parent = self._first_parent()
if first_parent is None:
        # If we were never added to another Network, or that Network has been
# garbage collected before being called, then we're a top-level Network.
self._finalize_name(
# Use False to make sure the value sticks and we don't inherit a
# parent if we're added to a network later.
parent_network=False)
if scope is not None:
raise ValueError("Networks may not be created with explicit scopes.")
if first_parent:
first_parent._set_scope()
parent_scope = first_parent._scope
else:
parent_scope = self._default_parent_variable_scope
with variable_scope.variable_scope(parent_scope) as parent_vs:
expected_scope_name = parent_vs.name + "/" + self._name
if expected_scope_name in self._variable_scope_counts_on_init:
raise ValueError(
("A Network named '%s' already exists (or a variable_scope was "
"created with this name). Names must be unique.") %
(self._name,))
# Make sure variables with this prefix will be unique.
with variable_scope.variable_scope(
None, use_resource=True, default_name=self._name) as scope:
self._scope = scope
scope_name = scope.name
suffix_start = scope_name.rfind("/") + 1
# rfind is -1 if there is no slash in the string, in which case the
# suffix starts at the beginning of the string (there is no prefix).
scope_suffix = scope_name[suffix_start:]
scope_prefix = scope_name[:suffix_start]
if scope_suffix != self._name:
raise ValueError(
("A Network named '%s' already exists (or a variable_scope was "
"created with this name). Names must be unique.") %
(self._name,))
if (first_parent and scope_prefix[:-1] != first_parent.scope_name):
raise ValueError(
("Network variable names must match a nesting of sub-Network "
"names. Expected prefix '%s' from parent network, but got "
"'%s' when attempting to create a variable_scope for Network "
"'%s'. Likely an explicit variable_scope was inserted into "
"the nesting.") %
(first_parent.scope_name, scope_prefix[:-1], self._name))
elif not first_parent and scope_prefix:
# For the case when this Network is not nested inside any other
# Network, but is in a variable_scope. This Network's name takes on
# the full variable scope prefix.
self._name = scope_name
for non_network_sublayer in self._non_network_sublayers:
self._set_scope_for_nonnetwork_sublayer(non_network_sublayer)
def _set_scope_for_nonnetwork_sublayer(self, sublayer):
if sublayer._scope is None:
if sublayer._first_parent is None:
constituent_first_parent = None
else:
constituent_first_parent = sublayer._first_parent()
if constituent_first_parent:
constituent_first_parent._set_scope()
parent_scope = constituent_first_parent._scope
else:
self._finalize_name(False)
raise ValueError(
("The parent of a Layer added to Network %s was garbage collected "
"before the Layer was built. If this limitation bothers you "
"please file a feature request.") % (self.name,))
with variable_scope.variable_scope(parent_scope):
# Horrid hack to make Layer variable names which are direct
# sub-layers of Networks conform to the Network variable naming
# conventions.
with variable_scope.variable_scope(
None, use_resource=True, default_name=sublayer.name) as sub_scope:
sublayer._scope = sub_scope
# Also switch op naming for this Layer to match Network conventions,
# i.e. op naming matching variable naming.
sublayer._name_scope_name = _network_name_scope_naming
@base.Layer.name.getter
def name(self):
if self._name is None:
raise ValueError(
"The network does not yet have a final name, but a name was "
"requested for it. Networks get a name when they are added to "
"another Network via track_layer, or when they are first "
"called/built.")
return self._name
def track_layer(self, layer):
"""Track a Layer in this Network.
`Network` requires that all `Layer`s used in `call()` be tracked so that the
`Network` can export a complete list of variables.
Args:
layer: A `tf.compat.v1.layers.Layer` object.
Returns:
The passed in `layer`.
Raises:
RuntimeError: If __init__ has not been called.
TypeError: If `layer` is the wrong type.
ValueError: If a `Layer` with the same name has already been added.
"""
if not hasattr(self, "_layers"):
raise RuntimeError("Need to call Network.__init__ before adding layers")
if not isinstance(layer, base.Layer):
raise TypeError(
"Network.track_layer() passed type %s, not a tf.layers.Layer" %
(type(layer),))
# Always use `ResourceVariable` with legacy layers.
layer._use_resource_variables = True
if isinstance(layer, Network):
layer._finalize_name(parent_network=self)
else:
# `layer` is a non-Network, so it hasn't been named to follow Network
# conventions for contained Layers (i.e. the same conventions as for
# sub-Networks). This renaming is necessary to isolate Network variable
# naming from Layers constructed outside the Network and never added to it
# (because Layers are named globally).
if not layer.built:
if not hasattr(layer, "_first_parent"):
dereferenced_layer_first_parent = None
else:
dereferenced_layer_first_parent = layer._first_parent()
if dereferenced_layer_first_parent is None:
if layer._name != layer._base_name:
# If name and base_name do not match, then this Layer used anonymous
# naming and we have to rename it. Otherwise there's an explicit
# name, and we should respect it (subject to error checking).
layer._name, layer._base_name = layer._make_unique_name(
name_uid_map=self._sub_layer_name_uids,
avoid_names=self._owned_layers,
zero_based=True
# No namespace required, since we've specified our own UID map.
)
layer._first_parent = weakref.ref(self)
self._non_network_sublayers.append(layer)
if (not layer.built and layer._first_parent and
self is layer._first_parent()):
if layer.name in self._owned_layers:
if self._owned_layers[layer.name] is layer:
return layer
raise ValueError(
"Attempt to add two Layers with the name '%s' to the same Network."
% (layer.name))
self._owned_layers[layer.name] = layer
self._layers.append(layer)
return layer
def get_layer(self, name=None, index=None):
"""Get a contained `tf.compat.v1.layers.Layer` either by name or index.
Args:
name: String matching one of the names of a contained `Layer`. Note that
the names of `Layer`s added to `Network`s may not be unique when doing
layer sharing (i.e. adding a `Layer` to this `Network` which was already
added to another `Network`). The lowest index `Layer` with a matching
name will be returned.
index: Integer in [0, number of layers). Layers are assigned an index by
the order they are added.
Returns:
A `tf.compat.v1.layers.Layer` object.
Raises:
ValueError: If neither or both of 'index' or 'name' is specified, or the
lookup failed.
"""
if index is not None:
if name is not None:
raise ValueError("Exactly one of 'index' or 'name' must be provided")
if len(self._layers) <= index:
raise ValueError("Was asked to retrieve layer at index " + str(index) +
" but model only has " + str(len(self._layers)) +
" layers.")
else:
return self._layers[index]
else:
if not name:
raise ValueError("Provide either a layer name or layer index.")
for layer in self._layers:
if layer.name == name:
return layer
raise ValueError("No such layer: " + name)
# The following methods are for implementing the Layer interface.
@property
def weights(self):
# TODO(josh11b): Should this return a set or perform de-duplication of
# variables in the case of shared layers/variables that appear in
# multiple places in the Network?
weights = []
for layer in self._layers:
weights += layer.weights
return weights
@property
def trainable_weights(self):
weights = []
for layer in self._layers:
weights += layer.trainable_weights
return weights
@property
def non_trainable_weights(self):
weights = []
for layer in self._layers:
weights += layer.non_trainable_weights
return weights
@property
def trainable(self):
return True
@trainable.setter
def trainable(self, value):
if not value:
# We believe it better to decide which layers & networks are trainable
# at the Trainer level than here. Otherwise you can run into trouble if a
# layer/network is shared between two models, but is trainable in one
# but not the other (like with adversarial networks).
raise AttributeError("cannot mark Network as not trainable")
@property
def layers(self):
return self._layers
def add_variable(self,
name,
shape,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
constraint=None):
raise RuntimeError(
"add_variable not supported in Network class yet. Please file an issue "
"at https://github.com/tensorflow/tensorflow/issues/new if this is "
"important to you")
def add_loss(self, losses, inputs=None):
raise RuntimeError(
"add_loss is not supported in Network class yet. Please file an issue "
"at https://github.com/tensorflow/tensorflow/issues/new if this is "
"important to you")
@property
def losses(self):
"""Gather losses from `Layer`s in the `Network`.
Note that when executing eagerly, `Layer.losses` evaluates
regularizers. When using graph execution, variable regularization ops have
already been created and are simply returned here.
Returns:
A list of tensors.
"""
layer_losses = []
for layer in self.layers:
layer_losses.extend(layer.losses)
return layer_losses
# TODO(allenl): Support other Layer methods needed for graph mode, such as for
# updates
class Sequential(Network):
"""Represents a linear sequence of Layers or functions.
The output of each layer/function is provided as the input to the next.
The inputs passed to `__call__` are passed to the inputs of the first
Layer, and it returns the outputs of the last Layer.
Args:
layers_funcs: An optional sequence where each element is either a
tf.compat.v1.layers.Layer object or a callable.
name: An optional string name to use for this Network.
"""
def __init__(self, layers_funcs=None, name=None):
super(Sequential, self).__init__(name=name)
self._layers_funcs = []
if layers_funcs:
for l in layers_funcs:
self.add(l)
def add(self, layer_func):
if isinstance(layer_func, base.Layer):
args = function_utils.fn_args(layer_func.call)
self.track_layer(layer_func)
elif callable(layer_func):
args = function_utils.fn_args(layer_func)
else:
raise TypeError(
"Sequential.add() takes only tf.layers.Layer objects or callables; "
"not '%s' of type '%s'." % (layer_func, type(layer_func)))
self._layers_funcs.append((("training" in args), layer_func))
def call(self, inputs, training=None):
"""Call each Layer in the order they were added."""
# TODO(josh11b): Support "mode" and maybe other arguments
if training is None:
for _, l in self._layers_funcs:
inputs = l(inputs)
else:
for has_training_arg, l in self._layers_funcs:
if has_training_arg:
inputs = l(inputs, training)
else:
inputs = l(inputs)
return inputs
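# Illustrative usage sketch (not part of the original module): builds a
# `Sequential` from plain callables, one of which takes the optional
# `training` argument that `Sequential.call` forwards. The function names and
# the network name are assumptions made for this example.
def _example_sequential_usage(inputs, training=None):
  """Applies a two-step Sequential pipeline to `inputs`; a minimal sketch."""
  def scale(x):
    return 2 * x
  def maybe_shift(x, training=None):
    # Only callables whose signature includes `training` receive the flag.
    return x + 1 if training else x
  net = Sequential([scale, maybe_shift], name="example_sequential")
  return net(inputs, training=training)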
_DeferredRestoration = collections.namedtuple(
"_DeferredRestoration",
[
# The map_func to use (either user-specified or the default).
"map_func",
# Boolean, True if the user specified an explicit map_func, for error
# messages.
"map_func_is_user",
# A mapping from checkpoint names to initial values of not-yet-created
# variables which should be restored. These values come from parsing a
# checkpoint.
"checkpointed_variables_to_restore",
# A mapping from checkpoint name to variable objects of variables which
# have already been restored, for error checking.
"restored_variables",
# The session to restore with (if in graph mode).
"session",
# Names of the Network where the restore was requested, for error
# messages.
"network_name",
"network_scope_name"
])
def _default_naming_conflict_error_message(mapped_name, first_variable,
second_variable, network_name,
network_scope_name):
return (
("The default checkpoint variable name mapping strategy for Network "
"'%s' resulted in a naming conflict. We attempted to strip off the "
"variable prefix for the Network ('%s'), but this resulted in two "
"variables named '%s' (originally '%s' and '%s'). This should only "
"happen when using variable sharing (i.e. the Network contains Networks "
"or Layers which were first added to another Network, and therefore "
"have that Network's variable prefix). One solution is to pass "
"`map_func=lambda n: n` to save and restore to use fully qualified "
"variable names in the checkpoint, although this will require that the "
"variable prefix of the Network being restored into is also '%s'. You "
"may alternatively write an arbitrary mapping.") %
(network_name, network_scope_name, mapped_name,
first_variable._shared_name, second_variable._shared_name,
network_scope_name))
def _restore_custom_map_func_error_message(mapped_name, first_variable,
second_variable, network_name,
network_scope_name):
return (
("The map_func passed to restore_network_checkpoint for the Network '%s' "
"resulted in two variables named '%s' (originally '%s' and '%s'). Since "
"this is also an error when saving, this Network was "
"probably not saved with this map_func. Note that map_func "
"always maps from full variable names to checkpoint names; "
"there is no need to specify an inverse mapping.\n\n"
"Try stripping less from the variable names, or renaming parts "
"of the Network. For reference, variables created by sub-Layers "
"of this Network are prefixed with '%s', but if they are "
"re-used after being added to another Network they will have "
"that Network's full variable prefix instead.") %
(network_name, mapped_name, first_variable._shared_name,
second_variable._shared_name, network_scope_name))
def _make_custom_getter_for_deferred_restorations():
"""Returns a custom getter which searches `deferred_restorations`.
Returns: A tuple of (_custom_getter, deferred_restorations)
_custom_getter: The getter which should be added to variable_scopes where
variables will be created.
deferred_restorations: A list for _DeferredRestoration objects. Typically
empty when the getter is set, and expanded as deferred restorations are
requested. All new deferred restorations should be appended to the end of
the list, where they will have priority over older deferred restorations.
"""
deferred_restorations = []
def _custom_getter(getter,
name,
shape=None,
dtype=None,
initializer=None,
*args,
**kwargs):
"""A custom getter which processes deferred restorations."""
# Iterate over restorations, newest first (newer restorations will take
# precedence over older restorations, just like with immediate restorations
# into existing variables).
delayed_restoration = None
found_value = False
value_to_restore = None
for delayed_restoration in reversed(deferred_restorations):
checkpoint_name = delayed_restoration.map_func(name)
if (checkpoint_name in
delayed_restoration.checkpointed_variables_to_restore):
found_value = True
value_to_restore = (
delayed_restoration
.checkpointed_variables_to_restore[checkpoint_name])
if found_value:
break
    # `found_value` may be False because this variable is not in any
    # checkpoint we are restoring, or `value_to_restore` may be None because
    # we explicitly set it to None when it was previously fetched. In either
    # case, we don't need to set an initializer.
if found_value and value_to_restore is not None:
initializer = value_to_restore
shape = None
variable = getter(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
*args,
**kwargs)
if found_value and value_to_restore is not None:
# Mark as already restored from this checkpoint.
delayed_restoration.checkpointed_variables_to_restore[
checkpoint_name] = None
if not context.executing_eagerly():
delayed_restoration.session.run(variable.initializer)
if found_value:
# Error checking should run even if we've already restored a value.
if delayed_restoration.restored_variables.setdefault(
checkpoint_name, variable) is not variable:
# Naming conflict. We've tried to initialize two variables with the
# same value from the checkpoint.
if delayed_restoration.map_func_is_user:
raise ValueError(
_restore_custom_map_func_error_message(
mapped_name=checkpoint_name,
first_variable=delayed_restoration
.restored_variables[checkpoint_name],
second_variable=variable,
network_name=delayed_restoration.network_name,
network_scope_name=delayed_restoration.network_scope_name))
else:
raise ValueError(
_default_naming_conflict_error_message(
mapped_name=checkpoint_name,
first_variable=delayed_restoration
.restored_variables[checkpoint_name],
second_variable=variable,
network_name=delayed_restoration.network_name,
network_scope_name=delayed_restoration.network_scope_name))
return variable
return _custom_getter, deferred_restorations
def _make_prefix_stripping_map_fn(scope_name):
"""Closure for stripping the scope name of a Network.
Implemented as a closure rather than a member function to avoid reference
cycles in deferred restorations (this function should not have a reference to
the Network which created it).
Args:
scope_name: The Network.scope_name to strip from variables.
Returns:
A scope_name-stripping default `map_fn` for the Network.
"""
def _strip_variable_prefix(original_variable_name):
"""The default map_func for saving or restoring variables.
Strips the variable prefix for the Network on which save/restore was called,
and leaves other variable names fully qualified in the checkpoint.
Args:
original_variable_name: The _shared_name of the variable (no :0 suffix) to
map.
Returns:
The checkpoint name of the variable.
"""
scope_name_with_slash = scope_name + "/"
if original_variable_name.startswith(scope_name_with_slash):
return original_variable_name[len(scope_name_with_slash):]
else:
return original_variable_name
return _strip_variable_prefix
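# Illustrative sketch (not part of the original module): demonstrates the
# default checkpoint-name mapping produced by `_make_prefix_stripping_map_fn`.
# The scope and variable names below are assumptions made for this example.
def _example_prefix_stripping():
  """Shows how the default map_func strips only the Network's own prefix."""
  map_fn = _make_prefix_stripping_map_fn("my_network_1")
  # Variables owned by the Network lose the Network prefix in the checkpoint.
  assert map_fn("my_network_1/dense/kernel") == "dense/kernel"
  # Variables shared from other Networks keep their fully qualified names.
  assert map_fn("other_network/dense/kernel") == "other_network/dense/kernel"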
@deprecation.deprecated(
date=None,
instructions=(
"Please inherit from tf.keras.Model instead of tfe.Network, and use "
"tf.keras.Model.save_weights."))
def save_network_checkpoint(network, save_path, global_step=None,
map_func=None):
"""Save variables from the Network to a checkpoint.
Args:
network: A Network object to save.
save_path: Either a checkpoint prefix or the name of a directory to save the
checkpoint in (in which case the checkpoint will be named based on the
Network name).
global_step: The global step to use when naming the checkpoint. If None
(default), we will first try to get the default global step. If that fails
because no default global step exists, then the checkpoint is created
without a global step suffix.
map_func: A function mapping fully qualified variable names (e.g.
'my_network_1/dense_1/kernel') to names in the checkpoint. By default (if
`map_func=None`), the variable prefix for the network being restored
(`Network.scope_name + '/'`, e.g. 'my_network_1/') is stripped and all
other variable names (shared with other Networks) are left unchanged.
Returns:
The checkpoint prefix for the saved checkpoint, which may be passed to
`Network.restore`.
Raises:
ValueError: If the Network has not yet been called, or if map_func results
in a name collision.
"""
if not network.built:
raise ValueError(
"Attempt to save the Network before it was first called. This means "
"variables have not yet been created, so there is nothing to save.")
network._set_scope() # scope_name should be available to map_funcs
if global_step is None:
global_step = training_util.get_global_step()
if os.path.isdir(save_path):
# If we were passed a directory, default to naming based on the Network
# name.
save_path = os.path.join(save_path, network.name.replace("/", "_"))
user_map_func = map_func
if map_func is None:
map_func = _make_prefix_stripping_map_fn(network.scope_name)
variable_map = {}
for variable in network.variables:
mapped_name = map_func(variable._shared_name)
if variable_map.setdefault(mapped_name, variable) is not variable:
if user_map_func is None:
# Instead of erroring out, we could just re-try and silently use the
# full variable names in the checkpoint. This could be odd for deeply
# nested sub-Networks (since the full prefix from the nesting would
# get added), so for now we'll let the user deal with this case.
raise ValueError(
_default_naming_conflict_error_message(
mapped_name=mapped_name,
first_variable=variable_map[mapped_name],
second_variable=variable,
network_name=network.name,
network_scope_name=network.scope_name))
else:
# The user passed their own problematic map_func.
raise ValueError(
("The map_func passed to save_network_checkpoint for the Network "
"'%s' resulted in two variables named '%s' ('%s' and '%s'). Try "
"stripping less from the variable names, or renaming parts of "
"the Network. For reference, variables created by sub-Layers of "
"this Network are prefixed with '%s', but if they are re-used "
"after being added to another Network, they will have that "
"Network's full variable prefix instead.") %
(network.name, mapped_name, variable_map[mapped_name]._shared_name,
variable._shared_name, network.scope_name))
if context.executing_eagerly():
sess = None
else:
sess = ops.get_default_session()
return saver_lib.Saver(variable_map).save(
sess=sess,
save_path=save_path,
write_meta_graph=False,
global_step=global_step)
def _add_deferred_restoration(layer, deferred_restoration):
"""Add a deferred restoration to this Layer and all children.
Restorations which are requested later have higher priority, and the highest
priority matching restoration is applied to a variable when it is created.
Args:
layer: The Layer (may not be a Network) to operate on.
deferred_restoration: A _DeferredRestoration object.
"""
# Networks don't create variables at the moment, so this append isn't strictly
# necessary. We could get by with only adding deferred restorations to
# non-Network Layers.
if isinstance(layer, Network):
layer._set_scope()
# Make sure this Layer has a deferred restoration queue and a custom getter,
# then add our request to it.
if not hasattr(layer, "_custom_getter"):
assert not hasattr(layer, "_deferred_restorations")
layer._custom_getter, layer._deferred_restorations = (
_make_custom_getter_for_deferred_restorations())
# We use set_custom_getter because it avoids recursively calling up the
# variable_scope tree. We've done the tree traversal ourselves and have added
# the request to each Layer which needs it.
layer._scope.set_custom_getter(layer._custom_getter)
layer._deferred_restorations.append(deferred_restoration)
if isinstance(layer, Network):
for sublayer in layer.layers:
if not isinstance(sublayer, Network):
layer._set_scope_for_nonnetwork_sublayer(sublayer)
_add_deferred_restoration(sublayer, deferred_restoration)
def _restore_existing_variables(network, save_path, map_func, user_map_func):
"""Use a standard Saver to restore existing variables from a checkpoint.
Args:
network: A Network object to restore.
save_path: The checkpoint prefix or directory to read from.
map_func: The function to use when mapping from variable names to checkpoint
names.
user_map_func: The original map_func passed by the user, for error checking.
Returns:
A dictionary mapping from checkpoint names to variable objects which have
been restored (for bookkeeping to avoid deferred restorations on these
variables).
Raises:
ValueError: If there is a name collision.
"""
existing_variables_by_checkpoint_name = {}
for variable in network.variables:
checkpoint_name = map_func(variable._shared_name)
if existing_variables_by_checkpoint_name.setdefault(
checkpoint_name, variable) is not variable:
if user_map_func is None:
raise ValueError(
_default_naming_conflict_error_message(
mapped_name=checkpoint_name,
first_variable=existing_variables_by_checkpoint_name[
checkpoint_name],
second_variable=variable,
network_name=network.name,
network_scope_name=network.scope_name))
else:
raise ValueError(
_restore_custom_map_func_error_message(
mapped_name=checkpoint_name,
first_variable=existing_variables_by_checkpoint_name[
checkpoint_name],
second_variable=variable,
network_name=network.name,
network_scope_name=network.scope_name))
if existing_variables_by_checkpoint_name:
if context.executing_eagerly():
sess = None
else:
sess = ops.get_default_session()
saver_lib.Saver(var_list=existing_variables_by_checkpoint_name).restore(
sess=sess, save_path=save_path)
return existing_variables_by_checkpoint_name
def _set_restore_on_create(network, save_path, map_func, user_map_func,
existing_variables_by_checkpoint_name):
"""If necessary, request deferred restorations of variables."""
checkpoint_reader = checkpoint_utils.load_checkpoint(save_path)
checkpointed_variables_to_restore = {}
for checkpoint_name, _ in checkpoint_utils.list_variables(save_path):
if checkpoint_name in existing_variables_by_checkpoint_name:
# This variable was already created and restored.
continue
# Save the variable for later restoration in a custom getter.
checkpointed_variables_to_restore[checkpoint_name] = (
checkpoint_reader.get_tensor(checkpoint_name))
# Only set a deferred restoration if there are checkpoint variables which
# have not been assigned to existing variables. Note that this loses out on
# some opportunity for error checking, but avoids creating
# _DeferredRestoration objects once a Network has been built (so that
# restoring in a loop does not take increasing amounts of memory).
if checkpointed_variables_to_restore:
if context.executing_eagerly():
sess = None
else:
sess = ops.get_default_session()
# We need a name for error messages. If we haven't been added to another
# Network yet, we're top-level.
network._finalize_name(False)
network._set_scope()
# Save a record of this restoration for use in the custom getter.
deferred_restoration = _DeferredRestoration(
map_func=map_func,
map_func_is_user=(user_map_func is not None),
checkpointed_variables_to_restore=checkpointed_variables_to_restore,
restored_variables={},
session=sess,
network_name=network.name,
network_scope_name=network.scope_name)
# Add the deferred registration to non-Network children, and request that
# Networks propagate the request to their children.
_add_deferred_restoration(network, deferred_restoration)
@deprecation.deprecated(
date=None,
instructions=(
"Please inherit from tf.keras.Model instead of tfe.Network, and use "
"tf.keras.Model.load_weights."))
def restore_network_checkpoint(network, save_path, map_func=None):
"""Restore the Network from a checkpoint.
If variables have already been created (typically when some or all of the
`Network` is built), they are assigned values from the checkpoint immediately,
overwriting any existing values (in graph mode the default session is used for
the assignments).
If there are checkpoint entries which do not correspond to any existing
variables in the `Network`, these values are saved for deferred restoration;
their initial values will be the checkpointed values once they are
created. Requests for multiple deferred restorations behave the same way as
immediate restorations, in that later requests will take priority over earlier
requests relevant to the same variable.
If this `Network` shares `Layer`s with another network, those `Layer`s will
also have their variables restored from the checkpoint.
Args:
network: A Network object to restore.
save_path: The return value of `tfe.save_network_checkpoint`, or a directory
to search for a checkpoint.
map_func: A function mapping fully qualified variable names (e.g.
'my_network_1/dense_1/kernel') to names in the checkpoint. By default (if
`map_func=None`), the variable prefix for the network being restored
(`Network.scope_name + '/'`, e.g. 'my_network_1/') is stripped and all
other variable names (shared with other Networks) are left unchanged. Note
that this is the _same_ map_func as `tfe.save_network_checkpoint`, not an
inverse mapping.
"""
network._finalize_name(parent_network=False)
network._set_scope() # scope_name should be available to map_funcs
if os.path.isdir(save_path):
    # If we were passed a directory, default to naming based on the Network
    # name.
save_path = os.path.join(save_path, network.name.replace("/", "_"))
user_map_func = map_func
if map_func is None:
map_func = _make_prefix_stripping_map_fn(network.scope_name)
# Step one is to restore any existing variables from the checkpoint.
existing_variables_by_checkpoint_name = _restore_existing_variables(
network=network,
save_path=save_path,
map_func=map_func,
user_map_func=user_map_func)
# Step two is to set a custom getter which restores variables on creation,
# for those variables which have not been added to sub-Layers yet.
_set_restore_on_create(
network=network,
save_path=save_path,
map_func=map_func,
user_map_func=user_map_func,
existing_variables_by_checkpoint_name=(
existing_variables_by_checkpoint_name))
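# Illustrative usage sketch (not part of the original module): saves a built
# Network's variables and restores them into a second, not-yet-built instance,
# relying on the deferred restoration described above. The network class, the
# layer size, and the `checkpoint_dir` argument are assumptions made for this
# example.
def _example_network_checkpoint_round_trip(checkpoint_dir, inputs):
  """Round-trips a small Network through save/restore; a minimal sketch."""
  from tensorflow.python.layers import core as core_layers
  class _OneLayerNet(Network):
    def __init__(self, name=None):
      super(_OneLayerNet, self).__init__(name=name)
      self.dense = self.track_layer(core_layers.Dense(1))
    def call(self, x):
      return self.dense(x)
  first = _OneLayerNet(name="example_net")
  first(inputs)  # Builds variables so there is something to save.
  prefix = save_network_checkpoint(first, checkpoint_dir)
  second = _OneLayerNet(name="example_net_restored")
  restore_network_checkpoint(second, prefix)
  # Variables for `second` are created here and initialized from `prefix`.
  return second(inputs)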
|
tensorflow-master
|
tensorflow/contrib/eager/python/network.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Metrics classes for computing the output of an evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import summary_ops_v2 as summary_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training.tracking import base as trackable
_to_replace = re.compile("[^A-Za-z0-9.]")
class Metric(trackable.Trackable):
"""A metric holds state for aggregating statistics over an evaluation run.
Example use with eager execution:
```python
m = SomeMetric(...)
for input in ...:
m(input)
print(m.result())
```
Example use with graph execution:
```python
m = SomeMetric(...)
inputs = ... # Some tensors to compute the metric on.
m_update = m(inputs)
# Variables defined in first call, so get the initialization op afterwards.
m_init = m.init_variables() # or tf.compat.v1.global_variables_initializer()
m_result = m.result()
with tf.compat.v1.Session() as sess:
sess.run(m_init)
for input in ...:
sess.run(m_update)
print(sess.run(m_result))
```
Example use with graph execution with placeholders and feed_dict:
```python
m = SomeMetric(...)
m_placeholder = tf.compat.v1.placeholder(...)
m_update = m(m_placeholder)
# Variables defined in first call, so get the initialization op afterwards.
m_init = m.init_variables() # or tf.compat.v1.global_variables_initializer()
m_result = m.result()
with tf.compat.v1.Session() as sess:
sess.run(m_init)
for input in ...:
sess.run(m_update, feed_dict={m_placeholder: input})
print(sess.run(m_result))
```
Descendants will implement:
* `build()`: All variables should be created in this method, by calling
`self.add_variable()` as in: `self.var = self.add_variable(...)`
build() will be called in the first invocation of `__call__()`, with
    the same arguments that are passed to `call()`.
* `call()`: Has all updates to variables, as in:
self.var.assign_add(...)
* `result()`: Computes and returns a final value for the metric
from the variables in `self`.
Descendants may override `aggregate()`, but usually won't need to. It
adds in the state from a list of metrics of the same type as `self`.
(Default is to sum all the variables.) Note that users should not call
  `aggregate()`; it is intended for use by TensorFlow infrastructure.
"""
def __init__(self, name=None, use_global_variables=False):
self._built = False
self._vars = []
self._initial_values = {}
self._updates = []
self._use_global_variables = use_global_variables
name = name or self.__class__.__name__
# Replace things like spaces in name to create a valid scope name.
scope_name = _to_replace.sub("_", name)
# We create the variable scope now to get the unique name that will
# be used as a variable prefix when build() calls add_variable().
with variable_scope.variable_scope(
scope_name, use_resource=True, reuse=False) as scope:
pos = scope.name.rfind(scope_name)
self._name = name + scope.name[pos + len(scope_name):]
self._scope = scope
# Ensures that if the user calls build directly we still set self._built to
# True to prevent variables from being recreated.
self._build = self.build
def actual_build(*args, **kwargs):
self._build(*args, **kwargs)
self._built = True
self.build = actual_build
self.build.__doc__ = self._build.__doc__
# Captures construction scope for proper initialization.
if context.executing_eagerly():
self._construction_scope = context.eager_mode
else:
# We make self.call() into a graph callable here, so that we can
# return a single op that performs all of the variable updates.
self._construction_scope = ops.get_default_graph().as_default
self.call = function.defun(self.call)
# ---- API for users ----
def __call__(self, *args, **kwargs):
"""Returns op to execute to update this metric for these inputs.
Returns None if eager execution is enabled.
Returns a graph-mode function if graph execution is enabled.
Args:
*args:
**kwargs: A mini-batch of inputs to the Metric, passed on to `call()`.
"""
if not self._built:
with variable_scope.variable_scope(
self._scope), self._construction_scope():
self.build(*args, **kwargs)
self._built = True
return self.call(*args, **kwargs)
@property
def name(self):
return self._name
@property
def variables(self):
return self._vars
def init_variables(self):
"""Initializes this Metric's variables.
Should be called after variables are created in the first execution
of `__call__()`. If using graph execution, the return value should be
`run()` in a session before running the op returned by `__call__()`.
(See example above.)
Returns:
If using graph execution, this returns an op to perform the
initialization. Under eager execution, the variables are reset to their
initial values as a side effect and this function returns None.
"""
if context.executing_eagerly():
for v in self._vars:
v.assign(self._initial_values[v])
else:
return control_flow_ops.group([v.initializer for v in self._vars])
# ---- To be implemented by descendants ---
def build(self, *args, **kwargs):
"""Method to create variables.
Called by `__call__()` before `call()` for the first time.
Args:
*args:
**kwargs: The arguments to the first invocation of `__call__()`.
`build()` may use the shape and/or dtype of these arguments
when deciding how to create variables.
"""
raise NotImplementedError("Metrics must define a build() member function")
def call(self, *args, **kwargs):
"""Accumulates statistics for the metric. Users should use __call__ instead.
Note: This function is executed as a graph function in graph mode.
This means:
a) Operations on the same resource are executed in textual order.
This should make it easier to do things like add the updated
value of a variable to another, for example.
b) You don't need to worry about collecting the update ops to execute.
All update ops added to the graph by this function will be executed.
As a result, code should generally work the same way with graph or
eager execution.
Args:
*args:
**kwargs: A mini-batch of inputs to the Metric, as passed to
`__call__()`.
"""
raise NotImplementedError("Metrics must define a call() member function")
def result(self): # TODO(josh11b): Add an optional summary_writer parameter.
"""Computes and returns a final value for the metric."""
raise NotImplementedError("Metrics must define a result() member function")
def value(self):
"""In graph mode returns the result Tensor while in eager the callable."""
if context.executing_eagerly():
return self.result
else:
return self.result()
  # We can support two different strategies for doing data-parallel
# distributed metric computations:
# * Put metric variables on the first device and rely on small
# bandwidth needed to do updates. (Doesn't require any particular
# code in Metric implementations.)
# * Ask each type of metric to define an aggregation method to run
# at the end of eval to merge across devices. Note: this is good
# for the use case where they want to record the metric's state
# for each example and then later decide which examples they want
# to aggregate over. (Recommended -- not too much harder and adds
# flexibility over previous option.)
# I'm going with the second strategy since we can define a default
# implementation of aggregate() that will work for most descendants.
def aggregate(self, metrics):
"""Adds in the state from a list of metrics.
Default implementation sums all the metric variables.
Args:
metrics: A list of metrics with the same type as `self`.
Raises:
      TypeError: If any metric in `metrics` is not the same type as `self`.
      ValueError: If metrics contains invalid data.
"""
for m in metrics:
if type(self) != type(m): # pylint: disable=unidiomatic-typecheck
raise TypeError("All metrics must be the same type, '%s' != '%s'." %
(type(self), type(m)))
# pylint: disable=protected-access
for i in range(len(self._vars)):
if any(m._vars[i].name != self._vars[i].name for m in metrics):
raise ValueError("All metrics must have variables in the same order.")
self._vars[i].assign_add(math_ops.add_n([m._vars[i] for m in metrics]))
# pylint: enable=protected-access
# ---- For use by descendants ---
def add_variable(self, name, shape=None, dtype=None, initializer=None):
"""***Only for use by descendants of Metric***."""
if self._built:
raise RuntimeError("Can't call add_variable() except in build().")
if context.executing_eagerly():
collections = None
else:
if self._use_global_variables:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
else:
collections = [ops.GraphKeys.LOCAL_VARIABLES]
collections += [ops.GraphKeys.METRIC_VARIABLES]
# Variables are Trackable dependencies of Metrics regardless of the
# global/local distinction. Users can avoid saving variables by not adding a
# dependency on the Metric.
v = self._add_variable_with_custom_getter(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
trainable=False,
collections=collections,
use_resource=True,
getter=variable_scope.get_variable,
# Raise duplicate variable exceptions from get_variable rather than
# Trackable.
overwrite=True)
self._vars.append(v)
if context.executing_eagerly():
self._initial_values[v] = v.value()
return v
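# The class below is an illustrative sketch, not part of the original API: a
# hypothetical Metric subclass showing the build()/call()/result() contract
# described in the Metric docstring (it simply counts how many examples it saw).
class _ExampleCount(Metric):
  """Hypothetical Metric used purely to illustrate subclassing."""

  def build(self, *args, **kwargs):
    # All variables must be created here via self.add_variable().
    del args, kwargs
    self.count = self.add_variable(
        name="count", shape=(), dtype=dtypes.int64,
        initializer=init_ops.zeros_initializer)

  def call(self, values):
    # Updates to variables go here; in graph mode this runs as a defun.
    self.count.assign_add(
        math_ops.cast(array_ops.size(values), dtypes.int64))
    return values

  def result(self):
    # Computes the final value from the accumulated state.
    return self.count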
class Mean(Metric):
"""Computes the (weighted) mean of the given values."""
def __init__(self, name=None, dtype=dtypes.float64,
use_global_variables=False):
super(Mean, self).__init__(name=name,
use_global_variables=use_global_variables)
self.dtype = dtype
def build(self, *args, **kwargs):
    # build() does not use call's arguments; by accepting *args and **kwargs
    # we make it easier to inherit from Mean().
del args, kwargs
self.numer = self.add_variable(name="numer", shape=(),
dtype=self.dtype,
initializer=init_ops.zeros_initializer)
self.denom = self.add_variable(name="denom", shape=(),
dtype=self.dtype,
initializer=init_ops.zeros_initializer)
def call(self, values, weights=None):
"""Accumulate statistics for computing the mean.
For example, if values is [1, 3, 5, 7] then the mean is 4.
If the weights were specified as [1, 1, 0, 0] then the mean would be 2.
Args:
values: Tensor with the per-example value.
weights: Optional weighting of each example. Defaults to 1.
Returns:
The arguments, for easy chaining.
"""
if weights is None:
self.denom.assign_add(
math_ops.cast(array_ops.identity(array_ops.size(values)), self.dtype))
values = math_ops.reduce_sum(values)
self.numer.assign_add(math_ops.cast(values, self.dtype))
else:
weights = math_ops.cast(weights, self.dtype)
self.denom.assign_add(math_ops.reduce_sum(weights))
values = math_ops.cast(values, self.dtype) * weights
self.numer.assign_add(math_ops.reduce_sum(values))
if weights is None:
return values
return values, weights
def result(self, write_summary=True):
"""Returns the result of the Metric.
Args:
write_summary: bool indicating whether to feed the result to the summary
before returning.
Returns:
      The aggregated metric (the weighted mean) as a scalar Tensor.
    """
# Convert the boolean to tensor for tf.cond, if it is not.
if not isinstance(write_summary, ops.Tensor):
write_summary = ops.convert_to_tensor(write_summary)
t = self.numer / self.denom
def write_summary_f():
summary_ops.scalar(name=self.name, tensor=t)
return t
smart_cond.smart_cond(write_summary,
write_summary_f,
lambda: t,
name="")
return t
class Accuracy(Mean):
"""Calculates how often `predictions` matches `labels`.
Attributes:
name: name of the accuracy object
dtype: data type of the tensor
"""
def __init__(self, name=None, dtype=dtypes.float64):
"""Inits Accuracy class with name and dtype."""
super(Accuracy, self).__init__(name=name, dtype=dtype)
def call(self, labels, predictions, weights=None):
"""Accumulate accuracy statistics.
For example, if labels is [1, 2, 3, 4] and predictions is [0, 2, 3, 4]
then the accuracy is 3/4 or .75. If the weights were specified as
[1, 1, 0, 0] then the accuracy would be 1/2 or .5.
`labels` and `predictions` should have the same shape and type.
Args:
labels: Tensor with the true labels for each example. One example
per element of the Tensor.
predictions: Tensor with the predicted label for each example.
weights: Optional weighting of each example. Defaults to 1.
Returns:
The arguments, for easy chaining.
"""
check_ops.assert_equal(
array_ops.shape(labels), array_ops.shape(predictions),
message="Shapes of labels and predictions are unequal")
matches = math_ops.equal(labels, predictions)
matches = math_ops.cast(matches, self.dtype)
super(Accuracy, self).call(matches, weights=weights)
if weights is None:
return labels, predictions
return labels, predictions, weights
class CategoricalAccuracy(Mean):
"""Calculates how often `predictions` matches `labels`.
This class is compatible with `tf.keras.losses.categorical_crossentropy`,
`tf.nn.softmax_cross_entropy_with_logits`,
`tf.compat.v1.losses.softmax_cross_entropy`.
Attributes:
name: name of the accuracy object.
dtype: data type of tensor.
"""
def __init__(self, name=None, dtype=dtypes.float64):
"""Inits CategoricalAccuracy with name and dtype."""
super(CategoricalAccuracy, self).__init__(name=name, dtype=dtype)
def call(self, labels, predictions, weights=None):
"""Accumulate accuracy statistics.
`labels` and `predictions` should have the same shape.
As argmax is being done here, labels and predictions type
can be different.
Args:
labels: One-hot Tensor.
predictions: Tensor with the logits or probabilities for each example.
weights: Optional weighting of each example. Defaults to 1.
Returns:
The arguments, for easy chaining.
"""
check_ops.assert_equal(
array_ops.shape(labels), array_ops.shape(predictions),
message="Shapes of labels and predictions are unequal")
labels = math_ops.argmax(labels, axis=-1)
predictions = math_ops.argmax(predictions, axis=-1)
matches = math_ops.equal(labels, predictions)
matches = math_ops.cast(matches, self.dtype)
super(CategoricalAccuracy, self).call(matches, weights=weights)
if weights is None:
return labels, predictions
return labels, predictions, weights
class BinaryAccuracy(Mean):
"""Calculates how often `predictions` matches `labels`.
This class is compatible with `tf.keras.losses.binary_crossentropy`,
`tf.compat.v1.losses.sigmoid_cross_entropy`,
`tf.nn.sigmoid_cross_entropy_with_logits`.
If there is more than one label, this will become multi-label classification.
Attributes:
name: name of the accuracy object.
    threshold: Used to binarize the predictions.
      Set it to 0.5 if the predictions are probabilities, or to 0 if the
      predictions are logits. You can adjust the threshold to trade off
      precision and recall.
dtype: data type of tensor.
"""
def __init__(self, threshold, name=None, dtype=dtypes.float64):
"""Inits BinaryAccuracy with name, threshold and dtype."""
super(BinaryAccuracy, self).__init__(name=name, dtype=dtype)
self.threshold = threshold
def call(self, labels, predictions, weights=None):
"""Accumulate accuracy statistics.
`labels` and `predictions` should have the same shape and type.
Args:
labels: Binary Tensor(containing 0 or 1).
predictions: Tensor with probabilities or logits.
weights: Optional weighting of each example. Defaults to 1.
Returns:
The arguments, for easy chaining.
"""
check_ops.assert_equal(
array_ops.shape(labels), array_ops.shape(predictions),
message="Shapes of labels and predictions are unequal")
predictions = ops.convert_to_tensor(predictions)
predictions = predictions > self.threshold
# Convert labels to bool to match predictions.
labels = math_ops.cast(labels, dtypes.bool)
matches = math_ops.equal(labels, predictions)
matches = math_ops.cast(matches, self.dtype)
super(BinaryAccuracy, self).call(matches, weights=weights)
if weights is None:
return labels, predictions
return labels, predictions, weights
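# Illustrative usage sketch (not part of the original module): BinaryAccuracy
# over logits, so the threshold is 0 as described in the class docstring.
# Assumes eager execution is enabled by the caller; the numbers are made up.
def _example_binary_accuracy():
  acc = BinaryAccuracy(threshold=0.)
  # Logits > 0 are treated as class 1; the last prediction is wrong.
  acc([1., 0., 1., 0.], [2.3, -1.2, 0.7, 0.4])
  return acc.result()  # => 0.75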
class SparseAccuracy(Mean):
"""Calculates how often `predictions` matches `labels`.
This class is compatible with
`tf.keras.losses.sparse_categorical_crossentropy`,
`tf.nn.sparse_softmax_cross_entropy_with_logits`,
`tf.compat.v1.losses.sparse_softmax_cross_entropy`.
Attributes:
name: name of the accuracy object
dtype: data type of tensor.
"""
def __init__(self, name=None, dtype=dtypes.float64):
"""Inits SparseAccuracy with name and dtype."""
super(SparseAccuracy, self).__init__(name=name, dtype=dtype)
def call(self, labels, predictions, weights=None):
"""Accumulate accuracy statistics.
    `labels` and `predictions` should have the same shape, except that
    `predictions` must have one additional trailing dimension equal to the
    number of classes (the classes being predicted over).
    The types of `labels` and `predictions` may differ.
Args:
labels: Tensor of shape (batch_size, ) containing integers
predictions: Tensor with the logits or probabilities for each example.
weights: Optional weighting of each example. Defaults to 1.
Returns:
The arguments, for easy chaining.
"""
check_ops.assert_equal(
array_ops.shape(labels), array_ops.shape(predictions)[0],
message="First axis of labels and predictions is unequal")
predictions = math_ops.argmax(predictions, axis=-1)
labels = math_ops.cast(labels, dtypes.int64)
matches = math_ops.equal(labels, predictions)
matches = math_ops.cast(matches, self.dtype)
super(SparseAccuracy, self).call(matches, weights=weights)
if weights is None:
return labels, predictions
return labels, predictions, weights
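# Illustrative usage sketch (not part of the original module), mirroring the
# numbers in the Mean and Accuracy docstrings above. Assumes eager execution.
def _example_mean_and_accuracy():
  m = Mean()
  m([1.0, 3.0, 5.0, 7.0])            # mean of these values is 4.0
  acc = Accuracy()
  acc([1, 2, 3, 4], [0, 2, 3, 4])    # 3 of 4 predictions match => 0.75
  return m.result(), acc.result()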
|
tensorflow-master
|
tensorflow/contrib/eager/python/metrics_impl.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class Evaluator holds Metrics for the duration of an evaluation run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib.eager.python import datasets
from tensorflow.contrib.eager.python import metrics
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import summary_ops_v2 as summary_ops
class Evaluator(object):
"""This holds and updates Metrics for the duration of a single eval run.
Usage:
evaluator = my_model.evaluator() # or MyEvaluator(my_model)
for example_batch in ...:
evaluator(example_batch)
results = evaluator.all_metric_results(optional_summary_logdir)
Or, if you are getting your examples from a tf.data.Dataset, you can use
the evaluate_on_dataset() method.
Implementers of Evaluators should
(a) Call `track_metric()` and/or `track_evaluator()` in __init__().
(b) Override the `call()` method. It will be passed the output of the
model's `eval_data()` method, and should call its contained metrics
(treating them as callables) and any child Evaluators (using their
call() method to avoid calling eval_data() again).
Args:
model: A `Model` object with an `eval_data()` method.
"""
def __init__(self, model):
self._model = model
self._metrics = {}
self._evaluators = {}
if not context.executing_eagerly():
self.call = function.defun(self.call)
# ---- API for users ----
def __call__(self, *args, **kwargs):
"""Update metrics with a minibatch of input examples.
Args:
*args:
**kwargs: Arguments representing an input mini-batch of examples to
pass to self.model.eval_data().
Returns:
The op to execute or None if executing eagerly.
"""
return self.call(self._model.eval_data(*args, **kwargs))
def init_variables(self):
"""Return an op for initializing all contained uninitialized variables.
Only for graph execution. Should be called after variables are created
in the first execution of __call__().
Returns:
An op.
Raises:
RuntimeError: if eager execution is enabled.
@compatibility(eager)
Only for graph execution.
@end_compatibility
"""
if context.executing_eagerly():
raise RuntimeError("Evaluator.init_variables() not needed when "
"eager execution is enabled.")
return control_flow_ops.group([m.init_variables() for _, m in self.metrics])
def all_metric_results(self, summary_logdir=None):
"""Computes results for all contained metrics.
Args:
summary_logdir: An optional string. If specified, metric results
will be written as summaries to this directory.
Returns:
A `dict` mapping string names to tensors.
"""
if summary_logdir is None:
with summary_ops.never_record_summaries():
return self._all_metric_results()
else:
def f():
with summary_ops.create_file_writer(
summary_logdir).as_default(), summary_ops.always_record_summaries():
return self._all_metric_results()
if context.executing_eagerly():
return f()
else:
return function.defun(f)()
def _all_metric_results(self):
"""Implementation of `all_metric_results` in the summary context."""
results = {}
for name, metric in six.iteritems(self._metrics):
results[name] = metric.result()
for prefix, evaluator in six.iteritems(self._evaluators):
for name, metric in six.iteritems(evaluator._metrics): # pylint: disable=protected-access
results[prefix + "/" + name] = metric.result()
return results
def evaluate_on_dataset(self, dataset, *args, **kwargs):
"""Convenience method for performing an eval on a Dataset.
Args:
dataset: Dataset object with the input data to evaluate on.
*args:
**kwargs: Optional additional arguments to __call__(), except
`summary_logdir`: if specified, metrics will be written as summaries
to this directory.
Returns:
@compatibility(eager)
When eager execution is enabled, this returns the result of performing
an evaluation as a dictionary. With graph execution, this returns a tuple
(init_op, call_op, results_op) which may be executed using this code:
```python
sess.run(init_op)
try:
while True:
sess.run(call_op)
except tf.errors.OutOfRangeError:
pass
return sess.run(results_op) # A dictionary
# equivalently:
return evaluator.run_evaluation(init_op, call_op, results_op, sess=sess)
```
@end_compatibility
"""
summary_logdir = kwargs.pop("summary_logdir", None)
if context.executing_eagerly():
for example in datasets.Iterator(dataset):
self.__call__(example, *args, **kwargs)
return self.all_metric_results(summary_logdir)
# Graph construction
next_value = dataset_ops.make_one_shot_iterator(dataset).get_next()
# Function inlining destroys strict inputs semantics (function body might
# start execution before all inputs are ready). When iterator is exhausted
# and throws out of range error, function body might be partially executed.
# To prevent this we add an explicit control dependency from the 'get_next'.
with ops.control_dependencies([next_value]):
has_next_value = control_flow_ops.no_op(name="iterator_has_next")
with ops.control_dependencies([has_next_value]):
call_op = self.__call__(next_value, *args, **kwargs)
init_op = self.init_variables()
results_op = self.all_metric_results(summary_logdir)
return (init_op, call_op, results_op)
@staticmethod
def run_evaluation(init_op, call_op, results_op, sess=None):
"""Convenience method for running the ops returned by evaluate_on_dataset.
Args:
init_op: An op that initializes/resets evaluation state.
call_op: An op that updates evaluation state on a mini-batch of examples.
        Must raise a tf.errors.OutOfRangeError when done.
results_op: A dictionary of tensors that compute the final evaluation
results from the evaluation state.
sess: The Session to run the evaluation in. Defaults to the default
Session.
Returns:
A dictionary of values, parallel to results_op.
Raises:
RuntimeError: if eager execution is enabled.
@compatibility(eager)
Only for graph execution.
@end_compatibility
"""
if context.executing_eagerly():
raise RuntimeError("Evaluator.run_evaluation() not supported when "
"eager execution is enabled.")
sess = sess or ops.get_default_session()
sess.run(init_op)
try:
while True:
sess.run(call_op)
except errors_impl.OutOfRangeError:
pass
return sess.run(results_op)
# ---- To be implemented by descendants ---
def call(self, eval_data):
"""Update metrics using the output of self.model.
Note: This function is executed as a graph function in graph mode.
This means:
a) Operations on the same resource are executed in textual order.
This should make it easier to do things like add the updated
value of a variable to another, for example.
b) You don't need to worry about collecting the update ops to execute.
All update ops added to the graph by this function will be executed.
As a result, code should generally work the same way with graph or
eager execution.
Args:
eval_data: The output of self.model.eval_data() on a mini-batch of
examples.
"""
raise NotImplementedError("Evaluators must define a call member function.")
# ---- For use by descendants ---
@property
def model(self):
return self._model
def track_metric(self, metric):
"""Add a Metric to be tracked.
Metrics can only be tracked by one `Evaluator`. Metrics must be
tracked or they will not appear in `all_metric_results()`.
Args:
metric: A `Metric` object.
Returns:
The `metric` passed into this function.
Raises:
RuntimeError: If called before __init__.
TypeError: If `metric` is not of the correct type.
ValueError: If there is a name collision between Metrics or `metric`
has already been added to another `Evaluator`.
"""
if not hasattr(self, "_metrics"):
raise RuntimeError(
"Need to call Evaluator.__init__ before adding metrics")
if not isinstance(metric, metrics.Metric):
raise TypeError(
"Evaluator.track_metric() passed type %s, not a tfe.metrics.Metric" %
(type(metric),))
if metric.name in self._metrics:
if metric is self._metrics[metric.name]:
return metric
      raise ValueError(
          "Attempt to add two Metrics with the name '%s' to the same Evaluator "
          "'%s'." % (metric.name, type(self).__name__))
# pylint: disable=protected-access
if hasattr(metric, "_added_to_an_evaluator"):
raise ValueError("Metric %s already added to Evaluator %s" %
(metric.name, metric._added_to_an_evaluator))
metric._added_to_an_evaluator = self.__class__.__name__
# pylint: enable=protected-access
self._metrics[metric.name] = metric
return metric
def track_evaluator(self, prefix, evaluator):
"""Add a contained `Evaluator`.
This is for delegating to another `Evaluator`, e.g. for when you have a
model with multiple heads. Users should manually invoke the child
`Evaluator`'s `call` method from their `call` method.
Args:
prefix: A string. Metrics from `evaluator` are exported with this
prefix and a '/'.
evaluator: An `Evaluator` object.
Returns:
The value of `evaluator` passed into this function.
Raises:
RuntimeError: If called before __init__.
TypeError: If `evaluator` is not of the correct type.
ValueError: If an `Evaluator` has already been added with that `prefix`.
"""
if not hasattr(self, "_evaluators"):
raise RuntimeError(
"Need to call Evaluator.__init__ before adding evaluators")
if not isinstance(evaluator, Evaluator):
raise TypeError(
"Evaluator.track_evaluator() passed type %s, not a tfe.Evaluator." %
(type(evaluator),))
if prefix in self._evaluators:
if evaluator is self._evaluators[prefix]:
return evaluator
      raise ValueError(
          "Attempt to add two Evaluators with the same prefix '%s'." % prefix)
self._evaluators[prefix] = evaluator
return evaluator
@property
def metric_variables(self):
v = []
for metric in six.itervalues(self._metrics):
v += metric.variables
for evaluator in six.itervalues(self._evaluators):
v += evaluator.metric_variables
return v
@property
def metrics(self):
"""Returns a list of (prefix, metric) pairs."""
m = []
for metric in six.itervalues(self._metrics):
m.append(("", metric))
for prefix, evaluator in six.iteritems(self._evaluators):
      m += [(prefix + "/" + p, met) for p, met in evaluator.metrics]
return m
class SparseSoftmaxEvaluator(Evaluator):
"""Evaluator for a sparse softmax model.
Computes a standard set of metrics for single-label, multi-class
models.
Args:
model: A `SparseSoftmaxModel` object or a `Model` whose `eval_data()`
method produces a `dict` containing values for the loss, true
label, predicted class, and optional weights.
loss_key: Optional key for looking up the value of the loss in the
`eval_data()` dict. Defaults to "loss".
label_key: Optional key for looking up the value of the label in the
`eval_data()` dict. Defaults to "label".
predicted_class_key: Optional key for looking up the value of the
predicted class in the `eval_data()` dict. Defaults to "predicted_class".
weights_key: Optional key for looking up the value of the weights
in the `eval_data()` dict. Defaults to "weights". Note that weights
are optional, and default to 1 if not present in `eval_data`.
"""
def __init__(self, model, loss_key="loss", label_key="label",
predicted_class_key="predicted_class", weights_key="weights"):
super(SparseSoftmaxEvaluator, self).__init__(model)
# TODO(josh11b): Expand this to include everything from the standard
# SparseSoftmax Head.
self.avg_loss = self.track_metric(metrics.Mean("Avg Loss"))
self.accuracy = self.track_metric(metrics.Accuracy())
self.loss_key = loss_key
self.label_key = label_key
self.predicted_class_key = predicted_class_key
self.weights_key = weights_key
def call(self, eval_data):
"""Update metrics for `eval_data` dict (described above)."""
weights = eval_data.get(self.weights_key, None)
if weights is None:
self.avg_loss(eval_data[self.loss_key])
self.accuracy(eval_data[self.label_key],
eval_data[self.predicted_class_key])
else:
self.avg_loss(eval_data[self.loss_key], weights=weights)
self.accuracy(eval_data[self.label_key],
eval_data[self.predicted_class_key],
weights=weights)
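# Illustrative sketch (not part of the original module): a minimal custom
# Evaluator following the track_metric()/call() contract described in the
# Evaluator docstring. The model is assumed to expose an eval_data() method
# returning a dict with hypothetical "label" and "predicted_class" keys.
class _ExampleAccuracyEvaluator(Evaluator):
  """Hypothetical Evaluator tracking a single accuracy metric."""

  def __init__(self, model):
    super(_ExampleAccuracyEvaluator, self).__init__(model)
    self.accuracy = self.track_metric(metrics.Accuracy())

  def call(self, eval_data):
    # eval_data is whatever self.model.eval_data() returned for a mini-batch.
    self.accuracy(eval_data["label"], eval_data["predicted_class"])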
|
tensorflow-master
|
tensorflow/contrib/eager/python/evaluator.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""EXPERIMENTAL utilities for parameter server training with eager execution.
Note: this should eventually be merged with the distribution strategy for
ParameterServer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import time
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training.tracking import base as trackable
def _eager_safe_variable_handle(shape, dtype, shared_name, name, graph_mode):
"""Creates a variable handle with information to do shape inference."""
container = ops.get_default_graph()._container # pylint: disable=protected-access
if container is None:
container = ""
handle = resource_variable_ops.var_handle_op(shape=shape, dtype=dtype,
shared_name=shared_name,
name=name,
container=container)
if graph_mode:
return handle
with context.graph_mode(), ops.Graph().as_default() as graph:
h = resource_variable_ops.var_handle_op(shape=shape, dtype=dtype,
shared_name=shared_name,
name=name,
container=container)
# Tensor._handle_data contains information for the shape-inference code to
# know the shape and dtype of the variable pointed to by a handle. Since
# shape inference doesn't run in eager mode we copy this data here for when
# the handle is captured by an eager mode function.
# pylint: disable=protected-access
handle._handle_data = resource_variable_ops.get_resource_handle_data(h)
# pylint: enable=protected-access
# Clean up op->graph->op reference cycles.
ops.dismantle_graph(graph)
return handle
class SharedVariable(resource_variable_ops.BaseResourceVariable):
"""Experimental Variable designed for parameter server training.
  A SharedVariable has a name; two SharedVariable instances with the same name
  share the same value, even across different Sessions, as long as they are
  placed on the same device.
The storage associated with SharedVariables is also not deleted when they go
out of scope.
"""
def __init__(self, # pylint: disable=super-init-not-called
initial_value=None,
trainable=True,
name=None,
dtype=None,
constraint=None,
initialize=True,
**unused_kwargs):
"""Creates a variable.
Args:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called.
(Note that initializer functions from init_ops.py must first be bound
to a shape before being used here.)
trainable: If `True`, automatically watches this variable on GradientTape
whenever it's used.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type.
If None, either the datatype will be kept (if initial_value is
a Tensor) or float32 will be used (if it is a Python object convertible
to a Tensor).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
initialize: if True, runs initialization in eager execution; leaves the
variable uninitialized otherwise.
Raises:
ValueError: If the initial value is not specified, or does not have a
shape and `validate_shape` is `True`.
"""
if initial_value is None:
raise ValueError("initial_value must be specified.")
init_from_fn = callable(initial_value)
if isinstance(initial_value, ops.Tensor) and hasattr(
initial_value, "graph") and initial_value.graph.building_function:
raise ValueError("Tensor-typed variable initializers must either be "
"wrapped in an init_scope or callable "
"(e.g., `tf.Variable(lambda : "
"tf.truncated_normal([10, 40]))`) when building "
"functions. Please file a feature request if this "
"restriction inconveniences you.")
if constraint is not None and not callable(constraint):
raise ValueError("The `constraint` argument must be a callable.")
if isinstance(initial_value, trackable.CheckpointInitialValue):
self._maybe_initialize_trackable()
self._update_uid = initial_value.checkpoint_position.restore_uid
initial_value = initial_value.wrapped_value
self._trainable = trainable
self._save_slice_info = None
# Store the graph key so optimizers know how to only retrieve variables from
# this graph.
self._graph_key = ops.get_default_graph()._graph_key # pylint: disable=protected-access
with ops.init_scope():
self._in_graph_mode = not context.executing_eagerly()
with ops.name_scope(name, "Variable", []
if init_from_fn else [initial_value]) as name:
# pylint: disable=protected-access
handle_name = ops.name_from_scope_name(name)
shared_name = handle_name
if init_from_fn:
# Use attr_scope and device(None) to simulate the behavior of
# colocate_with when the variable we want to colocate with doesn't
# yet exist.
if self._in_graph_mode:
with ops.name_scope("Initializer"), ops.device(None):
initial_value = ops.convert_to_tensor(
initial_value(), name="initial_value", dtype=dtype)
self._handle = _eager_safe_variable_handle(
shape=initial_value.get_shape(),
dtype=initial_value.dtype.base_dtype,
shared_name=shared_name,
name=name,
graph_mode=self._in_graph_mode)
self._shape = initial_value.get_shape()
else:
initial_value = initial_value()
with ops.name_scope("Initializer"):
initial_value = ops.convert_to_tensor(
initial_value, name="initial_value", dtype=dtype)
self._handle = _eager_safe_variable_handle(
shape=initial_value.get_shape(),
dtype=initial_value.dtype.base_dtype,
shared_name=shared_name,
name=name,
graph_mode=False)
self._shape = initial_value.get_shape()
# pylint: enable=protected-access
# Or get the initial value from a Tensor or Python object.
else:
with ops.name_scope("Initializer"):
initial_value = ops.convert_to_tensor(
initial_value, name="initial_value", dtype=dtype)
# pylint: disable=protected-access
if (self._in_graph_mode and initial_value is not None and
initial_value.op._get_control_flow_context() is not None):
raise ValueError(
"Initializer for variable %s is from inside a control-flow "
"construct, such as a loop or conditional. When creating a "
"variable inside a loop or conditional, use a lambda as the "
"initializer." % name)
# pylint: enable=protected-access
self._handle = _eager_safe_variable_handle(
shape=initial_value.get_shape(),
dtype=initial_value.dtype.base_dtype,
shared_name=shared_name,
name=name,
graph_mode=self._in_graph_mode)
self._shape = initial_value.get_shape()
self._unique_id = shared_name
self._initial_value = initial_value if self._in_graph_mode else None
self._handle_name = handle_name + ":0"
self._dtype = initial_value.dtype.base_dtype
self._constraint = constraint
if self._in_graph_mode:
with ops.name_scope("IsInitialized"):
self._is_initialized_op = (
resource_variable_ops.var_is_initialized_op(self._handle))
if initial_value is not None:
with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
self._initializer_op = (
resource_variable_ops.assign_variable_op(
self._handle,
self._try_guard_against_uninitialized_dependencies(
initial_value),
name=n))
with ops.name_scope("Read"), ops.colocate_with(self._handle):
# Manually assign reads to the handle's device to avoid log
# messages.
with ops.device(self._handle.device):
value = self._read_variable_op()
self._graph_element = value
self._cached_value = None
else:
if initialize:
resource_variable_ops.assign_variable_op(self._handle,
initial_value)
self._is_initialized_op = None
self._initializer_op = None
self._graph_element = None
self._cached_value = None
self._handle_deleter = object()
self._cached_shape_as_list = None
@contextlib.contextmanager
def parameter_server_scope(is_chief, ps_job_name, num_ps_tasks):
"""Strategy to use parameter servers in eager.
Creates SharedVariable objects for variables created in this scope. These
SharedVariable objects will be placed round-robin on the parameter servers
specified by the ps_job_name and num_ps_tasks arguments.
  To use parameter servers, you only need to wrap your model initialization in
  this scope:
```
with tf.contrib.eager.parameter_server_scope(
is_chief, ps_job_name, num_ps_tasks):
my_model = tf.keras.Sequential([...]) # Or
input = tf.keras.Input(...)
....
my_model = tf.keras.Model(input, output)
my_model.compile(...)
# or other usages of the model.
```
Args:
is_chief: Boolean. Whether this worker is responsible for initializing
variables.
ps_job_name: The name of the ps job in this cluster.
num_ps_tasks: The number of ps tasks to use.
Yields:
a context manager.
"""
# Note: capturing in a list to allow assignment.
ps_index = [0]
def variable_creator_scope(unused_next_creator, **kwargs):
kwargs["initialize"] = is_chief
with ops.device(
"/job:%s/task:%s" % (ps_job_name, ps_index[0] % num_ps_tasks)):
ps_index[0] += 1
v = SharedVariable(**kwargs)
if not is_chief:
while not resource_variable_ops.var_is_initialized_op(v.handle):
time.sleep(10)
return v
with variable_scope.variable_creator_scope(variable_creator_scope):
yield
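# Illustrative usage sketch (not part of the original module). The job name
# "ps" and the task count below are hypothetical placeholders; any variable
# created inside the scope becomes a SharedVariable placed round-robin on the
# ps tasks, as described in the parameter_server_scope docstring.
def _example_create_variable(is_chief):
  with parameter_server_scope(
      is_chief=is_chief, ps_job_name="ps", num_ps_tasks=2):
    # variable_scope.variable() goes through the variable creator stack, so
    # this returns a SharedVariable rather than a regular resource variable.
    return variable_scope.variable(
        initial_value=lambda: [0.0] * 10, name="example_weights")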
|
tensorflow-master
|
tensorflow/contrib/eager/python/parameter_server.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.gan import mnist
NOISE_DIM = 100
# Big enough so that summaries are never recorded.
# Lower this value if you would like to benchmark with some summaries.
SUMMARY_INTERVAL = 10000
SUMMARY_FLUSH_MS = 100 # Flush summaries every 100ms
def data_format():
return 'channels_first' if tf.test.is_gpu_available() else 'channels_last'
class MnistGraphGanBenchmark(tf.test.Benchmark):
def _create_graph(self, batch_size):
# Generate some random data.
images_data = np.random.randn(batch_size, 784).astype(np.float32)
dataset = tf.data.Dataset.from_tensors(images_data)
images = tf.compat.v1.data.make_one_shot_iterator(
dataset.repeat()).get_next()
# Create the models and optimizers
generator = mnist.Generator(data_format())
discriminator = mnist.Discriminator(data_format())
with tf.variable_scope('generator'):
generator_optimizer = tf.train.AdamOptimizer(0.001)
with tf.variable_scope('discriminator'):
discriminator_optimizer = tf.train.AdamOptimizer(0.001)
# Run models and compute loss
noise_placeholder = tf.placeholder(tf.float32,
shape=[batch_size, NOISE_DIM])
generated_images = generator(noise_placeholder)
tf.contrib.summary.image('generated_images',
tf.reshape(generated_images, [-1, 28, 28, 1]),
max_images=10)
discriminator_gen_outputs = discriminator(generated_images)
discriminator_real_outputs = discriminator(images)
generator_loss = mnist.generator_loss(discriminator_gen_outputs)
discriminator_loss = mnist.discriminator_loss(discriminator_real_outputs,
discriminator_gen_outputs)
# Get train ops
with tf.variable_scope('generator'):
generator_train = generator_optimizer.minimize(
generator_loss, var_list=generator.variables)
with tf.variable_scope('discriminator'):
discriminator_train = discriminator_optimizer.minimize(
discriminator_loss, var_list=discriminator.variables)
return (generator_train, discriminator_train, noise_placeholder)
def _report(self, test_name, start, num_iters, batch_size):
avg_time = (time.time() - start) / num_iters
dev = 'gpu' if tf.test.is_gpu_available() else 'cpu'
name = 'graph_%s_%s_batch_%d_%s' % (test_name, dev, batch_size,
data_format())
extras = {'examples_per_sec': batch_size / avg_time}
self.report_benchmark(
iters=num_iters, wall_time=avg_time, name=name, extras=extras)
def benchmark_train(self):
for batch_size in [64, 128, 256]:
with tf.Graph().as_default():
global_step = tf.train.get_or_create_global_step()
increment_global_step = tf.assign_add(global_step, 1)
with tf.contrib.summary.create_file_writer(
tempfile.mkdtemp(), flush_millis=SUMMARY_FLUSH_MS).as_default(), (
tf.contrib.summary.record_summaries_every_n_global_steps(
SUMMARY_INTERVAL)):
(generator_train, discriminator_train, noise_placeholder
) = self._create_graph(batch_size)
with tf.Session() as sess:
tf.contrib.summary.initialize(graph=tf.get_default_graph(),
session=sess)
sess.run(tf.global_variables_initializer())
num_burn, num_iters = (3, 100)
for _ in range(num_burn):
noise = np.random.uniform(-1.0, 1.0, size=[batch_size, NOISE_DIM])
# Increment global step before evaluating summary ops to avoid
# race condition.
sess.run(increment_global_step)
sess.run([generator_train, discriminator_train,
tf.contrib.summary.all_summary_ops()],
feed_dict={noise_placeholder: noise})
            # Run and benchmark num_iters iterations.
start = time.time()
for _ in range(num_iters):
noise = np.random.uniform(-1.0, 1.0, size=[batch_size, NOISE_DIM])
sess.run(increment_global_step)
sess.run([generator_train, discriminator_train,
tf.contrib.summary.all_summary_ops()],
feed_dict={noise_placeholder: noise})
self._report('train', start, num_iters, batch_size)
def benchmark_generate(self):
for batch_size in [64, 128, 256]:
with tf.Graph().as_default():
# Using random weights. This will generate garbage.
generator = mnist.Generator(data_format())
noise_placeholder = tf.placeholder(tf.float32,
shape=[batch_size, NOISE_DIM])
generated_images = generator(noise_placeholder)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
noise = np.random.uniform(-1.0, 1.0, size=[batch_size, NOISE_DIM])
num_burn, num_iters = (30, 1000)
for _ in range(num_burn):
sess.run(generated_images, feed_dict={noise_placeholder: noise})
start = time.time()
for _ in range(num_iters):
# Comparison with the eager execution benchmark in mnist_test.py
# isn't entirely fair as the time here includes the cost of copying
# the feeds from CPU memory to GPU.
sess.run(generated_images, feed_dict={noise_placeholder: noise})
self._report('generate', start, num_iters, batch_size)
if __name__ == '__main__':
tf.test.main()
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/gan/mnist_graph_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import time
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.gan import mnist
NOISE_DIM = 100
# Big enough so that summaries are never recorded.
# Lower this value if you would like to benchmark with some summaries.
SUMMARY_INTERVAL = 10000
SUMMARY_FLUSH_MS = 100 # Flush summaries every 100ms
def data_format():
return 'channels_first' if tf.test.is_gpu_available() else 'channels_last'
def device():
return '/gpu:0' if tf.test.is_gpu_available() else '/cpu:0'
class MnistEagerGanBenchmark(tf.test.Benchmark):
def _report(self, test_name, start, num_iters, batch_size):
avg_time = (time.time() - start) / num_iters
dev = 'gpu' if tf.test.is_gpu_available() else 'cpu'
name = 'eager_%s_%s_batch_%d_%s' % (test_name, dev, batch_size,
data_format())
extras = {'examples_per_sec': batch_size / avg_time}
self.report_benchmark(
iters=num_iters, wall_time=avg_time, name=name, extras=extras)
def benchmark_train(self):
for batch_size in [64, 128, 256]:
# Generate some random data.
burn_batches, measure_batches = (3, 100)
burn_images = [tf.random_normal([batch_size, 784])
for _ in range(burn_batches)]
burn_dataset = tf.data.Dataset.from_tensor_slices(burn_images)
measure_images = [tf.random_normal([batch_size, 784])
for _ in range(measure_batches)]
measure_dataset = tf.data.Dataset.from_tensor_slices(measure_images)
step_counter = tf.train.get_or_create_global_step()
with tf.device(device()):
# Create the models and optimizers
generator = mnist.Generator(data_format())
discriminator = mnist.Discriminator(data_format())
with tf.variable_scope('generator'):
generator_optimizer = tf.train.AdamOptimizer(0.001)
with tf.variable_scope('discriminator'):
discriminator_optimizer = tf.train.AdamOptimizer(0.001)
with tf.contrib.summary.create_file_writer(
tempfile.mkdtemp(), flush_millis=SUMMARY_FLUSH_MS).as_default():
# warm up
mnist.train_one_epoch(generator, discriminator, generator_optimizer,
discriminator_optimizer,
burn_dataset, step_counter,
log_interval=SUMMARY_INTERVAL,
noise_dim=NOISE_DIM)
# measure
start = time.time()
mnist.train_one_epoch(generator, discriminator, generator_optimizer,
discriminator_optimizer,
measure_dataset, step_counter,
log_interval=SUMMARY_INTERVAL,
noise_dim=NOISE_DIM)
self._report('train', start, measure_batches, batch_size)
def benchmark_generate(self):
for batch_size in [64, 128, 256]:
with tf.device(device()):
# Using random weights. This will generate garbage.
generator = mnist.Generator(data_format())
num_burn, num_iters = (30, 1000)
for _ in range(num_burn):
noise = tf.random_uniform(shape=[batch_size, NOISE_DIM],
minval=-1., maxval=1.)
generator(noise)
start = time.time()
for _ in range(num_iters):
noise = tf.random_uniform(shape=[batch_size, NOISE_DIM],
minval=-1., maxval=1.)
generator(noise)
self._report('generate', start, num_iters, batch_size)
if __name__ == '__main__':
tf.enable_eager_execution()
tf.test.main()
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/gan/mnist_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A deep MNIST classifier using convolutional layers.
Sample usage:
python mnist.py --help
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
layers = tf.keras.layers
FLAGS = None
class Discriminator(tf.keras.Model):
"""GAN Discriminator.
A network to differentiate between generated and real handwritten digits.
"""
def __init__(self, data_format):
"""Creates a model for discriminating between real and generated digits.
Args:
data_format: Either 'channels_first' or 'channels_last'.
'channels_first' is typically faster on GPUs while 'channels_last' is
typically faster on CPUs. See
https://www.tensorflow.org/performance/performance_guide#data_formats
"""
super(Discriminator, self).__init__(name='')
if data_format == 'channels_first':
self._input_shape = [-1, 1, 28, 28]
else:
assert data_format == 'channels_last'
self._input_shape = [-1, 28, 28, 1]
self.conv1 = layers.Conv2D(
64, 5, padding='SAME', data_format=data_format, activation=tf.tanh)
self.pool1 = layers.AveragePooling2D(2, 2, data_format=data_format)
self.conv2 = layers.Conv2D(
128, 5, data_format=data_format, activation=tf.tanh)
self.pool2 = layers.AveragePooling2D(2, 2, data_format=data_format)
self.flatten = layers.Flatten()
self.fc1 = layers.Dense(1024, activation=tf.tanh)
self.fc2 = layers.Dense(1, activation=None)
def call(self, inputs):
"""Return two logits per image estimating input authenticity.
Users should invoke __call__ to run the network, which delegates to this
method (and not call this method directly).
Args:
inputs: A batch of images as a Tensor with shape [batch_size, 28, 28, 1]
or [batch_size, 1, 28, 28]
Returns:
      A Tensor with shape [batch_size, 1] containing logits estimating
      the probability that the corresponding digit is real.
"""
x = tf.reshape(inputs, self._input_shape)
x = self.conv1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.pool2(x)
x = self.flatten(x)
x = self.fc1(x)
x = self.fc2(x)
return x
class Generator(tf.keras.Model):
"""Generator of handwritten digits similar to the ones in the MNIST dataset.
"""
def __init__(self, data_format):
"""Creates a model for discriminating between real and generated digits.
Args:
data_format: Either 'channels_first' or 'channels_last'.
'channels_first' is typically faster on GPUs while 'channels_last' is
typically faster on CPUs. See
https://www.tensorflow.org/performance/performance_guide#data_formats
"""
super(Generator, self).__init__(name='')
self.data_format = data_format
# We are using 128 6x6 channels as input to the first deconvolution layer
if data_format == 'channels_first':
self._pre_conv_shape = [-1, 128, 6, 6]
else:
assert data_format == 'channels_last'
self._pre_conv_shape = [-1, 6, 6, 128]
self.fc1 = layers.Dense(6 * 6 * 128, activation=tf.tanh)
# In call(), we reshape the output of fc1 to _pre_conv_shape
# Deconvolution layer. Resulting image shape: (batch, 14, 14, 64)
self.conv1 = layers.Conv2DTranspose(
64, 4, strides=2, activation=None, data_format=data_format)
# Deconvolution layer. Resulting image shape: (batch, 28, 28, 1)
self.conv2 = layers.Conv2DTranspose(
1, 2, strides=2, activation=tf.nn.sigmoid, data_format=data_format)
def call(self, inputs):
"""Return a batch of generated images.
Users should invoke __call__ to run the network, which delegates to this
method (and not call this method directly).
Args:
inputs: A batch of noise vectors as a Tensor with shape
[batch_size, length of noise vectors].
Returns:
A Tensor containing generated images. If data_format is 'channels_last',
the shape of returned images is [batch_size, 28, 28, 1], else
[batch_size, 1, 28, 28]
"""
x = self.fc1(inputs)
x = tf.reshape(x, shape=self._pre_conv_shape)
x = self.conv1(x)
x = self.conv2(x)
return x
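# Illustrative sketch (not part of the original example): run an untrained
# Generator on a random noise batch. Assumes eager execution; the batch size
# of 8 and noise length of 100 are arbitrary choices for the illustration.
def _example_generate_images():
  generator = Generator('channels_last')
  noise = tf.random_uniform([8, 100], minval=-1., maxval=1.)
  # With 'channels_last' the generated images have shape [8, 28, 28, 1].
  return generator(noise)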
def discriminator_loss(discriminator_real_outputs, discriminator_gen_outputs):
"""Original discriminator loss for GANs, with label smoothing.
See `Generative Adversarial Nets` (https://arxiv.org/abs/1406.2661) for more
details.
Args:
discriminator_real_outputs: Discriminator output on real data.
discriminator_gen_outputs: Discriminator output on generated data. Expected
to be in the range of (-inf, inf).
Returns:
A scalar loss Tensor.
"""
loss_on_real = tf.losses.sigmoid_cross_entropy(
tf.ones_like(discriminator_real_outputs),
discriminator_real_outputs,
label_smoothing=0.25)
loss_on_generated = tf.losses.sigmoid_cross_entropy(
tf.zeros_like(discriminator_gen_outputs), discriminator_gen_outputs)
loss = loss_on_real + loss_on_generated
tf.contrib.summary.scalar('discriminator_loss', loss)
return loss
def generator_loss(discriminator_gen_outputs):
"""Original generator loss for GANs.
L = -log(sigmoid(D(G(z))))
See `Generative Adversarial Nets` (https://arxiv.org/abs/1406.2661)
for more details.
Args:
discriminator_gen_outputs: Discriminator output on generated data. Expected
to be in the range of (-inf, inf).
Returns:
A scalar loss Tensor.
"""
loss = tf.losses.sigmoid_cross_entropy(
tf.ones_like(discriminator_gen_outputs), discriminator_gen_outputs)
tf.contrib.summary.scalar('generator_loss', loss)
return loss
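# Illustrative sketch (not part of the original example): evaluate both losses
# on made-up logits. Assumes eager execution; without a default summary writer
# the tf.contrib.summary.scalar calls inside the loss functions are no-ops.
def _example_losses():
  real_logits = tf.constant([[2.0], [1.5]])
  generated_logits = tf.constant([[-1.0], [0.3]])
  d_loss = discriminator_loss(real_logits, generated_logits)
  g_loss = generator_loss(generated_logits)
  return d_loss, g_loss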
def train_one_epoch(generator, discriminator, generator_optimizer,
discriminator_optimizer, dataset, step_counter,
log_interval, noise_dim):
"""Trains `generator` and `discriminator` models on `dataset`.
Args:
generator: Generator model.
discriminator: Discriminator model.
generator_optimizer: Optimizer to use for generator.
discriminator_optimizer: Optimizer to use for discriminator.
dataset: Dataset of images to train on.
step_counter: An integer variable, used to write summaries regularly.
log_interval: How many steps to wait between logging and collecting
summaries.
noise_dim: Dimension of noise vector to use.
"""
total_generator_loss = 0.0
total_discriminator_loss = 0.0
for (batch_index, images) in enumerate(dataset):
with tf.device('/cpu:0'):
tf.assign_add(step_counter, 1)
with tf.contrib.summary.record_summaries_every_n_global_steps(
log_interval, global_step=step_counter):
current_batch_size = images.shape[0]
noise = tf.random_uniform(
shape=[current_batch_size, noise_dim],
minval=-1.,
maxval=1.,
seed=batch_index)
      # We can use two tapes or a single persistent tape. Using two tapes is
      # memory efficient, since intermediate tensors can be released between
      # the two .gradient() calls below.
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
generated_images = generator(noise)
tf.contrib.summary.image(
'generated_images',
tf.reshape(generated_images, [-1, 28, 28, 1]),
max_images=10)
discriminator_gen_outputs = discriminator(generated_images)
discriminator_real_outputs = discriminator(images)
discriminator_loss_val = discriminator_loss(discriminator_real_outputs,
discriminator_gen_outputs)
total_discriminator_loss += discriminator_loss_val
generator_loss_val = generator_loss(discriminator_gen_outputs)
total_generator_loss += generator_loss_val
generator_grad = gen_tape.gradient(generator_loss_val,
generator.variables)
discriminator_grad = disc_tape.gradient(discriminator_loss_val,
discriminator.variables)
generator_optimizer.apply_gradients(
zip(generator_grad, generator.variables))
discriminator_optimizer.apply_gradients(
zip(discriminator_grad, discriminator.variables))
if log_interval and batch_index > 0 and batch_index % log_interval == 0:
print('Batch #%d\tAverage Generator Loss: %.6f\t'
'Average Discriminator Loss: %.6f' %
(batch_index, total_generator_loss / batch_index,
total_discriminator_loss / batch_index))
def main(_):
(device, data_format) = ('/gpu:0', 'channels_first')
if FLAGS.no_gpu or tf.contrib.eager.num_gpus() <= 0:
(device, data_format) = ('/cpu:0', 'channels_last')
print('Using device %s, and data format %s.' % (device, data_format))
# Load the datasets
data = input_data.read_data_sets(FLAGS.data_dir)
dataset = (
tf.data.Dataset.from_tensor_slices(data.train.images).shuffle(60000)
.batch(FLAGS.batch_size))
# Create the models and optimizers.
model_objects = {
'generator': Generator(data_format),
'discriminator': Discriminator(data_format),
'generator_optimizer': tf.train.AdamOptimizer(FLAGS.lr),
'discriminator_optimizer': tf.train.AdamOptimizer(FLAGS.lr),
'step_counter': tf.train.get_or_create_global_step(),
}
# Prepare summary writer and checkpoint info
summary_writer = tf.contrib.summary.create_summary_file_writer(
FLAGS.output_dir, flush_millis=1000)
checkpoint_prefix = os.path.join(FLAGS.checkpoint_dir, 'ckpt')
latest_cpkt = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
if latest_cpkt:
print('Using latest checkpoint at ' + latest_cpkt)
checkpoint = tf.train.Checkpoint(**model_objects)
# Restore variables on creation if a checkpoint exists.
checkpoint.restore(latest_cpkt)
with tf.device(device):
for _ in range(100):
start = time.time()
with summary_writer.as_default():
train_one_epoch(dataset=dataset, log_interval=FLAGS.log_interval,
noise_dim=FLAGS.noise, **model_objects)
end = time.time()
checkpoint.save(checkpoint_prefix)
print('\nTrain time for epoch #%d (step %d): %f' %
(checkpoint.save_counter.numpy(),
checkpoint.step_counter.numpy(),
end - start))
if __name__ == '__main__':
tf.enable_eager_execution()
parser = argparse.ArgumentParser()
parser.add_argument(
'--data-dir',
type=str,
default='/tmp/tensorflow/mnist/input_data',
help=('Directory for storing input data (default '
'/tmp/tensorflow/mnist/input_data)'))
parser.add_argument(
'--batch-size',
type=int,
default=128,
metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument(
'--log-interval',
type=int,
default=100,
metavar='N',
help=('number of batches between logging and writing summaries '
'(default: 100)'))
parser.add_argument(
'--output_dir',
type=str,
default=None,
metavar='DIR',
help='Directory to write TensorBoard summaries (defaults to none)')
parser.add_argument(
'--checkpoint_dir',
type=str,
default='/tmp/tensorflow/mnist/checkpoints/',
metavar='DIR',
help=('Directory to save checkpoints in (once per epoch) (default '
'/tmp/tensorflow/mnist/checkpoints/)'))
parser.add_argument(
'--lr',
type=float,
default=0.001,
metavar='LR',
help='learning rate (default: 0.001)')
parser.add_argument(
'--noise',
type=int,
default=100,
metavar='N',
help='Length of noise vector for generator input (default: 100)')
parser.add_argument(
'--no-gpu',
action='store_true',
default=False,
help='disables GPU usage even if a GPU is available')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/gan/mnist.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and benchmarks for the ResNet50 model, executed eagerly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import tempfile
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.resnet50 import resnet50
from tensorflow.contrib.summary import summary_test_util
from tensorflow.python.client import device_lib
from tensorflow.python.eager import tape
def device_and_data_format():
return ('/gpu:0', 'channels_first') if tfe.num_gpus() else ('/cpu:0',
'channels_last')
def random_batch(batch_size, data_format):
shape = (3, 224, 224) if data_format == 'channels_first' else (224, 224, 3)
shape = (batch_size,) + shape
num_classes = 1000
images = tf.random_uniform(shape)
labels = tf.random_uniform(
[batch_size], minval=0, maxval=num_classes, dtype=tf.int32)
one_hot = tf.one_hot(labels, num_classes)
return images, one_hot
def compute_gradients(model, images, labels, num_replicas=1):
with tf.GradientTape() as grad_tape:
logits = model(images, training=True)
loss = tf.losses.softmax_cross_entropy(
logits=logits, onehot_labels=labels)
tf.contrib.summary.scalar(name='loss', tensor=loss)
if num_replicas != 1:
loss /= num_replicas
  # TODO(b/110991947): We can mistakenly trace the gradient call in a
  # multi-threaded environment. Explicitly disable recording until
# this is fixed.
with tape.stop_recording():
grads = grad_tape.gradient(loss, model.variables)
return grads
def apply_gradients(model, optimizer, gradients):
optimizer.apply_gradients(zip(gradients, model.variables))
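# A minimal, hypothetical sketch of a single eager training step composed from
# the two helpers above; this mirrors what `_test_train` below exercises.
def _single_train_step_sketch(data_format='channels_last'):
  model = resnet50.ResNet50(data_format)
  optimizer = tf.train.GradientDescentOptimizer(0.1)
  images, labels = random_batch(2, data_format)
  apply_gradients(model, optimizer, compute_gradients(model, images, labels))
  return model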
class ResNet50Test(tf.test.TestCase):
def _apply(self, defun=False, execution_mode=None):
device, data_format = device_and_data_format()
model = resnet50.ResNet50(data_format)
if defun:
model.call = tfe.function(model.call)
with tf.device(device), tfe.execution_mode(execution_mode):
images, _ = random_batch(2, data_format)
output = model(images, training=False)
tfe.async_wait()
self.assertEqual((2, 1000), output.shape)
def test_apply(self):
self._apply(defun=False)
def test_apply_async(self):
self._apply(defun=False, execution_mode=tfe.ASYNC)
def test_apply_with_defun(self):
self._apply(defun=True)
def test_apply_with_defun_async(self):
self._apply(defun=True, execution_mode=tfe.ASYNC)
def test_apply_no_top(self):
device, data_format = device_and_data_format()
model = resnet50.ResNet50(data_format, include_top=False)
with tf.device(device):
images, _ = random_batch(2, data_format)
output = model(images, training=False)
output_shape = ((2, 2048, 1, 1)
if data_format == 'channels_first' else (2, 1, 1, 2048))
self.assertEqual(output_shape, output.shape)
def test_apply_with_pooling(self):
device, data_format = device_and_data_format()
model = resnet50.ResNet50(data_format, include_top=False, pooling='avg')
with tf.device(device):
images, _ = random_batch(2, data_format)
output = model(images, training=False)
self.assertEqual((2, 2048), output.shape)
def _test_train(self, execution_mode=None):
device, data_format = device_and_data_format()
model = resnet50.ResNet50(data_format)
tf.train.get_or_create_global_step()
logdir = tempfile.mkdtemp()
with tf.contrib.summary.create_file_writer(
logdir, max_queue=0,
name='t0').as_default(), tf.contrib.summary.always_record_summaries():
with tf.device(device), tfe.execution_mode(execution_mode):
optimizer = tf.train.GradientDescentOptimizer(0.1)
images, labels = random_batch(2, data_format)
apply_gradients(model, optimizer,
compute_gradients(model, images, labels))
self.assertEqual(320, len(model.variables))
tfe.async_wait()
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].tag, 'loss')
def test_train(self):
self._test_train()
def test_train_async(self):
self._test_train(execution_mode=tfe.ASYNC)
def test_no_garbage(self):
device, data_format = device_and_data_format()
model = resnet50.ResNet50(data_format)
optimizer = tf.train.GradientDescentOptimizer(0.1)
with tf.device(device):
images, labels = random_batch(2, data_format)
gc.disable()
# Warm up. Note that this first run does create significant amounts of
# garbage to be collected. The hope is that this is a build-only effect,
# and a subsequent training loop will create nothing which needs to be
# collected.
apply_gradients(model, optimizer,
compute_gradients(model, images, labels))
gc.collect()
previous_gc_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
for _ in range(2):
# Run twice to ensure that garbage that is created on the first
# iteration is no longer accessible.
apply_gradients(model, optimizer,
compute_gradients(model, images, labels))
gc.collect()
# There should be no garbage requiring collection.
self.assertEqual(0, len(gc.garbage))
gc.set_debug(previous_gc_debug_flags)
gc.enable()
class MockIterator(object):
def __init__(self, tensors):
self._tensors = [tf.identity(x) for x in tensors]
def next(self):
return self._tensors
class ResNet50Benchmarks(tf.test.Benchmark):
def _train_batch_sizes(self):
"""Choose batch sizes based on GPU capability."""
for device in device_lib.list_local_devices():
if tf.DeviceSpec.from_string(device.name).device_type == 'GPU':
# Avoid OOM errors with larger batch sizes, which seem to cause errors
# later on even if caught.
#
# TODO(allenl): Base this on device memory; memory limit information
# during the test seems to exclude the amount TensorFlow has allocated,
# which isn't useful.
if 'K20' in device.physical_device_desc:
return (16,)
if 'P100' in device.physical_device_desc:
return (16, 32, 64)
if tf.DeviceSpec.from_string(device.name).device_type == 'TPU':
return (32,)
return (16, 32)
def _report(self, label, start, num_iters, device, batch_size, data_format,
num_replicas=1):
avg_time = (time.time() - start) / num_iters
dev = tf.DeviceSpec.from_string(device).device_type.lower()
replica_str = '' if num_replicas == 1 else 'replicas_%d_' % num_replicas
name = '%s_%s_batch_%d_%s%s' % (label, dev, batch_size,
replica_str, data_format)
extras = {'examples_per_sec': (num_replicas * batch_size) / avg_time}
self.report_benchmark(
iters=num_iters, wall_time=avg_time, name=name, extras=extras)
def _force_device_sync(self):
# If this function is called in the context of a non-CPU device
# (e.g., inside a 'with tf.device("/gpu:0")' block)
# then this will force a copy from CPU->NON_CPU_DEVICE->CPU,
# which forces a sync. This is a roundabout way, yes.
tf.constant(1.).cpu()
def _benchmark_eager_apply(self, label, device_and_format, defun=False,
execution_mode=None):
with tfe.execution_mode(execution_mode):
device, data_format = device_and_format
model = resnet50.ResNet50(data_format)
if defun:
model.call = tfe.function(model.call)
batch_size = 64
num_burn = 5
num_iters = 30
with tf.device(device):
images, _ = random_batch(batch_size, data_format)
for _ in xrange(num_burn):
model(images, training=False).cpu()
if execution_mode:
tfe.async_wait()
gc.collect()
start = time.time()
for _ in xrange(num_iters):
model(images, training=False).cpu()
if execution_mode:
tfe.async_wait()
self._report(label, start, num_iters, device, batch_size, data_format)
def benchmark_eager_apply_sync(self):
self._benchmark_eager_apply('eager_apply', device_and_data_format(),
defun=False)
def benchmark_eager_apply_async(self):
self._benchmark_eager_apply(
'eager_apply_async', device_and_data_format(), defun=False,
execution_mode=tfe.ASYNC)
def benchmark_eager_apply_with_defun(self):
self._benchmark_eager_apply('eager_apply_with_defun',
device_and_data_format(), defun=True)
def _benchmark_eager_train(self,
label,
make_iterator,
device_and_format,
defun=False,
execution_mode=None):
with tfe.execution_mode(execution_mode):
device, data_format = device_and_format
for batch_size in self._train_batch_sizes():
(images, labels) = random_batch(batch_size, data_format)
model = resnet50.ResNet50(data_format)
optimizer = tf.train.GradientDescentOptimizer(0.1)
apply_grads = apply_gradients
if defun:
model.call = tfe.function(model.call)
apply_grads = tfe.function(apply_gradients)
num_burn = 3
num_iters = 10
with tf.device(device):
iterator = make_iterator((images, labels))
for _ in xrange(num_burn):
(images, labels) = iterator.next()
apply_grads(model, optimizer,
compute_gradients(model, images, labels))
if execution_mode:
tfe.async_wait()
self._force_device_sync()
gc.collect()
start = time.time()
for _ in xrange(num_iters):
(images, labels) = iterator.next()
apply_grads(model, optimizer,
compute_gradients(model, images, labels))
if execution_mode:
tfe.async_wait()
self._force_device_sync()
self._report(label, start, num_iters, device, batch_size, data_format)
def benchmark_eager_train_sync(self):
self._benchmark_eager_train('eager_train', MockIterator,
device_and_data_format(), defun=False)
def benchmark_eager_train_async(self):
self._benchmark_eager_train(
'eager_train_async',
MockIterator,
device_and_data_format(),
defun=False,
execution_mode=tfe.ASYNC)
def benchmark_eager_train_with_defun(self):
self._benchmark_eager_train(
'eager_train_with_defun', MockIterator,
device_and_data_format(), defun=True)
def benchmark_eager_train_datasets(self):
def make_iterator(tensors):
with tf.device('/device:CPU:0'):
ds = tf.data.Dataset.from_tensors(tensors).repeat()
return tfe.Iterator(ds)
self._benchmark_eager_train(
'eager_train_dataset', make_iterator,
device_and_data_format(), defun=False)
def benchmark_eager_train_datasets_with_defun(self):
def make_iterator(tensors):
with tf.device('/device:CPU:0'):
ds = tf.data.Dataset.from_tensors(tensors).repeat()
return tfe.Iterator(ds)
self._benchmark_eager_train(
'eager_train_dataset_with_defun', make_iterator,
device_and_data_format(), defun=True)
if __name__ == '__main__':
tfe.enable_eager_execution()
tf.test.main()
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/resnet50/resnet50_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and benchmarks for ResNet50 under graph execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.resnet50 import resnet50
from tensorflow.contrib.summary import summary_test_util
def data_format():
return 'channels_first' if tf.test.is_gpu_available() else 'channels_last'
def image_shape(batch_size):
if data_format() == 'channels_first':
return [batch_size, 3, 224, 224]
return [batch_size, 224, 224, 3]
def random_batch(batch_size):
images = np.random.rand(*image_shape(batch_size)).astype(np.float32)
num_classes = 1000
labels = np.random.randint(
low=0, high=num_classes, size=[batch_size]).astype(np.int32)
one_hot = np.zeros((batch_size, num_classes)).astype(np.float32)
one_hot[np.arange(batch_size), labels] = 1.
return images, one_hot
class ResNet50GraphTest(tf.test.TestCase):
def testApply(self):
    # Use small batches for tests because the OSS version runs
    # in a constrained GPU environment with 1-2GB of memory.
batch_size = 8
with tf.Graph().as_default():
images = tf.placeholder(tf.float32, image_shape(None))
model = resnet50.ResNet50(data_format())
predictions = model(images, training=False)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
np_images, _ = random_batch(batch_size)
out = sess.run(predictions, feed_dict={images: np_images})
self.assertAllEqual([batch_size, 1000], out.shape)
def testTrainWithSummary(self):
with tf.Graph().as_default():
images = tf.placeholder(tf.float32, image_shape(None), name='images')
labels = tf.placeholder(tf.float32, [None, 1000], name='labels')
tf.train.get_or_create_global_step()
logdir = tempfile.mkdtemp()
with tf.contrib.summary.always_record_summaries():
with tf.contrib.summary.create_file_writer(
logdir, max_queue=0,
name='t0').as_default():
model = resnet50.ResNet50(data_format())
logits = model(images, training=True)
loss = tf.losses.softmax_cross_entropy(
logits=logits, onehot_labels=labels)
tf.contrib.summary.scalar(name='loss', tensor=loss)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
self.assertEqual(321, len(tf.global_variables()))
          # Use small batches for tests because the OSS version runs
          # in a constrained GPU environment with 1-2GB of memory.
batch_size = 2
with tf.Session() as sess:
sess.run(init)
sess.run(tf.contrib.summary.summary_writer_initializer_op())
np_images, np_labels = random_batch(batch_size)
sess.run([train_op, tf.contrib.summary.all_summary_ops()],
feed_dict={images: np_images, labels: np_labels})
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].tag, 'loss')
class ResNet50Benchmarks(tf.test.Benchmark):
def _report(self, label, start, num_iters, batch_size):
avg_time = (time.time() - start) / num_iters
dev = 'gpu' if tf.test.is_gpu_available() else 'cpu'
name = 'graph_%s_%s_batch_%d_%s' % (label, dev, batch_size, data_format())
extras = {'examples_per_sec': batch_size / avg_time}
self.report_benchmark(
iters=num_iters, wall_time=avg_time, name=name, extras=extras)
def benchmark_graph_apply(self):
with tf.Graph().as_default():
images = tf.placeholder(tf.float32, image_shape(None))
model = resnet50.ResNet50(data_format())
predictions = model(images, training=False)
init = tf.global_variables_initializer()
batch_size = 64
with tf.Session() as sess:
sess.run(init)
np_images, _ = random_batch(batch_size)
num_burn, num_iters = (3, 30)
for _ in range(num_burn):
sess.run(predictions, feed_dict={images: np_images})
start = time.time()
for _ in range(num_iters):
# Comparison with the eager execution benchmark in resnet50_test.py
# isn't entirely fair as the time here includes the cost of copying
# the feeds from CPU memory to GPU.
sess.run(predictions, feed_dict={images: np_images})
self._report('apply', start, num_iters, batch_size)
def benchmark_graph_train(self):
for batch_size in [16, 32, 64]:
with tf.Graph().as_default():
np_images, np_labels = random_batch(batch_size)
dataset = tf.data.Dataset.from_tensors((np_images, np_labels)).repeat()
images, labels = tf.compat.v1.data.make_one_shot_iterator(
dataset).get_next()
model = resnet50.ResNet50(data_format())
logits = model(images, training=True)
loss = tf.losses.softmax_cross_entropy(
logits=logits, onehot_labels=labels)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
(num_burn, num_iters) = (5, 10)
for _ in range(num_burn):
sess.run(train_op)
start = time.time()
for _ in range(num_iters):
sess.run(train_op)
self._report('train', start, num_iters, batch_size)
if __name__ == '__main__':
tf.test.main()
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/resnet50/resnet50_graph_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet50 model definition compatible with TensorFlow's eager execution.
Reference [Deep Residual Learning for Image
Recognition](https://arxiv.org/abs/1512.03385)
Adapted from tf.keras.applications.ResNet50. A notable difference is that the
model here outputs logits while the Keras model outputs probabilities.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
layers = tf.keras.layers
class _IdentityBlock(tf.keras.Model):
"""_IdentityBlock is the block that has no conv layer at shortcut.
Args:
kernel_size: the kernel size of middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
data_format: data_format for the input ('channels_first' or
'channels_last').
"""
def __init__(self, kernel_size, filters, stage, block, data_format):
super(_IdentityBlock, self).__init__(name='')
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
bn_axis = 1 if data_format == 'channels_first' else 3
self.conv2a = layers.Conv2D(
filters1, (1, 1), name=conv_name_base + '2a', data_format=data_format)
self.bn2a = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '2a')
self.conv2b = layers.Conv2D(
filters2,
kernel_size,
padding='same',
data_format=data_format,
name=conv_name_base + '2b')
self.bn2b = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '2b')
self.conv2c = layers.Conv2D(
filters3, (1, 1), name=conv_name_base + '2c', data_format=data_format)
self.bn2c = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '2c')
def call(self, input_tensor, training=False):
x = self.conv2a(input_tensor)
x = self.bn2a(x, training=training)
x = tf.nn.relu(x)
x = self.conv2b(x)
x = self.bn2b(x, training=training)
x = tf.nn.relu(x)
x = self.conv2c(x)
x = self.bn2c(x, training=training)
x += input_tensor
return tf.nn.relu(x)
class _ConvBlock(tf.keras.Model):
"""_ConvBlock is the block that has a conv layer at shortcut.
Args:
kernel_size: the kernel size of middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
data_format: data_format for the input ('channels_first' or
'channels_last').
strides: strides for the convolution. Note that from stage 3, the first
      conv layer at the main path uses strides=(2, 2), and the shortcut should
have strides=(2,2) as well.
"""
def __init__(self,
kernel_size,
filters,
stage,
block,
data_format,
strides=(2, 2)):
super(_ConvBlock, self).__init__(name='')
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
bn_axis = 1 if data_format == 'channels_first' else 3
self.conv2a = layers.Conv2D(
filters1, (1, 1),
strides=strides,
name=conv_name_base + '2a',
data_format=data_format)
self.bn2a = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '2a')
self.conv2b = layers.Conv2D(
filters2,
kernel_size,
padding='same',
name=conv_name_base + '2b',
data_format=data_format)
self.bn2b = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '2b')
self.conv2c = layers.Conv2D(
filters3, (1, 1), name=conv_name_base + '2c', data_format=data_format)
self.bn2c = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '2c')
self.conv_shortcut = layers.Conv2D(
filters3, (1, 1),
strides=strides,
name=conv_name_base + '1',
data_format=data_format)
self.bn_shortcut = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '1')
def call(self, input_tensor, training=False):
x = self.conv2a(input_tensor)
x = self.bn2a(x, training=training)
x = tf.nn.relu(x)
x = self.conv2b(x)
x = self.bn2b(x, training=training)
x = tf.nn.relu(x)
x = self.conv2c(x)
x = self.bn2c(x, training=training)
shortcut = self.conv_shortcut(input_tensor)
shortcut = self.bn_shortcut(shortcut, training=training)
x += shortcut
return tf.nn.relu(x)
# pylint: disable=not-callable
class ResNet50(tf.keras.Model):
"""Instantiates the ResNet50 architecture.
Args:
data_format: format for the image. Either 'channels_first' or
'channels_last'. 'channels_first' is typically faster on GPUs while
'channels_last' is typically faster on CPUs. See
https://www.tensorflow.org/performance/performance_guide#data_formats
name: Prefix applied to names of variables created in the model.
    trainable: Whether the model is trainable. If True, backward passes
      and optimization are performed after the call() method.
include_top: whether to include the fully-connected layer at the top of the
network.
pooling: Optional pooling mode for feature extraction when `include_top`
is `False`.
- `None` means that the output of the model will be the 4D tensor
output of the last convolutional layer.
- `avg` means that global average pooling will be applied to the output of
the last convolutional layer, and thus the output of the model will be
a 2D tensor.
- `max` means that global max pooling will be applied.
classes: optional number of classes to classify images into, only to be
specified if `include_top` is True.
Raises:
ValueError: in case of invalid argument for data_format.
"""
def __init__(self,
data_format,
name='',
trainable=True,
include_top=True,
pooling=None,
classes=1000):
super(ResNet50, self).__init__(name=name)
valid_channel_values = ('channels_first', 'channels_last')
if data_format not in valid_channel_values:
raise ValueError('Unknown data_format: %s. Valid values: %s' %
(data_format, valid_channel_values))
self.include_top = include_top
def conv_block(filters, stage, block, strides=(2, 2)):
return _ConvBlock(
3,
filters,
stage=stage,
block=block,
data_format=data_format,
strides=strides)
def id_block(filters, stage, block):
return _IdentityBlock(
3, filters, stage=stage, block=block, data_format=data_format)
self.conv1 = layers.Conv2D(
64, (7, 7),
strides=(2, 2),
data_format=data_format,
padding='same',
name='conv1')
bn_axis = 1 if data_format == 'channels_first' else 3
self.bn_conv1 = layers.BatchNormalization(axis=bn_axis, name='bn_conv1')
self.max_pool = layers.MaxPooling2D(
(3, 3), strides=(2, 2), data_format=data_format)
self.l2a = conv_block([64, 64, 256], stage=2, block='a', strides=(1, 1))
self.l2b = id_block([64, 64, 256], stage=2, block='b')
self.l2c = id_block([64, 64, 256], stage=2, block='c')
self.l3a = conv_block([128, 128, 512], stage=3, block='a')
self.l3b = id_block([128, 128, 512], stage=3, block='b')
self.l3c = id_block([128, 128, 512], stage=3, block='c')
self.l3d = id_block([128, 128, 512], stage=3, block='d')
self.l4a = conv_block([256, 256, 1024], stage=4, block='a')
self.l4b = id_block([256, 256, 1024], stage=4, block='b')
self.l4c = id_block([256, 256, 1024], stage=4, block='c')
self.l4d = id_block([256, 256, 1024], stage=4, block='d')
self.l4e = id_block([256, 256, 1024], stage=4, block='e')
self.l4f = id_block([256, 256, 1024], stage=4, block='f')
self.l5a = conv_block([512, 512, 2048], stage=5, block='a')
self.l5b = id_block([512, 512, 2048], stage=5, block='b')
self.l5c = id_block([512, 512, 2048], stage=5, block='c')
self.avg_pool = layers.AveragePooling2D(
(7, 7), strides=(7, 7), data_format=data_format)
if self.include_top:
self.flatten = layers.Flatten()
self.fc1000 = layers.Dense(classes, name='fc1000')
else:
reduction_indices = [1, 2] if data_format == 'channels_last' else [2, 3]
reduction_indices = tf.constant(reduction_indices)
if pooling == 'avg':
self.global_pooling = functools.partial(
tf.reduce_mean,
reduction_indices=reduction_indices,
keep_dims=False)
elif pooling == 'max':
self.global_pooling = functools.partial(
tf.reduce_max, reduction_indices=reduction_indices, keep_dims=False)
else:
self.global_pooling = None
def call(self, inputs, training=True):
x = self.conv1(inputs)
x = self.bn_conv1(x, training=training)
x = tf.nn.relu(x)
x = self.max_pool(x)
x = self.l2a(x, training=training)
x = self.l2b(x, training=training)
x = self.l2c(x, training=training)
x = self.l3a(x, training=training)
x = self.l3b(x, training=training)
x = self.l3c(x, training=training)
x = self.l3d(x, training=training)
x = self.l4a(x, training=training)
x = self.l4b(x, training=training)
x = self.l4c(x, training=training)
x = self.l4d(x, training=training)
x = self.l4e(x, training=training)
x = self.l4f(x, training=training)
x = self.l5a(x, training=training)
x = self.l5b(x, training=training)
x = self.l5c(x, training=training)
x = self.avg_pool(x)
if self.include_top:
return self.fc1000(self.flatten(x))
elif self.global_pooling:
return self.global_pooling(x)
else:
return x
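# A minimal, hypothetical sketch (only a definition, never executed on import)
# showing how the `include_top` and `pooling` constructor options change the
# output shape; the shapes match those asserted in resnet50_test.py.
def _output_shapes_sketch():
  images = tf.random_uniform((2, 224, 224, 3))
  logits = ResNet50('channels_last')(images, training=False)
  pooled = ResNet50('channels_last', include_top=False, pooling='avg')(
      images, training=False)
  return logits.shape, pooled.shape  # (2, 1000) and (2, 2048)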
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/resnet50/resnet50.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Densely Connected Convolutional Networks.
Reference [
Densely Connected Convolutional Networks](https://arxiv.org/abs/1608.06993)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
l2 = tf.keras.regularizers.l2
class ConvBlock(tf.keras.Model):
"""Convolutional Block consisting of (batchnorm->relu->conv).
Arguments:
num_filters: number of filters passed to a convolutional layer.
data_format: "channels_first" or "channels_last"
bottleneck: if True, then a 1x1 Conv is performed followed by 3x3 Conv.
weight_decay: weight decay
dropout_rate: dropout rate.
"""
def __init__(self, num_filters, data_format, bottleneck, weight_decay=1e-4,
dropout_rate=0):
super(ConvBlock, self).__init__()
self.bottleneck = bottleneck
axis = -1 if data_format == "channels_last" else 1
inter_filter = num_filters * 4
# don't forget to set use_bias=False when using batchnorm
self.conv2 = tf.keras.layers.Conv2D(num_filters,
(3, 3),
padding="same",
use_bias=False,
data_format=data_format,
kernel_initializer="he_normal",
kernel_regularizer=l2(weight_decay))
self.batchnorm1 = tf.keras.layers.BatchNormalization(axis=axis)
self.dropout = tf.keras.layers.Dropout(dropout_rate)
if self.bottleneck:
self.conv1 = tf.keras.layers.Conv2D(inter_filter,
(1, 1),
padding="same",
use_bias=False,
data_format=data_format,
kernel_initializer="he_normal",
kernel_regularizer=l2(weight_decay))
self.batchnorm2 = tf.keras.layers.BatchNormalization(axis=axis)
def call(self, x, training=True):
output = self.batchnorm1(x, training=training)
if self.bottleneck:
output = self.conv1(tf.nn.relu(output))
output = self.batchnorm2(output, training=training)
output = self.conv2(tf.nn.relu(output))
output = self.dropout(output, training=training)
return output
class TransitionBlock(tf.keras.Model):
"""Transition Block to reduce the number of features.
Arguments:
num_filters: number of filters passed to a convolutional layer.
data_format: "channels_first" or "channels_last"
weight_decay: weight decay
dropout_rate: dropout rate.
"""
def __init__(self, num_filters, data_format,
weight_decay=1e-4, dropout_rate=0):
super(TransitionBlock, self).__init__()
axis = -1 if data_format == "channels_last" else 1
self.batchnorm = tf.keras.layers.BatchNormalization(axis=axis)
self.conv = tf.keras.layers.Conv2D(num_filters,
(1, 1),
padding="same",
use_bias=False,
data_format=data_format,
kernel_initializer="he_normal",
kernel_regularizer=l2(weight_decay))
self.avg_pool = tf.keras.layers.AveragePooling2D(data_format=data_format)
def call(self, x, training=True):
output = self.batchnorm(x, training=training)
output = self.conv(tf.nn.relu(output))
output = self.avg_pool(output)
return output
class DenseBlock(tf.keras.Model):
"""Dense Block consisting of ConvBlocks where each block's
output is concatenated with its input.
Arguments:
num_layers: Number of layers in each block.
growth_rate: number of filters to add per conv block.
data_format: "channels_first" or "channels_last"
bottleneck: boolean, that decides which part of ConvBlock to call.
weight_decay: weight decay
dropout_rate: dropout rate.
"""
def __init__(self, num_layers, growth_rate, data_format, bottleneck,
weight_decay=1e-4, dropout_rate=0):
super(DenseBlock, self).__init__()
self.num_layers = num_layers
self.axis = -1 if data_format == "channels_last" else 1
self.blocks = []
for _ in range(int(self.num_layers)):
self.blocks.append(ConvBlock(growth_rate,
data_format,
bottleneck,
weight_decay,
dropout_rate))
def call(self, x, training=True):
for i in range(int(self.num_layers)):
output = self.blocks[i](x, training=training)
x = tf.concat([x, output], axis=self.axis)
return x
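# A minimal, hypothetical sketch of how DenseBlock grows the channel count:
# each ConvBlock emits `growth_rate` feature maps that are concatenated onto
# its input, so the output carries C_in + num_layers * growth_rate channels.
def _dense_block_output_channels(c_in=16, num_layers=4, growth_rate=12):
  return c_in + num_layers * growth_rate  # 64 for these example values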
class DenseNet(tf.keras.Model):
"""Creating the Densenet Architecture.
Arguments:
depth_of_model: number of layers in the model.
growth_rate: number of filters to add per conv block.
num_of_blocks: number of dense blocks.
output_classes: number of output classes.
num_layers_in_each_block: number of layers in each block.
      If -1, then we calculate this by (depth - 4) / 3.
      If a positive integer, then it is used as the
        number of layers per block.
If list or tuple, then this list is used directly.
data_format: "channels_first" or "channels_last"
bottleneck: boolean, to decide which part of conv block to call.
    compression: compression factor for reducing the number of inputs
      (filters) to the transition block.
weight_decay: weight decay
    dropout_rate: dropout rate.
    pool_initial: If True, add a 7x7 conv with stride 2 followed by a 3x3
      maxpool; else, do a 3x3 conv with stride 1.
include_top: If true, GlobalAveragePooling Layer and Dense layer are
included.
"""
def __init__(self, depth_of_model, growth_rate, num_of_blocks,
output_classes, num_layers_in_each_block, data_format,
bottleneck=True, compression=0.5, weight_decay=1e-4,
dropout_rate=0, pool_initial=False, include_top=True):
super(DenseNet, self).__init__()
self.depth_of_model = depth_of_model
self.growth_rate = growth_rate
self.num_of_blocks = num_of_blocks
self.output_classes = output_classes
self.num_layers_in_each_block = num_layers_in_each_block
self.data_format = data_format
self.bottleneck = bottleneck
self.compression = compression
self.weight_decay = weight_decay
self.dropout_rate = dropout_rate
self.pool_initial = pool_initial
self.include_top = include_top
# deciding on number of layers in each block
if isinstance(self.num_layers_in_each_block, list) or isinstance(
self.num_layers_in_each_block, tuple):
self.num_layers_in_each_block = list(self.num_layers_in_each_block)
else:
if self.num_layers_in_each_block == -1:
if self.num_of_blocks != 3:
raise ValueError(
"Number of blocks must be 3 if num_layers_in_each_block is -1")
if (self.depth_of_model - 4) % 3 == 0:
num_layers = (self.depth_of_model - 4) / 3
if self.bottleneck:
num_layers //= 2
self.num_layers_in_each_block = [num_layers] * self.num_of_blocks
else:
raise ValueError("Depth must be 3N+4 if num_layer_in_each_block=-1")
else:
self.num_layers_in_each_block = [
self.num_layers_in_each_block] * self.num_of_blocks
axis = -1 if self.data_format == "channels_last" else 1
    # setting the filters and stride of the initial conv layer.
if self.pool_initial:
init_filters = (7, 7)
stride = (2, 2)
else:
init_filters = (3, 3)
stride = (1, 1)
self.num_filters = 2 * self.growth_rate
# first conv and pool layer
self.conv1 = tf.keras.layers.Conv2D(self.num_filters,
init_filters,
strides=stride,
padding="same",
use_bias=False,
data_format=self.data_format,
kernel_initializer="he_normal",
kernel_regularizer=l2(
self.weight_decay))
if self.pool_initial:
self.pool1 = tf.keras.layers.MaxPooling2D(pool_size=(3, 3),
strides=(2, 2),
padding="same",
data_format=self.data_format)
self.batchnorm1 = tf.keras.layers.BatchNormalization(axis=axis)
self.batchnorm2 = tf.keras.layers.BatchNormalization(axis=axis)
# last pooling and fc layer
if self.include_top:
self.last_pool = tf.keras.layers.GlobalAveragePooling2D(
data_format=self.data_format)
self.classifier = tf.keras.layers.Dense(self.output_classes)
# calculating the number of filters after each block
num_filters_after_each_block = [self.num_filters]
for i in range(1, self.num_of_blocks):
temp_num_filters = num_filters_after_each_block[i-1] + (
self.growth_rate * self.num_layers_in_each_block[i-1])
# using compression to reduce the number of inputs to the
# transition block
temp_num_filters = int(temp_num_filters * compression)
num_filters_after_each_block.append(temp_num_filters)
# dense block initialization
self.dense_blocks = []
self.transition_blocks = []
for i in range(self.num_of_blocks):
self.dense_blocks.append(DenseBlock(self.num_layers_in_each_block[i],
self.growth_rate,
self.data_format,
self.bottleneck,
self.weight_decay,
self.dropout_rate))
if i+1 < self.num_of_blocks:
self.transition_blocks.append(
TransitionBlock(num_filters_after_each_block[i+1],
self.data_format,
self.weight_decay,
self.dropout_rate))
def call(self, x, training=True):
output = self.conv1(x)
if self.pool_initial:
output = self.batchnorm1(output, training=training)
output = tf.nn.relu(output)
output = self.pool1(output)
for i in range(self.num_of_blocks - 1):
output = self.dense_blocks[i](output, training=training)
output = self.transition_blocks[i](output, training=training)
output = self.dense_blocks[
self.num_of_blocks - 1](output, training=training)
output = self.batchnorm2(output, training=training)
output = tf.nn.relu(output)
if self.include_top:
output = self.last_pool(output)
output = self.classifier(output)
return output
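# A minimal, hypothetical sketch of instantiating a DenseNet-BC-121-style model
# with the constructor above, using the same configuration as the accompanying
# benchmarks (densenet_test.py / densenet_graph_test.py).
def _densenet_121_sketch(data_format="channels_last"):
  return DenseNet(depth_of_model=121,
                  growth_rate=32,
                  num_of_blocks=4,
                  output_classes=1000,
                  num_layers_in_each_block=[6, 12, 24, 16],
                  data_format=data_format,
                  bottleneck=True,
                  compression=0.5,
                  weight_decay=1e-4,
                  dropout_rate=0,
                  pool_initial=True,
                  include_top=True)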
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/densenet/densenet.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and Benchmarks for Densenet model under graph execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.densenet import densenet
def data_format():
return 'channels_first' if tf.test.is_gpu_available() else 'channels_last'
def image_shape(batch_size):
if data_format() == 'channels_first':
return [batch_size, 3, 224, 224]
return [batch_size, 224, 224, 3]
def random_batch(batch_size):
images = np.random.rand(*image_shape(batch_size)).astype(np.float32)
num_classes = 1000
labels = np.random.randint(
low=0, high=num_classes, size=[batch_size]).astype(np.int32)
one_hot = np.zeros((batch_size, num_classes)).astype(np.float32)
one_hot[np.arange(batch_size), labels] = 1.
return images, one_hot
class DensenetGraphTest(tf.test.TestCase):
def testApply(self):
depth = 7
growth_rate = 2
num_blocks = 3
output_classes = 10
num_layers_in_each_block = -1
batch_size = 1
with tf.Graph().as_default():
images = tf.placeholder(tf.float32, image_shape(None))
model = densenet.DenseNet(depth, growth_rate, num_blocks,
output_classes, num_layers_in_each_block,
data_format(), bottleneck=True, compression=0.5,
weight_decay=1e-4, dropout_rate=0,
pool_initial=False, include_top=True)
predictions = model(images, training=False)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
np_images, _ = random_batch(batch_size)
out = sess.run(predictions, feed_dict={images: np_images})
self.assertAllEqual([batch_size, output_classes], out.shape)
class DensenetBenchmark(tf.test.Benchmark):
def __init__(self):
self.depth = 121
self.growth_rate = 32
self.num_blocks = 4
self.output_classes = 1000
self.num_layers_in_each_block = [6, 12, 24, 16]
def _report(self, label, start, num_iters, batch_size):
avg_time = (time.time() - start) / num_iters
dev = 'gpu' if tf.test.is_gpu_available() else 'cpu'
name = 'graph_%s_%s_batch_%d_%s' % (label, dev, batch_size, data_format())
extras = {'examples_per_sec': batch_size / avg_time}
self.report_benchmark(
iters=num_iters, wall_time=avg_time, name=name, extras=extras)
def benchmark_graph_apply(self):
with tf.Graph().as_default():
images = tf.placeholder(tf.float32, image_shape(None))
model = densenet.DenseNet(self.depth, self.growth_rate, self.num_blocks,
self.output_classes,
self.num_layers_in_each_block, data_format(),
bottleneck=True, compression=0.5,
weight_decay=1e-4, dropout_rate=0,
pool_initial=True, include_top=True)
predictions = model(images, training=False)
init = tf.global_variables_initializer()
batch_size = 64
with tf.Session() as sess:
sess.run(init)
np_images, _ = random_batch(batch_size)
num_burn, num_iters = (3, 30)
for _ in range(num_burn):
sess.run(predictions, feed_dict={images: np_images})
start = time.time()
for _ in range(num_iters):
sess.run(predictions, feed_dict={images: np_images})
self._report('apply', start, num_iters, batch_size)
def benchmark_graph_train(self):
for batch_size in [16, 32, 64]:
with tf.Graph().as_default():
np_images, np_labels = random_batch(batch_size)
dataset = tf.data.Dataset.from_tensors((np_images, np_labels)).repeat()
(images, labels) = tf.compat.v1.data.make_one_shot_iterator(
dataset).get_next()
model = densenet.DenseNet(self.depth, self.growth_rate, self.num_blocks,
self.output_classes,
self.num_layers_in_each_block, data_format(),
bottleneck=True, compression=0.5,
weight_decay=1e-4, dropout_rate=0,
pool_initial=True, include_top=True)
logits = model(images, training=True)
cross_ent = tf.losses.softmax_cross_entropy(
logits=logits, onehot_labels=labels)
regularization = tf.add_n(model.losses)
loss = cross_ent + regularization
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
(num_burn, num_iters) = (5, 10)
for _ in range(num_burn):
sess.run(train_op)
start = time.time()
for _ in range(num_iters):
sess.run(train_op)
self._report('train', start, num_iters, batch_size)
if __name__ == '__main__':
tf.test.main()
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/densenet/densenet_graph_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and Benchmarks for Densenet model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import time
from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.densenet import densenet
from tensorflow.python.client import device_lib
class DensenetTest(tf.test.TestCase):
def test_bottleneck_true(self):
depth = 7
growth_rate = 2
num_blocks = 3
output_classes = 10
num_layers_in_each_block = -1
batch_size = 1
data_format = ('channels_first') if tf.test.is_gpu_available() else (
'channels_last')
model = densenet.DenseNet(depth, growth_rate, num_blocks,
output_classes, num_layers_in_each_block,
data_format, bottleneck=True, compression=0.5,
weight_decay=1e-4, dropout_rate=0,
pool_initial=False, include_top=True)
if data_format == 'channels_last':
rand_input = tf.random_uniform((batch_size, 32, 32, 3))
else:
rand_input = tf.random_uniform((batch_size, 3, 32, 32))
output_shape = model(rand_input).shape
self.assertEqual(output_shape, (batch_size, output_classes))
def test_bottleneck_false(self):
depth = 7
growth_rate = 2
num_blocks = 3
output_classes = 10
num_layers_in_each_block = -1
batch_size = 1
data_format = ('channels_first') if tf.test.is_gpu_available() else (
'channels_last')
model = densenet.DenseNet(depth, growth_rate, num_blocks,
output_classes, num_layers_in_each_block,
data_format, bottleneck=False, compression=0.5,
weight_decay=1e-4, dropout_rate=0,
pool_initial=False, include_top=True)
if data_format == 'channels_last':
rand_input = tf.random_uniform((batch_size, 32, 32, 3))
else:
rand_input = tf.random_uniform((batch_size, 3, 32, 32))
output_shape = model(rand_input).shape
self.assertEqual(output_shape, (batch_size, output_classes))
def test_pool_initial_true(self):
depth = 7
growth_rate = 2
num_blocks = 4
output_classes = 10
num_layers_in_each_block = [1, 2, 2, 1]
batch_size = 1
data_format = ('channels_first') if tf.test.is_gpu_available() else (
'channels_last')
model = densenet.DenseNet(depth, growth_rate, num_blocks,
output_classes, num_layers_in_each_block,
data_format, bottleneck=True, compression=0.5,
weight_decay=1e-4, dropout_rate=0,
pool_initial=True, include_top=True)
if data_format == 'channels_last':
rand_input = tf.random_uniform((batch_size, 32, 32, 3))
else:
rand_input = tf.random_uniform((batch_size, 3, 32, 32))
output_shape = model(rand_input).shape
self.assertEqual(output_shape, (batch_size, output_classes))
def test_regularization(self):
if tf.test.is_gpu_available():
rand_input = tf.random_uniform((10, 3, 32, 32))
data_format = 'channels_first'
else:
rand_input = tf.random_uniform((10, 32, 32, 3))
data_format = 'channels_last'
weight_decay = 1e-4
conv = tf.keras.layers.Conv2D(
3, (3, 3),
padding='same',
use_bias=False,
data_format=data_format,
kernel_regularizer=tf.keras.regularizers.l2(weight_decay))
optimizer = tf.train.GradientDescentOptimizer(0.1)
conv(rand_input) # Initialize the variables in the layer
def compute_true_l2(vs, wd):
return tf.reduce_sum(tf.square(vs)) * wd
true_l2 = compute_true_l2(conv.variables, weight_decay)
keras_l2 = tf.add_n(conv.losses)
self.assertAllClose(true_l2, keras_l2)
with tf.GradientTape() as tape_true, tf.GradientTape() as tape_keras:
loss = tf.reduce_sum(conv(rand_input))
loss_with_true_l2 = loss + compute_true_l2(conv.variables, weight_decay)
loss_with_keras_l2 = loss + tf.add_n(conv.losses)
true_grads = tape_true.gradient(loss_with_true_l2, conv.variables)
keras_grads = tape_keras.gradient(loss_with_keras_l2, conv.variables)
self.assertAllClose(true_grads, keras_grads)
optimizer.apply_gradients(zip(keras_grads, conv.variables))
keras_l2_after_update = tf.add_n(conv.losses)
self.assertNotAllClose(keras_l2, keras_l2_after_update)
def compute_gradients(model, images, labels):
with tf.GradientTape() as tape:
logits = model(images, training=True)
cross_ent = tf.losses.softmax_cross_entropy(
logits=logits, onehot_labels=labels)
regularization = tf.add_n(model.losses)
loss = cross_ent + regularization
tf.contrib.summary.scalar(name='loss', tensor=loss)
return tape.gradient(loss, model.variables)
def apply_gradients(model, optimizer, gradients):
optimizer.apply_gradients(zip(gradients, model.variables))
def device_and_data_format():
return ('/gpu:0',
'channels_first') if tf.test.is_gpu_available() else ('/cpu:0',
'channels_last')
def random_batch(batch_size, data_format):
shape = (3, 224, 224) if data_format == 'channels_first' else (224, 224, 3)
shape = (batch_size,) + shape
num_classes = 1000
images = tf.random_uniform(shape)
labels = tf.random_uniform(
[batch_size], minval=0, maxval=num_classes, dtype=tf.int32)
one_hot = tf.one_hot(labels, num_classes)
return images, one_hot
class MockIterator(object):
def __init__(self, tensors):
self._tensors = [tf.identity(x) for x in tensors]
def next(self):
return self._tensors
class DensenetBenchmark(tf.test.Benchmark):
def __init__(self):
self.depth = 121
self.growth_rate = 32
self.num_blocks = 4
self.output_classes = 1000
self.num_layers_in_each_block = [6, 12, 24, 16]
def _train_batch_sizes(self):
"""Choose batch sizes based on GPU capability."""
for device in device_lib.list_local_devices():
if tf.DeviceSpec.from_string(device.name).device_type == 'GPU':
if 'K20' in device.physical_device_desc:
return (16,)
if 'P100' in device.physical_device_desc:
return (16, 32, 64)
if tf.DeviceSpec.from_string(device.name).device_type == 'TPU':
return (32,)
return (16, 32)
def _report(self, label, start, num_iters, device, batch_size, data_format):
avg_time = (time.time() - start) / num_iters
dev = tf.DeviceSpec.from_string(device).device_type.lower()
name = '%s_%s_batch_%d_%s' % (label, dev, batch_size, data_format)
extras = {'examples_per_sec': batch_size / avg_time}
self.report_benchmark(
iters=num_iters, wall_time=avg_time, name=name, extras=extras)
def _force_device_sync(self):
# If this function is called in the context of a non-CPU device
# (e.g., inside a 'with tf.device("/gpu:0")' block)
# then this will force a copy from CPU->NON_CPU_DEVICE->CPU,
# which forces a sync. This is a roundabout way, yes.
tf.constant(1.).cpu()
def _benchmark_eager_apply(self, label, device_and_format, defun=False,
execution_mode=None):
with tfe.execution_mode(execution_mode):
device, data_format = device_and_format
model = densenet.DenseNet(self.depth, self.growth_rate, self.num_blocks,
self.output_classes,
self.num_layers_in_each_block, data_format,
bottleneck=True, compression=0.5,
weight_decay=1e-4, dropout_rate=0,
pool_initial=True, include_top=True)
if defun:
# TODO(apassos) enable tfe.function here
model.call = tfe.defun(model.call)
batch_size = 64
num_burn = 5
num_iters = 30
with tf.device(device):
images, _ = random_batch(batch_size, data_format)
for _ in xrange(num_burn):
model(images, training=False).cpu()
if execution_mode:
tfe.async_wait()
gc.collect()
start = time.time()
for _ in xrange(num_iters):
model(images, training=False).cpu()
if execution_mode:
tfe.async_wait()
self._report(label, start, num_iters, device, batch_size, data_format)
def benchmark_eager_apply_sync(self):
self._benchmark_eager_apply('eager_apply', device_and_data_format(),
defun=False)
def benchmark_eager_apply_async(self):
self._benchmark_eager_apply(
'eager_apply_async', device_and_data_format(), defun=False,
execution_mode=tfe.ASYNC)
def benchmark_eager_apply_with_defun(self):
self._benchmark_eager_apply('eager_apply_with_defun',
device_and_data_format(), defun=True)
def _benchmark_eager_train(self,
label,
make_iterator,
device_and_format,
defun=False,
execution_mode=None):
with tfe.execution_mode(execution_mode):
device, data_format = device_and_format
for batch_size in self._train_batch_sizes():
(images, labels) = random_batch(batch_size, data_format)
model = densenet.DenseNet(self.depth, self.growth_rate, self.num_blocks,
self.output_classes,
self.num_layers_in_each_block, data_format,
bottleneck=True, compression=0.5,
weight_decay=1e-4, dropout_rate=0,
pool_initial=True, include_top=True)
optimizer = tf.train.GradientDescentOptimizer(0.1)
apply_grads = apply_gradients
if defun:
model.call = tfe.defun(model.call)
apply_grads = tfe.defun(apply_gradients)
num_burn = 3
num_iters = 10
with tf.device(device):
iterator = make_iterator((images, labels))
for _ in xrange(num_burn):
(images, labels) = iterator.next()
apply_grads(model, optimizer,
compute_gradients(model, images, labels))
if execution_mode:
tfe.async_wait()
self._force_device_sync()
gc.collect()
start = time.time()
for _ in xrange(num_iters):
(images, labels) = iterator.next()
apply_grads(model, optimizer,
compute_gradients(model, images, labels))
if execution_mode:
tfe.async_wait()
self._force_device_sync()
self._report(label, start, num_iters, device, batch_size, data_format)
def benchmark_eager_train_sync(self):
self._benchmark_eager_train('eager_train', MockIterator,
device_and_data_format(), defun=False)
def benchmark_eager_train_async(self):
self._benchmark_eager_train(
'eager_train_async',
MockIterator,
device_and_data_format(),
defun=False,
execution_mode=tfe.ASYNC)
def benchmark_eager_train_with_defun(self):
self._benchmark_eager_train(
'eager_train_with_defun', MockIterator,
device_and_data_format(), defun=True)
def benchmark_eager_train_datasets(self):
def make_iterator(tensors):
with tf.device('/device:CPU:0'):
ds = tf.data.Dataset.from_tensors(tensors).repeat()
return tfe.Iterator(ds)
self._benchmark_eager_train(
'eager_train_dataset', make_iterator,
device_and_data_format(), defun=False)
def benchmark_eager_train_datasets_with_defun(self):
def make_iterator(tensors):
with tf.device('/device:CPU:0'):
ds = tf.data.Dataset.from_tensors(tensors).repeat()
return tfe.Iterator(ds)
self._benchmark_eager_train(
'eager_train_dataset_with_defun', make_iterator,
device_and_data_format(), defun=True)
if __name__ == '__main__':
tf.enable_eager_execution()
tf.test.main()
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/densenet/densenet_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script for reading and loading CIFAR-10."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
# Global constants describing the CIFAR data set.
IMAGE_HEIGHT = 32
IMAGE_WIDTH = 32
NUM_CHANNEL = 3
def get_ds_from_tfrecords(data_dir,
split,
data_aug=True,
batch_size=100,
epochs=None,
shuffle=True,
data_format="channels_first",
num_parallel_calls=12,
prefetch=0,
div255=True,
dtype=tf.float32):
"""Returns a tf.train.Dataset object from reading tfrecords.
Args:
data_dir: Directory of tfrecords
split: "train", "validation", or "test"
data_aug: Apply data augmentation if True
batch_size: Batch size of dataset object
epochs: Number of epochs to repeat the dataset; default `None` means
repeating indefinitely
shuffle: Shuffle the dataset if True
data_format: `channels_first` or `channels_last`
num_parallel_calls: Number of threads for dataset preprocess
prefetch: Buffer size for prefetch
div255: Divide the images by 255 if True
dtype: Data type of images
Returns:
    A tf.data.Dataset object
Raises:
ValueError: Unknown split
"""
if split not in ["train", "validation", "test", "train_all"]:
raise ValueError("Unknown split {}".format(split))
def _parser(serialized_example):
"""Parses a single tf.Example into image and label tensors."""
features = tf.parse_single_example(
serialized_example,
features={
"image": tf.FixedLenFeature([], tf.string),
"label": tf.FixedLenFeature([], tf.int64),
})
image = tf.decode_raw(features["image"], tf.uint8)
    # Reshaping directly to [H, W, C] does not work because the raw bytes are
    # stored channel-major, so reshape to [C, H, W] first.
image = tf.reshape(image, [NUM_CHANNEL, IMAGE_HEIGHT, IMAGE_WIDTH])
# This is needed for `tf.image.resize_image_with_crop_or_pad`
image = tf.transpose(image, [1, 2, 0])
image = tf.cast(image, dtype)
label = tf.cast(features["label"], tf.int32)
if data_aug:
image = tf.image.resize_image_with_crop_or_pad(image, IMAGE_HEIGHT + 4,
IMAGE_WIDTH + 4)
image = tf.random_crop(image, [IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CHANNEL])
image = tf.image.random_flip_left_right(image)
if data_format == "channels_first":
image = tf.transpose(image, [2, 0, 1])
if div255:
image /= 255.
return image, label
filename = os.path.join(data_dir, split + ".tfrecords")
dataset = tf.data.TFRecordDataset(filename)
dataset = dataset.repeat(epochs)
dataset = dataset.map(_parser, num_parallel_calls=num_parallel_calls)
dataset = dataset.prefetch(prefetch)
if shuffle:
# Find the right size according to the split
size = {
"train": 40000,
"validation": 10000,
"test": 10000,
"train_all": 50000
}[split]
dataset = dataset.shuffle(size)
dataset = dataset.batch(batch_size, drop_remainder=True)
return dataset
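# A minimal, hypothetical usage sketch, assuming "/tmp/cifar10_tfrecords"
# contains a "train.tfrecords" file in the format parsed above.
def _example_pipeline():
  ds = get_ds_from_tfrecords(
      data_dir="/tmp/cifar10_tfrecords",
      split="train",
      batch_size=32,
      data_format="channels_first")
  images, labels = tf.compat.v1.data.make_one_shot_iterator(ds).get_next()
  return images, labels  # images: [32, 3, 32, 32], labels: [32]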
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/revnet/cifar_input.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Cloud TPU Estimator workflow with RevNet train on ImageNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl import flags
import tensorflow as tf
from tensorflow.contrib import summary
from tensorflow.contrib.eager.python.examples.revnet import config as config_
from tensorflow.contrib.eager.python.examples.revnet import imagenet_input
from tensorflow.contrib.eager.python.examples.revnet import revnet
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.python.estimator import estimator
MEAN_RGB = [0.485, 0.456, 0.406]
STDDEV_RGB = [0.229, 0.224, 0.225]
def _host_call_fn(gs, loss, lr):
"""Training host call.
Creates scalar summaries for training metrics.
This function is executed on the CPU and should not directly reference
  any Tensors in the rest of the `model_fn`. To pass Tensors from the
  model to this function, provide them as part of the `host_call`. See
https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
for more information.
Arguments should match the list of `Tensor` objects passed as the second
element in the tuple passed to `host_call`.
Args:
    gs: `Tensor` with shape `[batch]` for the global_step.
loss: `Tensor` with shape `[batch]` for the training loss.
lr: `Tensor` with shape `[batch]` for the learning_rate.
Returns:
List of summary ops to run on the CPU host.
"""
  # Host call fns are executed FLAGS.iterations_per_loop times after one
  # TPU loop is finished. Setting max_queue to the same value as the number
  # of iterations will make the summary writer flush the data to storage
  # only once per loop.
gs = gs[0]
with summary.create_file_writer(
FLAGS.model_dir, max_queue=FLAGS.iterations_per_loop).as_default():
with summary.always_record_summaries():
summary.scalar("loss", loss[0], step=gs)
summary.scalar("learning_rate", lr[0], step=gs)
return summary.all_summary_ops()
def _metric_fn(labels, logits):
"""Evaluation metric function. Evaluates accuracy.
This function is executed on the CPU and should not directly reference
any Tensors in the rest of the `model_fn`. To pass Tensors from the model
to the `metric_fn`, provide as part of the `eval_metrics`. See
https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
for more information.
Arguments should match the list of `Tensor` objects passed as the second
element in the tuple passed to `eval_metrics`.
Args:
labels: `Tensor` with shape `[batch]`.
logits: `Tensor` with shape `[batch, num_classes]`.
Returns:
A dict of the metrics to return from evaluation.
"""
predictions = tf.argmax(logits, axis=1)
top_1_accuracy = tf.metrics.accuracy(labels, predictions)
in_top_5 = tf.cast(tf.nn.in_top_k(logits, labels, 5), tf.float32)
top_5_accuracy = tf.metrics.mean(in_top_5)
return {
"top_1_accuracy": top_1_accuracy,
"top_5_accuracy": top_5_accuracy,
}
def model_fn(features, labels, mode, params):
"""Model function required by the `tf.contrib.tpu.TPUEstimator` API.
Args:
features: Input images
labels: Labels of images
    mode: One of `ModeKeys.TRAIN`, `ModeKeys.EVAL`, or `ModeKeys.PREDICT`
    params: A dictionary of extra parameters that might be passed
Returns:
An instance of `tf.contrib.tpu.TPUEstimatorSpec`
"""
revnet_config = params["revnet_config"]
model = revnet.RevNet(config=revnet_config)
inputs = features
if isinstance(inputs, dict):
inputs = features["image"]
if revnet_config.data_format == "channels_first":
assert not FLAGS.transpose_input # channels_first only for GPU
inputs = tf.transpose(inputs, [0, 3, 1, 2])
if FLAGS.transpose_input and mode != tf.estimator.ModeKeys.PREDICT:
inputs = tf.transpose(inputs, [3, 0, 1, 2]) # HWCN to NHWC
# Normalize the image to zero mean and unit variance.
inputs -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=inputs.dtype)
inputs /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=inputs.dtype)
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.train.piecewise_constant(
global_step, revnet_config.lr_decay_steps, revnet_config.lr_list)
optimizer = tf.train.MomentumOptimizer(learning_rate,
revnet_config.momentum)
if FLAGS.use_tpu:
optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
logits, saved_hidden = model(inputs, training=True)
grads, loss = model.compute_gradients(saved_hidden, labels, training=True)
with tf.control_dependencies(model.get_updates_for(inputs)):
train_op = optimizer.apply_gradients(
zip(grads, model.trainable_variables), global_step=global_step)
if not FLAGS.skip_host_call:
# To log the loss, current learning rate, and epoch for Tensorboard, the
# summary op needs to be run on the host CPU via host_call. host_call
# expects [batch_size, ...] Tensors, thus reshape to introduce a batch
# dimension. These Tensors are implicitly concatenated to
# [params['batch_size']].
gs_t = tf.reshape(global_step, [1])
loss_t = tf.reshape(loss, [1])
lr_t = tf.reshape(learning_rate, [1])
host_call = (_host_call_fn, [gs_t, loss_t, lr_t])
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode, loss=loss, train_op=train_op, host_call=host_call)
elif mode == tf.estimator.ModeKeys.EVAL:
logits, _ = model(inputs, training=False)
loss = model.compute_loss(labels=labels, logits=logits)
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode, loss=loss, eval_metrics=(_metric_fn, [labels, logits]))
else: # Predict or export
logits, _ = model(inputs, training=False)
predictions = {
"classes": tf.argmax(logits, axis=1),
"probabilities": tf.nn.softmax(logits),
}
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions=predictions,
export_outputs={
"classify": tf.estimator.export.PredictOutput(predictions)
})
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
# RevNet specific configuration
revnet_config = {
"revnet-56": config_.get_hparams_imagenet_56(),
"revnet-104": config_.get_hparams_imagenet_104()
}[FLAGS.revnet_config]
if FLAGS.use_tpu:
revnet_config.data_format = "channels_last"
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
# Estimator specific configuration
config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=FLAGS.model_dir,
session_config=tf.ConfigProto(
allow_soft_placement=True, log_device_placement=True),
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_shards,
per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig.
PER_HOST_V2),
)
# Input pipelines are slightly different (with regards to shuffling and
# preprocessing) between training and evaluation.
imagenet_train, imagenet_eval = [
imagenet_input.ImageNetInput(
is_training=is_training,
data_dir=FLAGS.data_dir,
transpose_input=FLAGS.transpose_input,
use_bfloat16=False) for is_training in [True, False]
]
revnet_classifier = tf.contrib.tpu.TPUEstimator(
model_fn=model_fn,
use_tpu=FLAGS.use_tpu,
train_batch_size=revnet_config.tpu_batch_size,
eval_batch_size=revnet_config.tpu_eval_batch_size,
config=config,
export_to_tpu=False,
params={"revnet_config": revnet_config})
steps_per_epoch = revnet_config.tpu_iters_per_epoch
eval_steps = revnet_config.tpu_eval_steps
# pylint: disable=protected-access
if FLAGS.mode == "eval":
# Run evaluation when there's a new checkpoint
for ckpt in evaluation.checkpoints_iterator(
FLAGS.model_dir, timeout=FLAGS.eval_timeout):
tf.logging.info("Starting to evaluate.")
try:
start_timestamp = time.time() # This time will include compilation time
eval_results = revnet_classifier.evaluate(
input_fn=imagenet_eval.input_fn,
steps=eval_steps,
checkpoint_path=ckpt)
elapsed_time = int(time.time() - start_timestamp)
tf.logging.info("Eval results: %s. Elapsed seconds: %d" %
(eval_results, elapsed_time))
# Terminate eval job when final checkpoint is reached
current_step = int(os.path.basename(ckpt).split("-")[1])
if current_step >= revnet_config.max_train_iter:
tf.logging.info(
"Evaluation finished after training step %d" % current_step)
break
except tf.errors.NotFoundError:
# Since the coordinator is on a different job than the TPU worker,
# sometimes the TPU worker does not finish initializing until long after
# the CPU job tells it to start evaluating. In this case, the checkpoint
# file could have been deleted already.
tf.logging.info(
"Checkpoint %s no longer exists, skipping checkpoint" % ckpt)
else: # FLAGS.mode == 'train' or FLAGS.mode == 'train_and_eval'
current_step = estimator._load_global_step_from_checkpoint_dir(
FLAGS.model_dir)
tf.logging.info(
"Training for %d steps (%.2f epochs in total). Current"
" step %d." % (revnet_config.max_train_iter,
revnet_config.max_train_iter / steps_per_epoch,
current_step))
start_timestamp = time.time() # This time will include compilation time
if FLAGS.mode == "train":
revnet_classifier.train(
input_fn=imagenet_train.input_fn,
max_steps=revnet_config.max_train_iter)
else:
assert FLAGS.mode == "train_and_eval"
while current_step < revnet_config.max_train_iter:
# Train for up to steps_per_eval number of steps.
# At the end of training, a checkpoint will be written to --model_dir.
next_checkpoint = min(current_step + FLAGS.steps_per_eval,
revnet_config.max_train_iter)
revnet_classifier.train(
input_fn=imagenet_train.input_fn, max_steps=next_checkpoint)
current_step = next_checkpoint
tf.logging.info("Finished training up to step %d. Elapsed seconds %d." %
(next_checkpoint, int(time.time() - start_timestamp)))
# Evaluate the model on the most recent model in --model_dir.
# Since evaluation happens in batches of --eval_batch_size, some images
# may be excluded modulo the batch size. As long as the batch size is
# consistent, the evaluated images are also consistent.
tf.logging.info("Starting to evaluate.")
eval_results = revnet_classifier.evaluate(
input_fn=imagenet_eval.input_fn, steps=eval_steps)
tf.logging.info("Eval results: %s" % eval_results)
elapsed_time = int(time.time() - start_timestamp)
tf.logging.info("Finished training up to step %d. Elapsed seconds %d." %
(revnet_config.max_train_iter, elapsed_time))
if FLAGS.export_dir is not None:
# The guide to serve an exported TensorFlow model is at:
# https://www.tensorflow.org/serving/serving_basic
tf.logging.info("Starting to export model.")
revnet_classifier.export_saved_model(
export_dir_base=FLAGS.export_dir,
serving_input_receiver_fn=imagenet_input.image_serving_input_fn)
if __name__ == "__main__":
# Cloud TPU Cluster Resolver flags
flags.DEFINE_string(
"tpu",
default=None,
help="The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
flags.DEFINE_string(
"tpu_zone",
default=None,
help="[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string(
"gcp_project",
default=None,
help="[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
# Model specific parameters
flags.DEFINE_string(
"data_dir", default=None, help="Directory to load tfrecords")
flags.DEFINE_string(
"model_dir",
default=None,
help="[Optional] Directory to store the model information")
flags.DEFINE_string(
"revnet_config",
default="revnet-56",
help="[Optional] Architecture of network. "
"Other options include `revnet-104`")
flags.DEFINE_boolean(
"use_tpu", default=True, help="[Optional] Whether to use TPU")
flags.DEFINE_integer(
"num_shards", default=8, help="Number of shards (TPU chips).")
flags.DEFINE_integer(
"iterations_per_loop",
default=100,
help=(
"Number of steps to run on TPU before feeding metrics to the CPU."
" If the number of iterations in the loop would exceed the number of"
" train steps, the loop will exit before reaching"
" --iterations_per_loop. The larger this value is, the higher the"
" utilization on the TPU."))
flags.DEFINE_integer(
"eval_timeout",
default=None,
help="Maximum seconds between checkpoints before evaluation terminates.")
flags.DEFINE_integer(
"steps_per_eval",
default=5000,
help=(
"Controls how often evaluation is performed. Since evaluation is"
" fairly expensive, it is advised to evaluate as infrequently as"
" possible (i.e. up to --train_steps, which evaluates the model only"
" after finishing the entire training regime)."))
flags.DEFINE_bool(
"transpose_input",
default=True,
help="Use TPU double transpose optimization")
flags.DEFINE_string(
"export_dir",
default=None,
help=("The directory where the exported SavedModel will be stored."))
flags.DEFINE_bool(
"skip_host_call",
default=False,
help=("Skip the host_call which is executed every training step. This is"
" generally used for generating training summaries (train loss,"
" learning rate, etc...). When --skip_host_call=false, there could"
" be a performance drop if host_call function is slow and cannot"
" keep up with the TPU-side computation."))
flags.DEFINE_string(
"mode",
default="train_and_eval",
help='One of {"train_and_eval", "train", "eval"}.')
FLAGS = flags.FLAGS
tf.app.run()
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/revnet/main_estimator_tpu.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reversible residual network compatible with eager execution.
Configuration in format of tf.contrib.training.HParams.
Supports CIFAR-10, CIFAR-100, and ImageNet datasets.
Reference [The Reversible Residual Network: Backpropagation
Without Storing Activations](https://arxiv.org/pdf/1707.04585.pdf)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def get_hparams_cifar_38():
"""RevNet-38 configurations for CIFAR-10/CIFAR-100."""
config = tf.contrib.training.HParams()
config.add_hparam("num_train_images", 50000)
config.add_hparam("num_eval_images", 10000)
config.add_hparam("init_filters", 32)
config.add_hparam("init_kernel", 3)
config.add_hparam("init_stride", 1)
config.add_hparam("n_rev_blocks", 3)
config.add_hparam("n_res", [3, 3, 3])
config.add_hparam("filters", [32, 64, 112])
config.add_hparam("strides", [1, 2, 2])
config.add_hparam("batch_size", 100)
config.add_hparam("bottleneck", False)
config.add_hparam("fused", True)
config.add_hparam("init_max_pool", False)
if tf.test.is_gpu_available():
config.add_hparam("input_shape", (3, 32, 32))
config.add_hparam("data_format", "channels_first")
else:
config.add_hparam("input_shape", (32, 32, 3))
config.add_hparam("data_format", "channels_last")
# Training details
config.add_hparam("weight_decay", 2e-4)
config.add_hparam("momentum", .9)
config.add_hparam("lr_decay_steps", [40000, 60000])
config.add_hparam("lr_list", [1e-1, 1e-2, 1e-3])
config.add_hparam("max_train_iter", 80000)
config.add_hparam("seed", 1234)
config.add_hparam("shuffle", True)
config.add_hparam("log_every", 500)
config.add_hparam("save_every", 500)
config.add_hparam("dtype", tf.float32)
config.add_hparam("eval_batch_size", 1000)
config.add_hparam("div255", True)
  # This is imprecise: when training with a separate validation set, there
  # are only 40k images in the training data.
config.add_hparam("iters_per_epoch",
config.num_train_images // config.batch_size)
config.add_hparam("epochs", config.max_train_iter // config.iters_per_epoch)
# Customized TPU hyperparameters due to differing batch size caused by
# TPU architecture specifics
# Suggested batch sizes to reduce overhead from excessive tensor padding
# https://cloud.google.com/tpu/docs/troubleshooting
config.add_hparam("tpu_batch_size", 1024)
config.add_hparam("tpu_eval_batch_size", 1024)
config.add_hparam("tpu_iters_per_epoch",
config.num_train_images // config.tpu_batch_size)
config.add_hparam("tpu_epochs",
config.max_train_iter // config.tpu_iters_per_epoch)
config.add_hparam("tpu_eval_steps",
config.num_eval_images // config.tpu_eval_batch_size)
return config
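# A small sketch (illustration only) of overriding hyperparameters. The derived
# values added above (`iters_per_epoch`, `epochs`) are computed at construction
# time, so they are not recomputed automatically when `batch_size` is changed.
def _example_override_hparams():  # hypothetical helper
  config = get_hparams_cifar_38()
  config.batch_size = 128
  # Recompute derived quantities by hand if they are needed downstream.
  config.iters_per_epoch = config.num_train_images // config.batch_size
  config.epochs = config.max_train_iter // config.iters_per_epoch
  return config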
def get_hparams_cifar_110():
  """RevNet-110 configurations for CIFAR-10/CIFAR-100."""
  config = get_hparams_cifar_38()
config.filters = [32, 64, 128]
config.n_res = [9, 9, 9]
return config
def get_hparams_cifar_164():
  """RevNet-164 configurations for CIFAR-10/CIFAR-100."""
  config = get_hparams_cifar_38()
  config.filters = [32, 64, 128]
  config.n_res = [9, 9, 9]
  config.bottleneck = True
# Due to bottleneck residual blocks
filters = [f * 4 for f in config.filters]
config.filters = filters
return config
def get_hparams_imagenet_56():
"""RevNet-56 configurations for ImageNet."""
config = tf.contrib.training.HParams()
config.add_hparam("n_classes", 1000)
config.add_hparam("dataset", "ImageNet")
config.add_hparam("num_train_images", 1281167)
config.add_hparam("num_eval_images", 50000)
config.add_hparam("init_filters", 128)
config.add_hparam("init_kernel", 7)
config.add_hparam("init_stride", 2)
config.add_hparam("n_rev_blocks", 4)
config.add_hparam("n_res", [2, 2, 2, 2])
config.add_hparam("filters", [128, 256, 512, 832])
config.add_hparam("strides", [1, 2, 2, 2])
config.add_hparam("batch_size", 256)
config.add_hparam("bottleneck", True)
config.add_hparam("fused", True)
config.add_hparam("init_max_pool", True)
if tf.test.is_gpu_available():
config.add_hparam("input_shape", (3, 224, 224))
config.add_hparam("data_format", "channels_first")
else:
config.add_hparam("input_shape", (224, 224, 3))
config.add_hparam("data_format", "channels_last")
# Due to bottleneck residual blocks
filters = [f * 4 for f in config.filters]
config.filters = filters
# Training details
config.add_hparam("weight_decay", 1e-4)
config.add_hparam("momentum", .9)
config.add_hparam("lr_decay_steps", [160000, 320000, 480000])
config.add_hparam("lr_list", [1e-1, 1e-2, 1e-3, 1e-4])
config.add_hparam("max_train_iter", 600000)
config.add_hparam("seed", 1234)
config.add_hparam("shuffle", True)
config.add_hparam("log_every", 500)
config.add_hparam("save_every", 500)
config.add_hparam("dtype", tf.float32)
config.add_hparam("eval_batch_size", 256)
config.add_hparam("div255", True)
config.add_hparam("iters_per_epoch",
config.num_train_images // config.batch_size)
config.add_hparam("epochs", config.max_train_iter // config.iters_per_epoch)
# Customized TPU hyperparameters due to differing batch size caused by
# TPU architecture specifics
# Suggested batch sizes to reduce overhead from excessive tensor padding
# https://cloud.google.com/tpu/docs/troubleshooting
config.add_hparam("tpu_batch_size", 1024)
config.add_hparam("tpu_eval_batch_size", 1024)
config.add_hparam("tpu_iters_per_epoch",
config.num_train_images // config.tpu_batch_size)
config.add_hparam("tpu_epochs",
config.max_train_iter // config.tpu_iters_per_epoch)
config.add_hparam("tpu_eval_steps",
config.num_eval_images // config.tpu_eval_batch_size)
return config
def get_hparams_imagenet_104():
  """RevNet-104 configurations for ImageNet."""
  config = get_hparams_imagenet_56()
config.n_res = [2, 2, 11, 2]
return config
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/revnet/config.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Efficient ImageNet input pipeline using tf.data.Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.revnet import resnet_preprocessing
def image_serving_input_fn():
"""Serving input fn for raw images."""
def _preprocess_image(image_bytes):
"""Preprocess a single raw image."""
image = resnet_preprocessing.preprocess_image(
image_bytes=image_bytes, is_training=False)
return image
image_bytes_list = tf.placeholder(
shape=[None],
dtype=tf.string,
)
images = tf.map_fn(
_preprocess_image, image_bytes_list, back_prop=False, dtype=tf.float32)
return tf.estimator.export.ServingInputReceiver(
images, {'image_bytes': image_bytes_list})
class ImageNetInput(object):
"""Generates ImageNet input_fn for training or evaluation.
The training data is assumed to be in TFRecord format with keys as specified
in the dataset_parser below, sharded across 1024 files, named sequentially:
train-00000-of-01024
train-00001-of-01024
...
train-01023-of-01024
The validation data is in the same format but sharded in 128 files.
The format of the data required is created by the script at:
https://github.com/tensorflow/tpu/blob/master/tools/datasets/imagenet_to_gcs.py
Args:
is_training: `bool` for whether the input is for training
data_dir: `str` for the directory of the training and validation data;
if 'null' (the literal string 'null', not None), then construct a null
pipeline, consisting of empty images.
use_bfloat16: If True, use bfloat16 precision; else use float32.
transpose_input: 'bool' for whether to use the double transpose trick
num_cores: `int` for the number of TPU cores
"""
def __init__(self, is_training,
use_bfloat16,
data_dir,
num_cores=8,
num_parallel_calls=64,
image_size=224,
transpose_input=False,
cache=False):
self.image_preprocessing_fn = resnet_preprocessing.preprocess_image
self.is_training = is_training
self.use_bfloat16 = use_bfloat16
self.data_dir = data_dir
self.num_cores = num_cores
self.num_parallel_calls = num_parallel_calls
if self.data_dir == 'null' or self.data_dir == '':
self.data_dir = None
self.transpose_input = transpose_input
self.image_size = image_size
self.cache = cache
def set_shapes(self, batch_size, images, labels):
"""Statically set the batch_size dimension."""
if self.transpose_input:
images.set_shape(images.get_shape().merge_with(
tf.TensorShape([None, None, None, batch_size])))
labels.set_shape(labels.get_shape().merge_with(
tf.TensorShape([batch_size])))
else:
images.set_shape(images.get_shape().merge_with(
tf.TensorShape([batch_size, None, None, None])))
labels.set_shape(labels.get_shape().merge_with(
tf.TensorShape([batch_size])))
return images, labels
def dataset_parser(self, value):
"""Parse an ImageNet record from a serialized string Tensor."""
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, ''),
'image/format': tf.FixedLenFeature((), tf.string, 'jpeg'),
'image/class/label': tf.FixedLenFeature([], tf.int64, -1),
'image/class/text': tf.FixedLenFeature([], tf.string, ''),
'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
'image/object/class/label': tf.VarLenFeature(dtype=tf.int64),
}
parsed = tf.parse_single_example(value, keys_to_features)
image_bytes = tf.reshape(parsed['image/encoded'], shape=[])
image = self.image_preprocessing_fn(
image_bytes=image_bytes,
is_training=self.is_training,
image_size=self.image_size,
use_bfloat16=self.use_bfloat16)
# Subtract one so that labels are in [0, 1000).
label = tf.cast(
tf.reshape(parsed['image/class/label'], shape=[]), dtype=tf.int32) - 1
return image, label
def input_fn(self, params):
"""Input function which provides a single batch for train or eval.
Args:
params: `dict` of parameters passed from the `TPUEstimator`.
`params['batch_size']` is always provided and should be used as the
effective batch size.
Returns:
A `tf.data.Dataset` object.
"""
if self.data_dir is None:
tf.logging.info('Using fake input.')
return self.input_fn_null(params)
# Retrieves the batch size for the current shard. The # of shards is
# computed according to the input pipeline deployment. See
# tf.contrib.tpu.RunConfig for details.
batch_size = params['batch_size']
# Shuffle the filenames to ensure better randomization.
file_pattern = os.path.join(
self.data_dir, 'train-*' if self.is_training else 'validation-*')
dataset = tf.data.Dataset.list_files(file_pattern, shuffle=self.is_training)
if self.is_training and not self.cache:
dataset = dataset.repeat()
def fetch_dataset(filename):
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = tf.data.TFRecordDataset(filename, buffer_size=buffer_size)
return dataset
# Read the data from disk in parallel
dataset = dataset.apply(
tf.data.experimental.parallel_interleave(
fetch_dataset, cycle_length=self.num_parallel_calls, sloppy=True))
if self.cache:
dataset = dataset.cache().apply(
tf.data.experimental.shuffle_and_repeat(1024 * 16))
else:
dataset = dataset.shuffle(1024)
# Use the fused map-and-batch operation.
#
    # For XLA, we must use fixed shapes. Because we repeat the source training
# dataset indefinitely, we can use `drop_remainder=True` to get fixed-size
# batches without dropping any training examples.
#
# When evaluating, `drop_remainder=True` prevents accidentally evaluating
# the same image twice by dropping the final batch if it is less than a full
# batch size. As long as this validation is done with consistent batch size,
# exactly the same images will be used.
dataset = dataset.apply(
tf.data.experimental.map_and_batch(
self.dataset_parser,
batch_size=batch_size,
num_parallel_batches=self.num_cores,
drop_remainder=True))
# Transpose for performance on TPU
if self.transpose_input:
dataset = dataset.map(
lambda images, labels: (tf.transpose(images, [1, 2, 3, 0]), labels),
num_parallel_calls=self.num_cores)
# Assign static batch size dimension
dataset = dataset.map(functools.partial(self.set_shapes, batch_size))
# Prefetch overlaps in-feed with training
dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
return dataset
def input_fn_null(self, params):
"""Input function which provides null (black) images."""
batch_size = params['batch_size']
dataset = tf.data.Dataset.range(1).repeat().map(self._get_null_input)
dataset = dataset.prefetch(batch_size)
dataset = dataset.batch(batch_size, drop_remainder=True)
if self.transpose_input:
dataset = dataset.map(
lambda images, labels: (tf.transpose(images, [1, 2, 3, 0]), labels),
num_parallel_calls=8)
dataset = dataset.map(functools.partial(self.set_shapes, batch_size))
dataset = dataset.prefetch(32) # Prefetch overlaps in-feed with training
tf.logging.info('Input dataset: %s', str(dataset))
return dataset
def _get_null_input(self, _):
null_image = tf.zeros([224, 224, 3], tf.bfloat16
if self.use_bfloat16 else tf.float32)
return (null_image, tf.constant(0, tf.int32))
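# A minimal usage sketch (illustration only; the batch size is an assumption).
# `input_fn` expects the `params` dict that `TPUEstimator` normally supplies,
# so a hand-built dict is passed here; data_dir='null' selects the fake,
# all-black input pipeline defined above.
def _example_input_pipeline():  # hypothetical helper
  imagenet_train = ImageNetInput(
      is_training=True,
      use_bfloat16=False,
      data_dir='null',
      transpose_input=False)
  return imagenet_train.input_fn({'batch_size': 8})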
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/revnet/imagenet_input.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for basic building blocks used in eager mode RevNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.revnet import blocks
def compute_degree(g1, g2, eps=1e-7):
"""Compute the degree between two vectors using their usual inner product."""
def _dot(u, v):
return tf.reduce_sum(u * v)
g1_norm = tf.sqrt(_dot(g1, g1))
g2_norm = tf.sqrt(_dot(g2, g2))
if g1_norm.numpy() == 0 and g2_norm.numpy() == 0:
cosine = 1. - eps
else:
g1_norm = 1. if g1_norm.numpy() == 0 else g1_norm
g2_norm = 1. if g2_norm.numpy() == 0 else g2_norm
cosine = _dot(g1, g2) / g1_norm / g2_norm
# Restrict to arccos range
cosine = tf.minimum(tf.maximum(cosine, eps - 1.), 1. - eps)
degree = tf.acos(cosine) * 180. / 3.141592653589793
return degree
def _validate_block_call_channels_last(block_factory, test):
"""Generic testing function for `channels_last` data format.
  Completes a set of tests varying stride and batch normalization behavior
  between train and test time.
Args:
block_factory: constructor of one of blocks.InitBlock, blocks.FinalBlock,
blocks._ResidualInner
test: tf.test.TestCase object
"""
with tf.device("/cpu:0"): # NHWC format
input_shape = (8, 8, 128)
data_shape = (16,) + input_shape
x = tf.random_normal(shape=data_shape)
# Stride 1
block = block_factory(
filters=128,
strides=(1, 1),
input_shape=input_shape,
data_format="channels_last")
y_tr, y_ev = block(x, training=True), block(x, training=False)
test.assertEqual(y_tr.shape, y_ev.shape)
test.assertEqual(y_ev.shape, (16, 8, 8, 128))
test.assertNotAllClose(y_tr, y_ev)
# Stride of 2
block = block_factory(
filters=128,
strides=(2, 2),
input_shape=input_shape,
data_format="channels_last")
y_tr, y_ev = block(x, training=True), block(x, training=False)
test.assertEqual(y_tr.shape, y_ev.shape)
test.assertEqual(y_ev.shape, (16, 4, 4, 128))
test.assertNotAllClose(y_tr, y_ev)
def _validate_block_call_channels_first(block_factory, test):
"""Generic testing function for `channels_first` data format.
  Completes a set of tests varying stride and batch normalization behavior
  between train and test time.
Args:
block_factory: constructor of one of blocks.InitBlock, blocks.FinalBlock,
blocks._ResidualInner
test: tf.test.TestCase object
"""
if not tf.test.is_gpu_available():
test.skipTest("GPU not available")
with tf.device("/gpu:0"): # Default NCHW format
input_shape = (128, 8, 8)
data_shape = (16,) + input_shape
x = tf.random_normal(shape=data_shape)
# Stride of 1
block = block_factory(filters=128, strides=(1, 1), input_shape=input_shape)
y_tr, y_ev = block(x, training=True), block(x, training=False)
test.assertEqual(y_tr.shape, y_ev.shape)
test.assertEqual(y_ev.shape, (16, 128, 8, 8))
test.assertNotAllClose(y_tr, y_ev)
# Stride of 2
block = block_factory(filters=128, strides=(2, 2), input_shape=input_shape)
y_tr, y_ev = block(x, training=True), block(x, training=False)
test.assertEqual(y_tr.shape, y_ev.shape)
test.assertEqual(y_ev.shape, (16, 128, 4, 4))
test.assertNotAllClose(y_tr, y_ev)
class RevBlockTest(tf.test.TestCase):
def _check_grad_angle(self, grads, grads_true, atol=1e0):
"""Check the angle between two list of vectors are all close."""
for g1, g2 in zip(grads, grads_true):
degree = compute_degree(g1, g2)
self.assertLessEqual(degree, atol)
def test_backward_grads_channels_first(self):
"""Test `backward` function with `channels_first` data format."""
if not tf.test.is_gpu_available():
self.skipTest("GPU not available")
with tf.device("/gpu:0"): # Default NCHW format
# Stride 1
input_shape = (128, 8, 8)
data_shape = (16,) + input_shape
x = tf.random_normal(shape=data_shape, dtype=tf.float64)
dy = tf.random_normal(shape=data_shape, dtype=tf.float64)
dy1, dy2 = tf.split(dy, num_or_size_splits=2, axis=1)
block = blocks.RevBlock(
n_res=3,
filters=128,
strides=(1, 1),
input_shape=input_shape,
fused=False,
dtype=tf.float64)
with tf.GradientTape() as tape:
tape.watch(x)
x1, x2 = tf.split(x, num_or_size_splits=2, axis=1)
y1, y2 = block((x1, x2), training=True)
y = tf.concat((y1, y2), axis=1)
# Compute grads from reconstruction
(dx1, dx2), dw = block.backward_grads(
x=(x1, x2), y=(y1, y2), dy=(dy1, dy2), training=True)
dx = tf.concat((dx1, dx2), axis=1)
vars_ = block.trainable_variables
# Compute true grads
grads = tape.gradient(y, [x] + vars_, output_gradients=dy)
dx_true, dw_true = grads[0], grads[1:]
self.assertAllClose(dx_true, dx)
self.assertAllClose(dw_true, dw)
self._check_grad_angle(dx_true, dx)
self._check_grad_angle(dw_true, dw)
# Stride 2
x = tf.random_normal(shape=data_shape, dtype=tf.float64)
dy = tf.random_normal(shape=(16, 128, 4, 4), dtype=tf.float64)
dy1, dy2 = tf.split(dy, num_or_size_splits=2, axis=1)
block = blocks.RevBlock(
n_res=3,
filters=128,
strides=(2, 2),
input_shape=input_shape,
fused=False,
dtype=tf.float64)
with tf.GradientTape() as tape:
tape.watch(x)
x1, x2 = tf.split(x, num_or_size_splits=2, axis=1)
y1, y2 = block((x1, x2), training=True)
y = tf.concat((y1, y2), axis=1)
# Compute grads from reconstruction
(dx1, dx2), dw = block.backward_grads(
x=(x1, x2), y=(y1, y2), dy=(dy1, dy2), training=True)
dx = tf.concat((dx1, dx2), axis=1)
vars_ = block.trainable_variables
# Compute true grads
grads = tape.gradient(y, [x] + vars_, output_gradients=dy)
dx_true, dw_true = grads[0], grads[1:]
self.assertAllClose(dx_true, dx)
self.assertAllClose(dw_true, dw)
self._check_grad_angle(dx_true, dx)
self._check_grad_angle(dw_true, dw)
def test_backward_grads_with_nativepy(self):
if not tf.test.is_gpu_available():
self.skipTest("GPU not available")
input_shape = (128, 8, 8)
data_shape = (16,) + input_shape
x = tf.random_normal(shape=data_shape, dtype=tf.float64)
dy = tf.random_normal(shape=data_shape, dtype=tf.float64)
dy1, dy2 = tf.split(dy, num_or_size_splits=2, axis=1)
block = blocks.RevBlock(
n_res=3,
filters=128,
strides=(1, 1),
input_shape=input_shape,
fused=False,
dtype=tf.float64)
with tf.GradientTape() as tape:
tape.watch(x)
x1, x2 = tf.split(x, num_or_size_splits=2, axis=1)
y1, y2 = block((x1, x2), training=True)
y = tf.concat((y1, y2), axis=1)
# Compute true grads
dx_true = tape.gradient(y, x, output_gradients=dy)
# Compute grads from reconstruction
(dx1, dx2), _ = block.backward_grads(
x=(x1, x2), y=(y1, y2), dy=(dy1, dy2), training=True)
dx = tf.concat((dx1, dx2), axis=1)
thres = 1e-5
diff_abs = tf.reshape(abs(dx - dx_true), [-1])
assert all(diff_abs < thres)
class _ResidualTest(tf.test.TestCase):
def test_backward_grads_channels_first(self):
"""Test `backward_grads` function with `channels_first` data format."""
if not tf.test.is_gpu_available():
self.skipTest("GPU not available")
with tf.device("/gpu:0"): # Default NCHW format
input_shape = (128, 8, 8)
data_shape = (16,) + input_shape
# Use double precision for testing
x_true = tf.random_normal(shape=data_shape, dtype=tf.float64)
dy = tf.random_normal(shape=data_shape, dtype=tf.float64)
dy1, dy2 = tf.split(dy, num_or_size_splits=2, axis=1)
residual = blocks._Residual(
filters=128,
strides=(1, 1),
input_shape=input_shape,
fused=False,
dtype=tf.float64)
with tf.GradientTape() as tape:
tape.watch(x_true)
x1_true, x2_true = tf.split(x_true, num_or_size_splits=2, axis=1)
y1, y2 = residual((x1_true, x2_true), training=True)
y = tf.concat((y1, y2), axis=1)
# Gradients computed due to reversibility
(x1, x2), (dx1, dx2), dw = residual.backward_grads(
y=(y1, y2), dy=(dy1, dy2), training=True)
x = tf.concat((x1, x2), axis=1)
dx = tf.concat((dx1, dx2), axis=1)
# True gradients computed by the tape
grads = tape.gradient(
y, [x_true] + residual.trainable_variables, output_gradients=dy)
dx_true, dw_true = grads[0], grads[1:]
self.assertAllClose(x_true, x)
self.assertAllClose(dx_true, dx)
self.assertAllClose(dw_true, dw)
class _ResidualInnerTest(tf.test.TestCase):
def test_call(self):
"""Test `call` function."""
_validate_block_call_channels_first(blocks._ResidualInner, self)
_validate_block_call_channels_last(blocks._ResidualInner, self)
class _BottleneckResidualInner(tf.test.TestCase):
def test_call(self):
"""Test `call` function."""
_validate_block_call_channels_first(blocks._BottleneckResidualInner, self)
_validate_block_call_channels_last(blocks._BottleneckResidualInner, self)
if __name__ == "__main__":
tf.enable_eager_execution()
tf.test.main()
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/revnet/blocks_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ImageNet preprocessing for ResNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
IMAGE_SIZE = 224
CROP_PADDING = 32
def distorted_bounding_box_crop(image_bytes,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image_bytes: `Tensor` of binary image data.
bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`
where each coordinate is [0, 1) and the coordinates are arranged
as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding
box supplied.
aspect_ratio_range: An optional list of `float`s. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `float`s. The cropped area of the image
      must contain a fraction of the supplied image within this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional `str` for name scope.
Returns:
cropped image `Tensor`
"""
with tf.name_scope(scope, 'distorted_bounding_box_crop', [image_bytes, bbox]):
shape = tf.image.extract_jpeg_shape(image_bytes)
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
shape,
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, _ = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
return image
def _at_least_x_are_equal(a, b, x):
"""At least `x` of `a` and `b` `Tensors` are equal."""
match = tf.equal(a, b)
match = tf.cast(match, tf.int32)
return tf.greater_equal(tf.reduce_sum(match), x)
def _decode_and_random_crop(image_bytes, image_size):
"""Make a random crop of image_size."""
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
image = distorted_bounding_box_crop(
image_bytes,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(3. / 4, 4. / 3.),
area_range=(0.08, 1.0),
max_attempts=10,
scope=None)
original_shape = tf.image.extract_jpeg_shape(image_bytes)
bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3)
image = tf.cond(
bad,
lambda: _decode_and_center_crop(image_bytes, image_size),
lambda: tf.image.resize_bicubic([image], # pylint: disable=g-long-lambda
[image_size, image_size])[0])
return image
def _decode_and_center_crop(image_bytes, image_size):
"""Crops to center of image with padding then scales image_size."""
shape = tf.image.extract_jpeg_shape(image_bytes)
image_height = shape[0]
image_width = shape[1]
padded_center_crop_size = tf.cast(
((image_size / (image_size + CROP_PADDING)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)),
tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop_window = tf.stack([offset_height, offset_width,
padded_center_crop_size, padded_center_crop_size])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
image = tf.image.resize_bicubic([image], [image_size, image_size])[0]
return image
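# Worked example of the padded center crop above: with image_size=224 and
# CROP_PADDING=32, the crop covers 224 / (224 + 32) = 0.875 of the shorter
# image side before being resized back up to 224 x 224.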
def _flip(image):
"""Random horizontal image flip."""
image = tf.image.random_flip_left_right(image)
return image
def preprocess_for_train(image_bytes, use_bfloat16, image_size=IMAGE_SIZE):
"""Preprocesses the given image for evaluation.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
use_bfloat16: `bool` for whether to use bfloat16.
image_size: image size.
Returns:
A preprocessed image `Tensor`.
"""
image = _decode_and_random_crop(image_bytes, image_size)
image = _flip(image)
image = tf.reshape(image, [image_size, image_size, 3])
image = tf.image.convert_image_dtype(
image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)
return image
def preprocess_for_eval(image_bytes, use_bfloat16, image_size=IMAGE_SIZE):
"""Preprocesses the given image for evaluation.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
use_bfloat16: `bool` for whether to use bfloat16.
image_size: image size.
Returns:
A preprocessed image `Tensor`.
"""
image = _decode_and_center_crop(image_bytes, image_size)
image = tf.reshape(image, [image_size, image_size, 3])
image = tf.image.convert_image_dtype(
image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)
return image
def preprocess_image(image_bytes,
is_training=False,
use_bfloat16=False,
image_size=IMAGE_SIZE):
"""Preprocesses the given image.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
is_training: `bool` for whether the preprocessing is for training.
use_bfloat16: `bool` for whether to use bfloat16.
image_size: image size.
Returns:
A preprocessed image `Tensor`.
"""
if is_training:
return preprocess_for_train(image_bytes, use_bfloat16, image_size)
else:
return preprocess_for_eval(image_bytes, use_bfloat16, image_size)
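# A minimal usage sketch (illustration only; the file path is an assumption).
# Note that `preprocess_image` expects the *encoded* JPEG bytes, not a decoded
# image tensor.
def _example_preprocess(jpeg_path='/tmp/example.jpg'):  # hypothetical helper
  image_bytes = tf.io.read_file(jpeg_path)
  return preprocess_image(image_bytes, is_training=False)  # [224, 224, 3] float32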
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/revnet/resnet_preprocessing.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reversible residual network compatible with eager execution.
Customized basic operations.
Reference [The Reversible Residual Network: Backpropagation
Without Storing Activations](https://arxiv.org/pdf/1707.04585.pdf)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def downsample(x, filters, strides, axis=1):
"""Downsample feature map with avg pooling, if filter size doesn't match."""
def pad_strides(strides, axis=1):
"""Convert length 2 to length 4 strides.
Needed since `tf.compat.v1.layers.Conv2D` uses length 2 strides, whereas
operations
such as `tf.nn.avg_pool2d` use length 4 strides.
Args:
strides: length 2 list/tuple strides for height and width
axis: integer specifying feature dimension according to data format
Returns:
length 4 strides padded with 1 on batch and channel dimension
"""
assert len(strides) == 2
if axis == 1:
return [1, 1, strides[0], strides[1]]
return [1, strides[0], strides[1], 1]
assert len(x.shape) == 4 and (axis == 1 or axis == 3)
data_format = "NCHW" if axis == 1 else "NHWC"
strides_ = pad_strides(strides, axis=axis)
if strides[0] > 1:
x = tf.nn.avg_pool(
x, strides_, strides_, padding="VALID", data_format=data_format)
in_filter = x.shape[axis]
out_filter = filters
if in_filter < out_filter:
pad_size = [(out_filter - in_filter) // 2, (out_filter - in_filter) // 2]
if axis == 1:
x = tf.pad(x, [[0, 0], pad_size, [0, 0], [0, 0]])
else:
x = tf.pad(x, [[0, 0], [0, 0], [0, 0], pad_size])
# In case `tape.gradient(x, [x])` produces a list of `None`
return x + 0.
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/revnet/ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Read CIFAR data from pickled numpy arrays and writes TFRecords.
Generates tf.train.Example protos and writes them to TFRecord files from the
python version of the CIFAR dataset downloaded from
https://www.cs.toronto.edu/~kriz/cifar.html.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tarfile
from absl import flags
from six.moves import cPickle as pickle
from six.moves import urllib
import tensorflow as tf
BASE_URL = 'https://www.cs.toronto.edu/~kriz/'
CIFAR_FILE_NAMES = ['cifar-10-python.tar.gz', 'cifar-100-python.tar.gz']
CIFAR_DOWNLOAD_URLS = [BASE_URL + name for name in CIFAR_FILE_NAMES]
CIFAR_LOCAL_FOLDERS = ['cifar-10', 'cifar-100']
EXTRACT_FOLDERS = ['cifar-10-batches-py', 'cifar-100-python']
def download_and_extract(data_dir, file_name, url):
"""Download CIFAR if not already downloaded."""
filepath = os.path.join(data_dir, file_name)
if tf.gfile.Exists(filepath):
return filepath
if not tf.gfile.Exists(data_dir):
tf.gfile.MakeDirs(data_dir)
urllib.request.urlretrieve(url, filepath)
tarfile.open(os.path.join(filepath), 'r:gz').extractall(data_dir)
return filepath
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _get_file_names(folder):
"""Returns the file names expected to exist in the input_dir."""
assert folder in ['cifar-10', 'cifar-100']
file_names = {}
if folder == 'cifar-10':
file_names['train'] = ['data_batch_%d' % i for i in range(1, 5)]
file_names['validation'] = ['data_batch_5']
file_names['train_all'] = ['data_batch_%d' % i for i in range(1, 6)]
file_names['test'] = ['test_batch']
else:
file_names['train_all'] = ['train']
file_names['test'] = ['test']
# Split in `convert_to_tfrecord` function
file_names['train'] = ['train']
file_names['validation'] = ['train']
return file_names
def read_pickle_from_file(filename):
with tf.gfile.Open(filename, 'rb') as f:
if sys.version_info >= (3, 0):
data_dict = pickle.load(f, encoding='bytes')
else:
data_dict = pickle.load(f)
return data_dict
def convert_to_tfrecord(input_files, output_file, folder):
"""Converts files with pickled data to TFRecords."""
assert folder in ['cifar-10', 'cifar-100']
print('Generating %s' % output_file)
with tf.python_io.TFRecordWriter(output_file) as record_writer:
for input_file in input_files:
data_dict = read_pickle_from_file(input_file)
data = data_dict[b'data']
try:
labels = data_dict[b'labels']
except KeyError:
labels = data_dict[b'fine_labels']
      if folder == 'cifar-100' and output_file.endswith('train.tfrecords'):
        data = data[:40000]
        labels = labels[:40000]
      elif folder == 'cifar-100' and output_file.endswith(
          'validation.tfrecords'):
        data = data[40000:]
        labels = labels[40000:]
num_entries_in_batch = len(labels)
for i in range(num_entries_in_batch):
example = tf.train.Example(
features=tf.train.Features(
feature={
'image': _bytes_feature(data[i].tobytes()),
'label': _int64_feature(labels[i])
}))
record_writer.write(example.SerializeToString())
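# A small verification sketch (illustration only): count the records written to
# a TFRecord file, e.g. to confirm the 40k/10k CIFAR-100 train/validation split
# performed above.
def _example_count_records(path):  # hypothetical helper
  return sum(1 for _ in tf.python_io.tf_record_iterator(path))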
def main(_):
for file_name, url, folder, extract_folder in zip(
CIFAR_FILE_NAMES, CIFAR_DOWNLOAD_URLS, CIFAR_LOCAL_FOLDERS,
EXTRACT_FOLDERS):
print('Download from {} and extract.'.format(url))
data_dir = os.path.join(FLAGS.data_dir, folder)
download_and_extract(data_dir, file_name, url)
file_names = _get_file_names(folder)
input_dir = os.path.join(data_dir, extract_folder)
for mode, files in file_names.items():
input_files = [os.path.join(input_dir, f) for f in files]
output_file = os.path.join(data_dir, mode + '.tfrecords')
try:
os.remove(output_file)
except OSError:
pass
convert_to_tfrecord(input_files, output_file, folder)
print('Done!')
if __name__ == '__main__':
FLAGS = flags.FLAGS
flags.DEFINE_string(
'data_dir',
default=None,
help='Directory to download, extract and store TFRecords.')
tf.app.run(main)
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/revnet/cifar_tfrecords.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for basic ops used in eager mode RevNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.revnet import ops
tfe = tf.contrib.eager
class OpsTest(tf.test.TestCase):
def test_downsample(self):
"""Test `possible_down_sample` function with mock object."""
batch_size = 100
# NHWC format
x = tf.random_normal(shape=[batch_size, 32, 32, 3])
# HW doesn't change but number of features increased
y = ops.downsample(x, filters=5, strides=(1, 1), axis=3)
self.assertEqual(y.shape, [batch_size, 32, 32, 5])
# Feature map doesn't change but HW reduced
y = ops.downsample(x, filters=3, strides=(2, 2), axis=3)
self.assertEqual(y.shape, [batch_size, 16, 16, 3])
    # Number of features increased and HW reduced
y = ops.downsample(x, filters=5, strides=(2, 2), axis=3)
self.assertEqual(y.shape, [batch_size, 16, 16, 5])
# Test gradient flow
x = tf.random_normal(shape=[batch_size, 32, 32, 3])
with tfe.GradientTape() as tape:
tape.watch(x)
y = ops.downsample(x, filters=3, strides=(1, 1))
self.assertEqual(y.shape, x.shape)
dy = tf.random_normal(shape=[batch_size, 32, 32, 3])
grad, = tape.gradient(y, [x], output_gradients=[dy])
self.assertEqual(grad.shape, x.shape)
# Default NCHW format
if tf.test.is_gpu_available():
x = tf.random_normal(shape=[batch_size, 3, 32, 32])
      # HW doesn't change but number of features increased
y = ops.downsample(x, filters=5, strides=(1, 1))
self.assertEqual(y.shape, [batch_size, 5, 32, 32])
# Feature map doesn't change but HW reduced
y = ops.downsample(x, filters=3, strides=(2, 2))
self.assertEqual(y.shape, [batch_size, 3, 16, 16])
      # Number of features increased and HW reduced
y = ops.downsample(x, filters=5, strides=(2, 2))
self.assertEqual(y.shape, [batch_size, 5, 16, 16])
# Test gradient flow
x = tf.random_normal(shape=[batch_size, 3, 32, 32])
with tfe.GradientTape() as tape:
tape.watch(x)
y = ops.downsample(x, filters=3, strides=(1, 1))
self.assertEqual(y.shape, x.shape)
dy = tf.random_normal(shape=[batch_size, 3, 32, 32])
grad, = tape.gradient(y, [x], output_gradients=[dy])
self.assertEqual(grad.shape, x.shape)
if __name__ == '__main__':
tf.enable_eager_execution()
tf.test.main()
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/revnet/ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reversible residual network compatible with eager execution.
Building blocks with manual backward gradient computation.
Reference [The Reversible Residual Network: Backpropagation
Without Storing Activations](https://arxiv.org/pdf/1707.04585.pdf)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import operator
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.revnet import ops
class RevBlock(tf.keras.Model):
"""Single reversible block containing several `_Residual` blocks.
Each `_Residual` block in turn contains two _ResidualInner blocks,
corresponding to the `F`/`G` functions in the paper.
"""
def __init__(self,
n_res,
filters,
strides,
input_shape,
batch_norm_first=False,
data_format="channels_first",
bottleneck=False,
fused=True,
dtype=tf.float32):
"""Initialization.
Args:
n_res: number of residual blocks
      filters: integer, the output filter size of each residual block
strides: length 2 list/tuple of integers for height and width strides
input_shape: length 3 list/tuple of integers
batch_norm_first: whether to apply activation and batch norm before conv
data_format: tensor data format, "NCHW"/"NHWC"
bottleneck: use bottleneck residual if True
fused: use fused batch normalization if True
dtype: float16, float32, or float64
"""
super(RevBlock, self).__init__()
self.blocks = tf.contrib.checkpoint.List()
for i in range(n_res):
curr_batch_norm_first = batch_norm_first and i == 0
curr_strides = strides if i == 0 else (1, 1)
block = _Residual(
filters,
curr_strides,
input_shape,
batch_norm_first=curr_batch_norm_first,
data_format=data_format,
bottleneck=bottleneck,
fused=fused,
dtype=dtype)
self.blocks.append(block)
if data_format == "channels_first":
input_shape = (filters, input_shape[1] // curr_strides[0],
input_shape[2] // curr_strides[1])
else:
input_shape = (input_shape[0] // curr_strides[0],
input_shape[1] // curr_strides[1], filters)
def call(self, h, training=True):
"""Apply reversible block to inputs."""
for block in self.blocks:
h = block(h, training=training)
return h
def backward_grads(self, x, y, dy, training=True):
"""Apply reversible block backward to outputs."""
grads_all = []
for i in reversed(range(len(self.blocks))):
block = self.blocks[i]
if i == 0:
# First block usually contains downsampling that can't be reversed
        dy, grads = block.backward_grads_with_downsample(
            x, y, dy, training=training)
else:
y, dy, grads = block.backward_grads(y, dy, training=training)
grads_all = grads + grads_all
return dy, grads_all
class _Residual(tf.keras.Model):
"""Single residual block contained in a _RevBlock. Each `_Residual` object has
two _ResidualInner objects, corresponding to the `F` and `G` functions in the
paper.
"""
def __init__(self,
filters,
strides,
input_shape,
batch_norm_first=True,
data_format="channels_first",
bottleneck=False,
fused=True,
dtype=tf.float32):
"""Initialization.
Args:
filters: output filter size
strides: length 2 list/tuple of integers for height and width strides
input_shape: length 3 list/tuple of integers
batch_norm_first: whether to apply activation and batch norm before conv
      data_format: tensor data format, "NCHW"/"NHWC"
bottleneck: use bottleneck residual if True
fused: use fused batch normalization if True
dtype: float16, float32, or float64
"""
super(_Residual, self).__init__()
self.filters = filters
self.strides = strides
self.axis = 1 if data_format == "channels_first" else 3
if data_format == "channels_first":
f_input_shape = (input_shape[0] // 2,) + input_shape[1:]
g_input_shape = (filters // 2, input_shape[1] // strides[0],
input_shape[2] // strides[1])
else:
f_input_shape = input_shape[:2] + (input_shape[2] // 2,)
g_input_shape = (input_shape[0] // strides[0],
input_shape[1] // strides[1], filters // 2)
factory = _BottleneckResidualInner if bottleneck else _ResidualInner
self.f = factory(
filters=filters // 2,
strides=strides,
input_shape=f_input_shape,
batch_norm_first=batch_norm_first,
data_format=data_format,
fused=fused,
dtype=dtype)
self.g = factory(
filters=filters // 2,
strides=(1, 1),
input_shape=g_input_shape,
batch_norm_first=batch_norm_first,
data_format=data_format,
fused=fused,
dtype=dtype)
def call(self, x, training=True):
"""Apply residual block to inputs."""
x1, x2 = x
f_x2 = self.f(x2, training=training)
x1_down = ops.downsample(
x1, self.filters // 2, self.strides, axis=self.axis)
x2_down = ops.downsample(
x2, self.filters // 2, self.strides, axis=self.axis)
y1 = f_x2 + x1_down
g_y1 = self.g(y1, training=training)
y2 = g_y1 + x2_down
return y1, y2
def backward_grads(self, y, dy, training=True):
"""Manually compute backward gradients given input and output grads."""
dy1, dy2 = dy
y1, y2 = y
with tf.GradientTape() as gtape:
gtape.watch(y1)
gy1 = self.g(y1, training=training)
grads_combined = gtape.gradient(
gy1, [y1] + self.g.trainable_variables, output_gradients=dy2)
dg = grads_combined[1:]
dx1 = dy1 + grads_combined[0]
# This doesn't affect eager execution, but improves memory efficiency with
# graphs
with tf.control_dependencies(dg + [dx1]):
x2 = y2 - gy1
with tf.GradientTape() as ftape:
ftape.watch(x2)
fx2 = self.f(x2, training=training)
grads_combined = ftape.gradient(
fx2, [x2] + self.f.trainable_variables, output_gradients=dx1)
df = grads_combined[1:]
dx2 = dy2 + grads_combined[0]
# Same behavior as above
with tf.control_dependencies(df + [dx2]):
x1 = y1 - fx2
x = x1, x2
dx = dx1, dx2
grads = df + dg
return x, dx, grads
def backward_grads_with_downsample(self, x, y, dy, training=True):
"""Manually compute backward gradients given input and output grads."""
# Splitting this from `backward_grads` for better readability
x1, x2 = x
y1, _ = y
dy1, dy2 = dy
with tf.GradientTape() as gtape:
gtape.watch(y1)
gy1 = self.g(y1, training=training)
grads_combined = gtape.gradient(
gy1, [y1] + self.g.trainable_variables, output_gradients=dy2)
dg = grads_combined[1:]
dz1 = dy1 + grads_combined[0]
    # dx1 needs one more step to backprop through the downsample
with tf.GradientTape() as x1tape:
x1tape.watch(x1)
z1 = ops.downsample(x1, self.filters // 2, self.strides, axis=self.axis)
dx1 = x1tape.gradient(z1, x1, output_gradients=dz1)
with tf.GradientTape() as ftape:
ftape.watch(x2)
fx2 = self.f(x2, training=training)
grads_combined = ftape.gradient(
fx2, [x2] + self.f.trainable_variables, output_gradients=dz1)
dx2, df = grads_combined[0], grads_combined[1:]
    # dx2 needs one more step to backprop through the downsample
with tf.GradientTape() as x2tape:
x2tape.watch(x2)
z2 = ops.downsample(x2, self.filters // 2, self.strides, axis=self.axis)
dx2 += x2tape.gradient(z2, x2, output_gradients=dy2)
dx = dx1, dx2
grads = df + dg
return dx, grads
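# Hedged sketch (illustrative only, not used by the model) of the
# vector-Jacobian-product pattern used in the two methods above: passing
# `output_gradients` to `tape.gradient` chains an upstream gradient `dy`
# through `fn` without ever forming a scalar loss.
def _vjp_sketch(fn, x, dy):
  """Returns dy^T * d(fn(x))/dx using `output_gradients`."""
  with tf.GradientTape() as tape:
    tape.watch(x)
    y = fn(x)
  return tape.gradient(y, x, output_gradients=dy)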
# Ideally, the following should be wrapped in `tf.keras.Sequential`; however,
# there are subtle issues with its placeholder insertion policy and batch norm.
class _BottleneckResidualInner(tf.keras.Model):
"""Single bottleneck residual inner function contained in _Resdual.
Corresponds to the `F`/`G` functions in the paper.
Suitable for training on ImageNet dataset.
"""
def __init__(self,
filters,
strides,
input_shape,
batch_norm_first=True,
data_format="channels_first",
fused=True,
dtype=tf.float32):
"""Initialization.
Args:
filters: output filter size
strides: length 2 list/tuple of integers for height and width strides
input_shape: length 3 list/tuple of integers
batch_norm_first: whether to apply activation and batch norm before conv
data_format: tensor data format, "NCHW"/"NHWC"
fused: use fused batch normalization if True
dtype: float16, float32, or float64
"""
super(_BottleneckResidualInner, self).__init__()
axis = 1 if data_format == "channels_first" else 3
if batch_norm_first:
self.batch_norm_0 = tf.keras.layers.BatchNormalization(
axis=axis, input_shape=input_shape, fused=fused, dtype=dtype)
self.conv2d_1 = tf.keras.layers.Conv2D(
filters=filters // 4,
kernel_size=1,
strides=strides,
input_shape=input_shape,
data_format=data_format,
use_bias=False,
padding="SAME",
dtype=dtype)
self.batch_norm_1 = tf.keras.layers.BatchNormalization(
axis=axis, fused=fused, dtype=dtype)
self.conv2d_2 = tf.keras.layers.Conv2D(
filters=filters // 4,
kernel_size=3,
strides=(1, 1),
data_format=data_format,
use_bias=False,
padding="SAME",
dtype=dtype)
self.batch_norm_2 = tf.keras.layers.BatchNormalization(
axis=axis, fused=fused, dtype=dtype)
self.conv2d_3 = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=1,
strides=(1, 1),
data_format=data_format,
use_bias=False,
padding="SAME",
dtype=dtype)
self.batch_norm_first = batch_norm_first
def call(self, x, training=True):
net = x
if self.batch_norm_first:
net = self.batch_norm_0(net, training=training)
net = tf.nn.relu(net)
net = self.conv2d_1(net)
net = self.batch_norm_1(net, training=training)
net = tf.nn.relu(net)
net = self.conv2d_2(net)
net = self.batch_norm_2(net, training=training)
net = tf.nn.relu(net)
net = self.conv2d_3(net)
return net
class _ResidualInner(tf.keras.Model):
"""Single residual inner function contained in _ResdualBlock.
Corresponds to the `F`/`G` functions in the paper.
"""
def __init__(self,
filters,
strides,
input_shape,
batch_norm_first=True,
data_format="channels_first",
fused=True,
dtype=tf.float32):
"""Initialization.
Args:
filters: output filter size
strides: length 2 list/tuple of integers for height and width strides
input_shape: length 3 list/tuple of integers
batch_norm_first: whether to apply activation and batch norm before conv
data_format: tensor data format, "NCHW"/"NHWC"
fused: use fused batch normalization if True
dtype: float16, float32, or float64
"""
super(_ResidualInner, self).__init__()
axis = 1 if data_format == "channels_first" else 3
if batch_norm_first:
self.batch_norm_0 = tf.keras.layers.BatchNormalization(
axis=axis, input_shape=input_shape, fused=fused, dtype=dtype)
self.conv2d_1 = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=3,
strides=strides,
input_shape=input_shape,
data_format=data_format,
use_bias=False,
padding="SAME",
dtype=dtype)
self.batch_norm_1 = tf.keras.layers.BatchNormalization(
axis=axis, fused=fused, dtype=dtype)
self.conv2d_2 = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=3,
strides=(1, 1),
data_format=data_format,
use_bias=False,
padding="SAME",
dtype=dtype)
self.batch_norm_first = batch_norm_first
def call(self, x, training=True):
net = x
if self.batch_norm_first:
net = self.batch_norm_0(net, training=training)
net = tf.nn.relu(net)
net = self.conv2d_1(net)
net = self.batch_norm_1(net, training=training)
net = tf.nn.relu(net)
net = self.conv2d_2(net)
return net
class InitBlock(tf.keras.Model):
"""Initial block of RevNet."""
def __init__(self, config):
"""Initialization.
Args:
config: tf.contrib.training.HParams object; specifies hyperparameters
"""
super(InitBlock, self).__init__()
self.config = config
self.axis = 1 if self.config.data_format == "channels_first" else 3
self.conv2d = tf.keras.layers.Conv2D(
filters=self.config.init_filters,
kernel_size=self.config.init_kernel,
strides=(self.config.init_stride, self.config.init_stride),
data_format=self.config.data_format,
use_bias=False,
padding="SAME",
input_shape=self.config.input_shape,
dtype=self.config.dtype)
self.batch_norm = tf.keras.layers.BatchNormalization(
axis=self.axis, fused=self.config.fused, dtype=self.config.dtype)
self.activation = tf.keras.layers.Activation("relu")
if self.config.init_max_pool:
self.max_pool = tf.keras.layers.MaxPooling2D(
pool_size=(3, 3),
strides=(2, 2),
padding="SAME",
data_format=self.config.data_format,
dtype=self.config.dtype)
def call(self, x, training=True):
net = x
net = self.conv2d(net)
net = self.batch_norm(net, training=training)
net = self.activation(net)
if self.config.init_max_pool:
net = self.max_pool(net)
return tf.split(net, num_or_size_splits=2, axis=self.axis)
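# Note: the initial feature map is split into two halves along the channel
# axis; every reversible block downstream consumes and produces such an
# (x1, x2) pair, and `FinalBlock` below concatenates the halves again.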
class FinalBlock(tf.keras.Model):
"""Final block of RevNet."""
def __init__(self, config):
"""Initialization.
Args:
config: tf.contrib.training.HParams object; specifies hyperparameters
Raises:
ValueError: Unsupported data format
"""
super(FinalBlock, self).__init__()
self.config = config
self.axis = 1 if self.config.data_format == "channels_first" else 3
f = self.config.filters[-1] # Number of filters
    r = functools.reduce(operator.mul, self.config.strides, 1)  # Reduction ratio
r *= self.config.init_stride
if self.config.init_max_pool:
r *= 2
if self.config.data_format == "channels_first":
w, h = self.config.input_shape[1], self.config.input_shape[2]
input_shape = (f, w // r, h // r)
elif self.config.data_format == "channels_last":
w, h = self.config.input_shape[0], self.config.input_shape[1]
input_shape = (w // r, h // r, f)
else:
raise ValueError("Data format should be either `channels_first`"
" or `channels_last`")
self.batch_norm = tf.keras.layers.BatchNormalization(
axis=self.axis,
input_shape=input_shape,
fused=self.config.fused,
dtype=self.config.dtype)
self.activation = tf.keras.layers.Activation("relu")
self.global_avg_pool = tf.keras.layers.GlobalAveragePooling2D(
data_format=self.config.data_format, dtype=self.config.dtype)
self.dense = tf.keras.layers.Dense(
self.config.n_classes, dtype=self.config.dtype)
def call(self, x, training=True):
net = tf.concat(x, axis=self.axis)
net = self.batch_norm(net, training=training)
net = self.activation(net)
net = self.global_avg_pool(net)
net = self.dense(net)
return net
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/revnet/blocks.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Eager execution workflow with RevNet train on CIFAR-10."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from absl import flags
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.revnet import cifar_input
from tensorflow.contrib.eager.python.examples.revnet import config as config_
from tensorflow.contrib.eager.python.examples.revnet import revnet
tfe = tf.contrib.eager
def apply_gradients(optimizer, grads, vars_, global_step=None):
"""Functional style apply_grads for `tfe.defun`."""
optimizer.apply_gradients(zip(grads, vars_), global_step=global_step)
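# `tfe.defun` traces a Python function into a callable graph function, which
# is why `apply_gradients` is kept as a free function with explicit arguments
# rather than a method closing over state; `main` below wraps it (and several
# model methods) when --use_defun is set. A minimal, purely illustrative use:
#   square = tfe.defun(lambda t: t * t)
#   square(tf.constant(2.))  # runs the traced graph function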
def main(_):
"""Eager execution workflow with RevNet trained on CIFAR-10."""
tf.enable_eager_execution()
config = get_config(config_name=FLAGS.config, dataset=FLAGS.dataset)
ds_train, ds_train_one_shot, ds_validation, ds_test = get_datasets(
data_dir=FLAGS.data_dir, config=config)
model = revnet.RevNet(config=config)
global_step = tf.train.get_or_create_global_step() # Ensure correct summary
global_step.assign(1)
learning_rate = tf.train.piecewise_constant(
global_step, config.lr_decay_steps, config.lr_list)
optimizer = tf.train.MomentumOptimizer(
learning_rate, momentum=config.momentum)
checkpointer = tf.train.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=global_step)
if FLAGS.use_defun:
model.call = tfe.defun(model.call)
model.compute_gradients = tfe.defun(model.compute_gradients)
model.get_moving_stats = tfe.defun(model.get_moving_stats)
model.restore_moving_stats = tfe.defun(model.restore_moving_stats)
global apply_gradients # pylint:disable=global-variable-undefined
apply_gradients = tfe.defun(apply_gradients)
if FLAGS.train_dir:
summary_writer = tf.contrib.summary.create_file_writer(FLAGS.train_dir)
if FLAGS.restore:
latest_path = tf.train.latest_checkpoint(FLAGS.train_dir)
checkpointer.restore(latest_path)
print("Restored latest checkpoint at path:\"{}\" "
"with global_step: {}".format(latest_path, global_step.numpy()))
sys.stdout.flush()
for x, y in ds_train:
train_one_iter(model, x, y, optimizer, global_step=global_step)
if global_step.numpy() % config.log_every == 0:
acc_test, loss_test = evaluate(model, ds_test)
if FLAGS.validate:
acc_train, loss_train = evaluate(model, ds_train_one_shot)
acc_validation, loss_validation = evaluate(model, ds_validation)
print("Iter {}, "
"training set accuracy {:.4f}, loss {:.4f}; "
"validation set accuracy {:.4f}, loss {:.4f}; "
"test accuracy {:.4f}, loss {:.4f}".format(
global_step.numpy(), acc_train, loss_train, acc_validation,
loss_validation, acc_test, loss_test))
else:
print("Iter {}, test accuracy {:.4f}, loss {:.4f}".format(
global_step.numpy(), acc_test, loss_test))
sys.stdout.flush()
if FLAGS.train_dir:
with summary_writer.as_default():
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("Test accuracy", acc_test)
tf.contrib.summary.scalar("Test loss", loss_test)
if FLAGS.validate:
tf.contrib.summary.scalar("Training accuracy", acc_train)
tf.contrib.summary.scalar("Training loss", loss_train)
tf.contrib.summary.scalar("Validation accuracy", acc_validation)
tf.contrib.summary.scalar("Validation loss", loss_validation)
if global_step.numpy() % config.save_every == 0 and FLAGS.train_dir:
saved_path = checkpointer.save(
file_prefix=os.path.join(FLAGS.train_dir, "ckpt"))
print("Saved checkpoint at path: \"{}\" "
"with global_step: {}".format(saved_path, global_step.numpy()))
sys.stdout.flush()
def get_config(config_name="revnet-38", dataset="cifar-10"):
"""Return configuration."""
print("Config: {}".format(config_name))
sys.stdout.flush()
config = {
"revnet-38": config_.get_hparams_cifar_38(),
"revnet-110": config_.get_hparams_cifar_110(),
"revnet-164": config_.get_hparams_cifar_164(),
}[config_name]
if dataset == "cifar-10":
config.add_hparam("n_classes", 10)
config.add_hparam("dataset", "cifar-10")
else:
config.add_hparam("n_classes", 100)
config.add_hparam("dataset", "cifar-100")
return config
def get_datasets(data_dir, config):
"""Return dataset."""
if data_dir is None:
raise ValueError("No supplied data directory")
if not os.path.exists(data_dir):
raise ValueError("Data directory {} does not exist".format(data_dir))
if config.dataset not in ["cifar-10", "cifar-100"]:
raise ValueError("Unknown dataset {}".format(config.dataset))
print("Training on {} dataset.".format(config.dataset))
sys.stdout.flush()
data_dir = os.path.join(data_dir, config.dataset)
if FLAGS.validate:
# 40k Training set
ds_train = cifar_input.get_ds_from_tfrecords(
data_dir=data_dir,
split="train",
data_aug=True,
batch_size=config.batch_size,
epochs=config.epochs,
shuffle=config.shuffle,
data_format=config.data_format,
dtype=config.dtype,
prefetch=config.batch_size)
    # 10k Validation set
ds_validation = cifar_input.get_ds_from_tfrecords(
data_dir=data_dir,
split="validation",
data_aug=False,
batch_size=config.eval_batch_size,
epochs=1,
shuffle=False,
data_format=config.data_format,
dtype=config.dtype,
prefetch=config.eval_batch_size)
else:
# 50k Training set
ds_train = cifar_input.get_ds_from_tfrecords(
data_dir=data_dir,
split="train_all",
data_aug=True,
batch_size=config.batch_size,
epochs=config.epochs,
shuffle=config.shuffle,
data_format=config.data_format,
dtype=config.dtype,
prefetch=config.batch_size)
ds_validation = None
# Always compute loss and accuracy on whole test set
ds_train_one_shot = cifar_input.get_ds_from_tfrecords(
data_dir=data_dir,
split="train_all",
data_aug=False,
batch_size=config.eval_batch_size,
epochs=1,
shuffle=False,
data_format=config.data_format,
dtype=config.dtype,
prefetch=config.eval_batch_size)
ds_test = cifar_input.get_ds_from_tfrecords(
data_dir=data_dir,
split="test",
data_aug=False,
batch_size=config.eval_batch_size,
epochs=1,
shuffle=False,
data_format=config.data_format,
dtype=config.dtype,
prefetch=config.eval_batch_size)
return ds_train, ds_train_one_shot, ds_validation, ds_test
def train_one_iter(model, inputs, labels, optimizer, global_step=None):
"""Train for one iteration."""
logits, saved_hiddens = model(inputs, training=True)
values = model.get_moving_stats()
grads, loss = model.compute_gradients(saved_hiddens, labels)
# Restore moving averages when executing eagerly to avoid updating twice
model.restore_moving_stats(values)
apply_gradients(
optimizer, grads, model.trainable_variables, global_step=global_step)
return logits, loss
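# Note on the snapshot/restore pair above: `model.compute_gradients` re-runs
# parts of the forward computation while reconstructing activations, which in
# eager mode would update the batch-norm moving averages a second time;
# saving them with `get_moving_stats` and writing them back afterwards keeps
# each iteration's moving-average update applied exactly once.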
def evaluate(model, dataset):
"""Compute accuracy with the given dataset iterator."""
mean_loss = tfe.metrics.Mean()
accuracy = tfe.metrics.Accuracy()
for x, y in dataset:
logits, _ = model(x, training=False)
loss = model.compute_loss(logits=logits, labels=y)
accuracy(
labels=tf.cast(y, tf.int64),
predictions=tf.argmax(logits, axis=1, output_type=tf.int64))
mean_loss(loss)
return accuracy.result().numpy(), mean_loss.result().numpy()
if __name__ == "__main__":
flags.DEFINE_string(
"data_dir", default=None, help="Directory to load tfrecords")
flags.DEFINE_string(
"train_dir",
default=None,
help="[Optional] Directory to store the training information")
flags.DEFINE_boolean(
"restore",
default=False,
help="[Optional] Restore the latest checkpoint from `train_dir` if True")
flags.DEFINE_boolean(
"validate",
default=False,
help="[Optional] Use the validation set or not for hyperparameter search")
flags.DEFINE_string(
"dataset",
default="cifar-10",
help="[Optional] The dataset used; either `cifar-10` or `cifar-100`")
flags.DEFINE_string(
"config",
default="revnet-38",
help="[Optional] Architecture of network. "
"Other options include `revnet-110` and `revnet-164`")
flags.DEFINE_boolean(
"use_defun",
default=False,
help="[Optional] Use `tfe.defun` to boost performance.")
FLAGS = flags.FLAGS
tf.app.run(main)
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/revnet/main.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for basic building blocks used in eager mode RevNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import time
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.revnet import blocks_test
from tensorflow.contrib.eager.python.examples.revnet import config as config_
from tensorflow.contrib.eager.python.examples.revnet import revnet
from tensorflow.python.client import device_lib
tfe = tf.contrib.eager
def train_one_iter(model, inputs, labels, optimizer, global_step=None):
"""Train for one iteration."""
logits, saved_hidden = model(inputs)
grads, loss = model.compute_gradients(
saved_hidden=saved_hidden, labels=labels)
optimizer.apply_gradients(
zip(grads, model.trainable_variables), global_step=global_step)
return logits, loss
class RevNetTest(tf.test.TestCase):
def setUp(self):
super(RevNetTest, self).setUp()
config = config_.get_hparams_cifar_38()
config.add_hparam("n_classes", 10)
config.add_hparam("dataset", "cifar-10")
    # Reconstruction could cause numerical error, so use double precision for tests
config.dtype = tf.float64
config.fused = False # Fused batch norm does not support tf.float64
# Reduce the batch size for tests because the OSS version runs
    # in a constrained GPU environment with 1-2GB of memory.
config.batch_size = 2
shape = (config.batch_size,) + config.input_shape
self.model = revnet.RevNet(config=config)
self.x = tf.random_normal(shape=shape, dtype=tf.float64)
self.t = tf.random_uniform(
shape=[config.batch_size],
minval=0,
maxval=config.n_classes,
dtype=tf.int64)
self.config = config
def tearDown(self):
del self.model
del self.x
del self.t
del self.config
super(RevNetTest, self).tearDown()
def test_call(self):
"""Test `call` function."""
y, _ = self.model(self.x, training=False)
self.assertEqual(y.shape, [self.config.batch_size, self.config.n_classes])
def _check_grad_angle_combined(self, grads, grads_true):
"""Verify that the reconstructed gradients has correct direction.
Due to numerical imprecision, the magnitude may be slightly different.
Yet according to the paper, the angle should be roughly the same.
Args:
grads: list of gradients from reconstruction
grads_true: list of true gradients
"""
def _combine(gs):
return [tf.reshape(g, [-1]) for g in gs]
g1_all = tf.concat(_combine(grads), axis=0)
g2_all = tf.concat(_combine(grads_true), axis=0)
self.assertEqual(len(g1_all.shape), 1)
self.assertEqual(len(g2_all.shape), 1)
degree = blocks_test.compute_degree(g1_all, g2_all)
self.assertLessEqual(degree, 1e0)
def test_compute_gradients(self):
"""Test `compute_gradients` function."""
_, saved_hidden = self.model(self.x) # Initialize model
grads, loss = self.model.compute_gradients(
saved_hidden=saved_hidden, labels=self.t)
vars_ = self.model.trainable_variables
self.assertTrue(isinstance(grads, list))
self.assertTrue(isinstance(vars_, list))
self.assertEqual(len(grads), len(vars_))
for grad, var in zip(grads, vars_):
self.assertEqual(grad.shape, var.shape)
# Compare against the true gradient computed by the tape
with tf.GradientTape() as tape:
logits, _ = self.model(self.x)
loss_true = self.model.compute_loss(logits=logits, labels=self.t)
grads_true = tape.gradient(loss_true, vars_)
self.assertAllClose(loss, loss_true)
self.assertAllClose(grads, grads_true, rtol=1e-4, atol=1e-4)
self._check_grad_angle_combined(grads, grads_true)
def test_call_defun(self):
"""Test `call` function with defun."""
y, _ = tfe.defun(self.model.call)(self.x, training=False)
self.assertEqual(y.shape, [self.config.batch_size, self.config.n_classes])
def test_compute_gradients_defun(self):
"""Test `compute_gradients` function with defun."""
# TODO(apassos): make cond support returning None to let this happen with
# tf.function.
compute_gradients = tfe.defun(self.model.compute_gradients)
_, saved_hidden = self.model(self.x)
grads, _ = compute_gradients(saved_hidden=saved_hidden, labels=self.t)
vars_ = self.model.trainable_variables
self.assertTrue(isinstance(grads, list))
self.assertTrue(isinstance(vars_, list))
self.assertEqual(len(grads), len(vars_))
for grad, var in zip(grads, vars_):
if grad is not None:
self.assertEqual(grad.shape, var.shape)
def test_training_graph(self):
"""Test model training in graph mode."""
with tf.Graph().as_default():
config = config_.get_hparams_cifar_38()
config.add_hparam("n_classes", 10)
config.add_hparam("dataset", "cifar-10")
x = tf.random_normal(
shape=(self.config.batch_size,) + self.config.input_shape)
t = tf.random_uniform(
shape=(self.config.batch_size,),
minval=0,
maxval=self.config.n_classes,
dtype=tf.int32)
global_step = tf.Variable(0., trainable=False)
model = revnet.RevNet(config=config)
_, saved_hidden = model(x)
grads, _ = model.compute_gradients(saved_hidden=saved_hidden, labels=t)
optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
train_op = optimizer.apply_gradients(
zip(grads, model.trainable_variables), global_step=global_step)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for _ in range(1):
sess.run(train_op)
# Benchmark related
def device_and_data_format():
return ("/gpu:0",
"channels_first") if tf.test.is_gpu_available() else ("/cpu:0",
"channels_last")
def random_batch(batch_size, config):
shape = (batch_size,) + config.input_shape
images = tf.random_uniform(shape)
labels = tf.random_uniform(
[batch_size], minval=0, maxval=config.n_classes, dtype=tf.int32)
return images, labels
class MockIterator(object):
def __init__(self, tensors):
self._tensors = [tf.identity(x) for x in tensors]
def next(self):
return self._tensors
class RevNetBenchmark(tf.test.Benchmark):
"""Eager and graph benchmarks for RevNet."""
def _train_batch_sizes(self):
"""Shamelessly copied from `resnet50_test.py`.
Note: This is targeted towards ImageNet. CIFAR-10 should allow more
aggressive batch sizes.
Returns:
A tuple of possible batch sizes
"""
for device in device_lib.list_local_devices():
if tf.DeviceSpec.from_string(device.name).device_type == "GPU":
if "K20" in device.physical_device_desc:
return (16,)
if "P100" in device.physical_device_desc:
return (16, 32, 64)
if tf.DeviceSpec.from_string(device.name).device_type == "TPU":
return (32,)
return (16, 32)
def _force_device_sync(self):
"""Shamelessly copied from `resnet50_test.py`."""
tf.constant(1.).cpu()
def _report(self, label, start, num_iters, device, batch_size, data_format):
avg_time = (time.time() - start) / num_iters
dev = tf.DeviceSpec.from_string(device).device_type.lower()
name = "%s_%s_batch_%d_%s" % (label, dev, batch_size, data_format)
extras = {"examples_per_sec": batch_size / avg_time}
self.report_benchmark(
iters=num_iters, wall_time=avg_time, name=name, extras=extras)
def _benchmark_eager_apply(self,
label,
device_and_format,
defun=False,
execution_mode=None):
config = config_.get_hparams_imagenet_56()
with tfe.execution_mode(execution_mode):
device, data_format = device_and_format
model = revnet.RevNet(config=config)
if defun:
# TODO(apassos): reenable after cond lets you return None
model.call = tfe.defun(model.call)
batch_size = 64
num_burn = 5
num_iters = 10
with tf.device(device):
images, _ = random_batch(batch_size, config)
for _ in range(num_burn):
model(images, training=False)
if execution_mode:
tfe.async_wait()
gc.collect()
start = time.time()
for _ in range(num_iters):
model(images, training=False)
if execution_mode:
tfe.async_wait()
self._report(label, start, num_iters, device, batch_size, data_format)
def benchmark_eager_apply_sync(self):
self._benchmark_eager_apply(
"eager_apply_sync", device_and_data_format(), defun=False)
def benchmark_eager_apply_async(self):
self._benchmark_eager_apply(
"eager_apply_async",
device_and_data_format(),
defun=False,
execution_mode=tfe.ASYNC)
def benchmark_eager_call_defun(self):
self._benchmark_eager_apply(
"eager_apply_with_defun", device_and_data_format(), defun=True)
def _benchmark_eager_train(self,
label,
make_iterator,
device_and_format,
defun=False,
execution_mode=None):
config = config_.get_hparams_imagenet_56()
with tfe.execution_mode(execution_mode):
device, data_format = device_and_format
for batch_size in self._train_batch_sizes():
(images, labels) = random_batch(batch_size, config)
model = revnet.RevNet(config=config)
optimizer = tf.train.GradientDescentOptimizer(0.1)
if defun:
model.call = tfe.function(model.call)
num_burn = 3
num_iters = 10
with tf.device(device):
iterator = make_iterator((images, labels))
for _ in range(num_burn):
(images, labels) = iterator.next()
train_one_iter(model, images, labels, optimizer)
if execution_mode:
tfe.async_wait()
self._force_device_sync()
gc.collect()
start = time.time()
for _ in range(num_iters):
(images, labels) = iterator.next()
train_one_iter(model, images, labels, optimizer)
if execution_mode:
tfe.async_wait()
self._force_device_sync()
self._report(label, start, num_iters, device, batch_size, data_format)
def benchmark_eager_train_sync(self):
self._benchmark_eager_train(
"eager_train_sync", MockIterator, device_and_data_format(), defun=False)
def benchmark_eager_train_async(self):
self._benchmark_eager_train(
"eager_train_async",
MockIterator,
device_and_data_format(),
defun=False,
execution_mode=tfe.ASYNC)
def benchmark_eager_train_defun(self):
self._benchmark_eager_train(
"eager_train", MockIterator, device_and_data_format(), defun=False)
def benchmark_eager_train_datasets_with_defun(self):
def make_iterator(tensors):
with tf.device("/device:CPU:0"):
ds = tf.data.Dataset.from_tensors(tensors).repeat()
return tfe.Iterator(ds)
self._benchmark_eager_train(
"eager_train_dataset_with_defun",
make_iterator,
device_and_data_format(),
defun=True)
if __name__ == "__main__":
tf.enable_eager_execution()
tf.test.main()
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/revnet/revnet_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Estimator workflow with RevNet train on CIFAR-10."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.revnet import cifar_input
from tensorflow.contrib.eager.python.examples.revnet import main as main_
from tensorflow.contrib.eager.python.examples.revnet import revnet
def model_fn(features, labels, mode, params):
"""Function specifying the model that is required by the `tf.estimator` API.
Args:
features: Input images
labels: Labels of images
    mode: One of `ModeKeys.TRAIN`, `ModeKeys.EVAL`, or `ModeKeys.PREDICT`
    params: A dictionary of extra parameters that might be passed
Returns:
An instance of `tf.estimator.EstimatorSpec`
"""
inputs = features
if isinstance(inputs, dict):
inputs = features["image"]
config = params["config"]
model = revnet.RevNet(config=config)
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.train.piecewise_constant(
global_step, config.lr_decay_steps, config.lr_list)
optimizer = tf.train.MomentumOptimizer(
learning_rate, momentum=config.momentum)
logits, saved_hidden = model(inputs, training=True)
grads, loss = model.compute_gradients(saved_hidden, labels, training=True)
with tf.control_dependencies(model.get_updates_for(inputs)):
train_op = optimizer.apply_gradients(
zip(grads, model.trainable_variables), global_step=global_step)
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
else:
logits, _ = model(inputs, training=False)
predictions = tf.argmax(logits, axis=1)
probabilities = tf.nn.softmax(logits)
if mode == tf.estimator.ModeKeys.EVAL:
loss = model.compute_loss(labels=labels, logits=logits)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
eval_metric_ops={
"accuracy":
tf.metrics.accuracy(labels=labels, predictions=predictions)
})
else: # mode == tf.estimator.ModeKeys.PREDICT
result = {
"classes": predictions,
"probabilities": probabilities,
}
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
export_outputs={
"classify": tf.estimator.export.PredictOutput(result)
})
def get_input_fn(config, data_dir, split):
"""Get the input function that is required by the `tf.estimator` API.
Args:
config: Customized hyperparameters
data_dir: Directory where the data is stored
split: One of `train`, `validation`, `train_all`, and `test`
Returns:
Input function required by the `tf.estimator` API
"""
data_dir = os.path.join(data_dir, config.dataset)
# Fix split-dependent hyperparameters
if split == "train_all" or split == "train":
data_aug = True
batch_size = config.batch_size
epochs = config.epochs
shuffle = True
prefetch = config.batch_size
else:
data_aug = False
batch_size = config.eval_batch_size
epochs = 1
shuffle = False
prefetch = config.eval_batch_size
def input_fn():
"""Input function required by the `tf.estimator.Estimator` API."""
return cifar_input.get_ds_from_tfrecords(
data_dir=data_dir,
split=split,
data_aug=data_aug,
batch_size=batch_size,
epochs=epochs,
shuffle=shuffle,
prefetch=prefetch,
data_format=config.data_format)
return input_fn
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
# RevNet specific configuration
config = main_.get_config(config_name=FLAGS.config, dataset=FLAGS.dataset)
# Estimator specific configuration
run_config = tf.estimator.RunConfig(
model_dir=FLAGS.model_dir, # Directory for storing checkpoints
tf_random_seed=config.seed,
save_summary_steps=config.log_every,
save_checkpoints_steps=config.log_every,
session_config=None, # Using default
keep_checkpoint_max=100,
keep_checkpoint_every_n_hours=10000, # Using default
log_step_count_steps=config.log_every,
      train_distribute=None  # By default, do not use a distribution strategy
)
# Construct estimator
revnet_estimator = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=FLAGS.model_dir,
config=run_config,
params={"config": config})
# Construct input functions
train_input_fn = get_input_fn(
config=config, data_dir=FLAGS.data_dir, split="train_all")
eval_input_fn = get_input_fn(
config=config, data_dir=FLAGS.data_dir, split="test")
# Train and evaluate estimator
revnet_estimator.train(input_fn=train_input_fn)
revnet_estimator.evaluate(input_fn=eval_input_fn)
if FLAGS.export:
input_shape = (None,) + config.input_shape
inputs = tf.placeholder(tf.float32, shape=input_shape)
input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({
"image": inputs
})
revnet_estimator.export_saved_model(FLAGS.model_dir, input_fn)
if __name__ == "__main__":
flags.DEFINE_string(
"data_dir", default=None, help="Directory to load tfrecords")
flags.DEFINE_string(
"model_dir",
default=None,
help="[Optional] Directory to store the training information")
flags.DEFINE_string(
"dataset",
default="cifar-10",
help="[Optional] The dataset used; either `cifar-10` or `cifar-100`")
flags.DEFINE_boolean(
"export",
default=False,
help="[Optional] Export the model for serving if True")
flags.DEFINE_string(
"config",
default="revnet-38",
help="[Optional] Architecture of network. "
"Other options include `revnet-110` and `revnet-164`")
FLAGS = flags.FLAGS
tf.app.run()
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/revnet/main_estimator.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reversible residual network compatible with eager execution.
Code for main model.
Reference [The Reversible Residual Network: Backpropagation
Without Storing Activations](https://arxiv.org/pdf/1707.04585.pdf)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.revnet import blocks
class RevNet(tf.keras.Model):
"""RevNet that depends on all the blocks."""
def __init__(self, config):
"""Initialize RevNet with building blocks.
Args:
config: tf.contrib.training.HParams object; specifies hyperparameters
"""
super(RevNet, self).__init__()
self.axis = 1 if config.data_format == "channels_first" else 3
self.config = config
self._init_block = blocks.InitBlock(config=self.config)
self._final_block = blocks.FinalBlock(config=self.config)
self._block_list = self._construct_intermediate_blocks()
self._moving_average_variables = []
def _construct_intermediate_blocks(self):
# Precompute input shape after initial block
stride = self.config.init_stride
if self.config.init_max_pool:
stride *= 2
if self.config.data_format == "channels_first":
w, h = self.config.input_shape[1], self.config.input_shape[2]
input_shape = (self.config.init_filters, w // stride, h // stride)
else:
w, h = self.config.input_shape[0], self.config.input_shape[1]
input_shape = (w // stride, h // stride, self.config.init_filters)
# Aggregate intermediate blocks
block_list = tf.contrib.checkpoint.List()
for i in range(self.config.n_rev_blocks):
# RevBlock configurations
n_res = self.config.n_res[i]
filters = self.config.filters[i]
if filters % 2 != 0:
raise ValueError("Number of output filters must be even to ensure"
"correct partitioning of channels")
stride = self.config.strides[i]
strides = (self.config.strides[i], self.config.strides[i])
# Add block
rev_block = blocks.RevBlock(
n_res,
filters,
strides,
input_shape,
batch_norm_first=(i != 0), # Only skip on first block
data_format=self.config.data_format,
bottleneck=self.config.bottleneck,
fused=self.config.fused,
dtype=self.config.dtype)
block_list.append(rev_block)
# Precompute input shape for the next block
if self.config.data_format == "channels_first":
w, h = input_shape[1], input_shape[2]
input_shape = (filters, w // stride, h // stride)
else:
w, h = input_shape[0], input_shape[1]
input_shape = (w // stride, h // stride, filters)
return block_list
def call(self, inputs, training=True):
"""Forward pass."""
saved_hidden = None
if training:
saved_hidden = [inputs]
h = self._init_block(inputs, training=training)
if training:
saved_hidden.append(h)
for block in self._block_list:
h = block(h, training=training)
if training:
saved_hidden.append(h)
logits = self._final_block(h, training=training)
return (logits, saved_hidden) if training else (logits, None)
def compute_loss(self, logits, labels):
"""Compute cross entropy loss."""
if self.config.dtype == tf.float32 or self.config.dtype == tf.float16:
cross_ent = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels)
else:
# `sparse_softmax_cross_entropy_with_logits` does not have a GPU kernel
# for float64, int32 pairs
labels = tf.one_hot(
labels, depth=self.config.n_classes, axis=1, dtype=self.config.dtype)
cross_ent = tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=labels)
return tf.reduce_mean(cross_ent)
def compute_gradients(self, saved_hidden, labels, training=True, l2_reg=True):
"""Manually computes gradients.
This method silently updates the running averages of batch normalization.
Args:
      saved_hidden: List of hidden state Tensors
      labels: Integer class labels for classification
training: Use the mini-batch stats in batch norm if set to True
l2_reg: Apply l2 regularization
Returns:
A tuple with the first entry being a list of all gradients and the second
being the loss
"""
def _defunable_pop(l):
"""Functional style list pop that works with `tfe.defun`."""
t, l = l[-1], l[:-1]
return t, l
# Backprop through last block
x = saved_hidden[-1]
with tf.GradientTape() as tape:
tape.watch(x)
logits = self._final_block(x, training=training)
loss = self.compute_loss(logits, labels)
grads_combined = tape.gradient(loss,
[x] + self._final_block.trainable_variables)
dy, final_grads = grads_combined[0], grads_combined[1:]
# Backprop through intermediate blocks
intermediate_grads = []
for block in reversed(self._block_list):
y, saved_hidden = _defunable_pop(saved_hidden)
x = saved_hidden[-1]
dy, grads = block.backward_grads(x, y, dy, training=training)
intermediate_grads = grads + intermediate_grads
# Backprop through first block
_, saved_hidden = _defunable_pop(saved_hidden)
x, saved_hidden = _defunable_pop(saved_hidden)
assert not saved_hidden
with tf.GradientTape() as tape:
y = self._init_block(x, training=training)
init_grads = tape.gradient(
y, self._init_block.trainable_variables, output_gradients=dy)
    # Ordering matches up with `model.trainable_variables`
grads_all = init_grads + final_grads + intermediate_grads
if l2_reg:
grads_all = self._apply_weight_decay(grads_all)
return grads_all, loss
def _apply_weight_decay(self, grads):
"""Update gradients to reflect weight decay."""
return [
g + self.config.weight_decay * v if v.name.endswith("kernel:0") else g
for g, v in zip(grads, self.trainable_variables)
]
def get_moving_stats(self):
"""Get moving averages of batch normalization."""
device = "/gpu:0" if tf.test.is_gpu_available() else "/cpu:0"
with tf.device(device):
return [v.read_value() for v in self.moving_average_variables]
def restore_moving_stats(self, values):
"""Restore moving averages of batch normalization."""
device = "/gpu:0" if tf.test.is_gpu_available() else "/cpu:0"
with tf.device(device):
for var_, val in zip(self.moving_average_variables, values):
var_.assign(val)
@property
def moving_average_variables(self):
"""Get all variables that are batch norm moving averages."""
def _is_moving_avg(v):
n = v.name
return n.endswith("moving_mean:0") or n.endswith("moving_variance:0")
if not self._moving_average_variables:
      self._moving_average_variables = list(
          filter(_is_moving_avg, self.variables))
return self._moving_average_variables
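# Note on `_apply_weight_decay` above: adding `weight_decay * w` to the
# gradient of each kernel is exactly the gradient of an implicit
# 0.5 * weight_decay * ||w||^2 penalty on that kernel, so the manually
# computed backward pass never needs to differentiate an explicit
# regularization term in the loss.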
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/revnet/revnet.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests l2hmc fit to 2D strongly correlated Gaussian executed eagerly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy.random as npr
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.l2hmc import l2hmc
def get_default_hparams():
return tf.contrib.training.HParams(
x_dim=2,
n_samples=200,
n_steps=10,
eps=.1,
n_iters=10,
learning_rate=.0003,
n_warmup_iters=3)
def step(dynamics, optimizer, samples):
loss, grads, samples, _ = l2hmc.loss_and_grads(
dynamics, samples, loss_fn=l2hmc.compute_loss)
optimizer.apply_gradients(zip(grads, dynamics.variables))
return loss, samples
# A function wrapped with `tfe.defun` cannot return an Operation, so `step`
# above is used for defun and eager execution, while `graph_step` below is
# used in graph mode so that the returned train op can be run explicitly.
def graph_step(dynamics, optimizer, samples):
loss, grads, samples, _ = l2hmc.loss_and_grads(
dynamics, samples, loss_fn=l2hmc.compute_loss)
train_op = optimizer.apply_gradients(zip(grads, dynamics.variables))
return train_op, loss, samples
def warmup(dynamics,
optimizer,
n_iters=1,
n_samples=200,
step_fn=step):
"""Warmup optimization to reduce overhead."""
samples = tf.random_normal(
shape=[n_samples, dynamics.x_dim], dtype=tf.float32)
for _ in range(n_iters):
_, samples = step_fn(dynamics, optimizer, samples)
def fit(dynamics,
samples,
optimizer,
step_fn=step,
n_iters=5000,
verbose=True,
logdir=None):
"""Fit L2HMC sampler with given log-likelihood function."""
if logdir:
summary_writer = tf.contrib.summary.create_file_writer(logdir)
for i in range(n_iters):
loss, samples = step_fn(dynamics, optimizer, samples)
if verbose:
print("Iteration %d: loss %.4f" % (i, loss))
if logdir:
with summary_writer.as_default():
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("loss", loss)
class L2hmcTest(tf.test.TestCase):
"""Unit tests for l2hmc in both eager and graph mode."""
def test_apply_transition(self):
"""Testing function `Dynamics.apply_transition` in graph and eager mode."""
# Eager mode testing
hparams = get_default_hparams()
energy_fn, _, _ = l2hmc.get_scg_energy_fn()
dynamics = l2hmc.Dynamics(
x_dim=hparams.x_dim,
minus_loglikelihood_fn=energy_fn,
n_steps=hparams.n_steps,
eps=hparams.eps)
samples = tf.random_normal(shape=[hparams.n_samples, hparams.x_dim])
x_, v_, x_accept_prob, x_out = dynamics.apply_transition(samples)
self.assertEqual(x_.shape, v_.shape)
self.assertEqual(x_out.shape, samples.shape)
self.assertEqual(x_.shape, x_out.shape)
self.assertEqual(x_accept_prob.shape, (hparams.n_samples,))
# Graph mode testing
with tf.Graph().as_default():
energy_fn, _, _ = l2hmc.get_scg_energy_fn()
dynamics = l2hmc.Dynamics(
x_dim=hparams.x_dim,
minus_loglikelihood_fn=energy_fn,
n_steps=hparams.n_steps,
eps=hparams.eps)
x = tf.placeholder(tf.float32, shape=[None, hparams.x_dim])
x_, v_, x_accept_prob, x_out = dynamics.apply_transition(x)
samples = npr.normal(size=[hparams.n_samples, hparams.x_dim])
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
np_x_, np_v_, np_x_accept_prob, np_x_out = sess.run(
[x_, v_, x_accept_prob, x_out], feed_dict={x: samples})
self.assertEqual(np_x_.shape, np_v_.shape)
self.assertEqual(samples.shape, np_x_out.shape)
self.assertEqual(np_x_.shape, np_x_out.shape)
self.assertEqual(np_x_accept_prob.shape, (hparams.n_samples,))
class L2hmcBenchmark(tf.test.Benchmark):
"""Eager and graph benchmarks for l2hmc."""
def benchmark_graph(self):
"""Benchmark Graph performance."""
hparams = get_default_hparams()
tf.enable_resource_variables()
for sample_size in [10, 25, 50, 100, 200]:
hparams.n_samples = sample_size
tf.reset_default_graph()
with tf.Graph().as_default():
energy_fn, _, _ = l2hmc.get_scg_energy_fn()
x = tf.random_normal([hparams.n_samples, hparams.x_dim],
dtype=tf.float32)
dynamics = l2hmc.Dynamics(
x_dim=hparams.x_dim,
minus_loglikelihood_fn=energy_fn,
n_steps=hparams.n_steps,
eps=hparams.eps)
loss, _, _ = l2hmc.compute_loss(dynamics, x)
optimizer = tf.train.AdamOptimizer(learning_rate=hparams.learning_rate)
train_op, loss, _ = graph_step(dynamics, optimizer, x)
# Single thread; fairer comparison against eager
session_conf = tf.ConfigProto(inter_op_parallelism_threads=1)
with tf.Session(config=session_conf) as sess:
sess.run(tf.global_variables_initializer())
# Warmup to reduce initialization effect when timing
for _ in range(hparams.n_warmup_iters):
_, _ = sess.run([train_op, loss])
# Training
start_time = time.time()
for i in range(hparams.n_iters):
_, loss_np = sess.run([train_op, loss])
print("Iteration %d: loss %.4f" % (i, loss_np))
wall_time = (time.time() - start_time) / hparams.n_iters
examples_per_sec = hparams.n_samples / wall_time
self.report_benchmark(
name="graph_train_%s_%d" %
("gpu" if tf.test.is_gpu_available() else "cpu", sample_size),
iters=hparams.n_iters,
extras={"examples_per_sec": examples_per_sec},
wall_time=wall_time)
def benchmark_eager(self):
self._benchmark_eager()
def benchmark_eager_defun(self):
self._benchmark_eager(defun=True)
def _benchmark_eager(self, defun=False):
"""Benchmark Eager performance."""
hparams = get_default_hparams()
for sample_size in [10, 25, 50, 100, 200]:
hparams.n_samples = sample_size
energy_fn, _, _ = l2hmc.get_scg_energy_fn()
dynamics = l2hmc.Dynamics(
x_dim=hparams.x_dim,
minus_loglikelihood_fn=energy_fn,
n_steps=hparams.n_steps,
eps=hparams.eps)
optimizer = tf.train.AdamOptimizer(learning_rate=hparams.learning_rate)
step_fn = tfe.defun(step) if defun else step
# Warmup to reduce initialization effect when timing
warmup(
dynamics,
optimizer,
n_iters=hparams.n_warmup_iters,
n_samples=hparams.n_samples,
step_fn=step_fn)
# Training
samples = tf.random_normal(
shape=[hparams.n_samples, hparams.x_dim], dtype=tf.float32)
start_time = time.time()
fit(dynamics,
samples,
optimizer,
step_fn=step_fn,
n_iters=hparams.n_iters)
wall_time = (time.time() - start_time) / hparams.n_iters
examples_per_sec = hparams.n_samples / wall_time
self.report_benchmark(
name="eager_train_%s%s_%d" %
("gpu" if tf.test.is_gpu_available() else "cpu",
"_defun" if defun else "", sample_size),
iters=hparams.n_iters,
extras={"examples_per_sec": examples_per_sec},
wall_time=wall_time)
del dynamics
if __name__ == "__main__":
tf.enable_eager_execution()
tf.test.main()
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/l2hmc/l2hmc_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""L2HMC compatible with TensorFlow's eager execution.
Reference [Generalizing Hamiltonian Monte Carlo with Neural
Networks](https://arxiv.org/pdf/1711.09268.pdf)
Code adapted from the released TensorFlow graph implementation by original
authors https://github.com/brain-research/l2hmc.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import numpy.random as npr
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.l2hmc import neural_nets
class Dynamics(tf.keras.Model):
"""Dynamics engine of naive L2HMC sampler."""
def __init__(self,
x_dim,
minus_loglikelihood_fn,
n_steps=25,
eps=.1,
np_seed=1):
"""Initialization.
Args:
x_dim: dimensionality of observed data
      minus_loglikelihood_fn: negative log-likelihood function of the target
        distribution
n_steps: number of leapfrog steps within each transition
      eps: initial value of the learnable step-size scale
np_seed: Random seed for numpy; used to control sampled masks.
"""
super(Dynamics, self).__init__()
npr.seed(np_seed)
self.x_dim = x_dim
self.potential = minus_loglikelihood_fn
self.n_steps = n_steps
self._construct_time()
self._construct_masks()
self.position_fn = neural_nets.GenericNet(x_dim, factor=2.)
self.momentum_fn = neural_nets.GenericNet(x_dim, factor=1.)
self.eps = tf.Variable(
initial_value=eps, name="eps", dtype=tf.float32, trainable=True)
def apply_transition(self, position):
"""Propose a new state and perform the accept or reject step."""
# Simulate dynamics both forward and backward;
# Use sampled Bernoulli masks to compute the actual solutions
position_f, momentum_f, accept_prob_f = self.transition_kernel(
position, forward=True)
position_b, momentum_b, accept_prob_b = self.transition_kernel(
position, forward=False)
# Decide direction uniformly
batch_size = tf.shape(position)[0]
forward_mask = tf.cast(tf.random_uniform((batch_size,)) > .5, tf.float32)
backward_mask = 1. - forward_mask
# Obtain proposed states
position_post = (
forward_mask[:, None] * position_f +
backward_mask[:, None] * position_b)
momentum_post = (
forward_mask[:, None] * momentum_f +
backward_mask[:, None] * momentum_b)
# Probability of accepting the proposed states
accept_prob = forward_mask * accept_prob_f + backward_mask * accept_prob_b
# Accept or reject step
accept_mask = tf.cast(
accept_prob > tf.random_uniform(tf.shape(accept_prob)), tf.float32)
reject_mask = 1. - accept_mask
# Samples after accept/reject step
position_out = (
accept_mask[:, None] * position_post + reject_mask[:, None] * position)
return position_post, momentum_post, accept_prob, position_out
def transition_kernel(self, position, forward=True):
"""Transition kernel of augmented leapfrog integrator."""
lf_fn = self._forward_lf if forward else self._backward_lf
# Resample momentum
momentum = tf.random_normal(tf.shape(position))
position_post, momentum_post = position, momentum
sumlogdet = 0.
# Apply augmented leapfrog steps
for i in range(self.n_steps):
position_post, momentum_post, logdet = lf_fn(position_post, momentum_post,
i)
sumlogdet += logdet
accept_prob = self._compute_accept_prob(position, momentum, position_post,
momentum_post, sumlogdet)
return position_post, momentum_post, accept_prob
def _forward_lf(self, position, momentum, i):
"""One forward augmented leapfrog step. See eq (5-6) in paper."""
t = self._get_time(i)
mask, mask_inv = self._get_mask(i)
sumlogdet = 0.
momentum, logdet = self._update_momentum_forward(position, momentum, t)
sumlogdet += logdet
position, logdet = self._update_position_forward(position, momentum, t,
mask, mask_inv)
sumlogdet += logdet
position, logdet = self._update_position_forward(position, momentum, t,
mask_inv, mask)
sumlogdet += logdet
momentum, logdet = self._update_momentum_forward(position, momentum, t)
sumlogdet += logdet
return position, momentum, sumlogdet
def _backward_lf(self, position, momentum, i):
"""One backward augmented leapfrog step. See Appendix A in paper."""
# Reversed index/sinusoidal time
t = self._get_time(self.n_steps - i - 1)
mask, mask_inv = self._get_mask(self.n_steps - i - 1)
sumlogdet = 0.
momentum, logdet = self._update_momentum_backward(position, momentum, t)
sumlogdet += logdet
position, logdet = self._update_position_backward(position, momentum, t,
mask_inv, mask)
sumlogdet += logdet
position, logdet = self._update_position_backward(position, momentum, t,
mask, mask_inv)
sumlogdet += logdet
momentum, logdet = self._update_momentum_backward(position, momentum, t)
sumlogdet += logdet
return position, momentum, sumlogdet
def _update_momentum_forward(self, position, momentum, t):
"""Update v in the forward leapfrog step."""
grad = self.grad_potential(position)
scale, translation, transformed = self.momentum_fn([position, grad, t])
scale *= .5 * self.eps
transformed *= self.eps
momentum = (
momentum * tf.exp(scale) -
.5 * self.eps * (tf.exp(transformed) * grad - translation))
return momentum, tf.reduce_sum(scale, axis=1)
def _update_position_forward(self, position, momentum, t, mask, mask_inv):
"""Update x in the forward leapfrog step."""
scale, translation, transformed = self.position_fn(
[momentum, mask * position, t])
scale *= self.eps
transformed *= self.eps
position = (
mask * position +
mask_inv * (position * tf.exp(scale) + self.eps *
(tf.exp(transformed) * momentum + translation)))
return position, tf.reduce_sum(mask_inv * scale, axis=1)
def _update_momentum_backward(self, position, momentum, t):
"""Update v in the backward leapfrog step. Inverting the forward update."""
grad = self.grad_potential(position)
scale, translation, transformed = self.momentum_fn([position, grad, t])
scale *= -.5 * self.eps
transformed *= self.eps
momentum = (
tf.exp(scale) * (momentum + .5 * self.eps *
(tf.exp(transformed) * grad - translation)))
return momentum, tf.reduce_sum(scale, axis=1)
def _update_position_backward(self, position, momentum, t, mask, mask_inv):
"""Update x in the backward leapfrog step. Inverting the forward update."""
scale, translation, transformed = self.position_fn(
[momentum, mask * position, t])
scale *= -self.eps
transformed *= self.eps
position = (
mask * position + mask_inv * tf.exp(scale) *
(position - self.eps * (tf.exp(transformed) * momentum + translation)))
return position, tf.reduce_sum(mask_inv * scale, axis=1)
def _compute_accept_prob(self, position, momentum, position_post,
momentum_post, sumlogdet):
"""Compute the prob of accepting the proposed state given old state."""
old_hamil = self.hamiltonian(position, momentum)
new_hamil = self.hamiltonian(position_post, momentum_post)
prob = tf.exp(tf.minimum(old_hamil - new_hamil + sumlogdet, 0.))
# Ensure numerical stability as well as correct gradients
return tf.where(tf.is_finite(prob), prob, tf.zeros_like(prob))
def _construct_time(self):
"""Convert leapfrog step index into sinusoidal time."""
self.ts = []
for i in range(self.n_steps):
t = tf.constant(
[
np.cos(2 * np.pi * i / self.n_steps),
np.sin(2 * np.pi * i / self.n_steps)
],
dtype=tf.float32)
self.ts.append(t[None, :])
def _get_time(self, i):
"""Get sinusoidal time for i-th augmented leapfrog step."""
return self.ts[i]
def _construct_masks(self):
"""Construct different binary masks for different time steps."""
self.masks = []
for _ in range(self.n_steps):
      # Need to use npr here because tf would generate different random
      # values across different `sess.run` calls.
idx = npr.permutation(np.arange(self.x_dim))[:self.x_dim // 2]
mask = np.zeros((self.x_dim,))
mask[idx] = 1.
mask = tf.constant(mask, dtype=tf.float32)
self.masks.append(mask[None, :])
def _get_mask(self, i):
"""Get binary masks for i-th augmented leapfrog step."""
m = self.masks[i]
return m, 1. - m
def kinetic(self, v):
"""Compute the kinetic energy."""
return .5 * tf.reduce_sum(v**2, axis=1)
def hamiltonian(self, position, momentum):
"""Compute the overall Hamiltonian."""
return self.potential(position) + self.kinetic(momentum)
def grad_potential(self, position, check_numerics=True):
"""Get gradient of potential function at current location."""
if tf.executing_eagerly():
grad = tfe.gradients_function(self.potential)(position)[0]
else:
grad = tf.gradients(self.potential(position), position)[0]
return grad
# Examples of unnormalized log densities
def get_scg_energy_fn():
"""Get energy function for 2d strongly correlated Gaussian."""
# Avoid recreating tf constants on each invocation of gradients
mu = tf.constant([0., 0.])
sigma = tf.constant([[50.05, -49.95], [-49.95, 50.05]])
sigma_inv = tf.matrix_inverse(sigma)
def energy(x):
"""Unnormalized minus log density of 2d strongly correlated Gaussian."""
xmmu = x - mu
return .5 * tf.diag_part(
tf.matmul(tf.matmul(xmmu, sigma_inv), tf.transpose(xmmu)))
return energy, mu, sigma
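# Illustrative cross-check (ours, not part of the original module): the energy
# above is the quadratic form 0.5 * (x - mu)^T Sigma^{-1} (x - mu) of a
# strongly correlated 2-d Gaussian. The helper below recomputes it in plain
# NumPy for a single sample; the name `_example_check_scg_energy` is ours.
def _example_check_scg_energy():
  import numpy as np  # Local import keeps this sketch self-contained.
  mu = np.array([0., 0.])
  sigma = np.array([[50.05, -49.95], [-49.95, 50.05]])
  sigma_inv = np.linalg.inv(sigma)
  x = np.array([1.5, -0.5])  # One 2-d sample.
  xmmu = x - mu
  # 0.5 * (x - mu)^T Sigma^{-1} (x - mu), matching `energy` above.
  return .5 * float(xmmu.dot(sigma_inv).dot(xmmu))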
def get_rw_energy_fn():
"""Get energy function for rough well distribution."""
# For small eta, the density underlying the rough-well energy is very close to
# a unit Gaussian; however, the gradient is greatly affected by the small
# cosine perturbations
eta = 1e-2
mu = tf.constant([0., 0.])
sigma = tf.constant([[1., 0.], [0., 1.]])
def energy(x):
ip = tf.reduce_sum(x**2., axis=1)
return .5 * ip + eta * tf.reduce_sum(tf.cos(x / eta), axis=1)
return energy, mu, sigma
# Loss function
def compute_loss(dynamics, x, scale=.1, eps=1e-4):
"""Compute loss defined in equation (8)."""
z = tf.random_normal(tf.shape(x)) # Auxiliary variable
x_, _, x_accept_prob, x_out = dynamics.apply_transition(x)
z_, _, z_accept_prob, _ = dynamics.apply_transition(z)
  # Add eps for numerical stability, following the released implementation.
x_loss = tf.reduce_sum((x - x_)**2, axis=1) * x_accept_prob + eps
z_loss = tf.reduce_sum((z - z_)**2, axis=1) * z_accept_prob + eps
loss = tf.reduce_mean(
(1. / x_loss + 1. / z_loss) * scale - (x_loss + z_loss) / scale, axis=0)
return loss, x_out, x_accept_prob
def loss_and_grads(dynamics, x, loss_fn=compute_loss):
"""Obtain loss value and gradients."""
with tf.GradientTape() as tape:
loss_val, out, accept_prob = loss_fn(dynamics, x)
grads = tape.gradient(loss_val, dynamics.trainable_variables)
return loss_val, grads, out, accept_prob
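# Illustrative usage sketch (ours, not part of the original module): a single
# eager training step wiring `Dynamics`, `loss_and_grads` and an optimizer
# together, roughly as the accompanying main.py does. Assumes eager execution
# has been enabled; the function name and hyperparameter values are ours.
def _example_train_step():
  energy_fn, _, _ = get_scg_energy_fn()
  dynamics = Dynamics(
      x_dim=2, minus_loglikelihood_fn=energy_fn, n_steps=10, eps=.1)
  optimizer = tf.train.AdamOptimizer(1e-3)
  samples = tf.random_normal(shape=[200, 2])
  loss, grads, samples, accept_prob = loss_and_grads(dynamics, samples)
  optimizer.apply_gradients(zip(grads, dynamics.trainable_variables))
  return loss, samples, accept_prob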
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/l2hmc/l2hmc.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""L2HMC on simple Gaussian mixture model with TensorFlow eager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from absl import flags
import numpy as np
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.l2hmc import l2hmc
try:
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
tfe = tf.contrib.eager
def main(_):
tf.enable_eager_execution()
global_step = tf.train.get_or_create_global_step()
global_step.assign(1)
energy_fn, mean, covar = {
"scg": l2hmc.get_scg_energy_fn(),
"rw": l2hmc.get_rw_energy_fn()
}[FLAGS.energy_fn]
x_dim = 2
train_iters = 5000
eval_iters = 2000
eps = 0.1
n_steps = 10 # Chain length
n_samples = 200
record_loss_every = 100
dynamics = l2hmc.Dynamics(
x_dim=x_dim, minus_loglikelihood_fn=energy_fn, n_steps=n_steps, eps=eps)
learning_rate = tf.train.exponential_decay(
1e-3, global_step, 1000, 0.96, staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate)
checkpointer = tf.train.Checkpoint(
optimizer=optimizer, dynamics=dynamics, global_step=global_step)
if FLAGS.train_dir:
summary_writer = tf.contrib.summary.create_file_writer(FLAGS.train_dir)
if FLAGS.restore:
latest_path = tf.train.latest_checkpoint(FLAGS.train_dir)
checkpointer.restore(latest_path)
print("Restored latest checkpoint at path:\"{}\" ".format(latest_path))
sys.stdout.flush()
if not FLAGS.restore:
# Training
if FLAGS.use_defun:
      # Use `tfe.defun` to boost performance when there are lots of small ops
loss_fn = tfe.function(l2hmc.compute_loss)
else:
loss_fn = l2hmc.compute_loss
samples = tf.random_normal(shape=[n_samples, x_dim])
for i in range(1, train_iters + 1):
loss, samples, accept_prob = train_one_iter(
dynamics,
samples,
optimizer,
loss_fn=loss_fn,
global_step=global_step)
if i % record_loss_every == 0:
print("Iteration {}, loss {:.4f}, x_accept_prob {:.4f}".format(
i, loss.numpy(),
accept_prob.numpy().mean()))
if FLAGS.train_dir:
with summary_writer.as_default():
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("Training loss", loss, step=global_step)
print("Training complete.")
sys.stdout.flush()
if FLAGS.train_dir:
saved_path = checkpointer.save(
file_prefix=os.path.join(FLAGS.train_dir, "ckpt"))
print("Saved checkpoint at path: \"{}\" ".format(saved_path))
sys.stdout.flush()
# Evaluation
if FLAGS.use_defun:
    # Use `tfe.defun` to boost performance when there are lots of small ops
apply_transition = tfe.function(dynamics.apply_transition)
else:
apply_transition = dynamics.apply_transition
samples = tf.random_normal(shape=[n_samples, x_dim])
samples_history = []
for i in range(eval_iters):
samples_history.append(samples.numpy())
_, _, _, samples = apply_transition(samples)
samples_history = np.array(samples_history)
print("Sampling complete.")
sys.stdout.flush()
# Mean and covariance of target distribution
mean = mean.numpy()
covar = covar.numpy()
ac_spectrum = compute_ac_spectrum(samples_history, mean, covar)
print("First 25 entries of the auto-correlation spectrum: {}".format(
ac_spectrum[:25]))
ess = compute_ess(ac_spectrum)
print("Effective sample size per Metropolis-Hastings step: {}".format(ess))
sys.stdout.flush()
if FLAGS.train_dir:
# Plot autocorrelation spectrum in tensorboard
plot_step = tfe.Variable(1, trainable=False, dtype=tf.int64)
for ac in ac_spectrum:
with summary_writer.as_default():
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("Autocorrelation", ac, step=plot_step)
plot_step.assign(plot_step + n_steps)
if HAS_MATPLOTLIB:
# Choose a single chain and plot the trajectory
single_chain = samples_history[:, 0, :]
xs = single_chain[:100, 0]
ys = single_chain[:100, 1]
plt.figure()
plt.plot(xs, ys, color="orange", marker="o", alpha=0.6) # Trained chain
plt.savefig(os.path.join(FLAGS.train_dir, "single_chain.png"))
def train_one_iter(dynamics,
x,
optimizer,
loss_fn=l2hmc.compute_loss,
global_step=None):
"""Train the sampler for one iteration."""
loss, grads, out, accept_prob = l2hmc.loss_and_grads(
dynamics, x, loss_fn=loss_fn)
optimizer.apply_gradients(
zip(grads, dynamics.trainable_variables), global_step=global_step)
return loss, out, accept_prob
def compute_ac_spectrum(samples_history, target_mean, target_covar):
"""Compute autocorrelation spectrum.
Follows equation 15 from the L2HMC paper.
Args:
samples_history: Numpy array of shape [T, B, D], where T is the total
number of time steps, B is the batch size, and D is the dimensionality
      of the sample space.
    target_mean: 1D Numpy array of the mean of the target (true) distribution.
target_covar: 2D Numpy array representing a symmetric matrix for variance.
Returns:
Autocorrelation spectrum, Numpy array of shape [T-1].
"""
# Using numpy here since eager is a bit slow due to the loop
time_steps = samples_history.shape[0]
trace = np.trace(target_covar)
rhos = []
for t in range(time_steps - 1):
rho_t = 0.
for tau in range(time_steps - t):
v_tau = samples_history[tau, :, :] - target_mean
v_tau_plus_t = samples_history[tau + t, :, :] - target_mean
# Take dot product over observation dims and take mean over batch dims
rho_t += np.mean(np.sum(v_tau * v_tau_plus_t, axis=1))
rho_t /= trace * (time_steps - t)
rhos.append(rho_t)
return np.array(rhos)
def compute_ess(ac_spectrum):
"""Compute the effective sample size based on autocorrelation spectrum.
This follows equation 16 from the L2HMC paper.
Args:
ac_spectrum: Autocorrelation spectrum
Returns:
The effective sample size
"""
# Cutoff from the first value less than 0.05
cutoff = np.argmax(ac_spectrum[1:] < .05)
if cutoff == 0:
cutoff = len(ac_spectrum)
ess = 1. / (1. + 2. * np.sum(ac_spectrum[1:cutoff]))
return ess
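# Illustrative sanity check (ours, not part of the original script): for
# i.i.d. standard-normal "samples" the autocorrelation spectrum should stay
# near zero for t > 0 and the effective sample size per step should be close
# to 1. The function name and the toy shapes below are ours.
def _example_diagnostics_on_iid_samples():
  rng = np.random.RandomState(0)
  samples_history = rng.randn(200, 8, 2)  # [T, B, D] i.i.d. draws.
  ac_spectrum = compute_ac_spectrum(samples_history, np.zeros(2), np.eye(2))
  ess = compute_ess(ac_spectrum)
  return ac_spectrum[:5], ess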
if __name__ == "__main__":
flags.DEFINE_string(
"train_dir",
default=None,
help="[Optional] Directory to store the training information")
flags.DEFINE_boolean(
"restore",
default=False,
help="[Optional] Restore the latest checkpoint from `train_dir` if True")
flags.DEFINE_boolean(
"use_defun",
default=False,
help="[Optional] Use `tfe.defun` to boost performance")
flags.DEFINE_string(
"energy_fn",
default="scg",
help="[Optional] The energy function used for experimentation"
"Other options include `rw`")
FLAGS = flags.FLAGS
tf.app.run(main)
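# Example invocations (ours, for illustration; the paths are placeholders):
#   python main.py --train_dir=/tmp/l2hmc --energy_fn=scg --use_defun
#   python main.py --train_dir=/tmp/l2hmc --restore   # Skip training, evaluate.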
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/l2hmc/main.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Neural nets utility for L2HMC compatible with TensorFlow's eager execution.
Reference [Generalizing Hamiltonian Monte Carlo with Neural
Networks](https://arxiv.org/pdf/1711.09268.pdf)
Code adapted from the released TensorFlow graph implementation by original
authors https://github.com/brain-research/l2hmc.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class GenericNet(tf.keras.Model):
"""Generic neural net with different initialization scale based on input.
Args:
x_dim: dimensionality of observed data
factor: factor of variance scaling initializer
n_hidden: number of hidden units
"""
def __init__(self, x_dim, factor, n_hidden=10):
super(GenericNet, self).__init__()
self.v_layer = _custom_dense(n_hidden, 1. / 3.)
self.x_layer = _custom_dense(n_hidden, factor / 3.)
self.t_layer = _custom_dense(n_hidden, 1. / 3.)
self.h_layer = _custom_dense(n_hidden)
# Scale
self.scale_layer = _custom_dense(x_dim, .001)
self.coeff_scale = tf.Variable(
initial_value=tf.zeros([1, x_dim]), name='coeff_scale', trainable=True)
# Translation
self.translation_layer = _custom_dense(x_dim, factor=.001)
# Transformation
self.transformation_layer = _custom_dense(x_dim, .001)
self.coeff_transformation = tf.Variable(
initial_value=tf.zeros([1, x_dim]),
name='coeff_transformation',
trainable=True)
def call(self, inputs):
v, x, t = inputs
h = self.v_layer(v) + self.x_layer(x) + self.t_layer(t)
h = tf.nn.relu(h)
h = self.h_layer(h)
h = tf.nn.relu(h)
scale = tf.nn.tanh(self.scale_layer(h)) * tf.exp(self.coeff_scale)
translation = self.translation_layer(h)
transformation = (
tf.nn.tanh(self.transformation_layer(h)) * tf.exp(
self.coeff_transformation))
return scale, translation, transformation
def _custom_dense(units, factor=1.):
"""Custom dense layer with specified weight initialization."""
return tf.keras.layers.Dense(
units=units,
use_bias=True,
kernel_initializer=tf.contrib.layers.variance_scaling_initializer(
factor=factor * 2., mode='FAN_IN', uniform=False),
bias_initializer=tf.constant_initializer(0., dtype=tf.float32))
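# Illustrative usage sketch (ours, not part of the original module): one
# forward pass through `GenericNet`. Inputs are the momentum-like tensor v,
# the (masked) position x and the 2-d sinusoidal time encoding t; each of the
# three outputs has shape [batch_size, x_dim]. The function name and the toy
# sizes below are ours.
def _example_generic_net_forward():
  x_dim, batch_size = 2, 4
  net = GenericNet(x_dim=x_dim, factor=1.)
  v = tf.random_normal([batch_size, x_dim])
  x = tf.random_normal([batch_size, x_dim])
  t = tf.random_normal([batch_size, 2])
  scale, translation, transformation = net([v, x, t])
  return scale.shape, translation.shape, transformation.shape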
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/l2hmc/neural_nets.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""TensorFlow Eager Execution Example: Linear Regression.
This example shows how to use TensorFlow Eager Execution to fit a simple linear
regression model using some synthesized data. Specifically, it illustrates how
to define the forward path of the linear model and the loss function, as well
as how to obtain the gradients of the loss function with respect to the
variables and update the variables with the gradients.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tensorflow as tf
import tensorflow.contrib.eager as tfe
layers = tf.keras.layers
class LinearModel(tf.keras.Model):
"""A TensorFlow linear regression model."""
def __init__(self):
"""Constructs a LinearModel object."""
super(LinearModel, self).__init__()
self._hidden_layer = layers.Dense(1)
def call(self, xs):
"""Invoke the linear model.
Args:
xs: input features, as a tensor of size [batch_size, ndims].
Returns:
      ys: the predictions of the linear model, as a tensor of size [batch_size].
"""
return self._hidden_layer(xs)
def mean_square_loss(model, xs, ys):
return tf.reduce_mean(tf.squared_difference(model(xs), ys))
def fit(model, dataset, optimizer, verbose=False, logdir=None):
"""Fit the linear-regression model.
Args:
model: The LinearModel to fit.
dataset: The tf.data.Dataset to use for training data.
optimizer: The TensorFlow Optimizer object to be used.
verbose: If true, will print out loss values at every iteration.
logdir: The directory in which summaries will be written for TensorBoard
(optional).
"""
# The loss function to optimize.
mse = lambda xs, ys: mean_square_loss(model, xs, ys)
loss_and_grads = tfe.implicit_value_and_gradients(mse)
if logdir:
# Support for TensorBoard summaries. Once training has started, use:
# tensorboard --logdir=<logdir>
summary_writer = tf.contrib.summary.create_file_writer(logdir)
# Training loop.
for i, (xs, ys) in enumerate(tfe.Iterator(dataset)):
loss, grads = loss_and_grads(xs, ys)
if verbose:
print("Iteration %d: loss = %s" % (i, loss.numpy()))
optimizer.apply_gradients(grads)
if logdir:
with summary_writer.as_default():
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("loss", loss, step=i)
tf.contrib.summary.scalar("step", i, step=i)
def synthetic_dataset(w, b, noise_level, batch_size, num_batches):
"""tf.data.Dataset that yields synthetic data for linear regression."""
return synthetic_dataset_helper(w, b,
tf.shape(w)[0], noise_level, batch_size,
num_batches)
def synthetic_dataset_helper(w, b, num_features, noise_level, batch_size,
num_batches):
# w is a matrix with shape [N, M]
# b is a vector with shape [M]
# So:
  # - Generate x's as vectors with shape [batch_size, N]
# - y = tf.matmul(x, W) + b + noise
def batch(_):
x = tf.random_normal([batch_size, num_features])
y = tf.matmul(x, w) + b + noise_level * tf.random_normal([])
return x, y
with tf.device("/device:CPU:0"):
return tf.data.Dataset.range(num_batches).map(batch)
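# Illustrative sketch (ours, not part of the original example): inspect one
# batch of the synthetic data under eager execution. With w of shape [3, 1]
# and b of shape [1], each batch yields xs of shape [batch_size, 3] and ys of
# shape [batch_size, 1]. The function name and the toy sizes are ours; assumes
# eager execution has been enabled.
def _example_inspect_synthetic_batch():
  w = tf.constant([[-2.0], [4.0], [1.0]])
  b = tf.constant([0.5])
  dataset = synthetic_dataset(w, b, noise_level=0.01, batch_size=4,
                              num_batches=1)
  for xs, ys in tfe.Iterator(dataset):
    return xs.shape, ys.shape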
def main(_):
tf.enable_eager_execution()
# Ground-truth constants.
true_w = [[-2.0], [4.0], [1.0]]
true_b = [0.5]
noise_level = 0.01
# Training constants.
batch_size = 64
learning_rate = 0.1
print("True w: %s" % true_w)
print("True b: %s\n" % true_b)
model = LinearModel()
dataset = synthetic_dataset(true_w, true_b, noise_level, batch_size, 20)
device = "gpu:0" if tfe.num_gpus() else "cpu:0"
print("Using device: %s" % device)
with tf.device(device):
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
fit(model, dataset, optimizer, verbose=True, logdir=FLAGS.logdir)
print("\nAfter training: w = %s" % model.variables[0].numpy())
print("\nAfter training: b = %s" % model.variables[1].numpy())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--logdir",
type=str,
default=None,
help="logdir in which TensorBoard summaries will be written (optional).")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/linear_regression/linear_regression.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graph benchmark for linear regression, to contrast with eager execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.linear_regression import linear_regression
class GraphLinearRegressionBenchmark(tf.test.Benchmark):
def benchmarkGraphLinearRegression(self):
num_epochs = 10
num_batches = 200
batch_size = 64
dataset = linear_regression.synthetic_dataset_helper(
w=tf.random_uniform([3, 1]),
b=tf.random_uniform([1]),
num_features=3,
noise_level=0.01,
batch_size=batch_size,
num_batches=num_batches)
iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
x, y = iterator.get_next()
model = linear_regression.LinearModel()
if tf.test.is_gpu_available():
use_gpu = True
device = "/device:GPU:0"
else:
use_gpu = False
device = "/device:CPU:0"
with tf.device(device):
loss = linear_regression.mean_square_loss(model, x, y)
optimization_step = tf.train.GradientDescentOptimizer(
learning_rate=0.1).minimize(loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
def train(num_epochs):
for _ in range(num_epochs):
sess.run(iterator.initializer)
try:
while True:
_, _ = sess.run([optimization_step, loss])
except tf.errors.OutOfRangeError:
pass
# Warmup: a single epoch.
train(1)
start_time = time.time()
train(num_epochs)
wall_time = time.time() - start_time
examples_per_sec = num_epochs * num_batches * batch_size / wall_time
self.report_benchmark(
name="graph_train_%s" %
("gpu" if use_gpu else "cpu"),
iters=num_epochs * num_batches,
extras={"examples_per_sec": examples_per_sec},
wall_time=wall_time)
if __name__ == "__main__":
tf.test.main()
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/linear_regression/linear_regression_graph_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for linear regression example under TensorFlow eager execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import shutil
import tempfile
import time
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.linear_regression import linear_regression
def device():
return "/device:GPU:0" if tfe.num_gpus() > 0 else "/device:CPU:0"
class LinearRegressionTest(tf.test.TestCase):
def setUp(self):
super(LinearRegressionTest, self).setUp()
self._tmp_logdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self._tmp_logdir)
super(LinearRegressionTest, self).tearDown()
def testSyntheticDataset(self):
true_w = tf.random_uniform([3, 1])
true_b = [1.0]
batch_size = 10
num_batches = 2
noise_level = 0.
dataset = linear_regression.synthetic_dataset(true_w, true_b, noise_level,
batch_size, num_batches)
it = tfe.Iterator(dataset)
for _ in range(2):
(xs, ys) = it.next()
self.assertEqual((batch_size, 3), xs.shape)
self.assertEqual((batch_size, 1), ys.shape)
self.assertEqual(tf.float32, xs.dtype)
self.assertEqual(tf.float32, ys.dtype)
with self.assertRaises(StopIteration):
it.next()
def testLinearRegression(self):
true_w = [[1.0], [-0.5], [2.0]]
true_b = [1.0]
model = linear_regression.LinearModel()
dataset = linear_regression.synthetic_dataset(
true_w, true_b, noise_level=0., batch_size=64, num_batches=40)
with tf.device(device()):
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
linear_regression.fit(model, dataset, optimizer, logdir=self._tmp_logdir)
self.assertAllClose(true_w, model.variables[0].numpy(), rtol=1e-2)
self.assertAllClose(true_b, model.variables[1].numpy(), rtol=1e-2)
self.assertTrue(glob.glob(os.path.join(self._tmp_logdir, "events.out.*")))
class EagerLinearRegressionBenchmark(tf.test.Benchmark):
def benchmarkEagerLinearRegression(self):
num_epochs = 10
num_batches = 200
batch_size = 64
dataset = linear_regression.synthetic_dataset(
w=tf.random_uniform([3, 1]),
b=tf.random_uniform([1]),
noise_level=0.01,
batch_size=batch_size,
num_batches=num_batches)
burn_in_dataset = dataset.take(10)
model = linear_regression.LinearModel()
with tf.device(device()):
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
# Perform burn-in.
linear_regression.fit(model, burn_in_dataset, optimizer)
start_time = time.time()
for _ in range(num_epochs):
linear_regression.fit(model, dataset, optimizer)
wall_time = time.time() - start_time
examples_per_sec = num_epochs * num_batches * batch_size / wall_time
self.report_benchmark(
name="eager_train_%s" %
("gpu" if tfe.num_gpus() > 0 else "cpu"),
iters=num_epochs * num_batches,
extras={"examples_per_sec": examples_per_sec},
wall_time=wall_time)
if __name__ == "__main__":
tf.enable_eager_execution()
tf.test.main()
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/linear_regression/linear_regression_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gc
import glob
import os
import shutil
import tempfile
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# pylint: disable=g-bad-import-order
from tensorflow.contrib.eager.python.examples.spinn import data
from third_party.examples.eager.spinn import spinn
from tensorflow.contrib.summary import summary_test_util
from tensorflow.python.eager import test
from tensorflow.python.framework import test_util
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training.tracking import util as trackable_utils
# pylint: enable=g-bad-import-order
def _generate_synthetic_snli_data_batch(sequence_length,
batch_size,
vocab_size):
"""Generate a fake batch of SNLI data for testing."""
with tf.device("cpu:0"):
labels = tf.random_uniform([batch_size], minval=1, maxval=4, dtype=tf.int64)
prem = tf.random_uniform(
(sequence_length, batch_size), maxval=vocab_size, dtype=tf.int64)
prem_trans = tf.constant(np.array(
[[3, 3, 2, 3, 3, 3, 2, 2, 2, 3, 3, 3,
2, 3, 3, 2, 2, 3, 3, 3, 2, 2, 2, 2,
3, 2, 2]] * batch_size, dtype=np.int64).T)
hypo = tf.random_uniform(
(sequence_length, batch_size), maxval=vocab_size, dtype=tf.int64)
hypo_trans = tf.constant(np.array(
[[3, 3, 2, 3, 3, 3, 2, 2, 2, 3, 3, 3,
2, 3, 3, 2, 2, 3, 3, 3, 2, 2, 2, 2,
3, 2, 2]] * batch_size, dtype=np.int64).T)
if test_util.is_gpu_available():
labels = labels.gpu()
prem = prem.gpu()
prem_trans = prem_trans.gpu()
hypo = hypo.gpu()
hypo_trans = hypo_trans.gpu()
return labels, prem, prem_trans, hypo, hypo_trans
def _test_spinn_config(d_embed, d_out, logdir=None, inference_sentences=None):
"""Generate a config tuple for testing.
Args:
d_embed: Embedding dimensions.
d_out: Model output dimensions.
logdir: Optional logdir.
inference_sentences: A 2-tuple of strings representing the sentences (with
binary parsing result), e.g.,
("( ( The dog ) ( ( is running ) . ) )", "( ( The dog ) ( moves . ) )").
Returns:
A config tuple.
"""
config_tuple = collections.namedtuple(
"Config", ["d_hidden", "d_proj", "d_tracker", "predict",
"embed_dropout", "mlp_dropout", "n_mlp_layers", "d_mlp",
"d_out", "projection", "lr", "batch_size", "epochs",
"force_cpu", "logdir", "log_every", "dev_every", "save_every",
"lr_decay_every", "lr_decay_by", "inference_premise",
"inference_hypothesis"])
inference_premise = inference_sentences[0] if inference_sentences else None
inference_hypothesis = inference_sentences[1] if inference_sentences else None
return config_tuple(
d_hidden=d_embed,
d_proj=d_embed * 2,
d_tracker=8,
predict=False,
embed_dropout=0.1,
mlp_dropout=0.1,
n_mlp_layers=2,
d_mlp=32,
d_out=d_out,
projection=True,
lr=2e-2,
batch_size=2,
epochs=20,
force_cpu=False,
logdir=logdir,
log_every=1,
dev_every=2,
save_every=2,
lr_decay_every=1,
lr_decay_by=0.75,
inference_premise=inference_premise,
inference_hypothesis=inference_hypothesis)
class SpinnTest(test_util.TensorFlowTestCase):
def setUp(self):
super(SpinnTest, self).setUp()
self._test_device = "gpu:0" if test_util.is_gpu_available() else "cpu:0"
self._temp_data_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self._temp_data_dir)
super(SpinnTest, self).tearDown()
def testBundle(self):
with tf.device(self._test_device):
lstm_iter = [np.array([[0, 1], [2, 3]], dtype=np.float32),
np.array([[0, -1], [-2, -3]], dtype=np.float32),
np.array([[0, 2], [4, 6]], dtype=np.float32),
np.array([[0, -2], [-4, -6]], dtype=np.float32)]
out = spinn._bundle(lstm_iter)
self.assertEqual(2, len(out))
self.assertEqual(tf.float32, out[0].dtype)
self.assertEqual(tf.float32, out[1].dtype)
self.assertAllEqual(np.array([[0, 2, 0, -2, 0, 4, 0, -4]]).T,
out[0].numpy())
self.assertAllEqual(np.array([[1, 3, -1, -3, 2, 6, -2, -6]]).T,
out[1].numpy())
  def testUnbundle(self):
with tf.device(self._test_device):
state = [np.array([[0, 1, 2], [3, 4, 5]], dtype=np.float32),
np.array([[0, -1, -2], [-3, -4, -5]], dtype=np.float32)]
out = spinn._unbundle(state)
self.assertEqual(2, len(out))
self.assertEqual(tf.float32, out[0].dtype)
self.assertEqual(tf.float32, out[1].dtype)
self.assertAllEqual(np.array([[0, 1, 2, 0, -1, -2]]),
out[0].numpy())
self.assertAllEqual(np.array([[3, 4, 5, -3, -4, -5]]),
out[1].numpy())
def testReducer(self):
with tf.device(self._test_device):
batch_size = 3
size = 10
tracker_size = 8
reducer = spinn.Reducer(size, tracker_size=tracker_size)
left_in = []
right_in = []
tracking = []
for _ in range(batch_size):
left_in.append(tf.random_normal((1, size * 2)))
right_in.append(tf.random_normal((1, size * 2)))
tracking.append(tf.random_normal((1, tracker_size * 2)))
out = reducer(left_in, right_in, tracking=tracking)
self.assertEqual(batch_size, len(out))
self.assertEqual(tf.float32, out[0].dtype)
self.assertEqual((1, size * 2), out[0].shape)
def testReduceTreeLSTM(self):
with tf.device(self._test_device):
size = 10
tracker_size = 8
reducer = spinn.Reducer(size, tracker_size=tracker_size)
lstm_in = np.array([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[0, -1, -2, -3, -4, -5, -6, -7, -8, -9]],
dtype=np.float32)
c1 = np.array([[0, 1], [2, 3]], dtype=np.float32)
c2 = np.array([[0, -1], [-2, -3]], dtype=np.float32)
h, c = reducer._tree_lstm(c1, c2, lstm_in)
self.assertEqual(tf.float32, h.dtype)
self.assertEqual(tf.float32, c.dtype)
self.assertEqual((2, 2), h.shape)
self.assertEqual((2, 2), c.shape)
def testTracker(self):
with tf.device(self._test_device):
batch_size = 2
size = 10
tracker_size = 8
buffer_length = 18
stack_size = 3
tracker = spinn.Tracker(tracker_size, False)
tracker.reset_state()
# Create dummy inputs for testing.
bufs = []
buf = []
for _ in range(buffer_length):
buf.append(tf.random_normal((batch_size, size * 2)))
bufs.append(buf)
self.assertEqual(1, len(bufs))
self.assertEqual(buffer_length, len(bufs[0]))
self.assertEqual((batch_size, size * 2), bufs[0][0].shape)
stacks = []
stack = []
for _ in range(stack_size):
stack.append(tf.random_normal((batch_size, size * 2)))
stacks.append(stack)
self.assertEqual(1, len(stacks))
self.assertEqual(3, len(stacks[0]))
self.assertEqual((batch_size, size * 2), stacks[0][0].shape)
for _ in range(2):
out1, out2 = tracker(bufs, stacks)
self.assertIsNone(out2)
self.assertEqual(batch_size, len(out1))
self.assertEqual(tf.float32, out1[0].dtype)
self.assertEqual((1, tracker_size * 2), out1[0].shape)
self.assertEqual(tf.float32, tracker.state.c.dtype)
self.assertEqual((batch_size, tracker_size), tracker.state.c.shape)
self.assertEqual(tf.float32, tracker.state.h.dtype)
self.assertEqual((batch_size, tracker_size), tracker.state.h.shape)
def testSPINN(self):
with tf.device(self._test_device):
embedding_dims = 10
d_tracker = 8
sequence_length = 15
num_transitions = 27
config_tuple = collections.namedtuple(
"Config", ["d_hidden", "d_proj", "d_tracker", "predict"])
config = config_tuple(
embedding_dims, embedding_dims * 2, d_tracker, False)
s = spinn.SPINN(config)
# Create some fake data.
buffers = tf.random_normal((sequence_length, 1, config.d_proj))
transitions = tf.constant(
[[3], [3], [2], [3], [3], [3], [2], [2], [2], [3], [3], [3],
[2], [3], [3], [2], [2], [3], [3], [3], [2], [2], [2], [2],
[3], [2], [2]], dtype=tf.int64)
self.assertEqual(tf.int64, transitions.dtype)
self.assertEqual((num_transitions, 1), transitions.shape)
out = s(buffers, transitions, training=True)
self.assertEqual(tf.float32, out.dtype)
self.assertEqual((1, embedding_dims), out.shape)
def testSNLIClassifierAndTrainer(self):
with tf.device(self._test_device):
vocab_size = 40
batch_size = 2
d_embed = 10
sequence_length = 15
d_out = 4
config = _test_spinn_config(d_embed, d_out)
# Create fake embedding matrix.
embed = tf.random_normal((vocab_size, d_embed))
model = spinn.SNLIClassifier(config, embed)
trainer = spinn.SNLIClassifierTrainer(model, config.lr)
(labels, prem, prem_trans, hypo,
hypo_trans) = _generate_synthetic_snli_data_batch(sequence_length,
batch_size,
vocab_size)
# Invoke model under non-training mode.
logits = model(prem, prem_trans, hypo, hypo_trans, training=False)
self.assertEqual(tf.float32, logits.dtype)
self.assertEqual((batch_size, d_out), logits.shape)
      # Invoke model under training mode.
logits = model(prem, prem_trans, hypo, hypo_trans, training=True)
self.assertEqual(tf.float32, logits.dtype)
self.assertEqual((batch_size, d_out), logits.shape)
# Calculate loss.
loss1 = trainer.loss(labels, logits)
self.assertEqual(tf.float32, loss1.dtype)
self.assertEqual((), loss1.shape)
loss2, logits = trainer.train_batch(
labels, prem, prem_trans, hypo, hypo_trans)
self.assertEqual(tf.float32, loss2.dtype)
self.assertEqual((), loss2.shape)
self.assertEqual(tf.float32, logits.dtype)
self.assertEqual((batch_size, d_out), logits.shape)
# Training on the batch should have led to a change in the loss value.
self.assertNotEqual(loss1.numpy(), loss2.numpy())
def _create_test_data(self, snli_1_0_dir):
fake_train_file = os.path.join(snli_1_0_dir, "snli_1.0_train.txt")
os.makedirs(snli_1_0_dir)
# Four sentences in total.
with open(fake_train_file, "wt") as f:
f.write("gold_label\tsentence1_binary_parse\tsentence2_binary_parse\t"
"sentence1_parse\tsentence2_parse\tsentence1\tsentence2\t"
"captionID\tpairID\tlabel1\tlabel2\tlabel3\tlabel4\tlabel5\n")
f.write("neutral\t( ( Foo bar ) . )\t( ( foo . )\t"
"DummySentence1Parse\tDummySentence2Parse\t"
"Foo bar.\tfoo baz.\t"
"4705552913.jpg#2\t4705552913.jpg#2r1n\t"
"neutral\tentailment\tneutral\tneutral\tneutral\n")
f.write("contradiction\t( ( Bar foo ) . )\t( ( baz . )\t"
"DummySentence1Parse\tDummySentence2Parse\t"
"Foo bar.\tfoo baz.\t"
"4705552913.jpg#2\t4705552913.jpg#2r1n\t"
"neutral\tentailment\tneutral\tneutral\tneutral\n")
f.write("entailment\t( ( Quux quuz ) . )\t( ( grault . )\t"
"DummySentence1Parse\tDummySentence2Parse\t"
"Foo bar.\tfoo baz.\t"
"4705552913.jpg#2\t4705552913.jpg#2r1n\t"
"neutral\tentailment\tneutral\tneutral\tneutral\n")
f.write("entailment\t( ( Quuz quux ) . )\t( ( garply . )\t"
"DummySentence1Parse\tDummySentence2Parse\t"
"Foo bar.\tfoo baz.\t"
"4705552913.jpg#2\t4705552913.jpg#2r1n\t"
"neutral\tentailment\tneutral\tneutral\tneutral\n")
glove_dir = os.path.join(self._temp_data_dir, "glove")
os.makedirs(glove_dir)
glove_file = os.path.join(glove_dir, "glove.42B.300d.txt")
words = [".", "foo", "bar", "baz", "quux", "quuz", "grault", "garply"]
with open(glove_file, "wt") as f:
for i, word in enumerate(words):
f.write("%s " % word)
for j in range(data.WORD_VECTOR_LEN):
f.write("%.5f" % (i * 0.1))
if j < data.WORD_VECTOR_LEN - 1:
f.write(" ")
else:
f.write("\n")
return fake_train_file
def testInferSpinnWorks(self):
"""Test inference with the spinn model."""
snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0")
self._create_test_data(snli_1_0_dir)
vocab = data.load_vocabulary(self._temp_data_dir)
word2index, embed = data.load_word_vectors(self._temp_data_dir, vocab)
config = _test_spinn_config(
data.WORD_VECTOR_LEN, 4,
logdir=os.path.join(self._temp_data_dir, "logdir"),
inference_sentences=("( foo ( bar . ) )", "( bar ( foo . ) )"))
logits = spinn.train_or_infer_spinn(
embed, word2index, None, None, None, config)
self.assertEqual(tf.float32, logits.dtype)
self.assertEqual((3,), logits.shape)
def testInferSpinnThrowsErrorIfOnlyOneSentenceIsSpecified(self):
snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0")
self._create_test_data(snli_1_0_dir)
vocab = data.load_vocabulary(self._temp_data_dir)
word2index, embed = data.load_word_vectors(self._temp_data_dir, vocab)
config = _test_spinn_config(
data.WORD_VECTOR_LEN, 4,
logdir=os.path.join(self._temp_data_dir, "logdir"),
inference_sentences=("( foo ( bar . ) )", None))
with self.assertRaises(ValueError):
spinn.train_or_infer_spinn(embed, word2index, None, None, None, config)
def testTrainSpinn(self):
"""Test with fake toy SNLI data and GloVe vectors."""
# 1. Create and load a fake SNLI data file and a fake GloVe embedding file.
snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0")
fake_train_file = self._create_test_data(snli_1_0_dir)
vocab = data.load_vocabulary(self._temp_data_dir)
word2index, embed = data.load_word_vectors(self._temp_data_dir, vocab)
train_data = data.SnliData(fake_train_file, word2index)
dev_data = data.SnliData(fake_train_file, word2index)
test_data = data.SnliData(fake_train_file, word2index)
# 2. Create a fake config.
config = _test_spinn_config(
data.WORD_VECTOR_LEN, 4,
logdir=os.path.join(self._temp_data_dir, "logdir"))
# 3. Test training of a SPINN model.
trainer = spinn.train_or_infer_spinn(
embed, word2index, train_data, dev_data, test_data, config)
# 4. Load train loss values from the summary files and verify that they
# decrease with training.
summary_file = glob.glob(os.path.join(config.logdir, "events.out.*"))[0]
events = summary_test_util.events_from_file(summary_file)
train_losses = [event.summary.value[0].simple_value for event in events
if event.summary.value
and event.summary.value[0].tag == "train/loss"]
self.assertEqual(config.epochs, len(train_losses))
    # 5. Verify that checkpoints exist and contain all the expected variables.
self.assertTrue(glob.glob(os.path.join(config.logdir, "ckpt*")))
object_graph = trackable_utils.object_metadata(
checkpoint_management.latest_checkpoint(config.logdir))
ckpt_variable_names = set()
for node in object_graph.nodes:
for attribute in node.attributes:
ckpt_variable_names.add(attribute.full_name)
self.assertIn("global_step", ckpt_variable_names)
for v in trainer.variables:
variable_name = v.name[:v.name.index(":")] if ":" in v.name else v.name
self.assertIn(variable_name, ckpt_variable_names)
class EagerSpinnSNLIClassifierBenchmark(test.Benchmark):
def benchmarkEagerSpinnSNLIClassifier(self):
test_device = "gpu:0" if test_util.is_gpu_available() else "cpu:0"
with tf.device(test_device):
burn_in_iterations = 2
benchmark_iterations = 10
vocab_size = 1000
batch_size = 128
sequence_length = 15
d_embed = 200
d_out = 4
embed = tf.random_normal((vocab_size, d_embed))
config = _test_spinn_config(d_embed, d_out)
model = spinn.SNLIClassifier(config, embed)
trainer = spinn.SNLIClassifierTrainer(model, config.lr)
(labels, prem, prem_trans, hypo,
hypo_trans) = _generate_synthetic_snli_data_batch(sequence_length,
batch_size,
vocab_size)
for _ in range(burn_in_iterations):
trainer.train_batch(labels, prem, prem_trans, hypo, hypo_trans)
gc.collect()
start_time = time.time()
for _ in xrange(benchmark_iterations):
trainer.train_batch(labels, prem, prem_trans, hypo, hypo_trans)
wall_time = time.time() - start_time
# Named "examples"_per_sec to conform with other benchmarks.
extras = {"examples_per_sec": benchmark_iterations / wall_time}
self.report_benchmark(
name="Eager_SPINN_SNLIClassifier_Benchmark",
iters=benchmark_iterations,
wall_time=wall_time,
extras=extras)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/spinn/spinn_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for SPINN data module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.spinn import data
class DataTest(tf.test.TestCase):
def setUp(self):
super(DataTest, self).setUp()
self._temp_data_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self._temp_data_dir)
super(DataTest, self).tearDown()
def testGenNonParenthesisWords(self):
seq_with_parse = (
"( Man ( ( ( ( ( wearing pass ) ( on ( a lanyard ) ) ) and "
") ( standing ( in ( ( a crowd ) ( of people ) ) ) ) ) . ) )")
self.assertEqual(
["man", "wearing", "pass", "on", "a", "lanyard", "and", "standing",
"in", "a", "crowd", "of", "people", "."],
data.get_non_parenthesis_words(seq_with_parse.split(" ")))
def testGetShiftReduce(self):
seq_with_parse = (
"( Man ( ( ( ( ( wearing pass ) ( on ( a lanyard ) ) ) and "
") ( standing ( in ( ( a crowd ) ( of people ) ) ) ) ) . ) )")
self.assertEqual(
[3, 3, 3, 2, 3, 3, 3, 2, 2, 2, 3, 2, 3, 3, 3, 3, 2, 3, 3, 2, 2, 2, 2, 2,
3, 2, 2], data.get_shift_reduce(seq_with_parse.split(" ")))
def testPadAndReverseWordIds(self):
id_sequences = [[0, 2, 3, 4, 5],
[6, 7, 8],
[9, 10, 11, 12, 13, 14, 15, 16]]
self.assertAllClose(
[[1, 1, 1, 1, 5, 4, 3, 2, 0],
[1, 1, 1, 1, 1, 1, 8, 7, 6],
[1, 16, 15, 14, 13, 12, 11, 10, 9]],
data.pad_and_reverse_word_ids(id_sequences))
def testPadTransitions(self):
unpadded = [[3, 3, 3, 2, 2, 2, 2],
[3, 3, 2, 2, 2]]
self.assertAllClose(
[[3, 3, 3, 2, 2, 2, 2],
[3, 3, 2, 2, 2, 1, 1]],
data.pad_transitions(unpadded))
def testCalculateBins(self):
length2count = {
1: 10,
2: 15,
3: 25,
4: 40,
5: 35,
6: 10}
self.assertEqual([2, 3, 4, 5, 6],
data.calculate_bins(length2count, 20))
self.assertEqual([3, 4, 6], data.calculate_bins(length2count, 40))
self.assertEqual([4, 6], data.calculate_bins(length2count, 60))
  def testLoadVocabulary(self):
snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0")
fake_train_file = os.path.join(snli_1_0_dir, "snli_1.0_train.txt")
fake_dev_file = os.path.join(snli_1_0_dir, "snli_1.0_dev.txt")
os.makedirs(snli_1_0_dir)
with open(fake_train_file, "wt") as f:
f.write("gold_label\tsentence1_binary_parse\tsentence2_binary_parse\t"
"sentence1_parse\tsentence2_parse\tsentence1\tsentence2\t"
"captionID\tpairID\tlabel1\tlabel2\tlabel3\tlabel4\tlabel5\n")
f.write("neutral\t( ( Foo bar ) . )\t( ( foo baz ) . )\t"
"DummySentence1Parse\tDummySentence2Parse\t"
"Foo bar.\tfoo baz.\t"
"4705552913.jpg#2\t4705552913.jpg#2r1n\t"
"neutral\tentailment\tneutral\tneutral\tneutral\n")
with open(fake_dev_file, "wt") as f:
f.write("gold_label\tsentence1_binary_parse\tsentence2_binary_parse\t"
"sentence1_parse\tsentence2_parse\tsentence1\tsentence2\t"
"captionID\tpairID\tlabel1\tlabel2\tlabel3\tlabel4\tlabel5\n")
f.write("neutral\t( ( Quux quuz ) ? )\t( ( Corge grault ) ! )\t"
"DummySentence1Parse\tDummySentence2Parse\t"
"Quux quuz?\t.Corge grault!\t"
"4705552913.jpg#2\t4705552913.jpg#2r1n\t"
"neutral\tentailment\tneutral\tneutral\tneutral\n")
vocab = data.load_vocabulary(self._temp_data_dir)
self.assertSetEqual(
{".", "?", "!", "foo", "bar", "baz", "quux", "quuz", "corge", "grault"},
vocab)
  def testLoadVocabularyWithoutFileRaisesError(self):
with self.assertRaisesRegexp(ValueError, "Cannot find SNLI data files at"):
data.load_vocabulary(self._temp_data_dir)
os.makedirs(os.path.join(self._temp_data_dir, "snli"))
with self.assertRaisesRegexp(ValueError, "Cannot find SNLI data files at"):
data.load_vocabulary(self._temp_data_dir)
os.makedirs(os.path.join(self._temp_data_dir, "snli/snli_1.0"))
with self.assertRaisesRegexp(ValueError, "Cannot find SNLI data files at"):
data.load_vocabulary(self._temp_data_dir)
def testLoadWordVectors(self):
glove_dir = os.path.join(self._temp_data_dir, "glove")
os.makedirs(glove_dir)
glove_file = os.path.join(glove_dir, "glove.42B.300d.txt")
words = [".", ",", "foo", "bar", "baz"]
with open(glove_file, "wt") as f:
for i, word in enumerate(words):
f.write("%s " % word)
for j in range(data.WORD_VECTOR_LEN):
f.write("%.5f" % (i * 0.1))
if j < data.WORD_VECTOR_LEN - 1:
f.write(" ")
else:
f.write("\n")
vocab = {"foo", "bar", "baz", "qux", "."}
# Notice that "qux" is not present in `words`.
word2index, embed = data.load_word_vectors(self._temp_data_dir, vocab)
self.assertEqual(6, len(word2index))
self.assertEqual(0, word2index["<unk>"])
self.assertEqual(1, word2index["<pad>"])
self.assertEqual(2, word2index["."])
self.assertEqual(3, word2index["foo"])
self.assertEqual(4, word2index["bar"])
self.assertEqual(5, word2index["baz"])
self.assertEqual((6, data.WORD_VECTOR_LEN), embed.shape)
self.assertAllClose([0.0] * data.WORD_VECTOR_LEN, embed[0, :])
self.assertAllClose([0.0] * data.WORD_VECTOR_LEN, embed[1, :])
self.assertAllClose([0.0] * data.WORD_VECTOR_LEN, embed[2, :])
self.assertAllClose([0.2] * data.WORD_VECTOR_LEN, embed[3, :])
self.assertAllClose([0.3] * data.WORD_VECTOR_LEN, embed[4, :])
self.assertAllClose([0.4] * data.WORD_VECTOR_LEN, embed[5, :])
def testLoadWordVectorsWithoutFileRaisesError(self):
vocab = {"foo", "bar", "baz", "qux", "."}
with self.assertRaisesRegexp(
ValueError, "Cannot find GloVe embedding file at"):
data.load_word_vectors(self._temp_data_dir, vocab)
os.makedirs(os.path.join(self._temp_data_dir, "glove"))
with self.assertRaisesRegexp(
ValueError, "Cannot find GloVe embedding file at"):
data.load_word_vectors(self._temp_data_dir, vocab)
def _createFakeSnliData(self, fake_snli_file):
# Four sentences in total.
with open(fake_snli_file, "wt") as f:
f.write("gold_label\tsentence1_binary_parse\tsentence2_binary_parse\t"
"sentence1_parse\tsentence2_parse\tsentence1\tsentence2\t"
"captionID\tpairID\tlabel1\tlabel2\tlabel3\tlabel4\tlabel5\n")
f.write("neutral\t( ( Foo bar ) . )\t( ( foo . )\t"
"DummySentence1Parse\tDummySentence2Parse\t"
"Foo bar.\tfoo baz.\t"
"4705552913.jpg#2\t4705552913.jpg#2r1n\t"
"neutral\tentailment\tneutral\tneutral\tneutral\n")
f.write("contradiction\t( ( Bar foo ) . )\t( ( baz . )\t"
"DummySentence1Parse\tDummySentence2Parse\t"
"Foo bar.\tfoo baz.\t"
"4705552913.jpg#2\t4705552913.jpg#2r1n\t"
"neutral\tentailment\tneutral\tneutral\tneutral\n")
f.write("entailment\t( ( Quux quuz ) . )\t( ( grault . )\t"
"DummySentence1Parse\tDummySentence2Parse\t"
"Foo bar.\tfoo baz.\t"
"4705552913.jpg#2\t4705552913.jpg#2r1n\t"
"neutral\tentailment\tneutral\tneutral\tneutral\n")
f.write("entailment\t( ( Quuz quux ) . )\t( ( garply . )\t"
"DummySentence1Parse\tDummySentence2Parse\t"
"Foo bar.\tfoo baz.\t"
"4705552913.jpg#2\t4705552913.jpg#2r1n\t"
"neutral\tentailment\tneutral\tneutral\tneutral\n")
def _createFakeGloveData(self, glove_file):
words = [".", "foo", "bar", "baz", "quux", "quuz", "grault", "garply"]
with open(glove_file, "wt") as f:
for i, word in enumerate(words):
f.write("%s " % word)
for j in range(data.WORD_VECTOR_LEN):
f.write("%.5f" % (i * 0.1))
if j < data.WORD_VECTOR_LEN - 1:
f.write(" ")
else:
f.write("\n")
def testEncodeSingleSentence(self):
snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0")
fake_train_file = os.path.join(snli_1_0_dir, "snli_1.0_train.txt")
os.makedirs(snli_1_0_dir)
self._createFakeSnliData(fake_train_file)
vocab = data.load_vocabulary(self._temp_data_dir)
glove_dir = os.path.join(self._temp_data_dir, "glove")
os.makedirs(glove_dir)
glove_file = os.path.join(glove_dir, "glove.42B.300d.txt")
self._createFakeGloveData(glove_file)
word2index, _ = data.load_word_vectors(self._temp_data_dir, vocab)
sentence_variants = [
"( Foo ( ( bar baz ) . ) )",
" ( Foo ( ( bar baz ) . ) ) ",
"( Foo ( ( bar baz ) . ) )"]
for sentence in sentence_variants:
word_indices, shift_reduce = data.encode_sentence(sentence, word2index)
self.assertEqual(np.int64, word_indices.dtype)
self.assertEqual((5, 1), word_indices.shape)
self.assertAllClose(
np.array([[3, 3, 3, 2, 3, 2, 2]], dtype=np.int64).T, shift_reduce)
def testSnliData(self):
snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0")
fake_train_file = os.path.join(snli_1_0_dir, "snli_1.0_train.txt")
os.makedirs(snli_1_0_dir)
self._createFakeSnliData(fake_train_file)
glove_dir = os.path.join(self._temp_data_dir, "glove")
os.makedirs(glove_dir)
glove_file = os.path.join(glove_dir, "glove.42B.300d.txt")
self._createFakeGloveData(glove_file)
vocab = data.load_vocabulary(self._temp_data_dir)
word2index, _ = data.load_word_vectors(self._temp_data_dir, vocab)
train_data = data.SnliData(fake_train_file, word2index)
self.assertEqual(4, train_data.num_batches(1))
self.assertEqual(2, train_data.num_batches(2))
self.assertEqual(2, train_data.num_batches(3))
self.assertEqual(1, train_data.num_batches(4))
generator = train_data.get_generator(2)()
for _ in range(2):
label, prem, prem_trans, hypo, hypo_trans = next(generator)
self.assertEqual(2, len(label))
self.assertEqual((4, 2), prem.shape)
self.assertEqual((5, 2), prem_trans.shape)
self.assertEqual((3, 2), hypo.shape)
self.assertEqual((3, 2), hypo_trans.shape)
if __name__ == "__main__":
tf.test.main()
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/spinn/data_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities of SNLI data and GloVe word vectors for SPINN model.
See more details about the SNLI data set at:
https://nlp.stanford.edu/projects/snli/
See more details about the GloVe pretrained word embeddings at:
https://nlp.stanford.edu/projects/glove/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import math
import os
import random
import numpy as np
POSSIBLE_LABELS = ("entailment", "contradiction", "neutral")
UNK_CODE = 0 # Code for unknown word tokens.
PAD_CODE = 1 # Code for padding tokens.
SHIFT_CODE = 3
REDUCE_CODE = 2
WORD_VECTOR_LEN = 300 # Embedding dimensions.
LEFT_PAREN = "("
RIGHT_PAREN = ")"
PARENTHESES = (LEFT_PAREN, RIGHT_PAREN)
def get_non_parenthesis_words(items):
"""Get the non-parenthesis items from a SNLI parsed sentence.
Args:
items: Data items from a parsed SNLI sentence, with parentheses. E.g.,
["(", "Man", "(", "(", "(", "(", "(", "wearing", "pass", ")", ...
Returns:
A list of non-parentheses word items, all converted to lower case. E.g.,
["man", "wearing", "pass", ...
"""
return [x.lower() for x in items if x not in PARENTHESES and x]
def get_shift_reduce(items):
"""Obtain shift-reduce vector from a list of items from the SNLI data.
Args:
items: Data items as a list of str, e.g.,
["(", "Man", "(", "(", "(", "(", "(", "wearing", "pass", ")", ...
Returns:
A list of shift-reduce transitions, encoded as `SHIFT_CODE` for shift and
`REDUCE_CODE` for reduce. See code above for the values of `SHIFT_CODE`
and `REDUCE_CODE`.
"""
trans = []
for item in items:
if item == LEFT_PAREN:
continue
elif item == RIGHT_PAREN:
trans.append(REDUCE_CODE)
else:
trans.append(SHIFT_CODE)
return trans
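# Worked example (ours, for illustration): non-parenthesis tokens map to
# SHIFT_CODE (3) and every right parenthesis maps to REDUCE_CODE (2), so a
# binary parse over N words yields 2 * N - 1 transitions.
def _example_shift_reduce():
  items = "( ( The dog ) ( moves . ) )".split(" ")
  transitions = get_shift_reduce(items)
  assert transitions == [3, 3, 2, 3, 3, 2, 2]  # 4 words -> 7 transitions.
  return transitions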
def pad_and_reverse_word_ids(sentences):
"""Pad a list of sentences to the common maximum length + 1.
Args:
sentences: A list of sentences as a list of list of integers. Each integer
is a word ID. Each list of integer corresponds to one sentence.
Returns:
A numpy.ndarray of shape (num_sentences, max_length + 1), wherein max_length
    is the maximum sentence length (in # of words). Each sentence is reversed
    and then prepended with an extra 1, as required by the model.
"""
max_len = max(len(sent) for sent in sentences)
for sent in sentences:
if len(sent) < max_len:
sent.extend([PAD_CODE] * (max_len - len(sent)))
# Reverse in time order and pad an extra one.
sentences = np.fliplr(np.array(sentences, dtype=np.int64))
sentences = np.concatenate(
[np.ones([sentences.shape[0], 1], dtype=np.int64), sentences], axis=1)
return sentences
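# Worked example (ours, for illustration), mirroring the docstring above:
# shorter sentences are padded with PAD_CODE (1), every sentence is reversed
# in time, and a leading 1 is prepended to each row.
def _example_pad_and_reverse_word_ids():
  padded = pad_and_reverse_word_ids([[0, 2, 3, 4, 5], [6, 7, 8]])
  # padded == [[1, 5, 4, 3, 2, 0],
  #            [1, 1, 1, 8, 7, 6]]
  return padded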
def pad_transitions(sentences_transitions):
"""Pad a list of shift-reduce transitions to the maximum length."""
max_len = max(len(transitions) for transitions in sentences_transitions)
for transitions in sentences_transitions:
if len(transitions) < max_len:
transitions.extend([PAD_CODE] * (max_len - len(transitions)))
return np.array(sentences_transitions, dtype=np.int64)
def load_vocabulary(data_root):
"""Load vocabulary from SNLI data files.
Args:
data_root: Root directory of the data. It is assumed that the SNLI data
files have been downloaded and extracted to the "snli/snli_1.0"
subdirectory of it.
Returns:
Vocabulary as a set of strings.
Raises:
ValueError: If SNLI data files cannot be found.
"""
snli_path = os.path.join(data_root, "snli")
snli_glob_pattern = os.path.join(snli_path, "snli_1.0/snli_1.0_*.txt")
file_names = glob.glob(snli_glob_pattern)
if not file_names:
raise ValueError(
"Cannot find SNLI data files at %s. "
"Please download and extract SNLI data first." % snli_glob_pattern)
print("Loading vocabulary...")
vocab = set()
for file_name in file_names:
with open(os.path.join(snli_path, file_name), "rt") as f:
for i, line in enumerate(f):
if i == 0:
continue
items = line.split("\t")
premise_words = get_non_parenthesis_words(items[1].split(" "))
hypothesis_words = get_non_parenthesis_words(items[2].split(" "))
vocab.update(premise_words)
vocab.update(hypothesis_words)
return vocab
def load_word_vectors(data_root, vocab):
"""Load GloVe word vectors for words present in the vocabulary.
Args:
data_root: Data root directory. It is assumed that the GloVe file
has been downloaded and extracted at the "glove/" subdirectory of it.
vocab: A `set` of words, representing the vocabulary.
Returns:
1. word2index: A dict from lower-case word to row index in the embedding
matrix, i.e, `embed` below.
2. embed: The embedding matrix as a float32 numpy array. Its shape is
[vocabulary_size, WORD_VECTOR_LEN]. vocabulary_size is len(vocab).
WORD_VECTOR_LEN is the embedding dimension (300).
Raises:
ValueError: If GloVe embedding file cannot be found.
"""
glove_path = os.path.join(data_root, "glove/glove.42B.300d.txt")
if not os.path.isfile(glove_path):
raise ValueError(
"Cannot find GloVe embedding file at %s. "
"Please download and extract GloVe embeddings first." % glove_path)
print("Loading word vectors...")
word2index = {}
embed = []
embed.append([0] * WORD_VECTOR_LEN) # <unk>
embed.append([0] * WORD_VECTOR_LEN) # <pad>
word2index["<unk>"] = UNK_CODE
word2index["<pad>"] = PAD_CODE
with open(glove_path, "rt") as f:
for line in f:
items = line.split(" ")
word = items[0]
if word in vocab and word not in word2index:
word2index[word] = len(embed)
vector = np.array([float(item) for item in items[1:]])
assert (WORD_VECTOR_LEN,) == vector.shape
embed.append(vector)
embed = np.array(embed, dtype=np.float32)
return word2index, embed
def calculate_bins(length2count, min_bin_size):
"""Calculate bin boundaries given a histogram of lengths and minimum bin size.
Args:
length2count: A `dict` mapping length to sentence count.
min_bin_size: Minimum bin size in terms of total number of sentence pairs
in the bin.
Returns:
A `list` representing the right bin boundaries, starting from the inclusive
right boundary of the first bin. For example, if the output is
[10, 20, 35],
it means there are three bins: [1, 10], [11, 20] and [21, 35].
"""
bounds = []
lengths = sorted(length2count.keys())
cum_count = 0
for length in lengths:
cum_count += length2count[length]
if cum_count >= min_bin_size:
bounds.append(length)
cum_count = 0
if bounds[-1] != lengths[-1]:
bounds.append(lengths[-1])
return bounds
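# Worked example (ours, for illustration): with the length histogram below and
# a minimum bin size of 40 pairs, the cumulative counts cross 40 at lengths 3
# (10 + 15 + 25), 4 (40) and 6 (35 + 10), so the right bin boundaries are
# [3, 4, 6]. This matches the unit test in data_test.py.
def _example_calculate_bins():
  length2count = {1: 10, 2: 15, 3: 25, 4: 40, 5: 35, 6: 10}
  assert calculate_bins(length2count, 40) == [3, 4, 6]
  return calculate_bins(length2count, 40)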
def encode_sentence(sentence, word2index):
"""Encode a single sentence as word indices and shift-reduce code.
Args:
sentence: The sentence with added binary parse information, represented as
a string, with all the word items and parentheses separated by spaces.
E.g., '( ( The dog ) ( ( is ( playing toys ) ) . ) )'.
word2index: A `dict` mapping words to their word indices.
Returns:
1. Word indices as a numpy array, with shape `(sequence_len, 1)`.
2. Shift-reduce sequence as a numpy array, with shape
`(sequence_len * 2 - 3, 1)`.
"""
items = [w for w in sentence.split(" ") if w]
words = get_non_parenthesis_words(items)
shift_reduce = get_shift_reduce(items)
word_indices = pad_and_reverse_word_ids(
[[word2index.get(word, UNK_CODE) for word in words]]).T
return (word_indices,
np.expand_dims(np.array(shift_reduce, dtype=np.int64), -1))
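# Illustrative usage sketch for encode_sentence(), with a toy, hypothetical
# vocabulary; out-of-vocabulary words fall back to UNK_CODE.
def _demo_encode_sentence():
  """Encodes a 3-word binary parse and prints the resulting arrays."""
  word2index = {"<unk>": UNK_CODE, "<pad>": PAD_CODE, "the": 2, "dog": 3}
  word_indices, transitions = encode_sentence("( the ( dog barks ) )",
                                              word2index)
  # A binary parse over 3 words yields 2 * 3 - 1 = 5 shift-reduce transitions.
  print(word_indices.ravel(), transitions.shape)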
class SnliData(object):
"""A split of SNLI data."""
def __init__(self, data_file, word2index, sentence_len_limit=-1):
"""SnliData constructor.
Args:
data_file: Full path to the data file, e.g.,
"/tmp/spinn-data/snli/snli_1.0/snli_1.0.train.txt"
word2index: A dict from lower-case word to row index in the embedding
matrix (see `load_word_vectors()` for details).
sentence_len_limit: Maximum allowed sentence length (# of words).
A value of <= 0 means unlimited. Sentences longer than this limit
are currently discarded, not truncated.
"""
self._labels = []
self._premises = []
self._premise_transitions = []
self._hypotheses = []
self._hypothesis_transitions = []
with open(data_file, "rt") as f:
for i, line in enumerate(f):
if i == 0:
# Skip header line.
continue
items = line.split("\t")
if items[0] not in POSSIBLE_LABELS:
continue
premise_items = items[1].split(" ")
hypothesis_items = items[2].split(" ")
premise_words = get_non_parenthesis_words(premise_items)
hypothesis_words = get_non_parenthesis_words(hypothesis_items)
if (sentence_len_limit > 0 and
(len(premise_words) > sentence_len_limit or
len(hypothesis_words) > sentence_len_limit)):
# TODO(cais): Maybe truncate; do not discard.
continue
premise_ids = [
word2index.get(word, UNK_CODE) for word in premise_words]
hypothesis_ids = [
word2index.get(word, UNK_CODE) for word in hypothesis_words]
self._premises.append(premise_ids)
self._hypotheses.append(hypothesis_ids)
self._premise_transitions.append(get_shift_reduce(premise_items))
self._hypothesis_transitions.append(get_shift_reduce(hypothesis_items))
assert (len(self._premise_transitions[-1]) ==
2 * len(premise_words) - 1)
assert (len(self._hypothesis_transitions[-1]) ==
2 * len(hypothesis_words) - 1)
self._labels.append(POSSIBLE_LABELS.index(items[0]) + 1)
assert len(self._labels) == len(self._premises)
assert len(self._labels) == len(self._hypotheses)
assert len(self._labels) == len(self._premise_transitions)
assert len(self._labels) == len(self._hypothesis_transitions)
def num_batches(self, batch_size):
"""Calculate number of batches given batch size."""
return int(math.ceil(len(self._labels) / batch_size))
def get_generator(self, batch_size):
"""Obtain a generator for batched data.
All examples of this SnliData object are randomly shuffled, sorted
according to the maximum sentence length of the premise and hypothesis
sentences in the pair, and batched.
Args:
batch_size: Desired batch size.
Returns:
A generator for data batches. The generator yields a 5-tuple:
label: An array of the shape (batch_size,).
premise: An array of the shape (max_premise_len, batch_size), wherein
max_premise_len is the maximum length of the (padded) premise
sentence in the batch.
premise_transitions: An array of the shape (2 * max_premise_len - 3,
batch_size).
hypothesis: Same as `premise`, but for hypothesis sentences.
hypothesis_transitions: Same as `premise_transitions`, but for
hypothesis sentences.
All the elements of the 5-tuple have dtype `int64`.
"""
# Randomly shuffle examples.
zipped = list(zip(
self._labels, self._premises, self._premise_transitions,
self._hypotheses, self._hypothesis_transitions))
random.shuffle(zipped)
# Then sort the examples by maximum of the premise and hypothesis sentence
# lengths in the pair. During training, the batches are expected to be
# shuffled. So it is okay to leave them sorted by max length here.
(labels, premises, premise_transitions, hypotheses,
hypothesis_transitions) = zip(
*sorted(zipped, key=lambda x: max(len(x[1]), len(x[3]))))
def _generator():
begin = 0
while begin < len(labels):
# The sorting above and the batching here makes sure that sentences of
# similar max lengths are batched together, minimizing the inefficiency
# due to uneven max lengths. The sentences are batched differently in
# each call to get_generator() due to the shuffling before sorting
# above. The pad_and_reverse_word_ids() and pad_transitions() functions
# take care of any remaining unevenness of the max sentence lengths.
end = min(begin + batch_size, len(labels))
# Transpose, because the SPINN model requires time-major, instead of
# batch-major.
yield (labels[begin:end],
pad_and_reverse_word_ids(premises[begin:end]).T,
pad_transitions(premise_transitions[begin:end]).T,
pad_and_reverse_word_ids(hypotheses[begin:end]).T,
pad_transitions(hypothesis_transitions[begin:end]).T)
begin = end
return _generator
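# Illustrative usage sketch for SnliData: `word2index` is assumed to come from
# load_word_vectors(), and `data_file` points to an already-extracted SNLI
# split, e.g. ".../snli_1.0/snli_1.0_dev.txt" (hypothetical path).
def _demo_snli_batches(word2index, data_file):
  """Prints the shapes of the first few batches of an SNLI split."""
  data = SnliData(data_file, word2index)
  generator = data.get_generator(batch_size=32)()
  for i, (labels, premise, premise_trans, _, _) in enumerate(generator):
    print(len(labels), premise.shape, premise_trans.shape)
    if i == 2:
      break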
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/spinn/data.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.rnn_colorbot import rnn_colorbot
from tensorflow.python.framework import test_util
LABEL_DIMENSION = 5
def random_dataset():
batch_size = 64
time_steps = 10
alphabet = 50
chars = tf.one_hot(
tf.random_uniform(
[batch_size, time_steps], minval=0, maxval=alphabet, dtype=tf.int32),
alphabet)
sequence_length = tf.constant(
[time_steps for _ in range(batch_size)], dtype=tf.int64)
labels = tf.random_normal([batch_size, LABEL_DIMENSION])
return tf.data.Dataset.from_tensors((labels, chars, sequence_length))
class RNNColorbotTest(tf.test.TestCase):
def testTrainOneEpoch(self):
model = rnn_colorbot.RNNColorbot(
rnn_cell_sizes=[256, 128, 64],
label_dimension=LABEL_DIMENSION,
keep_prob=1.0)
optimizer = tf.train.AdamOptimizer(learning_rate=.01)
dataset = random_dataset()
with test_util.use_gpu():
rnn_colorbot.train_one_epoch(model, optimizer, dataset)
def testTest(self):
model = rnn_colorbot.RNNColorbot(
rnn_cell_sizes=[256],
label_dimension=LABEL_DIMENSION,
keep_prob=1.0)
dataset = random_dataset()
with test_util.use_gpu():
rnn_colorbot.test(model, dataset)
if __name__ == "__main__":
tf.enable_eager_execution()
tf.test.main()
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/rnn_colorbot/rnn_colorbot_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""TensorFlow Eager Execution Example: RNN Colorbot.
This example builds, trains, and evaluates a multi-layer RNN that can be
run with eager execution enabled. The RNN is trained to map color names to
their RGB values: it takes as input a one-hot encoded character sequence and
outputs a three-tuple (R, G, B) (scaled by 1/255).
For example, say we'd like the RNN Colorbot to generate the RGB values for the
color white. To represent our query in a form that the Colorbot could
understand, we would create a sequence of five 256-long vectors encoding the
ASCII values of the characters in "white". The first vector in our sequence
would be 0 everywhere except for the ord("w")-th position, where it would be
1, the second vector would be 0 everywhere except for the
ord("h")-th position, where it would be 1, and similarly for the remaining three
vectors. We refer to such indicator vectors as "one-hot encodings" of
characters. After consuming these vectors, a well-trained Colorbot would output
the three-tuple (1, 1, 1), since the RGB values for white are (255, 255, 255).
We are of course free to ask the colorbot to generate colors for any string we'd
like, such as "steel gray," "tensorflow orange," or "green apple," though
your mileage may vary as your queries increase in creativity.
This example shows how to:
1. read, process, (one-hot) encode, and pad text data via the
Datasets API;
2. build a trainable model;
3. implement a multi-layer RNN using Python control flow
constructs (e.g., a for loop);
4. train a model using an iterative gradient-based method.
The data used in this example is licensed under the Creative Commons
Attribution-ShareAlike License and is available at
https://en.wikipedia.org/wiki/List_of_colors:_A-F
https://en.wikipedia.org/wiki/List_of_colors:_G-M
https://en.wikipedia.org/wiki/List_of_colors:_N-Z
This example was adapted from
https://github.com/random-forests/tensorflow-workshop/tree/master/extras/colorbot
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import functools
import os
import sys
import time
import urllib
import six
import tensorflow as tf
from tensorflow.contrib.eager.python import tfe
try:
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
layers = tf.keras.layers
def parse(line):
"""Parse a line from the colors dataset."""
# Each line of the dataset is comma-separated and formatted as
# color_name, r, g, b
# so `items` is a list [color_name, r, g, b].
items = tf.string_split([line], ",").values
rgb = tf.string_to_number(items[1:], out_type=tf.float32) / 255.
# Represent the color name as a one-hot encoded character sequence.
color_name = items[0]
chars = tf.one_hot(tf.decode_raw(color_name, tf.uint8), depth=256)
# The sequence length is needed by our RNN.
length = tf.cast(tf.shape(chars)[0], dtype=tf.int64)
return rgb, chars, length
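# Illustrative usage sketch for parse(): the CSV line below is hypothetical and
# eager execution is assumed to be enabled.
def _demo_parse():
  """Parses one dataset line and reports the resulting tensor shapes."""
  rgb, chars, length = parse("cherry red,222,49,99")
  # rgb is a 3-vector scaled to [0, 1]; chars has one 256-wide one-hot row per
  # character of the color name.
  print(rgb.numpy(), chars.shape, int(length))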
def maybe_download(filename, work_directory, source_url):
"""Download the data from source url, unless it's already here.
Args:
filename: string, name of the file in the directory.
work_directory: string, path to working directory.
source_url: url to download from if file doesn't exist.
Returns:
Path to resulting file.
"""
if not tf.gfile.Exists(work_directory):
tf.gfile.MakeDirs(work_directory)
filepath = os.path.join(work_directory, filename)
if not tf.gfile.Exists(filepath):
temp_file_name, _ = urllib.request.urlretrieve(source_url)
tf.gfile.Copy(temp_file_name, filepath)
with tf.gfile.GFile(filepath) as f:
size = f.size()
print("Successfully downloaded", filename, size, "bytes.")
return filepath
def load_dataset(data_dir, url, batch_size):
"""Loads the colors data at path into a PaddedDataset."""
# Downloads data at url into data_dir/basename(url). The dataset has a header
# row (color_name, r, g, b) followed by comma-separated lines.
path = maybe_download(os.path.basename(url), data_dir, url)
# This chain of commands loads our data by:
# 1. skipping the header; (.skip(1))
# 2. parsing the subsequent lines; (.map(parse))
# 3. shuffling the data; (.shuffle(...))
# 4. grouping the data into padded batches (.padded_batch(...)).
dataset = tf.data.TextLineDataset(path).skip(1).map(parse).shuffle(
buffer_size=10000).padded_batch(
batch_size, padded_shapes=([None], [None, None], []))
return dataset
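# Illustrative usage sketch for load_dataset(): assumes eager execution is
# enabled; the directory is hypothetical, and `url` could be, for example, the
# SOURCE_TRAIN_URL constant defined further below.
def _demo_load_dataset(url):
  """Prints the shapes of one padded (rgb, chars, length) batch."""
  dataset = load_dataset(
      data_dir="/tmp/rnn_colorbot/data", url=url, batch_size=4)
  for labels, chars, lengths in tfe.Iterator(dataset):
    print(labels.shape, chars.shape, lengths.shape)
    break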
# pylint: disable=not-callable
class RNNColorbot(tf.keras.Model):
"""Multi-layer (LSTM) RNN that regresses on real-valued vector labels.
"""
def __init__(self, rnn_cell_sizes, label_dimension, keep_prob):
"""Constructs an RNNColorbot.
Args:
rnn_cell_sizes: list of integers denoting the size of each LSTM cell in
the RNN; rnn_cell_sizes[i] is the size of the i-th layer cell
label_dimension: the length of the labels on which to regress
keep_prob: (1 - dropout probability); dropout is applied to the outputs of
each LSTM layer
"""
super(RNNColorbot, self).__init__(name="")
self.label_dimension = label_dimension
self.keep_prob = keep_prob
self.cells = tf.contrib.checkpoint.List(
[tf.nn.rnn_cell.BasicLSTMCell(size) for size in rnn_cell_sizes])
self.relu = layers.Dense(
label_dimension, activation=tf.nn.relu, name="relu")
def call(self, inputs, training=False):
"""Implements the RNN logic and prediction generation.
Args:
inputs: A tuple (chars, sequence_length), where chars is a batch of
one-hot encoded color names represented as a Tensor with dimensions
[batch_size, time_steps, 256] and sequence_length holds the length
of each character sequence (color name) as a Tensor with dimension
[batch_size].
training: whether the invocation is happening during training
Returns:
A tensor of dimension [batch_size, label_dimension] that is produced by
passing chars through a multi-layer RNN and applying a ReLU to the final
hidden state.
"""
(chars, sequence_length) = inputs
# Transpose the first and second dimensions so that chars is of shape
# [time_steps, batch_size, dimension].
chars = tf.transpose(chars, [1, 0, 2])
# The outer loop cycles through the layers of the RNN; the inner loop
# executes the time steps for a particular layer.
batch_size = int(chars.shape[1])
for l in range(len(self.cells)):
cell = self.cells[l]
outputs = []
state = cell.zero_state(batch_size, tf.float32)
# Unstack the inputs to obtain a list of batches, one for each time step.
chars = tf.unstack(chars, axis=0)
for ch in chars:
output, state = cell(ch, state)
outputs.append(output)
# The outputs of this layer are the inputs of the subsequent layer.
chars = tf.stack(outputs, axis=0)
if training:
chars = tf.nn.dropout(chars, self.keep_prob)
# Extract the correct output (i.e., hidden state) for each example. All the
# character sequences in this batch were padded to the same fixed length so
# that they could be easily fed through the above RNN loop. The
# `sequence_length` vector tells us the true lengths of the character
# sequences, letting us obtain for each sequence the hidden state that was
# generated by its non-padding characters.
batch_range = [i for i in range(batch_size)]
indices = tf.stack([sequence_length - 1, batch_range], axis=1)
hidden_states = tf.gather_nd(chars, indices)
return self.relu(hidden_states)
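# Illustrative usage sketch for RNNColorbot: a forward pass on random one-hot
# input, assuming eager execution is enabled. The cell sizes are arbitrary.
def _demo_rnn_colorbot_forward():
  """Runs a batch of random character sequences through the model."""
  model = RNNColorbot(
      rnn_cell_sizes=[64, 32], label_dimension=3, keep_prob=1.0)
  chars = tf.one_hot(
      tf.random_uniform([8, 12], minval=0, maxval=256, dtype=tf.int32), 256)
  lengths = tf.constant([12] * 8, dtype=tf.int64)
  predictions = model((chars, lengths), training=False)
  print(predictions.shape)  # (8, 3)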
def loss(labels, predictions):
"""Computes mean squared loss."""
return tf.reduce_mean(tf.squared_difference(predictions, labels))
def test(model, eval_data):
"""Computes the average loss on eval_data, which should be a Dataset."""
avg_loss = tfe.metrics.Mean("loss")
for (labels, chars, sequence_length) in tfe.Iterator(eval_data):
predictions = model((chars, sequence_length), training=False)
avg_loss(loss(labels, predictions))
print("eval/loss: %.6f\n" % avg_loss.result())
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("loss", avg_loss.result())
def train_one_epoch(model, optimizer, train_data, log_interval=10):
"""Trains model on train_data using optimizer."""
tf.train.get_or_create_global_step()
def model_loss(labels, chars, sequence_length):
predictions = model((chars, sequence_length), training=True)
loss_value = loss(labels, predictions)
tf.contrib.summary.scalar("loss", loss_value)
return loss_value
for (batch, (labels, chars, sequence_length)) in enumerate(
tfe.Iterator(train_data)):
with tf.contrib.summary.record_summaries_every_n_global_steps(log_interval):
batch_model_loss = functools.partial(model_loss, labels, chars,
sequence_length)
optimizer.minimize(
batch_model_loss, global_step=tf.train.get_global_step())
if log_interval and batch % log_interval == 0:
print("train/batch #%d\tloss: %.6f" % (batch, batch_model_loss()))
SOURCE_TRAIN_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/train.csv"
SOURCE_TEST_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/test.csv"
def main(_):
data_dir = os.path.join(FLAGS.dir, "data")
train_data = load_dataset(
data_dir=data_dir, url=SOURCE_TRAIN_URL, batch_size=FLAGS.batch_size)
eval_data = load_dataset(
data_dir=data_dir, url=SOURCE_TEST_URL, batch_size=FLAGS.batch_size)
model = RNNColorbot(
rnn_cell_sizes=FLAGS.rnn_cell_sizes,
label_dimension=3,
keep_prob=FLAGS.keep_probability)
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
if FLAGS.no_gpu or tfe.num_gpus() <= 0:
print("No GPUs available (found %d); using CPU." % tfe.num_gpus())
device = "/cpu:0"
else:
device = "/gpu:0"
print("Using device %s." % device)
log_dir = os.path.join(FLAGS.dir, "summaries")
tf.gfile.MakeDirs(log_dir)
train_summary_writer = tf.contrib.summary.create_file_writer(
os.path.join(log_dir, "train"), flush_millis=10000)
test_summary_writer = tf.contrib.summary.create_file_writer(
os.path.join(log_dir, "eval"), flush_millis=10000, name="eval")
with tf.device(device):
for epoch in range(FLAGS.num_epochs):
start = time.time()
with train_summary_writer.as_default():
train_one_epoch(model, optimizer, train_data, FLAGS.log_interval)
end = time.time()
print("train/time for epoch #%d: %.2f" % (epoch, end - start))
with test_summary_writer.as_default():
test(model, eval_data)
print("Colorbot is ready to generate colors!")
while True:
try:
color_name = six.moves.input(
"Give me a color name (or press enter to exit): ")
except EOFError:
return
if not color_name:
return
_, chars, length = parse(color_name)
with tf.device(device):
(chars, length) = (tf.identity(chars), tf.identity(length))
chars = tf.expand_dims(chars, 0)
length = tf.expand_dims(length, 0)
preds = tf.unstack(model((chars, length), training=False)[0])
# Predictions cannot be negative, as they are generated by a ReLU layer;
# they may, however, be greater than 1.
clipped_preds = tuple(min(float(p), 1.0) for p in preds)
rgb = tuple(int(p * 255) for p in clipped_preds)
print("rgb:", rgb)
data = [[clipped_preds]]
if HAS_MATPLOTLIB:
plt.imshow(data)
plt.title(color_name)
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--dir",
type=str,
default="/tmp/rnn_colorbot/",
help="Directory to download data files and save logs.")
parser.add_argument(
"--log_interval",
type=int,
default=10,
metavar="N",
help="Log training loss every log_interval batches.")
parser.add_argument(
"--num_epochs", type=int, default=20, help="Number of epochs to train.")
parser.add_argument(
"--rnn_cell_sizes",
type=int,
nargs="+",
default=[256, 128],
help="List of sizes for each layer of the RNN.")
parser.add_argument(
"--batch_size",
type=int,
default=64,
help="Batch size for training and eval.")
parser.add_argument(
"--keep_probability",
type=float,
default=0.5,
help="Keep probability for dropout between layers.")
parser.add_argument(
"--learning_rate",
type=float,
default=0.01,
help="Learning rate to be used during training.")
parser.add_argument(
"--no_gpu",
action="store_true",
default=False,
help="Disables GPU usage even if a GPU is available.")
FLAGS, unparsed = parser.parse_known_args()
tfe.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/rnn_colorbot/rnn_colorbot.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for PTBModel with eager execution enabled."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib.eager.python import tfe
from tensorflow.contrib.eager.python.examples.rnn_ptb import rnn_ptb
def device():
return "/device:GPU:0" if tfe.num_gpus() else "/device:CPU:0"
class PTBTest(tf.test.TestCase):
def testTrain(self):
model = rnn_ptb.test_model(tfe.num_gpus() > 0)
sequence_length = 35
data = np.ones([4 * sequence_length, 20], dtype=np.int64)
with tf.device(device()):
optimizer = tf.train.GradientDescentOptimizer(1.0)
# Train two epochs
rnn_ptb.train(model, optimizer, data, sequence_length, 0.25)
rnn_ptb.train(model, optimizer, data, sequence_length, 0.25)
def testApply(self):
model = rnn_ptb.test_model(tfe.num_gpus() > 0)
with tf.device(device()):
model(tf.ones([35, 20], dtype=tf.int64), training=False)
def force_gpu_sync():
if tfe.num_gpus():
tf.constant(1).gpu().cpu()
class PTBBenchmark(tf.test.Benchmark):
BATCH_SIZE = 20
SEQ_LEN = 35
def _report(self, label, start, num_iters, dev, batch_size):
wall_time = (time.time() - start) / num_iters
dev = "cpu" if "cpu" in dev.lower() else "gpu"
name = "%s_%s_batch_%d" % (label, dev, batch_size)
examples_per_sec = batch_size / wall_time
self.report_benchmark(
iters=num_iters,
wall_time=wall_time,
name=name,
extras={
"examples_per_sec": examples_per_sec
})
def _benchmark_apply(self, label, model):
with tf.device(device()):
sequence_batch = tf.ones(
[PTBBenchmark.SEQ_LEN, PTBBenchmark.BATCH_SIZE], dtype=tf.int64)
for _ in range(10): # Warmup
model(sequence_batch, training=False).cpu()
gc.collect()
start = time.time()
iters = 100
for _ in range(iters):
model(sequence_batch, training=False).cpu()
self._report(label, start, iters, device(), int(sequence_batch.shape[1]))
def benchmark_apply_small(self):
self._benchmark_apply("eager_apply_small", rnn_ptb.small_model(False))
def benchmark_apply_large(self):
self._benchmark_apply("eager_apply_large", rnn_ptb.large_model(False))
def benchmark_cudnn_apply_small(self):
if not tfe.num_gpus():
return
self._benchmark_apply("eager_cudnn_apply_small", rnn_ptb.small_model(True))
def benchmark_cudnn_apply_large(self):
if not tfe.num_gpus():
return
self._benchmark_apply("eager_cudnn_apply_large", rnn_ptb.large_model(True))
def _benchmark_train(self, label, model):
with tf.device(device()):
optimizer = tf.train.GradientDescentOptimizer(1.)
def model_loss(inputs, targets):
return rnn_ptb.loss_fn(model, inputs, targets, training=True)
grads = tfe.implicit_gradients(model_loss)
sequence_batch = tf.ones(
[PTBBenchmark.SEQ_LEN, PTBBenchmark.BATCH_SIZE], dtype=tf.int64)
def step():
optimizer.apply_gradients(
rnn_ptb.clip_gradients(grads(sequence_batch, sequence_batch), 0.25))
for _ in range(10): # Warmup
step()
force_gpu_sync()
gc.collect()
start = time.time()
iters = 100
for _ in range(iters):
step()
force_gpu_sync()
self._report(label, start, iters, device(), int(sequence_batch.shape[1]))
def benchmark_train_small(self):
self._benchmark_train("eager_train_small", rnn_ptb.small_model(False))
def benchmark_train_large(self):
self._benchmark_train("eager_train_large", rnn_ptb.large_model(False))
def benchmark_cudnn_train_small(self):
if not tfe.num_gpus():
return
self._benchmark_train("eager_cudnn_train_small", rnn_ptb.small_model(True))
def benchmark_cudnn_train_large(self):
if not tfe.num_gpus():
return
self._benchmark_train("eager_cudnn_train_large", rnn_ptb.large_model(True))
if __name__ == "__main__":
tfe.enable_eager_execution()
tf.test.main()
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/rnn_ptb/rnn_ptb_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Penn Treebank RNN model definition compatible with eager execution.
Model similar to
https://github.com/tensorflow/models/tree/master/tutorials/rnn/ptb
Usage: python ./rnn_ptb.py --data-path=<path_to_dataset>
Penn Treebank (PTB) dataset from:
http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib.cudnn_rnn.python.layers import cudnn_rnn
from tensorflow.contrib.eager.python import tfe
layers = tf.keras.layers
class RNN(tf.keras.Model):
"""A static RNN.
Similar to tf.compat.v1.nn.static_rnn, implemented as a class.
"""
def __init__(self, hidden_dim, num_layers, keep_ratio):
super(RNN, self).__init__()
self.keep_ratio = keep_ratio
self.cells = tf.contrib.checkpoint.List([
tf.nn.rnn_cell.BasicLSTMCell(num_units=hidden_dim)
for _ in range(num_layers)
])
def call(self, input_seq, training):
batch_size = int(input_seq.shape[1])
for c in self.cells:
state = c.zero_state(batch_size, tf.float32)
outputs = []
input_seq = tf.unstack(input_seq, num=int(input_seq.shape[0]), axis=0)
for inp in input_seq:
output, state = c(inp, state)
outputs.append(output)
input_seq = tf.stack(outputs, axis=0)
if training:
input_seq = tf.nn.dropout(input_seq, self.keep_ratio)
# Returning a list instead of a single tensor so that the line:
# y = self.rnn(y, ...)[0]
# in PTBModel.call works for both this RNN and CudnnLSTM (which returns a
# tuple (output, output_states)).
return [input_seq]
class Embedding(layers.Layer):
"""An Embedding layer."""
def __init__(self, vocab_size, embedding_dim, **kwargs):
super(Embedding, self).__init__(**kwargs)
self.vocab_size = vocab_size
self.embedding_dim = embedding_dim
def build(self, _):
self.embedding = self.add_variable(
"embedding_kernel",
shape=[self.vocab_size, self.embedding_dim],
dtype=tf.float32,
initializer=tf.random_uniform_initializer(-0.1, 0.1),
trainable=True)
def call(self, x):
return tf.nn.embedding_lookup(self.embedding, x)
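# Illustrative usage sketch for the Embedding layer, assuming eager execution
# is enabled; the sizes are arbitrary.
def _demo_embedding():
  """Maps a [sequence_length, batch] block of token ids to vectors."""
  embedding = Embedding(vocab_size=50, embedding_dim=8)
  ids = tf.ones([35, 20], dtype=tf.int64)
  print(embedding(ids).shape)  # (35, 20, 8)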
# pylint: disable=not-callable
class PTBModel(tf.keras.Model):
"""LSTM for word language modeling.
Model described in:
(Zaremba, et. al.) Recurrent Neural Network Regularization
http://arxiv.org/abs/1409.2329
See also:
https://github.com/tensorflow/models/tree/master/tutorials/rnn/ptb
"""
def __init__(self,
vocab_size,
embedding_dim,
hidden_dim,
num_layers,
dropout_ratio,
use_cudnn_rnn=True):
super(PTBModel, self).__init__()
self.keep_ratio = 1 - dropout_ratio
self.use_cudnn_rnn = use_cudnn_rnn
self.embedding = Embedding(vocab_size, embedding_dim)
if self.use_cudnn_rnn:
self.rnn = cudnn_rnn.CudnnLSTM(
num_layers, hidden_dim, dropout=dropout_ratio)
else:
self.rnn = RNN(hidden_dim, num_layers, self.keep_ratio)
self.linear = layers.Dense(
vocab_size, kernel_initializer=tf.random_uniform_initializer(-0.1, 0.1))
self._output_shape = [-1, hidden_dim]
def call(self, input_seq, training):
"""Run the forward pass of PTBModel.
Args:
input_seq: [length, batch] shape int64 tensor.
training: Is this a training call.
Returns:
outputs tensors of inference.
"""
y = self.embedding(input_seq)
if training:
y = tf.nn.dropout(y, self.keep_ratio)
y = self.rnn(y, training=training)[0]
return self.linear(tf.reshape(y, self._output_shape))
def clip_gradients(grads_and_vars, clip_ratio):
gradients, variables = zip(*grads_and_vars)
clipped, _ = tf.clip_by_global_norm(gradients, clip_ratio)
return zip(clipped, variables)
def loss_fn(model, inputs, targets, training):
labels = tf.reshape(targets, [-1])
outputs = model(inputs, training=training)
return tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=outputs))
def _divide_into_batches(data, batch_size):
"""Convert a sequence to a batch of sequences."""
nbatch = data.shape[0] // batch_size
data = data[:nbatch * batch_size]
data = data.reshape(batch_size, -1).transpose()
return data
def _get_batch(data, i, seq_len):
slen = min(seq_len, data.shape[0] - 1 - i)
inputs = data[i:i + slen, :]
target = data[i + 1:i + 1 + slen, :]
return tf.constant(inputs), tf.constant(target)
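# Illustrative usage sketch for _divide_into_batches() and _get_batch(), using
# a toy token stream of 12 ids.
def _demo_batching():
  """Shows how a flat token stream is reshaped and sliced into batches."""
  data = np.arange(12, dtype=np.int64)
  batched = _divide_into_batches(data, batch_size=3)  # shape (4, 3)
  inputs, targets = _get_batch(batched, i=0, seq_len=2)
  # `targets` is `inputs` shifted forward by one time step.
  print(batched.shape, inputs.shape, targets.shape)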
def evaluate(model, data):
"""evaluate an epoch."""
total_loss = 0.0
total_batches = 0
start = time.time()
for _, i in enumerate(range(0, data.shape[0] - 1, FLAGS.seq_len)):
inp, target = _get_batch(data, i, FLAGS.seq_len)
loss = loss_fn(model, inp, target, training=False)
total_loss += loss.numpy()
total_batches += 1
time_in_ms = (time.time() - start) * 1000
sys.stderr.write("eval loss %.2f (eval took %d ms)\n" %
(total_loss / total_batches, time_in_ms))
return total_loss
def train(model, optimizer, train_data, sequence_length, clip_ratio):
"""training an epoch."""
def model_loss(inputs, targets):
return loss_fn(model, inputs, targets, training=True)
grads = tfe.implicit_gradients(model_loss)
total_time = 0
for batch, i in enumerate(range(0, train_data.shape[0] - 1, sequence_length)):
train_seq, train_target = _get_batch(train_data, i, sequence_length)
start = time.time()
optimizer.apply_gradients(
clip_gradients(grads(train_seq, train_target), clip_ratio))
total_time += (time.time() - start)
if batch % 10 == 0:
time_in_ms = (total_time * 1000) / (batch + 1)
sys.stderr.write("batch %d: training loss %.2f, avg step time %d ms\n" %
(batch, model_loss(train_seq, train_target).numpy(),
time_in_ms))
class Datasets(object):
"""Processed form of the Penn Treebank dataset."""
def __init__(self, path):
"""Load the Penn Treebank dataset.
Args:
path: Path to the data/ directory of the dataset from Tomas Mikolov's
webpage - http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
"""
self.word2idx = {} # string -> integer id
self.idx2word = [] # integer id -> word string
# Files represented as a list of integer ids (as opposed to list of string
# words).
self.train = self.tokenize(os.path.join(path, "ptb.train.txt"))
self.valid = self.tokenize(os.path.join(path, "ptb.valid.txt"))
def vocab_size(self):
return len(self.idx2word)
def add(self, word):
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
def tokenize(self, path):
"""Read text file in path and return a list of integer token ids."""
tokens = 0
with tf.gfile.Open(path, "r") as f:
for line in f:
words = line.split() + ["<eos>"]
tokens += len(words)
for word in words:
self.add(word)
# Tokenize file content
with tf.gfile.Open(path, "r") as f:
ids = np.zeros(tokens).astype(np.int64)
token = 0
for line in f:
words = line.split() + ["<eos>"]
for word in words:
ids[token] = self.word2idx[word]
token += 1
return ids
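# Illustrative usage sketch for Datasets: the path is hypothetical and is
# assumed to contain the extracted PTB "data/" directory.
def _demo_corpus(path="/tmp/simple-examples/data"):
  """Loads the corpus and batches the training tokens."""
  corpus = Datasets(path)
  train = _divide_into_batches(corpus.train, batch_size=20)
  print(corpus.vocab_size(), train.shape)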
def small_model(use_cudnn_rnn):
"""Returns a PTBModel with a 'small' configuration."""
return PTBModel(
vocab_size=10000,
embedding_dim=200,
hidden_dim=200,
num_layers=2,
dropout_ratio=0.,
use_cudnn_rnn=use_cudnn_rnn)
def large_model(use_cudnn_rnn):
"""Returns a PTBModel with a 'large' configuration."""
return PTBModel(
vocab_size=10000,
embedding_dim=650,
hidden_dim=650,
num_layers=2,
dropout_ratio=0.5,
use_cudnn_rnn=use_cudnn_rnn)
def test_model(use_cudnn_rnn):
"""Returns a tiny PTBModel for unit tests."""
return PTBModel(
vocab_size=100,
embedding_dim=20,
hidden_dim=20,
num_layers=2,
dropout_ratio=0.,
use_cudnn_rnn=use_cudnn_rnn)
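# Illustrative usage sketch for test_model() and loss_fn(): a single forward
# and loss computation on dummy all-ones data, assuming eager execution is
# enabled and the plain (non-CuDNN) RNN path is used.
def _demo_ptb_loss():
  """Computes the loss of the tiny test model on dummy data."""
  model = test_model(use_cudnn_rnn=False)
  inputs = tf.ones([35, 20], dtype=tf.int64)  # [sequence_length, batch_size]
  targets = tf.ones([35, 20], dtype=tf.int64)
  print(loss_fn(model, inputs, targets, training=False).numpy())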
def main(_):
tf.enable_eager_execution()
if not FLAGS.data_path:
raise ValueError("Must specify --data-path")
corpus = Datasets(FLAGS.data_path)
train_data = _divide_into_batches(corpus.train, FLAGS.batch_size)
eval_data = _divide_into_batches(corpus.valid, 10)
have_gpu = tfe.num_gpus() > 0
use_cudnn_rnn = not FLAGS.no_use_cudnn_rnn and have_gpu
with tf.device("/device:GPU:0" if have_gpu else None):
# Make learning_rate a Variable so it can be included in the checkpoint
# and we can resume training with the last saved learning_rate.
learning_rate = tf.Variable(20.0, name="learning_rate")
model = PTBModel(corpus.vocab_size(), FLAGS.embedding_dim,
FLAGS.hidden_dim, FLAGS.num_layers, FLAGS.dropout,
use_cudnn_rnn)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
checkpoint = tf.train.Checkpoint(
learning_rate=learning_rate, model=model,
# GradientDescentOptimizer has no state to checkpoint, but noting it
# here lets us swap in an optimizer that does.
optimizer=optimizer)
# Restore existing variables now (learning_rate), and restore new variables
# on creation if a checkpoint exists.
checkpoint.restore(tf.train.latest_checkpoint(FLAGS.logdir))
sys.stderr.write("learning_rate=%f\n" % learning_rate.numpy())
best_loss = None
for _ in range(FLAGS.epoch):
train(model, optimizer, train_data, FLAGS.seq_len, FLAGS.clip)
eval_loss = evaluate(model, eval_data)
if not best_loss or eval_loss < best_loss:
if FLAGS.logdir:
checkpoint.save(os.path.join(FLAGS.logdir, "ckpt"))
best_loss = eval_loss
else:
learning_rate.assign(learning_rate / 4.0)
sys.stderr.write("eval_loss did not reduce in this epoch, "
"changing learning rate to %f for the next epoch\n" %
learning_rate.numpy())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--data-path",
type=str,
default="",
help="Data directory of the Penn Treebank dataset from "
"http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz")
parser.add_argument(
"--logdir", type=str, default="", help="Directory for checkpoint.")
parser.add_argument("--epoch", type=int, default=20, help="Number of epochs.")
parser.add_argument("--batch-size", type=int, default=20, help="Batch size.")
parser.add_argument(
"--seq-len", type=int, default=35, help="Sequence length.")
parser.add_argument(
"--embedding-dim", type=int, default=200, help="Embedding dimension.")
parser.add_argument(
"--hidden-dim", type=int, default=200, help="Hidden layer dimension.")
parser.add_argument(
"--num-layers", type=int, default=2, help="Number of RNN layers.")
parser.add_argument(
"--dropout", type=float, default=0.2, help="Drop out ratio.")
parser.add_argument(
"--clip", type=float, default=0.25, help="Gradient clipping ratio.")
parser.add_argument(
"--no-use-cudnn-rnn",
action="store_true",
default=False,
help="Disable the fast CuDNN RNN (when no gpu)")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/rnn_ptb/rnn_ptb.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for PTBModel used for graph construction."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.rnn_ptb import rnn_ptb
class PTBTest(tf.test.TestCase):
def testTrain(self):
batch_size = 20
sequence_length = 35
with tf.Graph().as_default(), tf.device(tf.test.gpu_device_name()):
inputs_ph = tf.placeholder(tf.int64, [sequence_length, batch_size],
"inputs")
labels_ph = tf.placeholder(tf.int64, [sequence_length, batch_size],
"labels")
inputs = np.ones(inputs_ph.shape.as_list(), dtype=np.int64)
labels = np.ones(labels_ph.shape.as_list(), dtype=np.int64)
model = rnn_ptb.test_model(tf.test.is_gpu_available())
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
loss = rnn_ptb.loss_fn(model, inputs_ph, labels_ph, training=True)
grads = rnn_ptb.clip_gradients(optimizer.compute_gradients(loss), 0.25)
train_op = optimizer.apply_gradients(grads)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(train_op, feed_dict={inputs_ph: inputs, labels_ph: labels})
sess.run(
[train_op, loss], feed_dict={
inputs_ph: inputs,
labels_ph: labels
})
class PTBBenchmark(tf.test.Benchmark):
BATCH_SIZE = 20
SEQ_LEN = 35
def _report(self, label, start, num_iters, device, batch_size):
wall_time = (time.time() - start) / num_iters
dev = "cpu" if "cpu" in device.lower() else "gpu"
name = "%s_%s_batch_%d" % (label, dev, batch_size)
examples_per_sec = batch_size / wall_time
self.report_benchmark(
iters=num_iters,
wall_time=wall_time,
name=name,
extras={
"examples_per_sec": examples_per_sec
})
def _benchmark_apply(self, label, model):
num_iters = 100
num_warmup = 10
dataset = tf.data.Dataset.from_tensors(
tf.ones(
[PTBBenchmark.SEQ_LEN, PTBBenchmark.BATCH_SIZE],
dtype=tf.int64)).repeat(num_iters + num_warmup)
inputs = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
with tf.device(tf.test.gpu_device_name()):
outputs = model(inputs, training=True)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for _ in range(num_warmup):
sess.run(outputs)
gc.collect()
start = time.time()
for _ in range(num_iters):
sess.run(outputs)
self._report(label, start, num_iters,
tf.test.gpu_device_name(), PTBBenchmark.BATCH_SIZE)
def benchmark_apply_small(self):
self._benchmark_apply("graph_apply_small", rnn_ptb.small_model(False))
def benchmark_apply_large(self):
self._benchmark_apply("graph_apply_large", rnn_ptb.large_model(False))
def benchmark_cudnn_apply_small(self):
if not tf.test.is_gpu_available():
return
self._benchmark_apply("graph_cudnn_apply_small", rnn_ptb.small_model(True))
def benchmark_cudnn_apply_large(self):
if not tf.test.is_gpu_available():
return
self._benchmark_apply("graph_cudnn_apply_large", rnn_ptb.large_model(True))
def _benchmark_train(self, label, model):
num_iters = 100
num_warmup = 10
dataset = tf.data.Dataset.from_tensors(
tf.ones(
[PTBBenchmark.SEQ_LEN, PTBBenchmark.BATCH_SIZE],
dtype=tf.int64)).repeat(num_iters + num_warmup)
# inputs and labels have the same shape
dataset = tf.data.Dataset.zip((dataset, dataset))
(inputs, labels) = tf.compat.v1.data.make_one_shot_iterator(
dataset).get_next()
with tf.device(tf.test.gpu_device_name()):
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
loss = rnn_ptb.loss_fn(model, inputs, labels, training=True)
grads = rnn_ptb.clip_gradients(optimizer.compute_gradients(loss), 0.25)
train_op = optimizer.apply_gradients(grads)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for _ in range(num_warmup):
sess.run(train_op)
gc.collect()
start = time.time()
for _ in range(num_iters):
sess.run(train_op)
self._report(label, start, num_iters,
tf.test.gpu_device_name(), PTBBenchmark.BATCH_SIZE)
def benchmark_train_small(self):
self._benchmark_train("graph_train_small", rnn_ptb.small_model(False))
def benchmark_train_large(self):
self._benchmark_train("graph_train_large", rnn_ptb.large_model(False))
def benchmark_cudnn_train_small(self):
if not tf.test.is_gpu_available():
return
self._benchmark_train("graph_cudnn_train_small", rnn_ptb.small_model(True))
def benchmark_cudnn_train_large(self):
if not tf.test.is_gpu_available():
return
self._benchmark_train("graph_cudnn_train_large", rnn_ptb.large_model(True))
if __name__ == "__main__":
tf.test.main()
|
tensorflow-master
|
tensorflow/contrib/eager/python/examples/rnn_ptb/rnn_ptb_graph_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for contrib.compiler.xla."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from absl.testing import parameterized
from tensorflow.contrib.compiler import xla
from tensorflow.contrib.tpu.python.tpu import tpu_estimator
from tensorflow.contrib.training.python.training import hparam
from tensorflow.python import summary
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.training import training
_TRAIN = model_fn_lib.ModeKeys.TRAIN
_EVAL = model_fn_lib.ModeKeys.EVAL
_EXPECTED_LOSS = 1
_EXPECTED_FEATURE = 2
_EXPECTED_LABEL = 3
def _test_train_model_fn(features, labels, mode, params):
"""A dummy model_fn for testing purpose."""
del features, labels, params
loss = constant_op.constant(_EXPECTED_LOSS)
return model_fn_lib.EstimatorSpec(
mode=mode, loss=loss, train_op=array_ops.identity(loss))
@xla.estimator_model_fn
def decorated_model_fn(features, labels, mode, params):
return _test_train_model_fn(features, labels, mode, params)
def make_dummy_features_labels():
# The XLA CPU/GPU backend doesn't support guaranteed constants, so use a
# dataset container to work around that limitation.
features_dataset = dataset_ops.Dataset.from_tensors(
constant_op.constant(_EXPECTED_FEATURE)).repeat(10)
features_op = features_dataset.make_one_shot_iterator().get_next()
labels_dataset = dataset_ops.Dataset.from_tensors(
constant_op.constant(_EXPECTED_LABEL)).repeat(10)
labels_op = labels_dataset.make_one_shot_iterator().get_next()
return features_op, labels_op
class XlaDecoratorTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('test_use_as_decorator', decorated_model_fn, None),
('test_use_as_function', xla.estimator_model_fn(_test_train_model_fn),
None),
('test_use_tpu_false_hparams', decorated_model_fn,
hparam.HParams(use_tpu=False)),
('test_use_tpu_false_dict_params', decorated_model_fn, {
'use_tpu': False
}),
)
def test_compile(self, model_fn, params):
"""Calls model_fn and verifies it is compiled."""
with test.mock.patch.object(xla, 'compile') as mock_xla_compile:
loss = constant_op.constant(_EXPECTED_LOSS)
mock_xla_compile.return_value = [loss]
features, labels = make_dummy_features_labels()
estimator_spec = model_fn(
features=features, labels=labels, mode=_TRAIN, params=params or {})
self.assertEqual(mock_xla_compile.call_count, 1)
self.assertEqual(estimator_spec.mode, _TRAIN)
with self.test_session() as sess:
self.assertEqual(sess.run(estimator_spec.loss), sess.run(loss))
self.assertEqual(sess.run(estimator_spec.train_op), sess.run(loss))
@parameterized.named_parameters(
('test_use_tpu_true_hparams', decorated_model_fn,
hparam.HParams(use_tpu=True)),
('test_use_tpu_true_dict_params', decorated_model_fn, {
'use_tpu': True
}),
)
def test_not_compile(self, model_fn, params):
"""Calls model_fn and verifies it is NOT compiled."""
with test.mock.patch.object(xla, 'compile') as mock_xla_compile:
loss = constant_op.constant(_EXPECTED_LOSS)
mock_xla_compile.return_value = [loss]
features, labels = make_dummy_features_labels()
estimator_spec = model_fn(
features=features, labels=labels, mode=_TRAIN, params=params or {})
mock_xla_compile.assert_not_called()
self.assertEqual(estimator_spec.mode, _TRAIN)
with self.test_session() as sess:
self.assertEqual(sess.run(estimator_spec.loss), sess.run(loss))
self.assertEqual(sess.run(estimator_spec.train_op), sess.run(loss))
def test_model_with_summary(self):
"""Tests that summary ops are disabled."""
@xla.estimator_model_fn
def model_fn_with_summary(features, labels, mode, params):
del features, labels, params
loss = constant_op.constant(_EXPECTED_LOSS)
summary.scalar('loss_scalar_summary', loss)
summary.histogram('loss_histogram_summary', loss)
summary.image('loss_image_summary', loss)
return model_fn_lib.EstimatorSpec(
mode=mode, loss=loss, train_op=array_ops.identity(loss))
features, labels = make_dummy_features_labels()
estimator_spec = model_fn_with_summary(
features=features, labels=labels, mode=_TRAIN, params={})
with self.test_session() as sess:
self.assertEqual(sess.run(estimator_spec.loss), _EXPECTED_LOSS)
def _test_eval_metric_fn(eval_tensor_1, eval_tensor_2):
return {
'metric_1': (eval_tensor_1, eval_tensor_1),
'metric_2': (eval_tensor_2, eval_tensor_2),
}
class XlaDecoratorEvaluationTest(test.TestCase):
def _verify_evaluation_result(self, eval_model_fn):
features, labels = make_dummy_features_labels()
estimator_spec = eval_model_fn(
features=features, labels=labels, mode=_EVAL, params={})
with self.test_session() as sess:
self.assertEqual(sess.run(estimator_spec.loss), _EXPECTED_LOSS)
self.assertEqual(
sess.run(estimator_spec.eval_metric_ops['metric_1'][0]),
_EXPECTED_FEATURE + _EXPECTED_LABEL)
self.assertEqual(
sess.run(estimator_spec.eval_metric_ops['metric_1'][1]),
_EXPECTED_FEATURE + _EXPECTED_LABEL)
self.assertEqual(
sess.run(estimator_spec.eval_metric_ops['metric_2'][0]),
_EXPECTED_FEATURE - _EXPECTED_LABEL)
self.assertEqual(
sess.run(estimator_spec.eval_metric_ops['metric_2'][1]),
_EXPECTED_FEATURE - _EXPECTED_LABEL)
def test_eval_base_estimator_spec_eval_metric_ops_disallowed(self):
@xla.estimator_model_fn
def eval_model_fn_return_estimator_spec(features, labels, mode, params):
del features, labels, params
loss = constant_op.constant(_EXPECTED_LOSS)
return model_fn_lib.EstimatorSpec(
mode=mode,
loss=loss,
eval_metric_ops={
'metric': (array_ops.identity(loss), control_flow_ops.no_op())
})
with self.assertRaisesRegexp(
ValueError, 'EstimatorSpec.eval_metric_ops is not supported with XLA '
'compilation. Please use TPUEstimatorSpec.eval_metrics instead.'):
self._verify_evaluation_result(eval_model_fn_return_estimator_spec)
def test_eval_base_estimator_spec_no_eval_metric_ops(self):
@xla.estimator_model_fn
def eval_model_fn_no_eval_metric_ops(features, labels, mode, params):
del features, labels, params
return model_fn_lib.EstimatorSpec(
mode=mode, loss=constant_op.constant(_EXPECTED_LOSS))
features, labels = make_dummy_features_labels()
estimator_spec = eval_model_fn_no_eval_metric_ops(
features=features, labels=labels, mode=_EVAL, params={})
with self.test_session() as sess:
self.assertEqual(sess.run(estimator_spec.loss), _EXPECTED_LOSS)
def test_eval_no_eval_metrics(self):
@xla.estimator_model_fn
def eval_model_fn_no_eval_metrics(features, labels, mode, params):
del features, labels, params
return tpu_estimator.TPUEstimatorSpec(
mode=mode, loss=constant_op.constant(_EXPECTED_LOSS))
features, labels = make_dummy_features_labels()
estimator_spec = eval_model_fn_no_eval_metrics(
features=features, labels=labels, mode=_EVAL, params={})
self.assertEqual(estimator_spec.eval_metric_ops, {})
with self.test_session() as sess:
self.assertEqual(sess.run(estimator_spec.loss), _EXPECTED_LOSS)
def test_eval_fn_missing_input_tensor(self):
@xla.estimator_model_fn
def eval_model_fn(features, labels, mode, params):
del params
dummy_eval_metric_fn_tensors_dict = {
'eval_tensor_1': features + labels,
}
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=constant_op.constant(_EXPECTED_LOSS),
eval_metrics=(_test_eval_metric_fn,
dummy_eval_metric_fn_tensors_dict))
with self.assertRaisesRegexp(
ValueError,
re.escape("Arguments ['eval_tensor_2'] are needed by metric_fn (first "
'element of TPUEstimatorSpec.eval_metrics) but they are not '
'provided by evaluation tensors (second element of '
'TPUEstimatorSpec.eval_metrics).')):
self._verify_evaluation_result(eval_model_fn)
def test_eval_fn_extraneous_input_tensor(self):
@xla.estimator_model_fn
def eval_model_fn(features, labels, mode, params):
del params
dummy_eval_metric_fn_tensors_dict = {
'eval_tensor_1': features + labels,
'eval_tensor_2': features - labels,
'extra_tensor': features * 2 - labels,
}
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=constant_op.constant(_EXPECTED_LOSS),
eval_metrics=(_test_eval_metric_fn,
dummy_eval_metric_fn_tensors_dict))
with self.assertRaisesRegexp(
ValueError,
re.escape("Arguments ['extra_tensor'] are provided by evaluation "
'tensors (second element of TPUEstimatorSpec.eval_metrics) '
'but they are not needed by metric_fn (first element of '
'TPUEstimatorSpec.eval_metrics).')):
self._verify_evaluation_result(eval_model_fn)
def test_eval_tensors_as_list(self):
@xla.estimator_model_fn
def eval_model_fn(features, labels, mode, params):
del params
dummy_eval_metric_fn_tensors = [features + labels, features - labels]
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=constant_op.constant(_EXPECTED_LOSS),
eval_metrics=(_test_eval_metric_fn, dummy_eval_metric_fn_tensors))
self._verify_evaluation_result(eval_model_fn)
def test_eval_tensors_as_dict(self):
@xla.estimator_model_fn
def eval_model_fn(features, labels, mode, params):
del params
dummy_eval_metric_fn_tensors_dict = {
'eval_tensor_1': features + labels,
'eval_tensor_2': features - labels,
}
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=constant_op.constant(_EXPECTED_LOSS),
eval_metrics=(_test_eval_metric_fn,
dummy_eval_metric_fn_tensors_dict))
self._verify_evaluation_result(eval_model_fn)
def test_model_with_summary(self):
"""Tests that summary ops are disabled."""
@xla.estimator_model_fn
def model_fn_with_summary(features, labels, mode, params):
del features, labels, params
loss = constant_op.constant(_EXPECTED_LOSS)
summary.scalar('loss_scalar_summary', loss)
summary.histogram('loss_histogram_summary', loss)
summary.image('loss_image_summary', loss)
return tpu_estimator.TPUEstimatorSpec(mode=mode, loss=loss)
features, labels = make_dummy_features_labels()
estimator_spec = model_fn_with_summary(
features=features, labels=labels, mode=_EVAL, params={})
with self.test_session() as sess:
self.assertEqual(sess.run(estimator_spec.loss), _EXPECTED_LOSS)
class XlaDecoratorScaffoldTest(test.TestCase, parameterized.TestCase):
def _make_scaffold_fn(self, mode):
def _scaffold_fn_on_cpu():
scaffold = training.Scaffold()
self.assertNotIn(mode, self.is_scaffold_fn_called)
self.is_scaffold_fn_called[mode] = True
return scaffold
return _scaffold_fn_on_cpu
def test_scaffold_fn_return_none(self):
@xla.estimator_model_fn
def model_fn(features, labels, mode, params):
del features, labels, params
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=constant_op.constant(_EXPECTED_LOSS),
train_op=control_flow_ops.no_op(),
scaffold_fn=lambda: None)
features, labels = make_dummy_features_labels()
with self.assertRaisesRegexp(
ValueError,
'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed'):
model_fn(features=features, labels=labels, mode=_TRAIN, params={})
@parameterized.named_parameters(
('train_mode', _TRAIN),
('eval_mode', _EVAL),
# TODO(ycao): Add predict_mode test after PREDICT mode is implemented.
)
def test_scaffold_fn_in_mode(self, mode):
@xla.estimator_model_fn
def model_fn(features, labels, mode, params):
del features, labels, params
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=constant_op.constant(_EXPECTED_LOSS),
train_op=control_flow_ops.no_op(),
scaffold_fn=self._make_scaffold_fn(mode))
features, labels = make_dummy_features_labels()
self.is_scaffold_fn_called = {}
model_fn(features=features, labels=labels, mode=mode, params={})
self.assertTrue(self.is_scaffold_fn_called[mode])
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/compiler/xla_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for controlling the Tensorflow/XLA JIT compiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compiler.xla import jit
experimental_jit_scope = jit.experimental_jit_scope
|
tensorflow-master
|
tensorflow/contrib/compiler/jit.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module for controlling the Tensorflow/XLA JIT compiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.compiler import jit
from tensorflow.contrib.compiler import xla
|
tensorflow-master
|
tensorflow/contrib/compiler/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""xla is an experimental library that provides XLA support APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compiler.xla import xla
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_decorator
compile = xla.compile # pylint: disable=redefined-builtin
check_function_argument_count = xla.check_function_argument_count
class _CapturedObject(object):
"""A placeholder to capture an object."""
def __init__(self):
self._object = None
def capture(self, o):
if self._object:
raise RuntimeError(
'InternalError: _CapturedObject can capture only once. Please file a '
'bug.')
self._object = o
def get(self):
return self._object
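# Illustrative usage sketch for _CapturedObject: it lets a value produced
# inside a traced step function escape to the calling code.
def _demo_captured_object():
  """Captures a value once and reads it back."""
  captured = _CapturedObject()
  captured.capture('scaffold_fn')
  assert captured.get() == 'scaffold_fn'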
def _get_scaffold(captured_scaffold_fn):
"""Retrieves the Scaffold from `captured_scaffold_fn`."""
scaffold_fn = captured_scaffold_fn.get()
if not scaffold_fn:
return None
scaffold = scaffold_fn()
if scaffold is None:
raise ValueError(
'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')
return scaffold
class _ModelFnWrapper(object):
"""_ModelFnWrapper supports executing model_fn with XLA."""
def __init__(self, function):
self._model_fn = function
def __call__(self, features, labels, mode, params):
# TPUEstimator compiles model_fn when use_tpu=True. To avoid double
# compilation, we use the params['use_tpu'] flag as a hint: when it is set to
# True, model_fn is called without compilation.
# Note that this condition isn't accurate for the case of exporting a model.
# In that case we should ideally not compile so that the user can see the
# detailed graph. However, we don't have enough information to tell whether
# model_fn is being called for export mode or not.
# TODO(ycao): Make this condition more accurate when implementing PREDICT
# mode.
if params.get('use_tpu'):
return self._call_model_fn(features, labels, mode, params)
if mode == model_fn_lib.ModeKeys.TRAIN:
train_step, captured_scaffold_fn = self._make_train_step(
features, labels, params)
(loss,) = compile(train_step)
return model_fn_lib.EstimatorSpec(
mode=mode,
loss=loss,
train_op=array_ops.identity(loss),
scaffold=_get_scaffold(captured_scaffold_fn))
elif mode == model_fn_lib.ModeKeys.EVAL:
eval_step, captured_eval_metric_fn, captured_scaffold_fn = (
self._make_eval_step(features, labels, params))
outputs = compile(eval_step)
loss = outputs[0]
# Calculate eval_metric_ops if eval_metric_fn is set and captured.
eval_metric_fn = captured_eval_metric_fn.get()
if eval_metric_fn:
eval_metric_fn_tensors = outputs[1:]
eval_metric_ops = eval_metric_fn(*eval_metric_fn_tensors)
else:
eval_metric_ops = None
return model_fn_lib.EstimatorSpec(
mode=mode,
loss=loss,
eval_metric_ops=eval_metric_ops,
scaffold=_get_scaffold(captured_scaffold_fn))
else:
raise NotImplementedError('%s is not implemented, only TRAIN and EVAL are'
' supported' % mode)
def _make_train_step(self, features, labels, params):
"""Creates a single step of training for xla.compile()."""
captured_scaffold_fn = _CapturedObject()
def train_step():
"""A single step of training."""
estimator_spec = self._call_model_fn(features, labels,
model_fn_lib.ModeKeys.TRAIN, params)
try:
captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
except AttributeError:
captured_scaffold_fn.capture(None)
# train_step will be run by xla.compile(). xla.compile() only supports
# tensor output while train_op can be either an operation or a tensor.
# Even though xla.compile() automatically adds operation-typed train_op as
# control dependency of other tensor outputs, it doesn't do so for
# tensor-typed train_op. Thus, we need to set it explicitly here.
with ops.control_dependencies([estimator_spec.train_op]):
return array_ops.identity(estimator_spec.loss)
return train_step, captured_scaffold_fn
def _make_eval_step(self, features, labels, params):
"""Creates a single step of evaluation for xla.compile()."""
captured_eval_metric_fn = _CapturedObject()
captured_scaffold_fn = _CapturedObject()
def eval_step():
"""A single step of evaluation."""
estimator_spec = self._call_model_fn(features, labels,
model_fn_lib.ModeKeys.EVAL, params)
try:
captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
except AttributeError:
captured_scaffold_fn.capture(None)
eval_metric_fn = None
eval_metric_fn_tensors = []
try:
if estimator_spec.eval_metrics:
(eval_metric_fn, eval_metric_fn_tensors) = estimator_spec.eval_metrics
except AttributeError:
pass
# If a dictionary is provided, we need to convert it into a list sorted
# according to order of eval_metric_fn positional arguments.
if isinstance(eval_metric_fn_tensors, dict):
eval_metric_fn_args = function_utils.fn_args(eval_metric_fn)
eval_metric_fn_tensors = [
eval_metric_fn_tensors[i] for i in eval_metric_fn_args
]
captured_eval_metric_fn.capture(eval_metric_fn)
return tuple([estimator_spec.loss] + eval_metric_fn_tensors)
return eval_step, captured_eval_metric_fn, captured_scaffold_fn
def _call_model_fn(self, features, labels, mode, params):
"""Calls the model_fn with required parameters."""
model_fn_args = function_utils.fn_args(self._model_fn)
kwargs = {}
if 'labels' in model_fn_args:
kwargs['labels'] = labels
elif labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = params
return self._verify_estimator_spec(
self._model_fn(features=features, **kwargs))
def _verify_estimator_spec(self, estimator_spec):
"""Verifies estimator spec contains correct data."""
# TODO(ycao): Implement estimator spec verification for other modes.
try:
if estimator_spec.scaffold:
logging.warning('EstimatorSpec.scaffold is ignored with XLA compilation'
'. Please use TPUEstimatorSpec.scaffold_fn instead.')
except AttributeError:
pass
try:
if estimator_spec.eval_metric_ops:
raise ValueError('EstimatorSpec.eval_metric_ops is not supported with '
'XLA compilation. Please use '
'TPUEstimatorSpec.eval_metrics instead.')
except AttributeError:
pass
if estimator_spec.mode == model_fn_lib.ModeKeys.EVAL:
# If estimator_spec is of type TPUEstimatorSpec and contains eval_metrics,
# check that eval_metrics contains eval_metric_fn and
# eval_metric_fn_tensors with matching arguments.
try:
eval_metrics = estimator_spec.eval_metrics
except AttributeError:
eval_metrics = None
if eval_metrics:
(eval_metric_fn, eval_metric_fn_tensors) = eval_metrics
eval_metric_fn_args = function_utils.fn_args(eval_metric_fn)
if isinstance(eval_metric_fn_tensors, dict):
missing_tensors = [
i for i in eval_metric_fn_args if i not in eval_metric_fn_tensors
]
additional_tensors = [
i for i in eval_metric_fn_tensors if i not in eval_metric_fn_args
]
if missing_tensors:
raise ValueError('Arguments %s are needed by metric_fn (first '
'element of TPUEstimatorSpec.eval_metrics) but '
'they are not provided by evaluation tensors '
'(second element of TPUEstimatorSpec.eval_metrics)'
'.' % missing_tensors)
if additional_tensors:
raise ValueError('Arguments %s are provided by evaluation tensors '
'(second element of TPUEstimatorSpec.eval_metrics)'
' but they are not needed by metric_fn (first '
'element of TPUEstimatorSpec.eval_metrics).' %
additional_tensors)
return estimator_spec
def estimator_model_fn(target_model_fn=None):
"""estimator_model_fn decorates a model_fn to be compiled for execution.
Currently it only works with `TPUEstimator`. If you need to use it with base
`Estimator`, please add `tf.compat.v1.enable_resource_variables()` at the
beginning of your program.
Example 1, decorating model_fn:
```
@xla.estimator_model_fn()
def model_fn(features, labels, mode, params):
...
return EstimatorSpec(...)
est = Estimator(model_fn=model_fn, ...)
est.train(...)
```
Example 2, decorator as function:
```
def model_fn(features, labels, mode, params):
...
return EstimatorSpec(...)
est = Estimator(model_fn=xla.estimator_model_fn(model_fn), ...)
est.train(...)
```
Args:
target_model_fn: model_fn to be decorated. This is only needed when
decorator is used in function call form (example 2).
Returns:
Decorated target_model_fn.
"""
def decorated(function):
return tf_decorator.make_decorator(function, _ModelFnWrapper(function))
return decorated(target_model_fn) if target_model_fn else decorated
|
tensorflow-master
|
tensorflow/contrib/compiler/xla.py
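The wrapper above ultimately funnels everything through `xla.compile`, which this file re-exports as `compile`. Below is a minimal sketch of calling it directly, assuming TF 1.x graph mode; the placeholder names and the toy computation are illustrative, not part of the file.

import tensorflow as tf
from tensorflow.contrib.compiler import xla

def computation(a, b):
  # xla.compile requires tensor outputs; return a single loss-like tensor.
  return tf.reduce_mean(tf.square(a - b))

a = tf.placeholder(tf.float32, shape=[None])
b = tf.placeholder(tf.float32, shape=[None])
# compile() returns a list/tuple of output tensors, hence the unpacking,
# mirroring the `(loss,) = compile(train_step)` pattern in _ModelFnWrapper.
(result,) = xla.compile(computation, inputs=[a, b])

with tf.Session() as sess:
  print(sess.run(result, feed_dict={a: [1., 2.], b: [2., 4.]}))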
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Signal processing operations.
`tf.contrib.signal` has been renamed to `tf.signal`. `tf.contrib.signal` will be
removed in TensorFlow 2.0.
See the
[Contrib Signal](https://tensorflow.org/api_guides/python/contrib.signal)
guide.
@@frame
@@hamming_window
@@hann_window
@@inverse_stft
@@inverse_stft_window_fn
@@mfccs_from_log_mel_spectrograms
@@linear_to_mel_weight_matrix
@@overlap_and_add
@@stft
[hamming]: https://en.wikipedia.org/wiki/Window_function#Hamming_window
[hann]: https://en.wikipedia.org/wiki/Window_function#Hann_window
[mel]: https://en.wikipedia.org/wiki/Mel_scale
[mfcc]: https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
[stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.signal.mel_ops import linear_to_mel_weight_matrix
from tensorflow.python.ops.signal.mfcc_ops import mfccs_from_log_mel_spectrograms
from tensorflow.python.ops.signal.reconstruction_ops import overlap_and_add
from tensorflow.python.ops.signal.shape_ops import frame
from tensorflow.python.ops.signal.spectral_ops import inverse_stft
from tensorflow.python.ops.signal.spectral_ops import inverse_stft_window_fn
from tensorflow.python.ops.signal.spectral_ops import stft
from tensorflow.python.ops.signal.window_ops import hamming_window
from tensorflow.python.ops.signal.window_ops import hann_window
from tensorflow.python.util.all_util import remove_undocumented
# `frame` used to be named `frames`, which is a noun and not a verb.
# Keep an alias to `frames` for backwards compatibility.
frames = frame
remove_undocumented(__name__)
|
tensorflow-master
|
tensorflow/contrib/signal/__init__.py
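As a quick illustration of how the re-exported ops fit together, here is a minimal sketch of a log-mel/MFCC front end in TF 1.x graph mode; the 16 kHz sample rate, frame sizes, and mel-band edges are arbitrary choices for the example, not defaults of this module.

import tensorflow as tf

signals = tf.placeholder(tf.float32, [None, None])  # [batch, samples]
stfts = tf.contrib.signal.stft(signals, frame_length=1024, frame_step=512,
                               fft_length=1024)
spectrograms = tf.abs(stfts)
num_spectrogram_bins = stfts.shape[-1].value  # fft_length // 2 + 1
mel_weights = tf.contrib.signal.linear_to_mel_weight_matrix(
    num_mel_bins=80, num_spectrogram_bins=num_spectrogram_bins,
    sample_rate=16000, lower_edge_hertz=80.0, upper_edge_hertz=7600.0)
mel_spectrograms = tf.tensordot(spectrograms, mel_weights, 1)
log_mel_spectrograms = tf.log(mel_spectrograms + 1e-6)
mfccs = tf.contrib.signal.mfccs_from_log_mel_spectrograms(
    log_mel_spectrograms)[..., :13]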
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Signal ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/signal/python/ops/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes representing statistical distributions and ops for working with them.
Use [tfp.distributions](/probability/api_docs/python/tfp/distributions) instead.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util import deprecation
# pylint: disable=unused-import,wildcard-import,line-too-long,g-importing-member,g-import-not-at-top
with deprecation.silence():
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops.autoregressive import *
from tensorflow.contrib.distributions.python.ops.batch_reshape import *
from tensorflow.contrib.distributions.python.ops.binomial import *
from tensorflow.contrib.distributions.python.ops.cauchy import *
from tensorflow.contrib.distributions.python.ops.chi2 import *
from tensorflow.contrib.distributions.python.ops.conditional_distribution import *
from tensorflow.contrib.distributions.python.ops.conditional_transformed_distribution import *
from tensorflow.contrib.distributions.python.ops.deterministic import *
from tensorflow.contrib.distributions.python.ops.distribution_util import fill_triangular
from tensorflow.contrib.distributions.python.ops.distribution_util import fill_triangular_inverse
from tensorflow.contrib.distributions.python.ops.distribution_util import matrix_diag_transform
from tensorflow.contrib.distributions.python.ops.distribution_util import reduce_weighted_logsumexp
from tensorflow.contrib.distributions.python.ops.distribution_util import softplus_inverse
from tensorflow.contrib.distributions.python.ops.distribution_util import tridiag
from tensorflow.contrib.distributions.python.ops.estimator import *
from tensorflow.contrib.distributions.python.ops.geometric import *
from tensorflow.contrib.distributions.python.ops.half_normal import *
from tensorflow.contrib.distributions.python.ops.independent import *
from tensorflow.contrib.distributions.python.ops.inverse_gamma import *
from tensorflow.contrib.distributions.python.ops.kumaraswamy import *
from tensorflow.contrib.distributions.python.ops.logistic import *
from tensorflow.contrib.distributions.python.ops.mixture import *
from tensorflow.contrib.distributions.python.ops.mixture_same_family import *
from tensorflow.contrib.distributions.python.ops.moving_stats import *
from tensorflow.contrib.distributions.python.ops.mvn_diag import *
from tensorflow.contrib.distributions.python.ops.mvn_diag_plus_low_rank import *
from tensorflow.contrib.distributions.python.ops.mvn_full_covariance import *
from tensorflow.contrib.distributions.python.ops.mvn_tril import *
from tensorflow.contrib.distributions.python.ops.negative_binomial import *
from tensorflow.contrib.distributions.python.ops.normal_conjugate_posteriors import *
from tensorflow.contrib.distributions.python.ops.onehot_categorical import *
from tensorflow.contrib.distributions.python.ops.poisson import *
from tensorflow.contrib.distributions.python.ops.poisson_lognormal import *
from tensorflow.contrib.distributions.python.ops.quantized_distribution import *
from tensorflow.contrib.distributions.python.ops.relaxed_bernoulli import *
from tensorflow.contrib.distributions.python.ops.relaxed_onehot_categorical import *
from tensorflow.contrib.distributions.python.ops.sample_stats import *
from tensorflow.contrib.distributions.python.ops.seed_stream import *
from tensorflow.contrib.distributions.python.ops.sinh_arcsinh import *
from tensorflow.contrib.distributions.python.ops.test_util import *
from tensorflow.contrib.distributions.python.ops.vector_diffeomixture import *
from tensorflow.contrib.distributions.python.ops.vector_exponential_diag import *
from tensorflow.contrib.distributions.python.ops.vector_laplace_diag import *
from tensorflow.contrib.distributions.python.ops.vector_sinh_arcsinh_diag import *
from tensorflow.contrib.distributions.python.ops.wishart import *
from tensorflow.python.ops.distributions.bernoulli import *
from tensorflow.python.ops.distributions.beta import *
from tensorflow.python.ops.distributions.categorical import *
from tensorflow.python.ops.distributions.dirichlet import *
from tensorflow.python.ops.distributions.dirichlet_multinomial import *
from tensorflow.python.ops.distributions.distribution import *
from tensorflow.python.ops.distributions.exponential import *
from tensorflow.python.ops.distributions.gamma import *
from tensorflow.python.ops.distributions.kullback_leibler import *
from tensorflow.python.ops.distributions.laplace import *
from tensorflow.python.ops.distributions.multinomial import *
from tensorflow.python.ops.distributions.normal import *
from tensorflow.python.ops.distributions.student_t import *
from tensorflow.python.ops.distributions.transformed_distribution import *
from tensorflow.python.ops.distributions.uniform import *
# pylint: enable=unused-import,wildcard-import,line-too-long,g-importing-member
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'auto_correlation',
'bijectors',
'Cauchy',
'ConditionalDistribution',
'ConditionalTransformedDistribution',
'FULLY_REPARAMETERIZED',
'NOT_REPARAMETERIZED',
'ReparameterizationType',
'Distribution',
'Autoregressive',
'BatchReshape',
'Bernoulli',
'Beta',
'Binomial',
'BetaWithSoftplusConcentration',
'Categorical',
'Chi2',
'Chi2WithAbsDf',
'Deterministic',
'VectorDeterministic',
'Exponential',
'ExponentialWithSoftplusRate',
'VectorExponentialDiag',
'Gamma',
'GammaWithSoftplusConcentrationRate',
'Geometric',
'HalfNormal',
'Independent',
'InverseGamma',
'InverseGammaWithSoftplusConcentrationRate',
'Kumaraswamy',
'Laplace',
'LaplaceWithSoftplusScale',
'Logistic',
'NegativeBinomial',
'Normal',
'NormalWithSoftplusScale',
'Poisson',
'PoissonLogNormalQuadratureCompound',
'SeedStream',
'SinhArcsinh',
'StudentT',
'StudentTWithAbsDfSoftplusScale',
'Uniform',
'MultivariateNormalDiag',
'MultivariateNormalFullCovariance',
'MultivariateNormalTriL',
'MultivariateNormalDiagPlusLowRank',
'MultivariateNormalDiagWithSoftplusScale',
'Dirichlet',
'DirichletMultinomial',
'Multinomial',
'VectorDiffeomixture',
'VectorLaplaceDiag',
'VectorSinhArcsinhDiag',
'WishartCholesky',
'WishartFull',
'TransformedDistribution',
'QuantizedDistribution',
'Mixture',
'MixtureSameFamily',
'ExpRelaxedOneHotCategorical',
'OneHotCategorical',
'RelaxedBernoulli',
'RelaxedOneHotCategorical',
'kl_divergence',
'RegisterKL',
'fill_triangular',
'fill_triangular_inverse',
'matrix_diag_transform',
'reduce_weighted_logsumexp',
'softplus_inverse',
'tridiag',
'normal_conjugates_known_scale_posterior',
'normal_conjugates_known_scale_predictive',
'percentile',
'assign_moving_mean_variance',
'assign_log_moving_mean_exp',
'moving_mean_variance',
'estimator_head_distribution_regression',
'quadrature_scheme_softmaxnormal_gauss_hermite',
'quadrature_scheme_softmaxnormal_quantiles',
'quadrature_scheme_lognormal_gauss_hermite',
'quadrature_scheme_lognormal_quantiles',
]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/distributions/__init__.py
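The symbols re-exported above are used like any other `Distribution`. The short sketch below (TF 1.x graph mode, made-up parameters) exercises a couple of them, including the analytic `kl_divergence` registration for two Normals; it is an illustration, not code from this package.

import tensorflow as tf
tfd = tf.contrib.distributions  # deprecated namespace re-exported above

p = tfd.Normal(loc=0., scale=1.)
q = tfd.Normal(loc=1., scale=2.)
kl = tfd.kl_divergence(p, q)  # closed-form KL(p || q) for two Normals

mixture = tfd.Mixture(cat=tfd.Categorical(probs=[0.3, 0.7]),
                      components=[p, q])
samples = mixture.sample(5)

with tf.Session() as sess:
  print(sess.run([kl, samples]))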
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ops module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/distributions/python/__init__.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Relaxed One-Hot Categorical distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy.special import gamma
from tensorflow.contrib.distributions.python.ops import relaxed_onehot_categorical
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
def make_relaxed_categorical(batch_shape, num_classes, dtype=dtypes.float32):
logits = random_ops.random_uniform(
list(batch_shape) + [num_classes], -10, 10, dtype=dtype) - 50.
temperatures = random_ops.random_uniform(
list(batch_shape), 0.1, 10, dtype=dtypes.float32)
return relaxed_onehot_categorical.RelaxedOneHotCategorical(
temperatures, logits, dtype=dtype)
class ExpRelaxedOneHotCategoricalTest(test.TestCase):
def testP(self):
temperature = 1.0
logits = [2.0, 3.0, -4.0]
dist = relaxed_onehot_categorical.ExpRelaxedOneHotCategorical(temperature,
logits)
expected_p = np.exp(logits)/np.sum(np.exp(logits))
with self.cached_session():
self.assertAllClose(expected_p, dist.probs.eval())
self.assertAllEqual([3], dist.probs.get_shape())
def testPdf(self):
temperature = .4
logits = [.3, .1, .4]
k = len(logits)
p = np.exp(logits)/np.sum(np.exp(logits))
dist = relaxed_onehot_categorical.ExpRelaxedOneHotCategorical(temperature,
logits)
with self.cached_session():
x = dist.sample().eval()
# analytical ExpConcrete density presented in Maddison et al. 2016
prod_term = p*np.exp(-temperature * x)
expected_pdf = (gamma(k) * np.power(temperature, k-1) *
np.prod(prod_term/np.sum(prod_term)))
pdf = dist.prob(x).eval()
self.assertAllClose(expected_pdf, pdf)
class RelaxedOneHotCategoricalTest(test.TestCase):
def testLogits(self):
temperature = 1.0
logits = [2.0, 3.0, -4.0]
dist = relaxed_onehot_categorical.RelaxedOneHotCategorical(temperature,
logits)
with self.cached_session():
# check p for ExpRelaxed base distribution
self.assertAllClose(logits, dist._distribution.logits.eval())
self.assertAllEqual([3], dist._distribution.logits.get_shape())
def testSample(self):
temperature = 1.4
with self.cached_session():
# single logit
logits = [.3, .1, .4]
dist = relaxed_onehot_categorical.RelaxedOneHotCategorical(temperature,
logits)
self.assertAllEqual([3], dist.sample().eval().shape)
self.assertAllEqual([5, 3], dist.sample(5).eval().shape)
# multiple distributions
logits = [[2.0, 3.0, -4.0], [.3, .1, .4]]
dist = relaxed_onehot_categorical.RelaxedOneHotCategorical(temperature,
logits)
self.assertAllEqual([2, 3], dist.sample().eval().shape)
self.assertAllEqual([5, 2, 3], dist.sample(5).eval().shape)
# multiple distributions
logits = np.random.uniform(size=(4, 1, 3)).astype(np.float32)
dist = relaxed_onehot_categorical.RelaxedOneHotCategorical(temperature,
logits)
self.assertAllEqual([4, 1, 3], dist.sample().eval().shape)
self.assertAllEqual([5, 4, 1, 3], dist.sample(5).eval().shape)
def testPdf(self):
def analytical_pdf(x, temperature, logits):
# analytical density of RelaxedOneHotCategorical
temperature = np.reshape(temperature, (-1, 1))
if len(x.shape) == 1:
x = np.expand_dims(x, 0)
k = logits.shape[1]
p = np.exp(logits)/np.sum(np.exp(logits), axis=1, keepdims=True)
term1 = gamma(k)*np.power(temperature, k-1)
term2 = np.sum(p/(np.power(x, temperature)), axis=1, keepdims=True)
term3 = np.prod(p/(np.power(x, temperature+1)), axis=1, keepdims=True)
expected_pdf = term1*np.power(term2, -k)*term3
return expected_pdf
with self.cached_session():
temperature = .4
logits = np.array([[.3, .1, .4]]).astype(np.float32)
dist = relaxed_onehot_categorical.RelaxedOneHotCategorical(temperature,
logits)
x = dist.sample().eval()
pdf = dist.prob(x).eval()
expected_pdf = analytical_pdf(x, temperature, logits)
self.assertAllClose(expected_pdf.flatten(), pdf, rtol=1e-4)
# variable batch size
logits = np.array([[.3, .1, .4], [.6, -.1, 2.]]).astype(np.float32)
temperatures = np.array([0.4, 2.3]).astype(np.float32)
dist = relaxed_onehot_categorical.RelaxedOneHotCategorical(temperatures,
logits)
x = dist.sample().eval()
pdf = dist.prob(x).eval()
expected_pdf = analytical_pdf(x, temperatures, logits)
self.assertAllClose(expected_pdf.flatten(), pdf, rtol=1e-4)
def testShapes(self):
with self.cached_session():
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_relaxed_categorical(batch_shape, 10)
self.assertAllEqual(batch_shape, dist.batch_shape.as_list())
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual([10], dist.event_shape_tensor().eval())
self.assertAllEqual([10], dist.event_shape_tensor().eval())
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_relaxed_categorical(
batch_shape, constant_op.constant(10, dtype=dtypes.int32))
self.assertAllEqual(len(batch_shape), dist.batch_shape.ndims)
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual([10], dist.event_shape_tensor().eval())
self.assertAllEqual([10], dist.event_shape_tensor().eval())
def testUnknownShape(self):
with self.cached_session():
logits_pl = array_ops.placeholder(dtypes.float32)
temperature = 1.0
dist = relaxed_onehot_categorical.ExpRelaxedOneHotCategorical(temperature,
logits_pl)
with self.cached_session():
feed_dict = {logits_pl: [.3, .1, .4]}
self.assertAllEqual([3], dist.sample().eval(feed_dict=feed_dict).shape)
self.assertAllEqual([5, 3],
dist.sample(5).eval(feed_dict=feed_dict).shape)
def testDTypes(self):
# check that sampling and log_prob work for a range of dtypes
with self.cached_session():
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
logits = random_ops.random_uniform(shape=[3, 3], dtype=dtype)
dist = relaxed_onehot_categorical.RelaxedOneHotCategorical(
temperature=0.5, logits=logits)
dist.log_prob(dist.sample())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/relaxed_onehot_categorical_test.py
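For reference, the closed-form ExpConcrete density that `testPdf` checks against (Maddison et al. 2016) can be restated as a standalone NumPy helper; this is only a restatement of the arithmetic already inside the test.

import numpy as np
from scipy.special import gamma

def exp_concrete_pdf(x, temperature, logits):
  """Density of ExpRelaxedOneHotCategorical at a single sample x."""
  k = len(logits)
  p = np.exp(logits) / np.sum(np.exp(logits))
  prod_term = p * np.exp(-temperature * np.asarray(x))
  return (gamma(k) * np.power(temperature, k - 1) *
          np.prod(prod_term / np.sum(prod_term)))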
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib import distributions as distributions_lib
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
distributions = distributions_lib
class NormalTest(test.TestCase):
def testNormalConjugateKnownSigmaPosterior(self):
with session.Session():
mu0 = constant_op.constant([3.0])
sigma0 = constant_op.constant([math.sqrt(10.0)])
sigma = constant_op.constant([math.sqrt(2.0)])
x = constant_op.constant([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0])
s = math_ops.reduce_sum(x)
n = array_ops.size(x)
prior = distributions.Normal(loc=mu0, scale=sigma0)
posterior = distributions.normal_conjugates_known_scale_posterior(
prior=prior, scale=sigma, s=s, n=n)
# Smoke test
self.assertTrue(isinstance(posterior, distributions.Normal))
posterior_log_pdf = posterior.log_prob(x).eval()
self.assertEqual(posterior_log_pdf.shape, (6,))
def testNormalConjugateKnownSigmaPosteriorND(self):
with session.Session():
batch_size = 6
mu0 = constant_op.constant([[3.0, -3.0]] * batch_size)
sigma0 = constant_op.constant([[math.sqrt(10.0), math.sqrt(15.0)]] *
batch_size)
sigma = constant_op.constant([[math.sqrt(2.0)]] * batch_size)
x = array_ops.transpose(
constant_op.constant(
[[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=dtypes.float32))
s = math_ops.reduce_sum(x)
n = array_ops.size(x)
prior = distributions.Normal(loc=mu0, scale=sigma0)
posterior = distributions.normal_conjugates_known_scale_posterior(
prior=prior, scale=sigma, s=s, n=n)
# Smoke test
self.assertTrue(isinstance(posterior, distributions.Normal))
posterior_log_pdf = posterior.log_prob(x).eval()
self.assertEqual(posterior_log_pdf.shape, (6, 2))
def testNormalConjugateKnownSigmaNDPosteriorND(self):
with session.Session():
batch_size = 6
mu0 = constant_op.constant([[3.0, -3.0]] * batch_size)
sigma0 = constant_op.constant([[math.sqrt(10.0), math.sqrt(15.0)]] *
batch_size)
sigma = constant_op.constant([[math.sqrt(2.0), math.sqrt(4.0)]] *
batch_size)
x = constant_op.constant(
[[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], [2.5, -2.5, -4.0, 0.0, 1.0, -2.0]],
dtype=dtypes.float32)
s = math_ops.reduce_sum(x, axis=[1])
x = array_ops.transpose(x) # Reshape to shape (6, 2)
n = constant_op.constant([6] * 2)
prior = distributions.Normal(loc=mu0, scale=sigma0)
posterior = distributions.normal_conjugates_known_scale_posterior(
prior=prior, scale=sigma, s=s, n=n)
# Smoke test
self.assertTrue(isinstance(posterior, distributions.Normal))
# Calculate log_pdf under the 2 models
posterior_log_pdf = posterior.log_prob(x)
self.assertEqual(posterior_log_pdf.get_shape(), (6, 2))
self.assertEqual(posterior_log_pdf.eval().shape, (6, 2))
def testNormalConjugateKnownSigmaPredictive(self):
with session.Session():
batch_size = 6
mu0 = constant_op.constant([3.0] * batch_size)
sigma0 = constant_op.constant([math.sqrt(10.0)] * batch_size)
sigma = constant_op.constant([math.sqrt(2.0)] * batch_size)
x = constant_op.constant([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0])
s = math_ops.reduce_sum(x)
n = array_ops.size(x)
prior = distributions.Normal(loc=mu0, scale=sigma0)
predictive = distributions.normal_conjugates_known_scale_predictive(
prior=prior, scale=sigma, s=s, n=n)
# Smoke test
self.assertTrue(isinstance(predictive, distributions.Normal))
predictive_log_pdf = predictive.log_prob(x).eval()
self.assertEqual(predictive_log_pdf.shape, (6,))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/normal_conjugate_posteriors_test.py
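The assertions above are smoke tests, so for context here is the textbook result they rely on: with a Normal(mu0, sigma0) prior on the mean and n observations of known scale sigma summing to s, the posterior over the mean is again Normal with a precision-weighted update. The NumPy helper below states that update; it is background math, not code from the module under test.

import numpy as np

def known_scale_posterior(mu0, sigma0, sigma, s, n):
  """Posterior (mean, scale) for a Normal mean with known observation scale."""
  prior_precision = 1.0 / sigma0**2
  likelihood_precision = n / sigma**2
  post_var = 1.0 / (prior_precision + likelihood_precision)
  post_mean = post_var * (mu0 * prior_precision + s / sigma**2)
  return post_mean, np.sqrt(post_var)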
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ShapeUtil."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.shape import _DistributionShape
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
_empty_shape = np.array([], dtype=np.int32)
def _eval(x):
if hasattr(x, "__iter__"):
return [x.eval() for x in x]
return x.eval()
def _constant(x):
if hasattr(x, "__iter__"):
return [tensor_util.constant_value(x) for x in x]
return tensor_util.constant_value(x)
class MakeBatchReadyTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
def _random_sample(self, sample_shape, dtype=np.float32):
return self._rng.random_sample(sample_shape).astype(dtype)
def _get_expected(self, x, batch_ndims, event_ndims, expand_batch_dim):
# Cast as int32 array explicitly, since an empty x.shape defaults
# to float64, and we can't index as float64 in numpy 1.12+.
x_shape = np.array(x.shape, dtype=np.int32)
n = x.ndim - batch_ndims - event_ndims
sample_shape = x_shape[:n]
y = np.reshape(x, np.concatenate([[-1], x_shape[n:]], 0))
y = np.transpose(y, np.roll(np.arange(y.ndim), -1))
if event_ndims == 0:
y = y[..., np.newaxis, :]
if batch_ndims == 0 and expand_batch_dim:
y = y[np.newaxis, ...]
return y, sample_shape
def _build_graph(self, x, batch_ndims, event_ndims, expand_batch_dim):
shaper = _DistributionShape(batch_ndims=batch_ndims,
event_ndims=event_ndims)
y, sample_shape = shaper.make_batch_of_event_sample_matrices(
x, expand_batch_dim=expand_batch_dim)
should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
y, sample_shape, expand_batch_dim=expand_batch_dim)
return y, sample_shape, should_be_x_value
def _test_dynamic(self, x, batch_ndims, event_ndims, expand_batch_dim=True):
with self.cached_session() as sess:
x_pl = array_ops.placeholder(x.dtype)
batch_ndims_pl = array_ops.placeholder(dtypes.int32)
event_ndims_pl = array_ops.placeholder(dtypes.int32)
[y_, sample_shape_, should_be_x_value_] = sess.run(
self._build_graph(
x_pl, batch_ndims_pl, event_ndims_pl, expand_batch_dim),
feed_dict={
x_pl: x,
batch_ndims_pl: batch_ndims,
event_ndims_pl: event_ndims})
expected_y, expected_sample_shape = self._get_expected(
x, batch_ndims, event_ndims, expand_batch_dim)
self.assertAllEqual(expected_sample_shape, sample_shape_)
self.assertAllEqual(expected_y, y_)
self.assertAllEqual(x, should_be_x_value_)
def _test_static(self, x, batch_ndims, event_ndims, expand_batch_dim):
with self.cached_session() as sess:
[y_, sample_shape_, should_be_x_value_] = sess.run(
self._build_graph(x, batch_ndims, event_ndims, expand_batch_dim))
expected_y, expected_sample_shape = self._get_expected(
x, batch_ndims, event_ndims, expand_batch_dim)
self.assertAllEqual(expected_sample_shape, sample_shape_)
self.assertAllEqual(expected_y, y_)
self.assertAllEqual(x, should_be_x_value_)
# Group 1a: Static scalar input.
def testStaticScalarNdims00ExpandNo(self):
self._test_static(x=self._random_sample([]),
batch_ndims=0,
event_ndims=0,
expand_batch_dim=False)
def testStaticScalarNdims00ExpandYes(self):
self._test_static(x=self._random_sample([]),
batch_ndims=0,
event_ndims=0,
expand_batch_dim=True)
def testStaticScalarNdims01ExpandNo(self):
with self.assertRaises(ValueError):
self._test_static(x=self._random_sample([]),
batch_ndims=0,
event_ndims=1,
expand_batch_dim=False)
def testStaticScalarNdims01ExpandYes(self):
with self.assertRaises(ValueError):
self._test_static(x=self._random_sample([]),
batch_ndims=0,
event_ndims=1,
expand_batch_dim=True)
def testStaticScalarNdims10ExpandNo(self):
with self.assertRaises(ValueError):
self._test_static(x=self._random_sample([]),
batch_ndims=1,
event_ndims=0,
expand_batch_dim=False)
def testStaticScalarNdims10ExpandYes(self):
with self.assertRaises(ValueError):
self._test_static(x=self._random_sample([]),
batch_ndims=1,
event_ndims=0,
expand_batch_dim=True)
def testStaticScalarNdims11ExpandNo(self):
with self.assertRaises(ValueError):
self._test_static(x=self._random_sample([]),
batch_ndims=1,
event_ndims=1,
expand_batch_dim=False)
def testStaticScalarNdims11ExpandYes(self):
with self.assertRaises(ValueError):
self._test_static(x=self._random_sample([]),
batch_ndims=1,
event_ndims=1,
expand_batch_dim=True)
# Group 1b: Dynamic scalar input.
def testDynamicScalar3Ndims00ExpandNo(self):
self._test_dynamic(x=self._random_sample([]),
batch_ndims=0,
event_ndims=0,
expand_batch_dim=False)
def testDynamicScalar3Ndims00ExpandYes(self):
self._test_dynamic(x=self._random_sample([]),
batch_ndims=0,
event_ndims=0,
expand_batch_dim=True)
def testDynamicScalarNdims01ExpandNo(self):
with self.assertRaisesOpError(""):
self._test_dynamic(x=self._random_sample([]),
batch_ndims=0,
event_ndims=1,
expand_batch_dim=False)
def testDynamicScalarNdims01ExpandYes(self):
with self.assertRaisesOpError(""):
self._test_dynamic(x=self._random_sample([]),
batch_ndims=0,
event_ndims=1,
expand_batch_dim=True)
def testDynamicScalarNdims10ExpandNo(self):
with self.assertRaisesOpError(""):
self._test_dynamic(x=self._random_sample([]),
batch_ndims=1,
event_ndims=0,
expand_batch_dim=False)
def testDynamicScalarNdims10ExpandYes(self):
with self.assertRaisesOpError(""):
self._test_dynamic(x=self._random_sample([]),
batch_ndims=1,
event_ndims=0,
expand_batch_dim=True)
def testDynamicScalarNdims11ExpandNo(self):
with self.assertRaisesOpError(""):
self._test_dynamic(x=self._random_sample([]),
batch_ndims=1,
event_ndims=1,
expand_batch_dim=False)
def testDynamicScalarNdims11ExpandYes(self):
with self.assertRaisesOpError(""):
self._test_dynamic(x=self._random_sample([]),
batch_ndims=1,
event_ndims=1,
expand_batch_dim=True)
# Group 2a: Static vector input.
def testStaticVectorNdims00ExpandNo(self):
self._test_static(x=self._random_sample([3]),
batch_ndims=0,
event_ndims=0,
expand_batch_dim=False)
def testStaticVectorNdims00ExpandYes(self):
self._test_static(x=self._random_sample([3]),
batch_ndims=0,
event_ndims=0,
expand_batch_dim=True)
def testStaticVectorNdims01ExpandNo(self):
self._test_static(x=self._random_sample([3]),
batch_ndims=0,
event_ndims=1,
expand_batch_dim=False)
def testStaticVectorNdims01ExpandYes(self):
self._test_static(x=self._random_sample([3]),
batch_ndims=0,
event_ndims=1,
expand_batch_dim=True)
def testStaticVectorNdims10ExpandNo(self):
self._test_static(x=self._random_sample([3]),
batch_ndims=1,
event_ndims=0,
expand_batch_dim=False)
def testStaticVectorNdims10ExpandYes(self):
self._test_static(x=self._random_sample([3]),
batch_ndims=1,
event_ndims=0,
expand_batch_dim=True)
def testStaticVectorNdims11ExpandNo(self):
with self.assertRaises(ValueError):
self._test_static(x=self._random_sample([3]),
batch_ndims=1,
event_ndims=1,
expand_batch_dim=False)
def testStaticVectorNdims11ExpandYes(self):
with self.assertRaises(ValueError):
self._test_static(x=self._random_sample([3]),
batch_ndims=1,
event_ndims=1,
expand_batch_dim=True)
# Group 2b: Dynamic vector input.
def testDynamicVectorNdims00ExpandNo(self):
self._test_dynamic(x=self._random_sample([3]),
batch_ndims=0,
event_ndims=0,
expand_batch_dim=False)
def testDynamicVectorNdims00ExpandYes(self):
self._test_dynamic(x=self._random_sample([3]),
batch_ndims=0,
event_ndims=0,
expand_batch_dim=True)
def testDynamicVectorNdims01ExpandNo(self):
self._test_dynamic(x=self._random_sample([3]),
batch_ndims=0,
event_ndims=1,
expand_batch_dim=False)
def testDynamicVectorNdims01ExpandYes(self):
self._test_dynamic(x=self._random_sample([3]),
batch_ndims=0,
event_ndims=1,
expand_batch_dim=True)
def testDynamicVectorNdims10ExpandNo(self):
self._test_dynamic(x=self._random_sample([3]),
batch_ndims=1,
event_ndims=0,
expand_batch_dim=False)
def testDynamicVectorNdims10ExpandYes(self):
self._test_dynamic(x=self._random_sample([3]),
batch_ndims=1,
event_ndims=0,
expand_batch_dim=True)
def testDynamicVectorNdims11ExpandNo(self):
with self.assertRaisesOpError(""):
self._test_dynamic(x=self._random_sample([3]),
batch_ndims=1,
event_ndims=1,
expand_batch_dim=False)
def testDynamicVectorNdims11ExpandYes(self):
with self.assertRaisesOpError(""):
self._test_dynamic(x=self._random_sample([3]),
batch_ndims=1,
event_ndims=1,
expand_batch_dim=True)
# Group 3a: Static matrix input.
def testStaticMatrixNdims00ExpandNo(self):
self._test_static(x=self._random_sample([2, 3]),
batch_ndims=0,
event_ndims=0,
expand_batch_dim=False)
def testStaticMatrixNdims00ExpandYes(self):
self._test_static(x=self._random_sample([2, 3]),
batch_ndims=0,
event_ndims=0,
expand_batch_dim=True)
def testStaticMatrixNdims01ExpandNo(self):
self._test_static(x=self._random_sample([2, 3]),
batch_ndims=0,
event_ndims=1,
expand_batch_dim=False)
def testStaticMatrixNdims01ExpandYes(self):
self._test_static(x=self._random_sample([2, 3]),
batch_ndims=0,
event_ndims=1,
expand_batch_dim=True)
def testStaticMatrixNdims10ExpandNo(self):
self._test_static(x=self._random_sample([2, 3]),
batch_ndims=1,
event_ndims=0,
expand_batch_dim=False)
def testStaticMatrixNdims10ExpandYes(self):
self._test_static(x=self._random_sample([2, 3]),
batch_ndims=1,
event_ndims=0,
expand_batch_dim=True)
def testStaticMatrixNdims11ExpandNo(self):
self._test_static(x=self._random_sample([2, 3]),
batch_ndims=1,
event_ndims=1,
expand_batch_dim=False)
def testStaticMatrixNdims11ExpandYes(self):
self._test_static(x=self._random_sample([2, 3]),
batch_ndims=1,
event_ndims=1,
expand_batch_dim=True)
# Group 3b: Dynamic matrix input.
def testDynamicMatrixNdims00ExpandNo(self):
self._test_dynamic(x=self._random_sample([2, 3]),
batch_ndims=0,
event_ndims=0,
expand_batch_dim=False)
def testDynamicMatrixNdims00ExpandYes(self):
self._test_dynamic(x=self._random_sample([2, 3]),
batch_ndims=0,
event_ndims=0,
expand_batch_dim=True)
def testDynamicMatrixNdims01ExpandNo(self):
self._test_dynamic(x=self._random_sample([2, 3]),
batch_ndims=0,
event_ndims=1,
expand_batch_dim=False)
def testDynamicMatrixNdims01ExpandYes(self):
self._test_dynamic(x=self._random_sample([2, 3]),
batch_ndims=0,
event_ndims=1,
expand_batch_dim=True)
def testDynamicMatrixNdims10ExpandNo(self):
self._test_dynamic(x=self._random_sample([2, 3]),
batch_ndims=1,
event_ndims=0,
expand_batch_dim=False)
def testDynamicMatrixNdims10ExpandYes(self):
self._test_dynamic(x=self._random_sample([2, 3]),
batch_ndims=1,
event_ndims=0,
expand_batch_dim=True)
def testDynamicMatrixNdims11ExpandNo(self):
self._test_dynamic(x=self._random_sample([2, 3]),
batch_ndims=1,
event_ndims=1,
expand_batch_dim=False)
def testDynamicMatrixNdims11ExpandYes(self):
self._test_dynamic(x=self._random_sample([2, 3]),
batch_ndims=1,
event_ndims=1,
expand_batch_dim=True)
# Group 4a: Static tensor input.
def testStaticTensorNdims00ExpandNo(self):
self._test_static(x=self._random_sample([4, 1, 2, 3]),
batch_ndims=0,
event_ndims=0,
expand_batch_dim=False)
def testStaticTensorNdims00ExpandYes(self):
self._test_static(x=self._random_sample([4, 1, 2, 3]),
batch_ndims=0,
event_ndims=0,
expand_batch_dim=True)
def testStaticTensorNdims01ExpandNo(self):
self._test_static(x=self._random_sample([4, 1, 2, 3]),
batch_ndims=0,
event_ndims=1,
expand_batch_dim=False)
def testStaticTensorNdims01ExpandYes(self):
self._test_static(x=self._random_sample([4, 1, 2, 3]),
batch_ndims=0,
event_ndims=1,
expand_batch_dim=True)
def testStaticTensorNdims10ExpandNo(self):
self._test_static(x=self._random_sample([4, 1, 2, 3]),
batch_ndims=1,
event_ndims=0,
expand_batch_dim=False)
def testStaticTensorNdims10ExpandYes(self):
self._test_static(x=self._random_sample([4, 1, 2, 3]),
batch_ndims=1,
event_ndims=0,
expand_batch_dim=True)
def testStaticTensorNdims11ExpandNo(self):
self._test_static(x=self._random_sample([4, 1, 2, 3]),
batch_ndims=1,
event_ndims=1,
expand_batch_dim=False)
def testStaticTensorNdims11ExpandYes(self):
self._test_static(x=self._random_sample([4, 1, 2, 3]),
batch_ndims=1,
event_ndims=1,
expand_batch_dim=True)
# Group 4b: Dynamic tensor input.
def testDynamicTensorNdims00ExpandNo(self):
self._test_dynamic(x=self._random_sample([4, 1, 2, 3]),
batch_ndims=0,
event_ndims=0,
expand_batch_dim=False)
def testDynamicTensorNdims00ExpandYes(self):
self._test_dynamic(x=self._random_sample([4, 1, 2, 3]),
batch_ndims=0,
event_ndims=0,
expand_batch_dim=True)
def testDynamicTensorNdims01ExpandNo(self):
self._test_dynamic(x=self._random_sample([4, 1, 2, 3]),
batch_ndims=0,
event_ndims=1,
expand_batch_dim=False)
def testDynamicTensorNdims01ExpandYes(self):
self._test_dynamic(x=self._random_sample([4, 1, 2, 3]),
batch_ndims=0,
event_ndims=1,
expand_batch_dim=True)
def testDynamicTensorNdims10ExpandNo(self):
self._test_dynamic(x=self._random_sample([4, 1, 2, 3]),
batch_ndims=1,
event_ndims=0,
expand_batch_dim=False)
def testDynamicTensorNdims10ExpandYes(self):
self._test_dynamic(x=self._random_sample([4, 1, 2, 3]),
batch_ndims=1,
event_ndims=0,
expand_batch_dim=True)
def testDynamicTensorNdims11ExpandNo(self):
self._test_dynamic(x=self._random_sample([4, 1, 2, 3]),
batch_ndims=1,
event_ndims=1,
expand_batch_dim=False)
def testDynamicTensorNdims11ExpandYes(self):
self._test_dynamic(x=self._random_sample([4, 1, 2, 3]),
batch_ndims=1,
event_ndims=1,
expand_batch_dim=True)
class DistributionShapeTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
def _random_sample(self, sample_shape, dtype=dtypes.float64):
return self._rng.random_sample(sample_shape).astype(dtype.as_numpy_dtype())
def _assertNdArrayEqual(self, expected, actual):
"""Helper which properly compares two np.ndarray-like objects.
This function checks for exact equality so is probably only suitable for
integers or powers of 2.
Args:
expected: np.ndarray. Ground-truth value.
actual: np.ndarray. Observed value.
"""
expected = np.asarray(expected)
actual = np.asarray(actual)
self.assertEqual(expected.shape, actual.shape,
"Shape mismatch: expected %s, got %s." %
(expected.shape, actual.shape))
actual_item = actual.flat
for expected_item in expected.flat:
self.assertAllEqual(expected_item, next(actual_item))
def testDistributionShapeGetNdimsStatic(self):
with self.cached_session():
shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
x = 1
self.assertEqual(0, shaper.get_sample_ndims(x).eval())
self.assertEqual(0, shaper.batch_ndims.eval())
self.assertEqual(0, shaper.event_ndims.eval())
shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
x = self._random_sample((1, 2, 3))
self.assertAllEqual(3, shaper.get_ndims(x).eval())
self.assertEqual(1, shaper.get_sample_ndims(x).eval())
self.assertEqual(1, shaper.batch_ndims.eval())
self.assertEqual(1, shaper.event_ndims.eval())
x += self._random_sample((1, 2, 3))
self.assertAllEqual(3, shaper.get_ndims(x).eval())
self.assertEqual(1, shaper.get_sample_ndims(x).eval())
self.assertEqual(1, shaper.batch_ndims.eval())
self.assertEqual(1, shaper.event_ndims.eval())
# Test ndims functions work, even despite unfed Tensors.
y = array_ops.placeholder(dtypes.float32, shape=(1024, None, 1024))
self.assertEqual(3, shaper.get_ndims(y).eval())
self.assertEqual(1, shaper.get_sample_ndims(y).eval())
self.assertEqual(1, shaper.batch_ndims.eval())
self.assertEqual(1, shaper.event_ndims.eval())
def testDistributionShapeGetNdimsDynamic(self):
with self.cached_session() as sess:
batch_ndims = array_ops.placeholder(dtypes.int32)
event_ndims = array_ops.placeholder(dtypes.int32)
shaper = _DistributionShape(
batch_ndims=batch_ndims, event_ndims=event_ndims)
y = array_ops.placeholder(dtypes.float32)
y_value = np.ones((4, 2), dtype=y.dtype.as_numpy_dtype())
feed_dict = {y: y_value, batch_ndims: 1, event_ndims: 1}
self.assertEqual(2, sess.run(shaper.get_ndims(y), feed_dict=feed_dict))
def testDistributionShapeGetDimsStatic(self):
with self.cached_session():
shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
x = 1
self.assertAllEqual((_empty_shape, _empty_shape, _empty_shape),
_constant(shaper.get_dims(x)))
shaper = _DistributionShape(batch_ndims=1, event_ndims=2)
x += self._random_sample((1, 1, 2, 2))
self._assertNdArrayEqual(([0], [1], [2, 3]),
_constant(shaper.get_dims(x)))
x += x
self._assertNdArrayEqual(([0], [1], [2, 3]),
_constant(shaper.get_dims(x)))
def testDistributionShapeGetDimsDynamic(self):
with self.cached_session() as sess:
# Works for static {batch,event}_ndims despite unfed input.
shaper = _DistributionShape(batch_ndims=1, event_ndims=2)
y = array_ops.placeholder(dtypes.float32, shape=(10, None, 5, 5))
self._assertNdArrayEqual([[0], [1], [2, 3]], _eval(shaper.get_dims(y)))
# Works for deferred {batch,event}_ndims.
batch_ndims = array_ops.placeholder(dtypes.int32)
event_ndims = array_ops.placeholder(dtypes.int32)
shaper = _DistributionShape(
batch_ndims=batch_ndims, event_ndims=event_ndims)
y = array_ops.placeholder(dtypes.float32)
y_value = self._random_sample((10, 3, 5, 5), dtype=y.dtype)
feed_dict = {y: y_value, batch_ndims: 1, event_ndims: 2}
self._assertNdArrayEqual(
([0], [1], [2, 3]), sess.run(shaper.get_dims(y), feed_dict=feed_dict))
def testDistributionShapeGetShapeStatic(self):
with self.cached_session():
shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
self.assertAllEqual((_empty_shape, _empty_shape, _empty_shape),
_constant(shaper.get_shape(1.)))
self._assertNdArrayEqual(([1], _empty_shape, _empty_shape),
_constant(shaper.get_shape(np.ones(1))))
self._assertNdArrayEqual(([2, 2], _empty_shape, _empty_shape),
_constant(shaper.get_shape(np.ones((2, 2)))))
self._assertNdArrayEqual(([3, 2, 1], _empty_shape, _empty_shape),
_constant(shaper.get_shape(np.ones((3, 2, 1)))))
shaper = _DistributionShape(batch_ndims=0, event_ndims=1)
with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
shaper.get_shape(1.)
self._assertNdArrayEqual((_empty_shape, _empty_shape, [1]),
_constant(shaper.get_shape(np.ones(1))))
self._assertNdArrayEqual(([2], _empty_shape, [2]),
_constant(shaper.get_shape(np.ones((2, 2)))))
self._assertNdArrayEqual(([3, 2], _empty_shape, [1]),
_constant(shaper.get_shape(np.ones((3, 2, 1)))))
shaper = _DistributionShape(batch_ndims=1, event_ndims=0)
with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
shaper.get_shape(1.)
self._assertNdArrayEqual((_empty_shape, [1], _empty_shape),
_constant(shaper.get_shape(np.ones(1))))
self._assertNdArrayEqual(([2], [2], _empty_shape),
_constant(shaper.get_shape(np.ones((2, 2)))))
self._assertNdArrayEqual(([3, 2], [1], _empty_shape),
_constant(shaper.get_shape(np.ones((3, 2, 1)))))
shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
shaper.get_shape(1.)
with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
shaper.get_shape(np.ones(1))
self._assertNdArrayEqual((_empty_shape, [2], [2]),
_constant(shaper.get_shape(np.ones((2, 2)))))
self._assertNdArrayEqual(([3], [2], [1]),
_constant(shaper.get_shape(np.ones((3, 2, 1)))))
def testDistributionShapeGetShapeDynamic(self):
with self.cached_session() as sess:
# Works for static ndims despite unknown static shape.
shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
y = array_ops.placeholder(dtypes.int32, shape=(None, None, 2))
y_value = np.ones((3, 4, 2), dtype=y.dtype.as_numpy_dtype())
self._assertNdArrayEqual(
([3], [4], [2]),
sess.run(shaper.get_shape(y), feed_dict={y: y_value}))
shaper = _DistributionShape(batch_ndims=0, event_ndims=1)
y = array_ops.placeholder(dtypes.int32, shape=(None, None))
y_value = np.ones((3, 2), dtype=y.dtype.as_numpy_dtype())
self._assertNdArrayEqual(
([3], _empty_shape, [2]),
sess.run(shaper.get_shape(y), feed_dict={y: y_value}))
# Works for deferred {batch,event}_ndims.
batch_ndims = array_ops.placeholder(dtypes.int32)
event_ndims = array_ops.placeholder(dtypes.int32)
shaper = _DistributionShape(
batch_ndims=batch_ndims, event_ndims=event_ndims)
y = array_ops.placeholder(dtypes.float32)
y_value = self._random_sample((3, 4, 2), dtype=y.dtype)
feed_dict = {y: y_value, batch_ndims: 1, event_ndims: 1}
self._assertNdArrayEqual(
([3], [4], [2]), sess.run(shaper.get_shape(y), feed_dict=feed_dict))
y_value = self._random_sample((3, 2), dtype=y.dtype)
feed_dict = {y: y_value, batch_ndims: 0, event_ndims: 1}
self._assertNdArrayEqual(
([3], _empty_shape, [2]),
sess.run(shaper.get_shape(y), feed_dict=feed_dict))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/shape_test.py
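A minimal usage sketch of the private `_DistributionShape` helper these tests exercise, assuming TF 1.x graph mode: it partitions a tensor's dimensions into (sample, batch, event) shapes, matching the static cases checked in `testDistributionShapeGetShapeStatic`.

import numpy as np
import tensorflow as tf
from tensorflow.contrib.distributions.python.ops.shape import _DistributionShape

shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
x = np.ones((3, 2, 1), dtype=np.float32)  # sample=[3], batch=[2], event=[1]
sample_shape, batch_shape, event_shape = shaper.get_shape(x)
with tf.Session() as sess:
  print(sess.run([sample_shape, batch_shape, event_shape]))  # [3], [2], [1]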
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MixtureSameFamily distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import mixture_same_family as mixture_same_family_lib
from tensorflow.contrib.distributions.python.ops import mvn_diag as mvn_diag_lib
from tensorflow.contrib.distributions.python.ops import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bernoulli as bernoulli_lib
from tensorflow.python.ops.distributions import categorical as categorical_lib
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.platform import test
class MixtureSameFamilyTest(test_util.VectorDistributionTestHelpers,
test.TestCase):
def testSampleAndLogProbUnivariateShapes(self):
with self.cached_session():
gm = mixture_same_family_lib.MixtureSameFamily(
mixture_distribution=categorical_lib.Categorical(probs=[0.3, 0.7]),
components_distribution=normal_lib.Normal(
loc=[-1., 1], scale=[0.1, 0.5]))
x = gm.sample([4, 5], seed=42)
log_prob_x = gm.log_prob(x)
self.assertEqual([4, 5], x.shape)
self.assertEqual([4, 5], log_prob_x.shape)
def testSampleAndLogProbBatch(self):
with self.cached_session():
gm = mixture_same_family_lib.MixtureSameFamily(
mixture_distribution=categorical_lib.Categorical(probs=[[0.3, 0.7]]),
components_distribution=normal_lib.Normal(
loc=[[-1., 1]], scale=[[0.1, 0.5]]))
x = gm.sample([4, 5], seed=42)
log_prob_x = gm.log_prob(x)
self.assertEqual([4, 5, 1], x.shape)
self.assertEqual([4, 5, 1], log_prob_x.shape)
def testSampleAndLogProbShapesBroadcastMix(self):
mix_probs = np.float32([.3, .7])
bern_probs = np.float32([[.4, .6], [.25, .75]])
with self.cached_session():
bm = mixture_same_family_lib.MixtureSameFamily(
mixture_distribution=categorical_lib.Categorical(probs=mix_probs),
components_distribution=bernoulli_lib.Bernoulli(probs=bern_probs))
x = bm.sample([4, 5], seed=42)
log_prob_x = bm.log_prob(x)
x_ = x.eval()
self.assertEqual([4, 5, 2], x.shape)
self.assertEqual([4, 5, 2], log_prob_x.shape)
self.assertAllEqual(
          np.ones_like(x_, dtype=np.bool_), np.logical_or(x_ == 0., x_ == 1.))
def testSampleAndLogProbMultivariateShapes(self):
with self.cached_session():
gm = mixture_same_family_lib.MixtureSameFamily(
mixture_distribution=categorical_lib.Categorical(probs=[0.3, 0.7]),
components_distribution=mvn_diag_lib.MultivariateNormalDiag(
loc=[[-1., 1], [1, -1]], scale_identity_multiplier=[1., 0.5]))
x = gm.sample([4, 5], seed=42)
log_prob_x = gm.log_prob(x)
self.assertEqual([4, 5, 2], x.shape)
self.assertEqual([4, 5], log_prob_x.shape)
def testSampleAndLogProbBatchMultivariateShapes(self):
with self.cached_session():
gm = mixture_same_family_lib.MixtureSameFamily(
mixture_distribution=categorical_lib.Categorical(probs=[0.3, 0.7]),
components_distribution=mvn_diag_lib.MultivariateNormalDiag(
loc=[[[-1., 1],
[1, -1]],
[[0., 1],
[1, 0]]],
scale_identity_multiplier=[1., 0.5]))
x = gm.sample([4, 5], seed=42)
log_prob_x = gm.log_prob(x)
self.assertEqual([4, 5, 2, 2], x.shape)
self.assertEqual([4, 5, 2], log_prob_x.shape)
def testSampleConsistentLogProb(self):
with self.cached_session() as sess:
gm = mixture_same_family_lib.MixtureSameFamily(
mixture_distribution=categorical_lib.Categorical(probs=[0.3, 0.7]),
components_distribution=mvn_diag_lib.MultivariateNormalDiag(
loc=[[-1., 1], [1, -1]], scale_identity_multiplier=[1., 0.5]))
# Ball centered at component0's mean.
self.run_test_sample_consistent_log_prob(
sess.run, gm, radius=1., center=[-1., 1], rtol=0.02)
# Larger ball centered at component1's mean.
self.run_test_sample_consistent_log_prob(
sess.run, gm, radius=1., center=[1., -1], rtol=0.02)
def testLogCdf(self):
with self.cached_session() as sess:
gm = mixture_same_family_lib.MixtureSameFamily(
mixture_distribution=categorical_lib.Categorical(probs=[0.3, 0.7]),
components_distribution=normal_lib.Normal(
loc=[-1., 1], scale=[0.1, 0.5]))
x = gm.sample(10, seed=42)
actual_log_cdf = gm.log_cdf(x)
expected_log_cdf = math_ops.reduce_logsumexp(
(gm.mixture_distribution.logits +
gm.components_distribution.log_cdf(x[..., array_ops.newaxis])),
axis=1)
actual_log_cdf_, expected_log_cdf_ = sess.run([
actual_log_cdf, expected_log_cdf])
self.assertAllClose(actual_log_cdf_, expected_log_cdf_,
rtol=1e-6, atol=0.0)
def testSampleConsistentMeanCovariance(self):
with self.cached_session() as sess:
gm = mixture_same_family_lib.MixtureSameFamily(
mixture_distribution=categorical_lib.Categorical(probs=[0.3, 0.7]),
components_distribution=mvn_diag_lib.MultivariateNormalDiag(
loc=[[-1., 1], [1, -1]], scale_identity_multiplier=[1., 0.5]))
self.run_test_sample_consistent_mean_covariance(sess.run, gm)
def testVarianceConsistentCovariance(self):
with self.cached_session() as sess:
gm = mixture_same_family_lib.MixtureSameFamily(
mixture_distribution=categorical_lib.Categorical(probs=[0.3, 0.7]),
components_distribution=mvn_diag_lib.MultivariateNormalDiag(
loc=[[-1., 1], [1, -1]], scale_identity_multiplier=[1., 0.5]))
cov_, var_ = sess.run([gm.covariance(), gm.variance()])
self.assertAllClose(cov_.diagonal(), var_, atol=0.)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/mixture_same_family_test.py
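The consistency checks above lean on the basic mixture identity log p(x) = logsumexp_k(log pi_k + log p_k(x)). The NumPy sketch below restates it for the two-component Normal mixture used throughout these tests; it is illustrative only and uses SciPy rather than TensorFlow.

import numpy as np
from scipy.special import logsumexp
from scipy.stats import norm

def mixture_log_prob(x, mix_probs, locs, scales):
  """Log-density of a MixtureSameFamily of univariate Normals."""
  log_mix = np.log(mix_probs)                                    # [K]
  comp_lp = norm.logpdf(np.asarray(x)[..., None], locs, scales)  # [..., K]
  return logsumexp(log_mix + comp_lp, axis=-1)

print(mixture_log_prob([-1., 0., 1.], [0.3, 0.7], [-1., 1.], [0.1, 0.5]))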
|