python_code (stringlengths 0-679k) | repo_name (stringlengths 9-41) | file_path (stringlengths 6-149) |
---|---|---|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for variants of ops in tf.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/nn/python/ops/__init__.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops related to candidate sampling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
def _rank_resample(weights, biases, inputs, sampled_values, num_resampled,
resampling_temperature, partition_strategy):
"""A helper function for rank_sampled_softmax_loss.
This computes, for each i in `sampled_values`,
log(sum_j exp((w_i * x_j + b_i) / resampling_temperature))
where w_i, b_i are the weight and bias of the i-th class, respectively,
and j ranges over the rows of `inputs`. For efficiency, we rearrange the
computation to
log(sum_j exp(w_i * (x_j / resampling_temperature))) +
b_i / resampling_temperature.
This translates to the following batched computation using tensorflow ops:
reduce_logsumexp(matmul(embeddings,
transpose(inputs / resampling_temperature))) +
biases / resampling_temperature
The computation of the first term is colocated with the embeddings using
`transform_fn` in `embedding_ops._embedding_lookup_and_transform`. The second
term, not the bottleneck, is computed at the worker.
Args:
weights: From `rank_sampled_softmax_loss`.
biases: From `rank_sampled_softmax_loss`.
inputs: From `rank_sampled_softmax_loss`.
sampled_values: A tuple of (`sampled_candidates`, `true_expected_count`,
`sampled_expected_count`) returned by a `*_candidate_sampler` function.
num_resampled: An `int`. This many values are selected from
`sampled_values` using the adaptive resampling algorithm. The caller
must ensure that `num_resampled` is less than the size of
`sampled_values`.
resampling_temperature: A scalar `Tensor` with the temperature parameter
for the adaptive resampling algorithm.
partition_strategy: From `rank_sampled_softmax_loss`.
Returns:
A tuple of (`resampled_candidates`, `true_expected_count`,
`resampled_expected_count`), similar to `sampled_values` but sampled
down to `num_resampled` values.
"""
# This code supports passing a Tensor for num_resampled, but since it is only
# called with an int, that's what we specify in the arg list. If this
# function is ever externalized, we should change the doc to support Tensor.
sampled, true_expected_count, sampled_expected_count = sampled_values
sampled = math_ops.cast(array_ops.stop_gradient(sampled), dtypes.int64)
true_expected_count = array_ops.stop_gradient(true_expected_count)
sampled_expected_count = array_ops.stop_gradient(sampled_expected_count)
reweighted_inputs = inputs / resampling_temperature
def logsumexp_logit(embeddings):
return math_ops.reduce_logsumexp(
math_ops.matmul(embeddings, reweighted_inputs, transpose_b=True),
axis=1,
keepdims=False)
# Calling this protected form of embedding_lookup allows co-locating
# the logsumexp computation with the partitioned weights, which yields
# a large speedup in practice.
sampled_logits = embedding_ops._embedding_lookup_and_transform( # pylint: disable=protected-access
weights, sampled, partition_strategy, transform_fn=logsumexp_logit)
sampled_b = array_ops.reshape(
embedding_ops.embedding_lookup(biases, sampled, partition_strategy), [-1])
sampled_logits += sampled_b / resampling_temperature
_, resampled_indices = nn.top_k(sampled_logits, k=num_resampled, sorted=False)
resampled = array_ops.gather(sampled, indices=resampled_indices)
resampled_expected_count = array_ops.gather(
sampled_expected_count, indices=resampled_indices)
return resampled, true_expected_count, resampled_expected_count
def rank_sampled_softmax_loss(weights,
biases,
labels,
inputs,
num_sampled,
num_resampled,
num_classes,
num_true,
sampled_values,
resampling_temperature,
remove_accidental_hits,
partition_strategy,
name=None):
"""Computes softmax loss using rank-based adaptive resampling.
This has been shown to improve rank loss after training compared to
`tf.nn.sampled_softmax_loss`. For a description of the algorithm and some
experimental results, please see: [TAPAS: Two-pass Approximate Adaptive
Sampling for Softmax](https://arxiv.org/abs/1707.03073).
Sampling follows two phases:
* In the first phase, `num_sampled` classes are selected using
`tf.nn.learned_unigram_candidate_sampler` or supplied `sampled_values`.
    The logits are calculated on those sampled classes. This phase is
similar to `tf.nn.sampled_softmax_loss`.
* In the second phase, the `num_resampled` classes with highest predicted
probability are kept. Probabilities are
`LogSumExp(logits / resampling_temperature)`, where the sum is over
`inputs`.
The `resampling_temperature` parameter controls the "adaptiveness" of the
resampling. At lower temperatures, resampling is more adaptive because it
picks more candidates close to the predicted classes. A common strategy is
to decrease the temperature as training proceeds.
See `tf.nn.sampled_softmax_loss` for more documentation on sampling and
for typical default values for some of the parameters.
This operation is for training only. It is generally an underestimate of
the full softmax loss.
A common use case is to use this method for training, and calculate the full
softmax loss for evaluation or inference. In this case, you must set
`partition_strategy="div"` for the two losses to be consistent, as in the
following example:
```python
if mode == "train":
loss = rank_sampled_softmax_loss(
weights=weights,
biases=biases,
labels=labels,
inputs=inputs,
...,
partition_strategy="div")
elif mode == "eval":
logits = tf.matmul(inputs, tf.transpose(weights))
logits = tf.nn.bias_add(logits, biases)
labels_one_hot = tf.one_hot(labels, n_classes)
loss = tf.nn.softmax_cross_entropy_with_logits(
labels=labels_one_hot,
logits=logits)
```
Args:
weights: A `Tensor` or `PartitionedVariable` of shape `[num_classes, dim]`,
or a list of `Tensor` objects whose concatenation along dimension 0
has shape [num_classes, dim]. The (possibly-sharded) class embeddings.
biases: A `Tensor` or `PartitionedVariable` of shape `[num_classes]`.
The (possibly-sharded) class biases.
labels: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes. Note that this format differs from
the `labels` argument of `nn.softmax_cross_entropy_with_logits`.
inputs: A `Tensor` of shape `[batch_size, dim]`. The forward
activations of the input network.
num_sampled: An `int`. The number of classes to randomly sample per batch.
num_resampled: An `int`. The number of classes to select from the
`num_sampled` classes using the adaptive resampling algorithm. Must be
less than `num_sampled`.
num_classes: An `int`. The number of possible classes.
num_true: An `int`. The number of target classes per training example.
sampled_values: A tuple of (`sampled_candidates`, `true_expected_count`,
`sampled_expected_count`) returned by a `*_candidate_sampler` function.
If None, default to `nn.learned_unigram_candidate_sampler`.
resampling_temperature: A scalar `Tensor` with the temperature parameter
for the adaptive resampling algorithm.
remove_accidental_hits: A `bool`. Whether to remove "accidental hits"
where a sampled class equals one of the target classes.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
See `tf.nn.embedding_lookup` for more details.
name: A name for the operation (optional).
Returns:
A `batch_size` 1-D tensor of per-example sampled softmax losses.
Raises:
ValueError: If `num_sampled <= num_resampled`.
"""
if num_sampled > num_classes:
raise ValueError("num_sampled ({}) cannot be greater than num_classes ({})".
format(num_sampled, num_classes))
if num_sampled <= num_resampled:
raise ValueError("num_resampled ({}) must be less than num_sampled ({})".
format(num_resampled, num_sampled))
if partition_strategy not in ("div", "mod"):
raise ValueError(
"unsupported partition_strategy ({})".format(partition_strategy))
with ops.name_scope(name, "rank_sampled_softmax_loss", [
weights, biases, labels, inputs, sampled_values, resampling_temperature
]) as name:
if not sampled_values:
sampled_values = nn.learned_unigram_candidate_sampler(
true_classes=labels,
num_true=num_true,
num_sampled=num_sampled,
unique=True,
range_max=num_classes)
# From sampled_values, select the top num_resampled values using the
# adaptive rank resampling strategy.
resampled_values = _rank_resample(weights, biases, inputs, sampled_values,
num_resampled, resampling_temperature,
partition_strategy)
return nn.sampled_softmax_loss(
weights=weights,
biases=biases,
labels=labels,
inputs=inputs,
num_sampled=num_resampled,
num_classes=num_classes,
num_true=num_true,
sampled_values=resampled_values,
remove_accidental_hits=remove_accidental_hits,
partition_strategy=partition_strategy,
name=name)
def sampled_sparse_softmax_loss(weights,
biases,
labels,
inputs,
num_sampled,
num_classes,
sampled_values=None,
remove_accidental_hits=True,
partition_strategy="mod",
name="sampled_sparse_softmax_loss"):
"""Computes and returns the sampled sparse softmax training loss.
This is a faster way to train a softmax classifier over a huge number of
classes.
This operation is for training only. It is generally an underestimate of
the full softmax loss.
A common use case is to use this method for training, and calculate the full
softmax loss for evaluation or inference. In this case, you must set
`partition_strategy="div"` for the two losses to be consistent, as in the
following example:
```python
if mode == "train":
loss = tf.nn.sampled_sparse_softmax_loss(
weights=weights,
biases=biases,
labels=labels,
inputs=inputs,
...,
partition_strategy="div")
elif mode == "eval":
logits = tf.matmul(inputs, tf.transpose(weights))
logits = tf.nn.bias_add(logits, biases)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.squeeze(labels),
logits=logits)
```
See our [Candidate Sampling Algorithms Reference]
(https://www.tensorflow.org/extras/candidate_sampling.pdf)
Also see Section 3 of [Jean et al., 2014](http://arxiv.org/abs/1412.2007)
([pdf](http://arxiv.org/pdf/1412.2007.pdf)) for the math.
Args:
weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
objects whose concatenation along dimension 0 has shape
[num_classes, dim]. The (possibly-sharded) class embeddings.
biases: A `Tensor` of shape `[num_classes]`. The class biases.
labels: A `Tensor` of type `int64` and shape `[batch_size, 1]`.
The index of the single target class for each row of logits. Note that
this format differs from the `labels` argument of
`nn.sparse_softmax_cross_entropy_with_logits`.
inputs: A `Tensor` of shape `[batch_size, dim]`. The forward
activations of the input network.
num_sampled: An `int`. The number of classes to randomly sample per batch.
num_classes: An `int`. The number of possible classes.
sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
`sampled_expected_count`) returned by a `*_candidate_sampler` function.
(if None, we default to `log_uniform_candidate_sampler`)
    remove_accidental_hits: A `bool`. Whether to remove "accidental hits"
where a sampled class equals one of the target classes. Default is
True.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.
name: A name for the operation (optional).
Returns:
A `batch_size` 1-D tensor of per-example sampled softmax losses.
"""
logits, _ = nn_impl._compute_sampled_logits(
weights=weights,
biases=biases,
labels=labels,
inputs=inputs,
num_sampled=num_sampled,
num_classes=num_classes,
num_true=1,
sampled_values=sampled_values,
subtract_log_q=True,
remove_accidental_hits=remove_accidental_hits,
partition_strategy=partition_strategy,
name=name)
# There is only one true label. _compute_sampled_logits puts the true logit
# at index 0.
labels = array_ops.zeros([array_ops.shape(logits)[0], 1], dtype=dtypes.int64)
sampled_losses = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=array_ops.squeeze(labels), logits=logits)
# sampled_losses is a [batch_size] tensor.
return sampled_losses
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/nn/python/ops/sampling_ops.py |
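The helper `_rank_resample` above relies on factoring the temperature out of the per-class log-sum-exp so that only the inputs need rescaling. A minimal numpy sketch (hypothetical values) of that identity:

```python
import numpy as np

T = 0.5                              # resampling_temperature
w = np.array([0.3, -1.2, 0.7])       # weight row of one sampled class i
b = 0.1                              # bias of class i
x = np.random.randn(4, 3)            # a batch of 4 input activations

# log(sum_j exp((w . x_j + b) / T)) ...
lhs = np.log(np.sum(np.exp((np.dot(x, w) + b) / T)))
# ... equals logsumexp_j(w . (x_j / T)) + b / T, the form the helper computes.
rhs = np.log(np.sum(np.exp(np.dot(x / T, w)))) + b / T
assert np.isclose(lhs, rhs)
```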
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
def alpha_dropout(x, keep_prob, noise_shape=None, seed=None, name=None): # pylint: disable=invalid-name
"""Computes alpha dropout.
Alpha Dropout is a dropout that maintains the self-normalizing property. For
an input with zero mean and unit standard deviation, the output of
Alpha Dropout maintains the original mean and standard deviation of the input.
See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
Args:
x: A tensor.
keep_prob: A scalar `Tensor` with the same type as x. The probability
that each element is kept.
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
seed: A Python integer. Used to create random seeds. See
`tf.compat.v1.set_random_seed` for behavior.
name: A name for this operation (optional).
Returns:
A Tensor of the same shape of `x`.
Raises:
ValueError: If `keep_prob` is not in `(0, 1]`.
"""
with ops.name_scope(name, "alpha_dropout", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1.:
raise ValueError("keep_prob must be a scalar tensor or a float in the "
"range (0, 1], got %g" % keep_prob)
keep_prob = ops.convert_to_tensor(keep_prob,
dtype=x.dtype,
name="keep_prob")
keep_prob.get_shape().assert_has_rank(0)
# Do nothing if we know keep_prob == 1
if tensor_util.constant_value(keep_prob) == 1:
return x
alpha = -1.7580993408473766
noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
random_tensor = random_ops.random_uniform(noise_shape,
seed=seed,
dtype=x.dtype)
kept_idx = gen_math_ops.greater_equal(random_tensor, 1 - keep_prob)
kept_idx = math_ops.cast(kept_idx, x.dtype)
# Mask
x = x * kept_idx + alpha * (1 - kept_idx)
# Affine transformation parameters
a = (keep_prob + keep_prob * (1 - keep_prob) * alpha ** 2) ** -0.5
b = -a * alpha * (1 - keep_prob)
# Affine transformation
return a * x + b
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/nn/python/ops/alpha_dropout.py |
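As a rough check of the self-normalizing claim in the docstring above, the same masking and affine correction can be replayed in numpy on a large standard-normal sample; the output mean and standard deviation stay close to 0 and 1. A sketch with hypothetical values:

```python
import numpy as np

keep_prob = 0.9
alpha = -1.7580993408473766            # same constant as in alpha_dropout
x = np.random.randn(1000000)           # zero mean, unit variance input

kept = (np.random.rand(x.size) >= 1 - keep_prob).astype(x.dtype)
y = x * kept + alpha * (1 - kept)      # dropped units are set to alpha
a = (keep_prob + keep_prob * (1 - keep_prob) * alpha ** 2) ** -0.5
b = -a * alpha * (1 - keep_prob)
y = a * y + b                          # affine correction

print(y.mean(), y.std())               # approximately 0 and 1
```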
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops and modules related to input_pipeline.
@@obtain_next
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.input_pipeline.python.ops.input_pipeline_ops import obtain_next
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/input_pipeline/__init__.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for input_pipeline_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.input_pipeline.python.ops import input_pipeline_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class InputPipelineOpsTest(test.TestCase):
def testObtainNext(self):
with self.cached_session():
var = state_ops.variable_op([], dtypes.int64)
state_ops.assign(var, -1).op.run()
c = constant_op.constant(["a", "b"])
sample1 = input_pipeline_ops.obtain_next(c, var)
self.assertEqual(b"a", sample1.eval())
self.assertEqual(0, var.eval())
sample2 = input_pipeline_ops.obtain_next(c, var)
self.assertEqual(b"b", sample2.eval())
self.assertEqual(1, var.eval())
sample3 = input_pipeline_ops.obtain_next(c, var)
self.assertEqual(b"a", sample3.eval())
self.assertEqual(0, var.eval())
def testSeekNext(self):
string_list = ["a", "b", "c"]
with self.cached_session() as session:
elem = input_pipeline_ops.seek_next(string_list)
session.run([variables.global_variables_initializer()])
self.assertEqual(b"a", session.run(elem))
self.assertEqual(b"b", session.run(elem))
self.assertEqual(b"c", session.run(elem))
# Make sure we loop.
self.assertEqual(b"a", session.run(elem))
# Helper method that runs the op len(expected_list) number of times, asserts
# that the results are elements of the expected_list and then throws an
# OutOfRangeError.
def _assert_output(self, expected_list, session, op):
for element in expected_list:
self.assertEqual(element, session.run(op))
with self.assertRaises(errors.OutOfRangeError):
session.run(op)
def testSeekNextLimitEpochs(self):
string_list = ["a", "b", "c"]
with self.cached_session() as session:
elem = input_pipeline_ops.seek_next(string_list, num_epochs=1)
session.run([
variables.local_variables_initializer(),
variables.global_variables_initializer()
])
self._assert_output([b"a", b"b", b"c"], session, elem)
def testSeekNextLimitEpochsThree(self):
string_list = ["a", "b", "c"]
with self.cached_session() as session:
elem = input_pipeline_ops.seek_next(string_list, num_epochs=3)
session.run([
variables.local_variables_initializer(),
variables.global_variables_initializer()
])
# Expect to see [a, b, c] three times.
self._assert_output([b"a", b"b", b"c"] * 3, session, elem)
if __name__ == "__main__":
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/input_pipeline/python/ops/input_pipeline_ops_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for input_pipeline_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
from tensorflow.contrib.input_pipeline.ops import gen_input_pipeline_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import resource_loader
_input_pipeline_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_input_pipeline_ops.so"))
def obtain_next(string_list_tensor, counter):
"""Basic wrapper for the ObtainNextOp.
Args:
    string_list_tensor: A tensor that is a list of strings.
    counter: An int64 ref tensor to keep track of which element is returned.
Returns:
An op that produces the element at counter + 1 in the list, round
robin style.
"""
return gen_input_pipeline_ops.obtain_next(string_list_tensor, counter)
def _maybe_randomize_list(string_list, shuffle):
if shuffle:
random.shuffle(string_list)
return string_list
def _create_list(string_list, shuffle, seed, num_epochs):
if shuffle and seed:
random.seed(seed)
expanded_list = _maybe_randomize_list(string_list, shuffle)[:]
if num_epochs:
for _ in range(num_epochs - 1):
expanded_list.extend(_maybe_randomize_list(string_list, shuffle))
return expanded_list
def seek_next(string_list, shuffle=False, seed=None, num_epochs=None):
"""Returns an op that seeks the next element in a list of strings.
Seeking happens in a round robin fashion. This op creates a variable called
obtain_next_counter that is initialized to -1 and is used to keep track of
which element in the list was returned, and a variable
  obtain_next_expanded_list to hold the list. If num_epochs is not None, we
  limit the number of passes over string_list before an OutOfRangeError is
  raised, and an additional counter variable keeps track of this.
Args:
string_list: A list of strings.
shuffle: If true, we shuffle the string_list differently for each epoch.
seed: Seed used for shuffling.
num_epochs: Returns OutOfRangeError once string_list has been repeated
num_epoch times. If unspecified then keeps on looping.
Returns:
An op that produces the next element in the provided list.
"""
expanded_list = _create_list(string_list, shuffle, seed, num_epochs)
with variable_scope.variable_scope("obtain_next"):
counter = variable_scope.get_variable(
name="obtain_next_counter",
initializer=constant_op.constant(
-1, dtype=dtypes.int64),
dtype=dtypes.int64,
trainable=False)
with ops.colocate_with(counter):
string_tensor = variable_scope.get_variable(
name="obtain_next_expanded_list",
initializer=constant_op.constant(expanded_list),
dtype=dtypes.string,
trainable=False)
if num_epochs:
filename_counter = variable_scope.get_variable(
name="obtain_next_filename_counter",
initializer=constant_op.constant(
0, dtype=dtypes.int64),
dtype=dtypes.int64,
trainable=False)
c = filename_counter.count_up_to(len(expanded_list))
with ops.control_dependencies([c]):
return obtain_next(string_tensor, counter)
else:
return obtain_next(string_tensor, counter)
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/input_pipeline/python/ops/input_pipeline_ops.py |
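A usage sketch for `seek_next` in TF1 graph mode, mirroring the test earlier in this dump; with `num_epochs=1` the op raises `OutOfRangeError` after a single pass over the list:

```python
import tensorflow as tf
from tensorflow.contrib.input_pipeline.python.ops import input_pipeline_ops

elem = input_pipeline_ops.seek_next(["a", "b", "c"], num_epochs=1)
with tf.Session() as sess:
  sess.run([tf.local_variables_initializer(),
            tf.global_variables_initializer()])
  try:
    while True:
      print(sess.run(elem))            # b'a', b'b', b'c'
  except tf.errors.OutOfRangeError:
    pass                               # the single epoch is exhausted
```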
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops and modules related to proto.
@@decode_proto
@@encode_proto
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.proto_ops import decode_proto
from tensorflow.python.ops.proto_ops import encode_proto
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/proto/__init__.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Backwards compatibility tests for imports of tf.contrib.proto."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import proto
from tensorflow.python.platform import test
class ProtoImportTest(test.TestCase):
def testImport(self):
self.assertTrue(proto.decode_proto) # Should be accessible
self.assertTrue(proto.encode_proto) # Should be accessible
if __name__ == '__main__':
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/proto/import_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.ops.proto_ops import encode_proto
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/proto/python/ops/encode_proto_op.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.ops.proto_ops import decode_proto
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/proto/python/ops/decode_proto_op.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Complain about invalid or missing entries in python_*.txt files.
Problematic entries can be commented for temporary whitelisting.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
def abs_path(path):
root = os.path.dirname(__file__)
for _ in range(3):
root = os.path.join(root, os.pardir)
path = os.path.join(root, path)
path = os.path.abspath(path)
return path
def read_entries(test):
with open(abs_path(test.entries_file), "r") as f:
lines = f.readlines()
lines = [line.strip() for line in lines]
lines = [line for line in lines if line]
test.entries = []
test.whitelist = []
for line in lines:
# line is comment
if line.startswith("#"):
line = line[1:].strip()
# whitelist entry
if line.startswith("tensorflow/"):
test.whitelist.append(line)
# line has comment -> strip comment
elif line.find("#") != -1:
line = line[:line.find("#")].strip()
test.entries.append(line)
else:
test.entries.append(line)
def test_invalid_directories(test):
for entry in test.entries:
if not os.path.isdir(abs_path(entry)):
problem = "'" + test.entries_file + "' contains invalid '" + entry + "'"
solution = ("Please remove the invalid entry (or add the missing "
"directory).")
raise AssertionError(problem + "\n" + solution)
def test_missing_directory(test, path):
if path in test.whitelist:
return
dir_exists = os.path.isdir(abs_path(path))
entry_exists = path in test.entries
if dir_exists and not entry_exists:
problem = "'" + test.entries_file + "' is missing '" + path + "'"
solution = "Please add the missing entry (comment to whitelist if needed)."
raise AssertionError(problem + "\n" + solution)
class PythonModuleTest(unittest.TestCase):
def setUp(self):
self.entries_file = "tensorflow/contrib/cmake/python_modules.txt"
read_entries(self)
def testInvalidEntries(self):
test_invalid_directories(self)
def testMissingModules(self):
module_names = next(os.walk(abs_path("tensorflow/contrib")))[1]
for module_name in module_names:
path = "tensorflow/contrib/" + module_name
test_missing_directory(self, path + "/python")
test_missing_directory(self, path + "/python/ops")
test_missing_directory(self, path + "/python/kernels")
test_missing_directory(self, path + "/python/layers")
class PythonProtoTest(unittest.TestCase):
def setUp(self):
self.entries_file = "tensorflow/contrib/cmake/python_protos.txt"
read_entries(self)
def testInvalidEntries(self):
test_invalid_directories(self)
class PythonProtoCCTest(unittest.TestCase):
def setUp(self):
self.entries_file = "tensorflow/contrib/cmake/python_protos_cc.txt"
read_entries(self)
def testInvalidEntries(self):
test_invalid_directories(self)
if __name__ == "__main__":
unittest.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/cmake/python_sanity_test.py |
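The parser in `read_entries` above treats a fully commented path as a whitelist entry and strips trailing comments from regular entries. A self-contained sketch of that classification, using hypothetical file content:

```python
lines = [
    "tensorflow/contrib/foo/python",             # plain entry
    "tensorflow/contrib/bar/python  # pending",  # inline comment is stripped
    "# tensorflow/contrib/baz/python",           # commented out => whitelisted
]
entries, whitelist = [], []
for line in (l.strip() for l in lines if l.strip()):
  if line.startswith("#"):
    line = line[1:].strip()
    if line.startswith("tensorflow/"):
      whitelist.append(line)
  elif "#" in line:
    entries.append(line[:line.index("#")].strip())
  else:
    entries.append(line)

assert entries == ["tensorflow/contrib/foo/python",
                   "tensorflow/contrib/bar/python"]
assert whitelist == ["tensorflow/contrib/baz/python"]
```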
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""create_def_file.py - tool to create a windows def file.
The def file can be used to export symbols from the tensorflow dll to enable
tf.load_library().
Because the linker allows only 64K symbols to be exported per dll
we filter the symbols down to the essentials. The regular expressions
we use for this are specific to tensorflow.
TODO: this works fine but there is an issue with exporting
'const char * const' and importing it from a user_ops. The problem is
on the importing end and using __declspec(dllimport) works around it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import codecs
import os
import re
import subprocess
import sys
import tempfile
# External tools we use that come with the Visual Studio SDK; we assume
# that the caller has the correct PATH to the SDK.
UNDNAME = "undname.exe"
DUMPBIN = "dumpbin.exe"
# Exclude if matched
EXCLUDE_RE = re.compile(r"RTTI|deleting destructor|::internal::|Internal|"
r"python_op_gen_internal|grappler")
# Include if matched before exclude
INCLUDEPRE_RE = re.compile(r"google::protobuf::internal::ExplicitlyConstructed|"
r"tensorflow::internal::LogMessage|"
r"tensorflow::internal::LogString|"
r"tensorflow::internal::CheckOpMessageBuilder|"
r"tensorflow::internal::PickUnusedPortOrDie|"
r"tensorflow::internal::ValidateDevice|"
r"tensorflow::ops::internal::Enter|"
r"tensorflow::strings::internal::AppendPieces|"
r"tensorflow::strings::internal::CatPieces|"
r"tensorflow::errors::Internal|"
r"tensorflow::Tensor::CopyFromInternal|"
r"tensorflow::kernel_factory::"
r"OpKernelRegistrar::InitInternal|"
r"tensorflow::io::internal::JoinPathImpl")
# Include if matched after exclude
INCLUDE_RE = re.compile(r"^(TF_\w*)$|"
r"^(TFE_\w*)$|"
r"tensorflow::|"
r"functor::|"
r"\?nsync_|"
r"stream_executor::")
# We want to identify data members explicitly in the DEF file, so that no one
# can implicitly link against the DLL if they use one of the variables exported
# from the DLL and the header they use does not decorate the symbol with
# __declspec(dllimport). It is easier to detect what a data symbol does
# NOT look like, so we do it with the regex below.
DATA_EXCLUDE_RE = re.compile(r"[)(]|"
r"vftable|"
r"vbtable|"
r"vcall|"
r"RTTI|"
r"protobuf::internal::ExplicitlyConstructed")
def get_args():
"""Parse command line."""
filename_list = lambda x: x.split(";")
parser = argparse.ArgumentParser()
parser.add_argument("--input", type=filename_list,
help="paths to input libraries separated by semicolons",
required=True)
parser.add_argument("--output", help="output deffile", required=True)
parser.add_argument("--target", help="name of the target", required=True)
parser.add_argument("--bitness", help="build target bitness", required=True)
args = parser.parse_args()
return args
def main():
"""main."""
args = get_args()
# Pipe dumpbin to extract all linkable symbols from libs.
# Good symbols are collected in candidates and also written to
# a temp file.
candidates = []
tmpfile = tempfile.NamedTemporaryFile(mode="w", delete=False)
for lib_path in args.input:
proc = subprocess.Popen([DUMPBIN, "/nologo", "/linkermember:1", lib_path],
stdout=subprocess.PIPE)
for line in codecs.getreader("utf-8")(proc.stdout):
cols = line.split()
if len(cols) < 2:
continue
sym = cols[1]
tmpfile.file.write(sym + "\n")
candidates.append(sym)
exit_code = proc.wait()
if exit_code != 0:
print("{} failed, exit={}".format(DUMPBIN, exit_code))
return exit_code
tmpfile.file.close()
# Run the symbols through undname to get their undecorated name
# so we can filter on something readable.
with open(args.output, "w") as def_fp:
# track dupes
taken = set()
# Header for the def file.
def_fp.write("LIBRARY " + args.target + "\n")
def_fp.write("EXPORTS\n")
if args.bitness == "64":
def_fp.write("\t??1OpDef@tensorflow@@UEAA@XZ\n")
else:
def_fp.write("\t??1OpDef@tensorflow@@UAE@XZ\n")
    # Each symbol returned by undname matches the same position in candidates.
# We compare on undname but use the decorated name from candidates.
dupes = 0
proc = subprocess.Popen([UNDNAME, tmpfile.name], stdout=subprocess.PIPE)
for idx, line in enumerate(codecs.getreader("utf-8")(proc.stdout)):
decorated = candidates[idx]
if decorated in taken:
# Symbol is already in output, done.
dupes += 1
continue
if not INCLUDEPRE_RE.search(line):
if EXCLUDE_RE.search(line):
continue
if not INCLUDE_RE.search(line):
continue
if "deleting destructor" in line:
        # Some of the symbols covered by INCLUDEPRE_RE export deleting
# destructor symbols, which is a bad idea.
# So we filter out such symbols here.
continue
if DATA_EXCLUDE_RE.search(line):
def_fp.write("\t" + decorated + "\n")
else:
def_fp.write("\t" + decorated + " DATA\n")
taken.add(decorated)
exit_code = proc.wait()
if exit_code != 0:
print("{} failed, exit={}".format(UNDNAME, exit_code))
return exit_code
os.unlink(tmpfile.name)
print("symbols={}, taken={}, dupes={}"
.format(len(candidates), len(taken), dupes))
return 0
if __name__ == "__main__":
sys.exit(main())
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/cmake/tools/create_def_file.py |
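A small sketch of how the include/exclude filters above classify undecorated symbol names. It is simplified (it ignores `INCLUDEPRE_RE`, which rescues a handful of `::internal::` symbols), and the example symbols are hypothetical:

```python
import re

EXCLUDE_RE = re.compile(r"RTTI|deleting destructor|::internal::|Internal|"
                        r"python_op_gen_internal|grappler")
INCLUDE_RE = re.compile(r"^(TF_\w*)$|^(TFE_\w*)$|tensorflow::|functor::|"
                        r"\?nsync_|stream_executor::")

def keep(undecorated_name):
  if EXCLUDE_RE.search(undecorated_name):
    return False
  return bool(INCLUDE_RE.search(undecorated_name))

assert keep("TF_NewStatus")                                 # C API, exported
assert keep("tensorflow::Tensor::Tensor")                   # core class, exported
assert not keep("tensorflow::grappler::Cluster::Cluster")   # filtered out
```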
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for dealing with Tensors.
@@constant_value
@@make_tensor_proto
@@make_ndarray
@@ops_used_by_graph_def
@@stripped_op_list_for_graph
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.framework.meta_graph import ops_used_by_graph_def
from tensorflow.python.framework.meta_graph import stripped_op_list_for_graph
from tensorflow.python.framework.tensor_util import constant_value
from tensorflow.python.framework.tensor_util import make_tensor_proto
from tensorflow.python.framework.tensor_util import MakeNdarray as make_ndarray
# pylint: disable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/util/__init__.py |
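A quick sketch of the tensor helpers re-exported above: `make_tensor_proto` serializes a numpy array into a `TensorProto`, and `make_ndarray` converts it back:

```python
import numpy as np
from tensorflow.contrib import util

proto = util.make_tensor_proto(np.arange(6, dtype=np.float32).reshape(2, 3))
arr = util.make_ndarray(proto)
assert arr.shape == (2, 3) and arr.dtype == np.float32
```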
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for loading op libraries.
@@load_op_library
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
from tensorflow.python.framework import load_library
from tensorflow.python.platform import resource_loader
def load_op_library(path):
"""Loads a contrib op library from the given path.
NOTE(mrry): On Windows, we currently assume that some contrib op
libraries are statically linked into the main TensorFlow Python
extension DLL - use dynamically linked ops if the .so is present.
Args:
path: An absolute path to a shared object file.
Returns:
A Python module containing the Python wrappers for Ops defined in the
plugin.
"""
if os.name == 'nt':
# To avoid making every user_ops aware of windows, re-write
# the file extension from .so to .dll if .so file doesn't exist.
if not os.path.exists(path):
path = re.sub(r'\.so$', '.dll', path)
# Currently we have only some user_ops as dlls on windows - don't try
# to load them if the dll is not found.
# TODO(mrry): Once we have all of them this check should be removed.
if not os.path.exists(path):
return None
path = resource_loader.get_path_to_datafile(path)
ret = load_library.load_op_library(path)
assert ret, 'Could not load %s' % path
return ret
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/util/loader.py |
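A usage sketch of `load_op_library`, mirroring how the other contrib packages in this dump call it; the library name `_my_ops.so` is hypothetical:

```python
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader

_my_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile("_my_ops.so"))
# On Windows the loader rewrites the extension to .dll and may return None
# when the ops are statically linked into the main extension DLL.
```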
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for (approximate) nearest neighbor look-ups.
## Ops for (approximate) nearest neighbor look-ups
This package provides several ops for efficient (approximate) nearest
neighbor look-ups.
### LSH multiprobe ops
The following ops generate multiprobe sequences for various hash families.
@@hyperplane_lsh_hash
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import, line-too-long
from tensorflow.contrib.nearest_neighbor.python.ops.nearest_neighbor_ops import *
# pylint: enable=unused-import,wildcard-import,line-too-long
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/nearest_neighbor/__init__.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for hyperplane_lsh_probes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.nearest_neighbor.python.ops.nearest_neighbor_ops import hyperplane_lsh_probes
from tensorflow.python.platform import test
class HyperplaneLshProbesTest(test.TestCase):
# We only test the batch functionality of the op here because the multiprobe
# tests in hyperplane_lsh_probes_test.cc already cover most of the LSH
# functionality.
  def test_simple_batch(self):
with self.cached_session():
hyperplanes = np.eye(4)
points = np.array([[1.2, 0.5, -0.9, -1.0], [2.0, -3.0, 1.0, -1.5]])
product = np.dot(points, hyperplanes)
num_tables = 2
num_hyperplanes_per_table = 2
num_probes = 4
hashes, tables = hyperplane_lsh_probes(product,
num_tables,
num_hyperplanes_per_table,
num_probes)
self.assertAllEqual(hashes.eval(), [[3, 0, 2, 2], [2, 2, 0, 3]])
self.assertAllEqual(tables.eval(), [[0, 1, 0, 1], [0, 1, 1, 1]])
if __name__ == '__main__':
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/nearest_neighbor/python/kernel_tests/hyperplane_lsh_probes_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for nearest neighbor operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
_nearest_neighbor_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_nearest_neighbor_ops.so"))
def hyperplane_lsh_probes(point_hyperplane_product,
num_tables,
num_hyperplanes_per_table,
num_probes,
name=None):
"""Computes probes for the hyperplane hash.
The op supports multiprobing, i.e., the number of requested probes can be
larger than the number of tables. In that case, the same table can be probed
multiple times.
The first `num_tables` probes are always the primary hashes for each table.
Args:
point_hyperplane_product: a matrix of inner products between the hyperplanes
and the points to be hashed. These values should not be quantized so that
we can correctly compute the probing sequence. The expected shape is
`batch_size` times `num_tables * num_hyperplanes_per_table`, i.e., each
element of the batch corresponds to one row of the matrix.
num_tables: the number of tables to compute probes for.
num_hyperplanes_per_table: the number of hyperplanes per table.
num_probes: the requested number of probes per table.
name: A name prefix for the returned tensors (optional).
Returns:
probes: the output matrix of probes. Size `batch_size` times `num_probes`.
table_ids: the output matrix of tables ids. Size `batch_size` times
`num_probes`.
"""
return _nearest_neighbor_ops.hyperplane_lsh_probes(point_hyperplane_product,
num_tables,
num_hyperplanes_per_table,
num_probes,
name=name)
ops.NotDifferentiable("HyperplaneLSHProbes")
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/nearest_neighbor/python/ops/nearest_neighbor_ops.py |
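A TF1 usage sketch for `hyperplane_lsh_probes`, following the shapes used in the kernel test earlier in this dump (two tables of two hyperplanes each, four probes per point):

```python
import numpy as np
import tensorflow as tf
from tensorflow.contrib.nearest_neighbor.python.ops import nearest_neighbor_ops

hyperplanes = np.eye(4)
points = np.array([[1.2, 0.5, -0.9, -1.0], [2.0, -3.0, 1.0, -1.5]])
product = np.dot(points, hyperplanes)   # [batch, num_tables * num_hyperplanes]

probes, table_ids = nearest_neighbor_ops.hyperplane_lsh_probes(
    product, num_tables=2, num_hyperplanes_per_table=2, num_probes=4)
with tf.Session() as sess:
  print(sess.run([probes, table_ids]))  # each has shape [2, 4]
```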
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Framework utilities.
@@assert_same_float_dtype
@@assert_scalar
@@assert_scalar_int
@@convert_to_tensor_or_sparse_tensor
@@get_graph_from_inputs
@@is_numeric_tensor
@@is_non_decreasing
@@is_strictly_increasing
@@is_tensor
@@reduce_sum_n
@@remove_squeezable_dimensions
@@with_shape
@@with_same_shape
@@deprecated
@@deprecated_args
@@deprecated_arg_values
@@arg_scope
@@add_arg_scope
@@current_arg_scope
@@has_arg_scope
@@arg_scoped_arguments
@@prepend_name_scope
@@strip_name_scope
@@add_model_variable
@@assert_global_step
@@assert_or_get_global_step
@@assign_from_checkpoint
@@assign_from_checkpoint_fn
@@assign_from_values
@@assign_from_values_fn
@@create_global_step
@@filter_variables
@@fuse_op
@@get_global_step
@@get_or_create_global_step
@@get_local_variables
@@get_model_variables
@@get_name_scope
@@get_trainable_variables
@@get_unique_variable
@@get_variables_by_name
@@get_variables_by_suffix
@@get_variable_full_name
@@get_variables_to_restore
@@get_variables
@@global_variable
@@local_variable
@@model_variable
@@variable
@@VariableDeviceChooser
@@convolutional_delta_orthogonal
@@convolutional_orthogonal_1d
@@convolutional_orthogonal_2d
@@convolutional_orthogonal_3d
@@zero_initializer
@@load_checkpoint
@@list_variables
@@load_variable
@@init_from_checkpoint
@@load_and_remap_matrix_initializer
@@load_embedding_initializer
@@load_linear_multiclass_bias_initializer
@@load_variable_slot_initializer
@@argsort
@@py_func
@@sort
@@get_placeholders
@@smart_cond
@@smart_constant_value
@@smart_case
@@BoundedTensorSpec
@@TensorSpec
@@RecordInput
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.framework.python.framework import *
from tensorflow.contrib.framework.python.framework import nest
from tensorflow.contrib.framework.python.ops import *
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.framework.ops import prepend_name_scope
from tensorflow.python.framework.ops import strip_name_scope
from tensorflow.python.framework.smart_cond import smart_case
from tensorflow.python.framework.smart_cond import smart_cond
from tensorflow.python.framework.smart_cond import smart_constant_value
from tensorflow.python.framework.tensor_spec import BoundedTensorSpec
from tensorflow.python.framework.tensor_spec import TensorSpec
from tensorflow.python.ops.data_flow_ops import RecordInput
from tensorflow.python.ops.init_ops import convolutional_delta_orthogonal
from tensorflow.python.ops.init_ops import convolutional_orthogonal_1d
from tensorflow.python.ops.init_ops import convolutional_orthogonal_2d
from tensorflow.python.ops.init_ops import convolutional_orthogonal_3d
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['nest']
_nest_allowed_symbols = [
'assert_same_structure',
'is_nested',
'is_sequence',
'is_sequence_or_composite',
'flatten',
'flatten_dict_items',
'pack_sequence_as',
'map_structure',
'map_structure_with_paths',
'map_structure_with_tuple_paths',
'assert_shallow_structure',
'flatten_up_to',
'flatten_with_tuple_paths_up_to',
'map_structure_up_to',
'map_structure_with_tuple_paths_up_to',
'get_traverse_shallow_structure',
'yield_flat_paths',
'flatten_with_joined_string_paths',
'flatten_with_tuple_paths',
]
remove_undocumented(nest.__name__, allowed_exception_list=_nest_allowed_symbols)
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/framework/__init__.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""@graph_util tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.framework import graph_util
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
def GetNewNode(name, op, input_nodes):
new_node = node_def_pb2.NodeDef()
new_node.op = op
new_node.name = name
for node in input_nodes:
new_node.input.append(node)
return new_node
class GraphUtilTest(test.TestCase):
def testGraphUtil(self):
graph_def = graph_pb2.GraphDef()
node_a = GetNewNode('A', 'Placeholder', [])
node_b = GetNewNode('B', 'Op1', ['A'])
# A loop in the part that will be fused.
node_c = GetNewNode('C', 'Op1', ['B', 'C'])
node_d = GetNewNode('D', 'Op1', ['C'])
node_e = GetNewNode('E', 'Op1', ['D'])
graph_def.node.extend([node_a, node_b, node_c, node_d, node_e])
fused_graph_def = graph_util.fuse_op(
graph_def, ['A'], ['D'], [types_pb2.DT_FLOAT], True, 'FusedOp', 'Op2')
self.assertEqual(len(fused_graph_def.node), 4)
self.assertEqual(fused_graph_def.node[0].name, 'A')
self.assertEqual(fused_graph_def.node[1].name, 'FusedOp')
self.assertEqual(fused_graph_def.node[1].input[0], 'A')
self.assertEqual(fused_graph_def.node[1].op, 'Op2')
self.assertEqual(fused_graph_def.node[1].attr['_output_quantized'].b, True)
self.assertEqual(fused_graph_def.node[1].attr['_output_types'].list.type,
[types_pb2.DT_FLOAT])
self.assertEqual(fused_graph_def.node[2].name, 'D')
self.assertEqual(fused_graph_def.node[3].name, 'E')
def testGraphUtilArtificialDependencyInjection(self):
graph_def = graph_pb2.GraphDef()
node_a = GetNewNode('A', 'Placeholder', [])
node_a1 = GetNewNode('A1', 'Placeholder', [])
node_b = GetNewNode('B', 'Op1', ['A'])
node_c = GetNewNode('C', 'Op1', ['B'])
node_d = GetNewNode('D', 'Op1', ['C'])
node_e = GetNewNode('E', 'Op1', ['D'])
graph_def.node.extend([node_a, node_a1, node_b, node_c, node_d, node_e])
fused_graph_def = graph_util.fuse_op(graph_def, ['A', 'A1'], ['D'],
[types_pb2.DT_FLOAT], True, 'FusedOp',
'Op2')
self.assertEqual(len(fused_graph_def.node), 5)
self.assertEqual(fused_graph_def.node[0].name, 'A')
self.assertEqual(fused_graph_def.node[1].name, 'A1')
self.assertEqual(fused_graph_def.node[2].name, 'FusedOp')
self.assertEqual(fused_graph_def.node[2].input[0], 'A')
self.assertEqual(fused_graph_def.node[2].op, 'Op2')
self.assertEqual(fused_graph_def.node[2].attr['_output_quantized'].b, True)
self.assertEqual(fused_graph_def.node[2].attr['_output_types'].list.type,
[types_pb2.DT_FLOAT])
self.assertEqual(fused_graph_def.node[3].name, 'D')
self.assertEqual(fused_graph_def.node[4].name, 'E')
class GetPlaceholdersTest(test.TestCase):
def test_get_placeholders(self):
with ops.Graph().as_default() as g:
placeholders = [array_ops.placeholder(dtypes.float32) for _ in range(5)]
results = graph_util.get_placeholders(g)
self.assertEqual(
sorted(placeholders, key=lambda x: x._id), # pylint: disable=protected-access
sorted(results, key=lambda x: x._id)) # pylint: disable=protected-access
if __name__ == '__main__':
test.main()
| tensorflow-r1.15.5-nv23.03 | tensorflow/contrib/framework/python/framework/graph_util_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import decorator_utils
def _add_experimental_function_notice_to_docstring(doc):
"""Adds an experimental notice to a docstring for experimental functions."""
return decorator_utils.add_notice_to_docstring(
doc, '',
'EXPERIMENTAL FUNCTION',
'(experimental)', ['THIS FUNCTION IS EXPERIMENTAL. It may change or '
'be removed at any time, and without warning.'])
def experimental(func):
"""Decorator for marking functions or methods experimental.
This decorator logs an experimental warning whenever the decorated function is
called. It has the following format:
<function> (from <module>) is experimental and may change or be removed at
any time, and without warning.
<function> will include the class name if it is a method.
It also edits the docstring of the function: ' (experimental)' is appended
to the first line of the docstring and a notice is prepended to the rest of
the docstring.
Args:
func: A function or method to mark experimental.
Returns:
Decorated function or method.
"""
decorator_utils.validate_callable(func, 'experimental')
@functools.wraps(func)
def new_func(*args, **kwargs):
logging.warning(
'%s (from %s) is experimental and may change or be removed at '
'any time, and without warning.',
decorator_utils.get_qualified_name(func), func.__module__)
return func(*args, **kwargs)
new_func.__doc__ = _add_experimental_function_notice_to_docstring(
func.__doc__)
return new_func
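# Illustrative usage sketch (added for this document, not part of the original
# module): `_example_experimental_fn` is a hypothetical function showing how
# the `experimental` decorator is applied. Calling it logs the warning above,
# and its docstring gains the "(experimental)" notice.
@experimental
def _example_experimental_fn(arg0, arg1):
  """Returns the sum of two arguments."""
  return arg0 + arg1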
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/framework/python/framework/experimental.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for checkpoints tools."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
def _create_checkpoints(sess, checkpoint_dir):
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
checkpoint_state_name = "checkpoint"
v1 = variable_scope.get_variable("var1", [1, 10])
v2 = variable_scope.get_variable("var2", [10, 10])
v3 = variable_scope.get_variable("var3", [100, 100])
with variable_scope.variable_scope("useful_scope"):
v4 = variable_scope.get_variable("var4", [9, 9])
sess.run(variables.global_variables_initializer())
v1_value, v2_value, v3_value, v4_value = sess.run([v1, v2, v3, v4])
saver = saver_lib.Saver()
saver.save(
sess,
checkpoint_prefix,
global_step=0,
latest_filename=checkpoint_state_name)
return v1_value, v2_value, v3_value, v4_value
def _create_partition_checkpoints(sess, checkpoint_dir):
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
checkpoint_state_name = "checkpoint"
with variable_scope.variable_scope("scope"):
v1 = variable_scope.get_variable(
name="var1",
shape=[100, 100],
initializer=init_ops.truncated_normal_initializer(0.5),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=5, axis=0, min_slice_size=8 << 10))
sess.run(variables.global_variables_initializer())
v1_value = sess.run(v1._get_variable_list())
saver = saver_lib.Saver()
saver.save(
sess,
checkpoint_prefix,
global_step=0,
latest_filename=checkpoint_state_name)
return v1_value
class CheckpointsTest(test.TestCase):
def testNoCheckpoints(self):
checkpoint_dir = self.get_temp_dir() + "/no_checkpoints"
with self.assertRaises(errors_impl.OpError):
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var1"), [])
def testNoTensor(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
_, _, _, _ = _create_checkpoints(session, checkpoint_dir)
with self.assertRaises(errors_impl.OpError):
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var5"), [])
def testGetTensor(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var1"), v1)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var2"), v2)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var3"), v3)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "useful_scope/var4"), v4)
def testGetAllVariables(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
_create_checkpoints(session, checkpoint_dir)
self.assertEqual(
checkpoint_utils.list_variables(checkpoint_dir),
[("useful_scope/var4", [9, 9]), ("var1", [1, 10]), ("var2", [10, 10]),
("var3", [100, 100])])
def testInitFromCheckpoint(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
my1 = variable_scope.get_variable("my1", [1, 10])
with variable_scope.variable_scope("some_other_scope"):
my2 = variable_scope.get_variable("my2", [10, 10])
with variable_scope.variable_scope("other_useful_scope"):
my4 = variable_scope.get_variable("var4", [9, 9])
my3 = variable_scope.get_variable("my3", [100, 100])
checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
"var1": "some_scope/my1",
"useful_scope/": "some_scope/some_other_scope/other_useful_scope/",
})
checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
"var2": "some_scope/some_other_scope/my2",
"var3": my3,
})
session.run(variables.global_variables_initializer())
self.assertAllEqual(my1.eval(session), v1)
self.assertAllEqual(my2.eval(session), v2)
self.assertAllEqual(my3.eval(session), v3)
self.assertAllEqual(my4.eval(session), v4)
# Check that tensors are not explicitly in the graph.
self.assertLess(len(str(session.graph.as_graph_def())), 27000)
def testInitWithScopeDoesNotCaptureSuffixes(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
_, _, _, v4 = _create_checkpoints(session, checkpoint_dir)
with ops.Graph().as_default() as g:
with variable_scope.variable_scope("useful_scope"):
my4 = variable_scope.get_variable("var4", [9, 9])
with variable_scope.variable_scope("useful_scope_1"):
my5_init = [[1.0, 2.0], [3.0, 4.0]]
my5 = variable_scope.get_variable("var5", initializer=my5_init)
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"useful_scope/": "useful_scope/"})
with self.session(graph=g) as session:
session.run(variables.global_variables_initializer())
self.assertAllEqual(my4.eval(session), v4)
self.assertAllEqual(my5.eval(session), my5_init)
def testInitFromRootCheckpoint(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
my1 = variable_scope.get_variable("var1", [1, 10])
my2 = variable_scope.get_variable("var2", [10, 10])
my3 = variable_scope.get_variable("var3", [100, 100])
with variable_scope.variable_scope("useful_scope"):
my4 = variable_scope.get_variable("var4", [9, 9])
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"/": "some_scope/",})
session.run(variables.global_variables_initializer())
self.assertAllEqual(my1.eval(session), v1)
self.assertAllEqual(my2.eval(session), v2)
self.assertAllEqual(my3.eval(session), v3)
self.assertAllEqual(my4.eval(session), v4)
def testInitToRootCheckpoint(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
my1 = variable_scope.get_variable("var1", [1, 10])
my2 = variable_scope.get_variable("var2", [10, 10])
my3 = variable_scope.get_variable("var3", [100, 100])
with variable_scope.variable_scope("useful_scope"):
my4 = variable_scope.get_variable("var4", [9, 9])
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"/": "/",})
session.run(variables.global_variables_initializer())
self.assertAllEqual(my1.eval(session), v1)
self.assertAllEqual(my2.eval(session), v2)
self.assertAllEqual(my3.eval(session), v3)
self.assertAllEqual(my4.eval(session), v4)
def testInitFromPartitionVar(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1 = _create_partition_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
my1 = variable_scope.get_variable(
name="my1",
shape=[100, 100],
initializer=init_ops.truncated_normal_initializer(0.5),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=5, axis=0, min_slice_size=8 << 10))
my1_var_list = my1._get_variable_list()
with variable_scope.variable_scope("some_other_scope"):
my2 = variable_scope.get_variable(
name="var1",
shape=[100, 100],
initializer=init_ops.truncated_normal_initializer(0.5),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=5, axis=0, min_slice_size=8 << 10))
my2_var_list = my2._get_variable_list()
checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
"scope/var1": "some_scope/my1",
"scope/": "some_other_scope/"})
session.run(variables.global_variables_initializer())
my1_values = session.run(my1_var_list)
self.assertAllEqual(my1_values, v1)
my2_values = session.run(my2_var_list)
self.assertAllEqual(my2_values, v1)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
my1 = variable_scope.get_variable(
name="my1",
shape=[100, 100],
initializer=init_ops.truncated_normal_initializer(0.5),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=5, axis=0, min_slice_size=8 << 10))
my1_var_list = my1._get_variable_list()
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"scope/var1": my1_var_list,})
session.run(variables.global_variables_initializer())
my1_values = session.run(my1_var_list)
self.assertAllEqual(my1_values, v1)
def testInitFromCheckpointMissing(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
_, _, _, _ = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
_ = variable_scope.get_variable("my1", [10, 10])
_ = variable_scope.get_variable(
"my2", [1, 10],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer())
# No directory.
with self.assertRaises(errors_impl.OpError):
checkpoint_utils.init_from_checkpoint("no_dir",
{"var1": "some_scope/my1"})
# No variable in checkpoint.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"no_var": "some_scope/my1"})
# No variable in the graph.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"var3": "some_scope/no_var"})
# Shape mismatch.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"var1": "some_scope/my1"})
# Variables 'my1' and 'my2' are missing in the given checkpoint scope.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(
checkpoint_dir, {"useful_scope/": "some_scope/"})
# Mapping is not to scope name.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"useful_scope": "some_scope/"})
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/framework/python/framework/checkpoint_utils_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.deprecation import deprecated
__all__ = [
'assert_same_float_dtype',
'assert_scalar',
'assert_scalar_int',
'convert_to_tensor_or_sparse_tensor',
'is_tensor',
'reduce_sum_n',
'remove_squeezable_dimensions',
'with_shape',
'with_same_shape']
# Temporary for backwards compatibility
is_tensor = tensor_util.is_tensor
assert_same_float_dtype = check_ops.assert_same_float_dtype
assert_scalar = check_ops.assert_scalar
convert_to_tensor_or_sparse_tensor = (
sparse_tensor.convert_to_tensor_or_sparse_tensor)
def reduce_sum_n(tensors, name=None):
"""Reduce tensors to a scalar sum.
This reduces each tensor in `tensors` to a scalar via `tf.reduce_sum`, then
adds them via `tf.add_n`.
Args:
tensors: List of tensors, all of the same numeric type.
name: Tensor name, and scope for all other ops.
Returns:
A 0-D `Tensor` holding the total sum of the given tensors.
Raises:
ValueError: if `tensors` is missing or empty.
"""
if not tensors:
raise ValueError('No tensors provided.')
with ops.name_scope(name, 'reduce_sum_n', tensors) as name_scope:
tensors = [
math_ops.reduce_sum(t, name='%s/sum' % t.op.name) for t in tensors]
if len(tensors) == 1:
return tensors[0]
return math_ops.add_n(tensors, name=name_scope)
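# Illustrative usage sketch (added for this document, not part of the original
# module): sums three tensors of different shapes with `reduce_sum_n`. With
# the values below, the returned scalar evaluates to 21 in a session
# (1 + 2 + 3 + 4 + 5 + 6).
def _example_reduce_sum_n():
  from tensorflow.python.framework import constant_op  # Local import for the sketch.
  a = constant_op.constant(1)
  b = constant_op.constant([2])
  c = constant_op.constant([[3, 4], [5, 6]])
  return reduce_sum_n([a, b, c])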
@deprecated(
None, "Please switch to remove_squeezable_dimensions from "
"tf.confusion_matrix. Note that the order of the inputs and outputs of "
"labels and predictions have also been switched.")
def remove_squeezable_dimensions(predictions, labels, name=None):
"""Squeeze last dim if ranks of `predictions` and `labels` differ by 1.
This will use static shape if available. Otherwise, it will add graph
operations, which could result in a performance hit.
Args:
predictions: Predicted values, a `Tensor` of arbitrary dimensions.
labels: Label values, a `Tensor` whose dimensions match `predictions`.
name: Name of the op.
Returns:
Tuple of `predictions` and `labels`, possibly with last dim squeezed.
"""
with ops.name_scope(name, 'remove_squeezable_dimensions',
[predictions, labels]):
predictions = ops.convert_to_tensor(predictions)
labels = ops.convert_to_tensor(labels)
predictions_shape = predictions.get_shape()
predictions_rank = predictions_shape.ndims
labels_shape = labels.get_shape()
labels_rank = labels_shape.ndims
if (labels_rank is not None) and (predictions_rank is not None):
# Use static rank.
rank_diff = predictions_rank - labels_rank
if rank_diff == -1:
labels = array_ops.squeeze(labels, [-1])
elif rank_diff == 1:
predictions = array_ops.squeeze(predictions, [-1])
return predictions, labels
# Use dynamic rank.
rank_diff = array_ops.rank(predictions) - array_ops.rank(labels)
if (predictions_rank is None) or (
predictions_shape.dims[-1].is_compatible_with(1)):
predictions = control_flow_ops.cond(
math_ops.equal(1, rank_diff),
lambda: array_ops.squeeze(predictions, [-1]),
lambda: predictions)
if (labels_rank is None) or (
labels_shape.dims[-1].is_compatible_with(1)):
labels = control_flow_ops.cond(
math_ops.equal(-1, rank_diff),
lambda: array_ops.squeeze(labels, [-1]),
lambda: labels)
return predictions, labels
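# Illustrative usage sketch (added for this document, not part of the original
# module): `labels` carries an extra trailing dimension of size 1 that
# `predictions` lacks, so this (deprecated) helper squeezes it away and both
# returned tensors have shape [3].
def _example_remove_squeezable_dimensions():
  from tensorflow.python.framework import constant_op  # Local import for the sketch.
  predictions = constant_op.constant([0.2, 0.7, 0.1])   # Shape [3].
  labels = constant_op.constant([[0.0], [1.0], [0.0]])  # Shape [3, 1].
  return remove_squeezable_dimensions(predictions, labels)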
def _shape_tensor_compatible(expected_shape, actual_shape):
"""Returns whether actual_shape is compatible with expected_shape.
Note that -1 in `expected_shape` is recognized as an unknown dimension.
Args:
expected_shape: Integer list defining the expected shape, or tensor of same.
actual_shape: Shape of the tensor to test.
Returns:
A boolean scalar `Tensor` that is True if `actual_shape` is compatible with `expected_shape`.
"""
with ops.name_scope('shape_tensor_equal',
values=[expected_shape, actual_shape]) as scope:
return math_ops.reduce_all(
math_ops.logical_or(
math_ops.equal(expected_shape, -1),
math_ops.equal(expected_shape, actual_shape, 'equal'),
name='exclude_partial_shape'),
name=scope)
def _is_rank(expected_rank, actual_tensor):
"""Returns whether actual_tensor's rank is expected_rank.
Args:
expected_rank: Integer defining the expected rank, or tensor of same.
actual_tensor: Tensor to test.
Returns:
A boolean scalar `Tensor` that is True if `actual_tensor` has rank `expected_rank`.
"""
with ops.name_scope('is_rank', values=[actual_tensor]) as scope:
expected = ops.convert_to_tensor(expected_rank, name='expected')
actual = array_ops.rank(actual_tensor, name='actual')
return math_ops.equal(expected, actual, name=scope)
def _is_shape(expected_shape, actual_tensor, actual_shape=None):
"""Returns whether actual_tensor's shape is expected_shape.
Note that -1 in `expected_shape` is recognized as an unknown dimension.
Args:
expected_shape: Integer list defining the expected shape, or tensor of same.
actual_tensor: Tensor to test.
actual_shape: Shape of actual_tensor, if we already have it.
Returns:
A boolean scalar `Tensor` that is True if `actual_tensor` has shape `expected_shape`.
"""
with ops.name_scope('is_shape', values=[actual_tensor]) as scope:
is_rank = _is_rank(array_ops.size(expected_shape), actual_tensor)
if actual_shape is None:
actual_shape = array_ops.shape(actual_tensor, name='actual')
shape_equal = _shape_tensor_compatible(expected_shape, actual_shape)
return math_ops.logical_and(is_rank, shape_equal, name=scope)
def _assert_shape_op(expected_shape, actual_tensor):
"""Asserts actual_tensor's shape is expected_shape.
Note that an unknown dimension in `expected_shape` will be ignored.
Args:
expected_shape: List of integers defining the expected shape, or tensor of
same.
actual_tensor: Tensor to test.
Returns:
A new assert operation that fails at run time if the shape does not match.
"""
with ops.name_scope('assert_shape', values=[actual_tensor]) as scope:
actual_shape = array_ops.shape(actual_tensor, name='actual')
if (isinstance(expected_shape, tensor_shape.TensorShape)
and not expected_shape.is_fully_defined()):
expected_shape = [d if d else -1 for d in expected_shape.as_list()]
is_shape = _is_shape(expected_shape, actual_tensor, actual_shape)
return control_flow_ops.Assert(
is_shape, [
'Wrong shape for %s [expected] [actual].' % actual_tensor.name,
expected_shape,
actual_shape
], name=scope)
def with_same_shape(expected_tensor, tensor):
"""Assert tensors are the same shape, from the same graph.
Args:
expected_tensor: Tensor with expected shape.
tensor: Tensor of actual values.
Returns:
The original tensor argument, possibly with assert ops added.
"""
with ops.name_scope('%s/' % tensor.op.name, values=[expected_tensor, tensor]):
tensor_shape = expected_tensor.get_shape()
expected_shape = (
tensor_shape.as_list() if tensor_shape.is_fully_defined()
else array_ops.shape(expected_tensor, name='expected_shape'))
return with_shape(expected_shape, tensor)
def with_shape(expected_shape, tensor):
"""Asserts tensor has expected shape.
If the tensor shape and expected_shape are fully defined, assert they match.
Otherwise, add assert op that will validate the shape when tensor is
evaluated, and set shape on tensor.
Args:
expected_shape: Expected shape to assert, as a 1D array of ints, or tensor
of same.
tensor: Tensor whose shape we're validating.
Returns:
tensor, perhaps with a dependent assert operation.
Raises:
ValueError: if tensor has an invalid shape.
"""
if isinstance(tensor, sparse_tensor.SparseTensor):
raise ValueError('SparseTensor not supported.')
# Shape type must be 1D int32.
if tensor_util.is_tensor(expected_shape):
if expected_shape.dtype.base_dtype != dtypes.int32:
raise ValueError(
'Invalid dtype %s for shape %s expected of tensor %s.' % (
expected_shape.dtype, expected_shape, tensor.name))
if isinstance(expected_shape, (list, tuple)):
if not expected_shape:
expected_shape = np.asarray([], dtype=np.int32)
else:
np_expected_shape = np.asarray(expected_shape)
expected_shape = (
np.asarray(expected_shape, dtype=np.int32)
if np_expected_shape.dtype == np.int64 else np_expected_shape)
if isinstance(expected_shape, np.ndarray):
if expected_shape.ndim > 1:
raise ValueError(
'Invalid rank %s for shape %s expected of tensor %s.' % (
expected_shape.ndim, expected_shape, tensor.name))
if expected_shape.dtype != np.int32:
raise ValueError(
'Invalid dtype %s for shape %s expected of tensor %s.' % (
expected_shape.dtype, expected_shape, tensor.name))
actual_shape = tensor.get_shape()
if (not actual_shape.is_fully_defined()
or tensor_util.is_tensor(expected_shape)):
with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
if (not tensor_util.is_tensor(expected_shape)
and (len(expected_shape) < 1)):
# TODO(irving): Remove scalar special case
return array_ops.reshape(tensor, [])
with ops.control_dependencies([_assert_shape_op(expected_shape, tensor)]):
result = array_ops.identity(tensor)
if not tensor_util.is_tensor(expected_shape):
result.set_shape(expected_shape)
return result
if (not tensor_util.is_tensor(expected_shape) and
not actual_shape.is_compatible_with(expected_shape)):
if (len(expected_shape) < 1) and actual_shape.is_compatible_with([1]):
# TODO(irving): Remove scalar special case.
with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
return array_ops.reshape(tensor, [])
raise ValueError('Invalid shape for tensor %s, expected %s, got %s.' % (
tensor.name, expected_shape, actual_shape))
return tensor
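# Illustrative usage sketch (added for this document, not part of the original
# module): with a fully defined static shape the check in `with_shape` happens
# at graph-construction time; with a shape-less placeholder a runtime assert
# is added and the static shape is set on the returned tensor.
def _example_with_shape():
  from tensorflow.python.framework import constant_op  # Local import for the sketch.
  static_checked = with_shape([2, 2],
                              constant_op.constant([[1.0, 2.0], [3.0, 4.0]]))
  runtime_checked = with_shape([2, 2], array_ops.placeholder(dtypes.float32))
  return static_checked, runtime_checked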
def assert_scalar_int(tensor, name=None):
"""Assert `tensor` is 0-D, of type `tf.int32` or `tf.int64`.
Args:
tensor: `Tensor` to test.
name: Name of the op and of the new `Tensor` if one is created.
Returns:
`tensor`, for chaining.
Raises:
ValueError: if `tensor` is not 0-D, of integer type.
"""
with ops.name_scope(name, 'assert_scalar_int', [tensor]) as name_scope:
tensor = ops.convert_to_tensor(tensor)
data_type = tensor.dtype
if not data_type.base_dtype.is_integer:
raise ValueError('Expected integer type for %s, received type: %s.'
% (tensor.name, data_type))
return check_ops.assert_scalar(tensor, name=name_scope)
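# Illustrative usage sketch (added for this document, not part of the original
# module): `assert_scalar_int` passes 0-D integer tensors through unchanged
# and raises ValueError for floating-point or non-scalar inputs.
def _example_assert_scalar_int():
  from tensorflow.python.framework import constant_op  # Local import for the sketch.
  return assert_scalar_int(constant_op.constant(3, dtype=dtypes.int32))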
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/framework/python/framework/tensor_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module containing TensorFlow ops whose API may change in the future."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.framework.python.framework.checkpoint_utils import *
from tensorflow.contrib.framework.python.framework.experimental import experimental
from tensorflow.contrib.framework.python.framework.graph_util import *
from tensorflow.contrib.framework.python.framework.tensor_util import *
# pylint: enable=wildcard-import
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import nest
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.deprecation import deprecated_arg_values
from tensorflow.python.util.deprecation import deprecated_args
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/framework/python/framework/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import training as train
__all__ = [
"load_checkpoint",
"load_variable",
"list_variables",
"init_from_checkpoint"]
def _get_checkpoint_filename(filepattern):
"""Returns checkpoint filename given directory or specific filepattern."""
if gfile.IsDirectory(filepattern):
return checkpoint_management.latest_checkpoint(filepattern)
return filepattern
def load_checkpoint(filepattern):
"""Returns CheckpointReader for latest checkpoint.
Args:
filepattern: Directory with checkpoints file or path to checkpoint.
Returns:
`CheckpointReader` object.
Raises:
ValueError: if `filepattern` points to a directory with no 'checkpoint' file or checkpoints.
"""
filename = _get_checkpoint_filename(filepattern)
if filename is None:
raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
"given directory %s" % filepattern)
return train.NewCheckpointReader(filename)
def load_variable(checkpoint_dir, name):
"""Returns a Tensor with the contents of the given variable in the checkpoint.
Args:
checkpoint_dir: Directory with checkpoints file or path to checkpoint.
name: Name of the tensor to return.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
"""
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
name = name[:-2]
reader = load_checkpoint(checkpoint_dir)
return reader.get_tensor(name)
def list_variables(checkpoint_dir):
"""Returns list of all variables in the latest checkpoint.
Args:
checkpoint_dir: Directory with checkpoints file or path to checkpoint.
Returns:
List of tuples `(name, shape)`.
"""
reader = load_checkpoint(checkpoint_dir)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
result = []
for name in names:
result.append((name, variable_map[name]))
return result
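# Illustrative usage sketch (added for this document, not part of the original
# module): given a directory that already contains a checkpoint (the path
# below is hypothetical), list the stored variables and load the first one.
def _example_inspect_checkpoint(checkpoint_dir="/tmp/my_model_dir"):
  names_and_shapes = list_variables(checkpoint_dir)
  first_name, _ = names_and_shapes[0]
  return load_variable(checkpoint_dir, first_name)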
# pylint: disable=protected-access
# Currently variable_scope doesn't provide very good APIs to access
# all variables under scope and retrieve and check existing scopes.
# TODO(ipolosukhin): Refactor variable_scope module to provide nicer APIs.
def _set_checkpoint_initializer(variable, file_pattern, tensor_name, slice_spec,
name="checkpoint_initializer"):
"""Sets variable initializer to assign op form value in checkpoint's tensor.
Args:
variable: `Variable` object.
file_pattern: string, where to load checkpoints from.
tensor_name: Name of the `Tensor` to load from checkpoint reader.
slice_spec: Slice specification for loading partitioned variables.
name: Name of the operation.
"""
base_type = variable.dtype.base_dtype
with ops.device(variable.device), ops.device("/cpu:0"):
restore_op = io_ops.restore_v2(
file_pattern, [tensor_name], [slice_spec], [base_type], name=name)[0]
variable._initializer_op = state_ops.assign(variable, restore_op)
def _set_variable_or_list_initializer(variable_or_list, file_pattern,
tensor_name):
if isinstance(variable_or_list, (list, tuple)):
# A set of slices.
slice_name = None
for v in variable_or_list:
if slice_name is None:
slice_name = v._save_slice_info.full_name
elif slice_name != v._save_slice_info.full_name:
raise ValueError("Slices must all be from the same tensor: %s != %s" %
(slice_name, v._save_slice_info.full_name))
_set_checkpoint_initializer(v, file_pattern, tensor_name,
v._save_slice_info.spec)
else:
_set_checkpoint_initializer(variable_or_list, file_pattern, tensor_name, "")
def _collect_partitioned_variable(name, var_scope):
if name + "/part_0" in var_scope._vars:
var = []
i = 0
while name + "/part_%d" % i in var_scope._vars:
var.append(var_scope._vars[name + "/part_%d" % i])
i += 1
return var
return None
def init_from_checkpoint(checkpoint_dir, assignment_map):
"""Using assignment map initializes current variables with loaded tensors.
Note: This overrides default initialization ops of specified variables and
redefines dtype.
Assignment map supports following syntax:
* `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
current `scope_name` from `checkpoint_scope_name` with matching variable
names.
* `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
will initialize `scope_name/variable_name` variable
from `checkpoint_scope_name/some_other_variable`.
* `'scope_variable_name': variable` - will initialize given `tf.Variable`
object with variable from the checkpoint.
* `'scope_variable_name': list(variable)` - will initialize list of
partitioned variables with variable from the checkpoint.
* `'/': 'scope_name/'` - will load all variables in current `scope_name` from
checkpoint's root (e.g. no scope).
Supports loading into partitioned variables, which are represented as
`'<variable>/part_<part #>'`.
Example:
```python
# Create variables.
with tf.compat.v1.variable_scope('test'):
m = tf.compat.v1.get_variable('my_var')
with tf.compat.v1.variable_scope('test2'):
var2 = tf.compat.v1.get_variable('my_var')
var3 = tf.compat.v1.get_variable(name="my1", shape=[100, 100],
partitioner=lambda shape, dtype: [5, 1])
...
# Specify which variables to initialize from checkpoint.
init_from_checkpoint(checkpoint_dir, {
'some_var': 'test/my_var',
'some_scope/': 'test2/'})
...
# Or use `Variable` objects to identify what to initialize.
init_from_checkpoint(checkpoint_dir, {
'some_scope/var2': var2,
})
# Initialize partitioned variables
init_from_checkpoint(checkpoint_dir, {
'some_var_from_ckpt': 'part_var',
})
# Or specifying the list of `Variable` objects.
init_from_checkpoint(checkpoint_dir, {
'some_var_from_ckpt': var3._get_variable_list(),
})
...
# Initialize variables as usual.
session.run(tf.compat.v1.global_variables_initializer())
```
Args:
checkpoint_dir: Directory with checkpoints file or path to checkpoint.
assignment_map: Dict, where keys are names of the variables in the
checkpoint and values are current variables or names of current variables
(in default graph).
Raises:
tf.errors.OpError: If missing checkpoints or tensors in checkpoints.
ValueError: If missing variables in current graph.
"""
filepattern = _get_checkpoint_filename(checkpoint_dir)
reader = load_checkpoint(checkpoint_dir)
variable_map = reader.get_variable_to_shape_map()
for tensor_name_in_ckpt, current_var_or_name in six.iteritems(assignment_map):
var = None
# Check if this is Variable object or list of Variable objects (in case of
# partitioned variables).
is_var = lambda x: isinstance(x, variables.Variable)
if is_var(current_var_or_name) or (
isinstance(current_var_or_name, list)
and all(is_var(v) for v in current_var_or_name)):
var = current_var_or_name
else:
var_scope = vs._get_default_variable_store()
# Check if this variable is in var_store.
var = var_scope._vars.get(current_var_or_name, None)
# Also check if variable is partitioned as list.
if var is None:
var = _collect_partitioned_variable(current_var_or_name, var_scope)
if var is not None:
# If 1 to 1 mapping was provided, find variable in the checkpoint.
if tensor_name_in_ckpt not in variable_map:
raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
tensor_name_in_ckpt, checkpoint_dir, variable_map
))
if is_var(var):
# Additional at-call-time checks.
if not var.get_shape().is_compatible_with(
variable_map[tensor_name_in_ckpt]):
raise ValueError(
"Shape of variable %s (%s) doesn't match with shape of "
"tensor %s (%s) from checkpoint reader." % (
var.name, str(var.get_shape()),
tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
))
var_name = var.name
else:
var_name = ",".join([v.name for v in var])
_set_variable_or_list_initializer(var, filepattern, tensor_name_in_ckpt)
logging.info("Initialize variable %s from checkpoint %s with %s" % (
var_name, checkpoint_dir, tensor_name_in_ckpt
))
else:
scopes = ""
# TODO(vihanjain): Support list of 'current_var_or_name' here.
if "/" in current_var_or_name:
scopes = current_var_or_name[:current_var_or_name.rindex("/")]
if not tensor_name_in_ckpt.endswith("/"):
raise ValueError(
"Assignment map with scope only name {} should map to scope only "
"{}. Should be 'scope/': 'other_scope/'.".format(
scopes, tensor_name_in_ckpt))
# If scope to scope mapping was provided, find all variables in the scope
# and create variable to variable mapping.
scope_variables = set()
for var_name in var_scope._vars:
if not scopes or var_name.startswith(scopes + "/"):
# Consume /part_ if partitioned variable.
if "/part_" in var_name:
var_name = var_name[:var_name.index("/part_")]
scope_variables.add(var_name)
for var_name in scope_variables:
# Lookup name with specified prefix and suffix from current variable.
# If tensor_name given is '/' (root), don't use it for full name.
full_tensor_name = var_name[len(scopes):]
if current_var_or_name != "/":
full_tensor_name = full_tensor_name[1:]
if tensor_name_in_ckpt != "/":
full_tensor_name = tensor_name_in_ckpt + full_tensor_name
if full_tensor_name not in variable_map:
raise ValueError(
"Tensor %s (%s in %s) is not found in %s checkpoint" % (
full_tensor_name, var_name[len(scopes) + 1:],
tensor_name_in_ckpt, checkpoint_dir
))
var = var_scope._vars.get(var_name, None)
if var is None:
var = _collect_partitioned_variable(var_name, var_scope)
_set_variable_or_list_initializer(var, filepattern, full_tensor_name)
logging.info("Initialize variable %s from checkpoint %s with %s" % (
var_name, checkpoint_dir, full_tensor_name
))
# pylint: enable=protected-access
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/framework/python/framework/checkpoint_utils.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers to manipulate a tensor graph in python.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import six
# pylint: disable=unused-import
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import ops
from tensorflow.python.framework.graph_util_impl import _assert_nodes_are_present
from tensorflow.python.framework.graph_util_impl import _bfs_for_reachable_nodes
from tensorflow.python.framework.graph_util_impl import _extract_graph_summary
from tensorflow.python.framework.graph_util_impl import _node_name
__all__ = ["fuse_op", "get_placeholders"]
def fuse_op(graph_def, input_nodes, output_nodes, output_dtypes,
output_quantized, op_name, op_type):
"""Fuse subgraph between input_nodes and output_nodes into a single custom op.
Args:
graph_def: A graph_pb2.GraphDef proto.
input_nodes: input nodes to the subgraph to be fused.
output_nodes: output nodes to the subgraph to be fused.
output_dtypes: A list of output datatypes for the custom op.
output_quantized: A boolean flag that indicates if the output is quantized.
op_name: fused op name.
op_type: fused op type.
Returns:
The GraphDef of the new graph.
Raises:
TypeError: If 'graph_def' is not a graph_pb2.GraphDef proto.
"""
if not isinstance(graph_def, graph_pb2.GraphDef):
raise TypeError("graph_def must be a graph_pb2.GraphDef proto.")
if isinstance(input_nodes, six.string_types):
raise TypeError("input_nodes must be a list.")
if isinstance(output_nodes, six.string_types):
raise TypeError("output_nodes must be a list.")
name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary(
graph_def)
_assert_nodes_are_present(name_to_node, input_nodes + output_nodes)
# Nodes up to and including input_nodes
reachable_by_input = _bfs_for_reachable_nodes(input_nodes, name_to_input_name)
# Nodes up to and including output_nodes
reachable_by_output = _bfs_for_reachable_nodes(output_nodes,
name_to_input_name)
# Set of nodes in the list input_nodes
input_nodes_set = set(input_nodes)
# Set of nodes in the list output_nodes
output_nodes_set = set(output_nodes)
nodes_post_output = []
for node in graph_def.node:
n = _node_name(node.name)
if n in reachable_by_output:
if n not in reachable_by_input and n not in output_nodes_set:
# n is between input and output, i.e., part of the fused op
next_to_visit = [n]
visited = set()
while next_to_visit:
cur_node = next_to_visit[0]
visited.add(cur_node)
del next_to_visit[0]
if cur_node in reachable_by_input and cur_node not in input_nodes_set:
raise TypeError("Node %s uses input %s not in input_nodes." %
(n, cur_node))
if cur_node not in input_nodes_set:
next_to_visit += [
input_node for input_node in name_to_input_name[cur_node]
if input_node not in visited
]
elif n not in reachable_by_input:
nodes_post_output.append(n)
# Add all nodes up to the input nodes
out = graph_pb2.GraphDef()
reachable_by_input_sorted = sorted(
list(reachable_by_input), key=lambda n: name_to_seq_num[n])
for node in reachable_by_input_sorted:
out.node.extend([copy.deepcopy(name_to_node[node])])
# Add the custom op
new_node = node_def_pb2.NodeDef()
for node in input_nodes:
new_node.input.append(node)
new_node.attr["_output_types"].list.type[:] = output_dtypes
new_node.attr["_output_quantized"].b = output_quantized
new_node.op = op_type
new_node.name = op_name
out.node.extend([new_node])
# Add the nodes in the output of the custom op
for index, n in enumerate(output_nodes):
assert len(name_to_node[n].input) == 1
new_node = copy.deepcopy(name_to_node[n])
del new_node.input[:]
new_node.input.append(op_name + (":" + str(index) if index != 0 else ""))
out.node.extend([new_node])
# Add the nodes post output_nodes
for n in nodes_post_output:
out.node.extend([copy.deepcopy(name_to_node[n])])
out.library.CopyFrom(graph_def.library)
out.versions.CopyFrom(graph_def.versions)
return out
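# Illustrative usage sketch (added for this document, not part of the original
# module; it mirrors the usage in graph_util_test.py): for a caller-supplied
# GraphDef containing a chain A -> B -> C, this fuses the nodes strictly
# between input 'A' and output 'C' into one custom op. The node names and the
# op type 'MyFusedType' are assumptions for illustration only.
def _example_fuse_op(graph_def):
  from tensorflow.core.framework import types_pb2  # Local import for the sketch.
  return fuse_op(
      graph_def,
      input_nodes=['A'],
      output_nodes=['C'],
      output_dtypes=[types_pb2.DT_FLOAT],
      output_quantized=False,
      op_name='FusedOp',
      op_type='MyFusedType')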
def get_placeholders(graph):
"""Get placeholders of a graph.
For example:
```python
a = tf.compat.v1.placeholder(dtype=tf.float32, shape=[2, 2], name='a')
b = tf.compat.v1.placeholder(dtype=tf.int32, shape=[3, 2], name='b')
tf.contrib.framework.get_placeholders(tf.compat.v1.get_default_graph())
# Returns:
# [<tf.Tensor 'a:0' shape=(2, 2) dtype=float32>,
# <tf.Tensor 'b:0' shape=(3, 2) dtype=int32>]
```
Args:
graph: A tf.Graph.
Returns:
A list containing all placeholders of the given graph.
Raises:
TypeError: If `graph` is not a tensorflow graph.
"""
if not isinstance(graph, ops.Graph):
raise TypeError("Input graph needs to be a Graph: %s" % graph)
# For each placeholder() call, a corresponding operation of type
# 'Placeholder' is registered to the graph. The return value of
# placeholder() (a Tensor) is in fact the first output of that operation.
operations = graph.get_operations()
result = [i.outputs[0] for i in operations if i.type == "Placeholder"]
return result
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/framework/python/framework/graph_util.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tensor_util tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import numpy as np
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.contrib.framework.python.ops import variables as variables_lib2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
class LocalVariableTest(test.TestCase):
def test_local_variable(self):
with self.cached_session() as sess:
self.assertEquals([], variables_lib.local_variables())
value0 = 42
variables_lib2.local_variable(value0)
value1 = 43
variables_lib2.local_variable(value1)
variables = variables_lib.local_variables()
self.assertEquals(2, len(variables))
self.assertRaises(errors_impl.OpError, sess.run, variables)
variables_lib.variables_initializer(variables).run()
self.assertAllEqual(set([value0, value1]), set(sess.run(variables)))
class ReduceSumNTest(test.TestCase):
def test_reduce_sum_n(self):
with self.cached_session():
a = constant_op.constant(1)
b = constant_op.constant([2])
c = constant_op.constant([[3, 4], [5, 6]])
self.assertEqual(21, tensor_util.reduce_sum_n([a, b, c]).eval())
class AssertScalarIntTest(test.TestCase):
def test_assert_scalar_int(self):
tensor_util.assert_scalar_int(constant_op.constant(3, dtype=dtypes.int32))
tensor_util.assert_scalar_int(constant_op.constant(3, dtype=dtypes.int64))
tensor_util.assert_scalar_int(3)
with self.assertRaisesRegexp(ValueError, "Expected integer"):
tensor_util.assert_scalar_int(
constant_op.constant(
3, dtype=dtypes.float32))
with self.assertRaisesRegexp(ValueError, "Expected scalar"):
tensor_util.assert_scalar_int(
constant_op.constant(
[3, 4], dtype=dtypes.int32))
class WithShapeTest(test.TestCase):
def _assert_with_shape(self, tensor, expected_value, expected_shape,
unexpected_shapes):
for unexpected_shape in unexpected_shapes:
self.assertRaises(ValueError, tensor_util.with_shape, unexpected_shape,
tensor)
pattern = (
r"\[Wrong shape for %s \[expected\] \[actual\].\] \[%s\] \[%s\]" %
(tensor.name, " ".join([str(dim) for dim in unexpected_shape]),
" ".join([str(dim) for dim in expected_shape])))
self.assertRaisesRegexp(errors_impl.OpError,
re.compile(pattern),
tensor_util.with_shape(
constant_op.constant(unexpected_shape),
tensor).eval)
expected_placeholder = array_ops.placeholder(dtypes.float32)
self.assertRaisesRegexp(errors_impl.OpError,
re.compile(pattern),
tensor_util.with_same_shape(expected_placeholder,
tensor).eval,
{expected_placeholder: np.ones(unexpected_shape)})
self.assertIs(tensor, tensor_util.with_shape(expected_shape, tensor))
self.assertIs(
tensor,
tensor_util.with_same_shape(
constant_op.constant(
1, shape=expected_shape), tensor))
tensor_with_shape = tensor_util.with_shape(
constant_op.constant(expected_shape), tensor)
np.testing.assert_array_equal(expected_value, tensor_with_shape.eval())
tensor_with_same_shape = tensor_util.with_same_shape(expected_placeholder,
tensor)
np.testing.assert_array_equal(expected_value,
tensor_with_same_shape.eval({
expected_placeholder:
np.ones(expected_shape)
}))
def test_with_shape_invalid_expected_shape(self):
with self.cached_session():
self.assertRaisesRegexp(ValueError, "Invalid rank",
tensor_util.with_shape, [[1], [2]],
constant_op.constant(1.0))
def test_with_shape_invalid_type(self):
with self.cached_session():
self.assertRaisesRegexp(ValueError, "Invalid dtype",
tensor_util.with_shape, [1.1],
constant_op.constant([1.0]))
self.assertRaisesRegexp(ValueError, "Invalid dtype",
tensor_util.with_shape,
np.array([1.1]), constant_op.constant(1.0))
self.assertRaisesRegexp(ValueError, "Invalid dtype",
tensor_util.with_shape,
constant_op.constant(np.array([1.1])),
constant_op.constant(1.0))
def test_with_shape_0(self):
with self.cached_session():
value = 42
shape = [0]
unexpected_shapes = [[1], [2], [1, 1]]
self._assert_with_shape(
constant_op.constant(
value, shape=shape),
value,
shape,
unexpected_shapes)
def test_with_shape_1(self):
with self.cached_session():
value = [42]
shape = [1]
unexpected_shapes = [[0], [2], [1, 1]]
self._assert_with_shape(
constant_op.constant(
value, shape=shape),
value,
shape,
unexpected_shapes)
def test_with_shape_2(self):
with self.cached_session():
value = [42, 43]
shape = [2]
unexpected_shapes = [[0], [1], [2, 1]]
self._assert_with_shape(
constant_op.constant(
value, shape=shape),
value,
shape,
unexpected_shapes)
def test_with_shape_2x2(self):
with self.cached_session():
value = [[42, 43], [44, 45]]
shape = [2, 2]
unexpected_shapes = [[0], [1], [2, 1]]
self._assert_with_shape(
constant_op.constant(
value, shape=shape),
value,
shape,
unexpected_shapes)
def test_with_shape_2x2_with_partial_expected_shape(self):
with self.cached_session():
value = [[42, 43], [44, 45]]
actual_shape = [2, 2]
tensor = constant_op.constant(value, shape=actual_shape)
partial_expected_shape = tensor_shape.TensorShape([None, 2])
# Won't raise any exception here:
tensor_with_shape = tensor_util.with_shape(partial_expected_shape, tensor)
np.testing.assert_array_equal(value, tensor_with_shape.eval())
def test_with_shape_none(self):
with self.cached_session():
tensor_no_shape = array_ops.placeholder(dtypes.float32)
compatible_shape = [2, 2]
with_present_2x2 = tensor_util.with_shape(compatible_shape,
tensor_no_shape)
self.assertEquals(compatible_shape, with_present_2x2.get_shape().dims)
with_future_2x2 = tensor_util.with_shape(
constant_op.constant(compatible_shape), tensor_no_shape)
array_2x2 = [[42.0, 43.0], [44.0, 45.0]]
for tensor_2x2 in [with_present_2x2, with_future_2x2]:
np.testing.assert_array_equal(array_2x2,
tensor_2x2.eval({
tensor_no_shape: array_2x2
}))
self.assertRaisesRegexp(errors_impl.OpError, "Wrong shape",
tensor_2x2.eval,
{tensor_no_shape: [42.0, 43.0]})
self.assertRaisesRegexp(errors_impl.OpError, "Wrong shape",
tensor_2x2.eval, {tensor_no_shape: [42.0]})
def test_with_shape_partial(self):
with self.cached_session():
tensor_partial_shape = array_ops.placeholder(dtypes.float32)
tensor_partial_shape.set_shape([None, 2])
for incompatible_shape in [[0], [1]]:
self.assertRaisesRegexp(
ValueError, "Shapes must be equal rank, but are 2 and 1",
tensor_util.with_shape, incompatible_shape, tensor_partial_shape)
for incompatible_shape in [[1, 2, 1]]:
self.assertRaisesRegexp(ValueError, "Dimensions must be equal",
tensor_util.with_shape, incompatible_shape,
tensor_partial_shape)
for incompatible_shape in [[2, 1]]:
self.assertRaisesRegexp(
ValueError,
r"Dimension 1 in both shapes must be equal, but are 2 and 1. "
r"Shapes are \[\?,2\] and \[2,1\].",
tensor_util.with_shape, incompatible_shape, tensor_partial_shape)
compatible_shape = [2, 2]
with_present_2x2 = tensor_util.with_shape(compatible_shape,
tensor_partial_shape)
self.assertEquals(compatible_shape, with_present_2x2.get_shape().dims)
with_future_2x2 = tensor_util.with_shape(
constant_op.constant(compatible_shape), tensor_partial_shape)
array_2x2 = [[42.0, 43.0], [44.0, 45.0]]
for tensor_2x2 in [with_present_2x2, with_future_2x2]:
np.testing.assert_array_equal(array_2x2,
tensor_2x2.eval({
tensor_partial_shape: array_2x2
}))
self.assertRaises(ValueError, tensor_2x2.eval,
{tensor_partial_shape: [42.0, 43.0]})
self.assertRaises(ValueError, tensor_2x2.eval,
{tensor_partial_shape: [42.0]})
class RemoveSqueezableDimensionsTest(test.TestCase):
def testRemoveSqueezableDimensions(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=False,
labels_have_static_shape=False,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=False,
labels_have_static_shape=False,
labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_staticLabel(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=False,
labels_have_static_shape=True,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_staticLabel_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=False,
labels_have_static_shape=True,
labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_extraPredictionDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=True,
labels_have_static_shape=False,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_extraPredictionDim_staticLabel(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=True,
labels_have_static_shape=True,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_staticPrediction(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=False,
labels_have_static_shape=False,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_staticPrediction_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=False,
labels_have_static_shape=False,
labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_static(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=False,
labels_have_static_shape=True,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_static_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=False,
labels_have_static_shape=True,
labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_staticPrediction_extraPredictionDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=True,
labels_have_static_shape=False,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_static_extraPredictionDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=True,
labels_have_static_shape=True,
labels_have_extra_dim=False)
# TODO(ptucker): Replace this with parameterized test.
def _testRemoveSqueezableDimensions(self, predictions_have_static_shape,
predictions_have_extra_dim,
labels_have_static_shape,
labels_have_extra_dim):
assert not (predictions_have_extra_dim and labels_have_extra_dim)
predictions_value = (0, 1, 1, 0, 0, 1, 0)
labels_value = (0, 0, 1, 1, 0, 0, 0)
input_predictions_value = ([[p] for p in predictions_value] if
predictions_have_extra_dim else
predictions_value)
input_labels_value = ([[l] for l in labels_value] if labels_have_extra_dim
else labels_value)
with ops.Graph().as_default() as g:
feed_dict = {}
if predictions_have_static_shape:
predictions = constant_op.constant(
input_predictions_value, dtype=dtypes.int32)
else:
predictions = array_ops.placeholder(
dtype=dtypes.int32, name="predictions")
feed_dict[predictions] = input_predictions_value
if labels_have_static_shape:
labels = constant_op.constant(input_labels_value, dtype=dtypes.int32)
else:
labels = array_ops.placeholder(dtype=dtypes.int32, name="labels")
feed_dict[labels] = input_labels_value
squeezed_predictions, squeezed_labels = (
tensor_util.remove_squeezable_dimensions(predictions, labels))
with self.session(g):
variables_lib.local_variables_initializer().run()
self.assertAllClose(
predictions_value, squeezed_predictions.eval(feed_dict=feed_dict))
self.assertAllClose(
labels_value, squeezed_labels.eval(feed_dict=feed_dict))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/framework/python/framework/tensor_util_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""@experimental tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class ExperimentalTest(test.TestCase):
@test.mock.patch.object(logging, "warning", autospec=True)
def test_warning(self, mock_warning):
@experimental
def _fn(arg0, arg1):
"""fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
Returns:
Sum of args.
"""
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (experimental)"
"\n"
"\nWarning: THIS FUNCTION IS EXPERIMENTAL. It may change "
"or be removed at any time, and without warning."
"\n"
"\nArgs:"
"\n arg0: Arg 0."
"\n arg1: Arg 1."
"\n"
"\nReturns:"
"\n Sum of args.", _fn.__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"is experimental and may change")
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/framework/python/framework/experimental_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.framework.python.ops import prettyprint_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class PrettyPrintOpsTest(test.TestCase):
def testPrintTensorPassthrough(self):
a = constant_op.constant([1])
a = prettyprint_ops.print_op(a)
with self.cached_session():
self.assertEqual(a.eval(), constant_op.constant([1]).eval())
def testPrintSparseTensorPassthrough(self):
a = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
b = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
a = prettyprint_ops.print_op(a)
with self.cached_session():
self.assertAllEqual(
sparse_ops.sparse_tensor_to_dense(a).eval(),
sparse_ops.sparse_tensor_to_dense(b).eval())
def testPrintTensorArrayPassthrough(self):
a = tensor_array_ops.TensorArray(
size=2, dtype=dtypes.int32, clear_after_read=False)
a = a.write(1, 1)
a = a.write(0, 0)
a = prettyprint_ops.print_op(a)
with self.cached_session():
self.assertAllEqual(a.stack().eval(), constant_op.constant([0, 1]).eval())
def testPrintVariable(self):
a = variables.Variable(1.0)
a = prettyprint_ops.print_op(a)
with self.cached_session():
variables.global_variables_initializer().run()
a.eval()
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/framework/python/ops/prettyprint_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script Language Operators.
@@py_func
"""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops.script_ops import py_func as _py_func
from tensorflow.python.util import nest
__all__ = ['py_func']
def py_func(func,
args=(),
kwargs=None,
output_types=None,
output_shapes=None,
stateful=True,
name=None):
"""Wraps a python function and uses it as a TensorFlow op.
  This function is a wrapper around `tf.compat.v1.py_func` that extends it with
  support for `kwargs` and `output_shapes`. It also renames some arguments.
Given a python function `func`, which takes numpy arrays as its
inputs and returns numpy arrays as its outputs, wrap this function as an
operation in a TensorFlow graph. The following snippet constructs a simple
  TensorFlow graph that invokes the `np.sinh()` NumPy function as an operation
in the graph:
```python
def my_func(x):
# x will be a numpy array with the contents of the placeholder below
return np.sinh(x)
inp = tf.compat.v1.placeholder(tf.float32)
y = tf.compat.v1.py_func(my_func, [inp], tf.float32)
```
**N.B.** The `tf.compat.v1.py_func()` operation has the following known
limitations:
* The body of the function (i.e. `func`) will not be serialized in a
`GraphDef`. Therefore, you should not use this function if you need to
serialize your model and restore it in a different environment.
  * The operation must run in the same address space as the Python program
    that calls `tf.compat.v1.py_func()`. If you are using distributed
    TensorFlow, you must run a `tf.distribute.Server` in the same process as
    the program that calls `tf.compat.v1.py_func()`, and you must pin the
    created operation to a device in that server (e.g. using
    `with tf.device():`).
Args:
func: A Python function, which accepts a list of NumPy `ndarray` objects
having element types that match the corresponding `tf.Tensor` objects in
`inp`, and returns a list of `ndarray` objects (or a single `ndarray`)
having element types that match the corresponding values in `Tout`.
args: A list of `Tensor` objects.
kwargs: A dict with `Tensor` objects as values.
output_types: A nested structure of tensorflow data types or a single
tensorflow data type if there is only one, indicating what `func` returns.
    output_shapes: Same as output_types, except the types are replaced with
      shapes (optional).
stateful: (Boolean.) If True, the function should be considered stateful. If
a function is stateless, when given the same input it will return the same
output and have no observable side effects. Optimizations such as common
subexpression elimination are only performed on stateless operations.
name: A name for the operation (optional).
Returns:
    A TensorFlow op that wraps the input Python function.
"""
if kwargs is None:
kwargs = {}
if not isinstance(args, (list, tuple)):
    raise TypeError('args must be a list or tuple, not {}. args: {}'.format(
        type(args), args))
  if not isinstance(kwargs, dict):
    raise TypeError('kwargs must be a dict, not {}. kwargs: {}'.format(
        type(kwargs), kwargs))
# For dynamic type inference use callable output_types and output_shapes
if callable(output_types):
    # If callable, call it with the tensor args to obtain the output types.
output_types = output_types(*args, **kwargs)
if callable(output_shapes):
    # If callable, call it with the tensor args to obtain the output shapes.
output_shapes = output_shapes(*args, **kwargs)
flat_output_types = nest.flatten(output_types)
args = (args, kwargs)
flat_args = nest.flatten(args)
def python_function_wrapper(*py_args):
py_args, py_kwargs = nest.pack_sequence_as(args, py_args)
ret = func(*py_args, **py_kwargs)
    # TODO(alextp): Catch exceptions and improve the message, because
    # TensorFlow is not able to preserve the traceback, i.e. the exception
    # does not contain any information about where it was raised.
nest.assert_shallow_structure(output_types, ret)
return nest.flatten(ret)
flat_values = _py_func(
python_function_wrapper,
flat_args,
flat_output_types,
stateful=stateful,
name=name)
if output_shapes is not None:
    # I am not sure if this is necessary
output_shapes = nest.map_structure_up_to(output_types,
tensor_shape.as_shape,
output_shapes)
flattened_shapes = nest.flatten(output_shapes)
for ret_t, shape in zip(flat_values, flattened_shapes):
ret_t.set_shape(shape)
return nest.pack_sequence_as(output_types, flat_values)
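# Illustrative usage sketch: a minimal, hedged example of how this wrapper
# might be called with kwargs and explicit output types/shapes. The names
# `_scaled_sum` and `_example_py_func_usage` are hypothetical.
def _example_py_func_usage():
  import numpy as np  # Local imports keep the sketch self-contained.
  from tensorflow.python.framework import constant_op
  from tensorflow.python.framework import dtypes
  def _scaled_sum(x, scale=1.0):
    # Receives NumPy arrays, returns a NumPy float32 scalar.
    return np.float32(scale * x.sum())
  x = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.float32)
  # `kwargs` values are passed to `_scaled_sum` as NumPy arrays; `output_types`
  # and `output_shapes` describe the single scalar float32 result.
  return py_func(
      _scaled_sum,
      args=(x,),
      kwargs={'scale': constant_op.constant(2.0)},
      output_types=dtypes.float32,
      output_shapes=tensor_shape.TensorShape([]))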
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/framework/python/ops/script_ops.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variable functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import re
from tensorflow.contrib.framework.python.ops import add_arg_scope as contrib_add_arg_scope
from tensorflow.contrib.framework.python.ops import gen_variable_ops
from tensorflow.contrib.util import loader
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import training_util
from tensorflow.python.util.deprecation import deprecated
__all__ = ['add_model_variable',
'assert_global_step',
'assert_or_get_global_step',
'assign_from_checkpoint',
'assign_from_checkpoint_fn',
'assign_from_values',
'assign_from_values_fn',
'create_global_step',
'filter_variables',
'get_global_step',
'get_or_create_global_step',
'get_local_variables',
'get_model_variables',
'get_trainable_variables',
'get_unique_variable',
'get_variables_by_name',
'get_variables_by_suffix',
'get_variable_full_name',
'get_variables_to_restore',
'get_variables',
'global_variable',
'local_variable',
'model_variable',
'variable',
'VariableDeviceChooser',
'zero_initializer']
def zero_initializer(ref, use_locking=True, name="zero_initializer"):
"""Initialize 'ref' with all zeros, ref tensor should be uninitialized.
If already initialized, you will get ValueError. This op is intended to
save memory during initialization.
Args:
ref: ref of the tensor need to be zero initialized.
name: optional name for this operation.
Returns:
ref that initialized.
Raises:
ValueError: If ref tensor is initialized.
"""
loader.load_op_library(
resource_loader.get_path_to_datafile('_variable_ops.so'))
if resource_variable_ops.is_resource_variable(ref):
return gen_variable_ops.zero_var_initializer(
ref.handle, shape=ref.shape, dtype=ref.dtype, name=name)
else:
return gen_variable_ops.zero_initializer(ref, name=name)
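# Illustrative usage sketch: a hedged example of zero-initializing a freshly
# created, still-uninitialized variable instead of running its regular
# initializer. The variable name is hypothetical.
def _example_zero_initializer_usage():
  var = variables.Variable(array_ops.ones([1024, 1024]), name='example_var')
  # Running `init_op` fills `var` with zeros without materializing the large
  # ones tensor above; `var` must not have been initialized yet.
  init_op = zero_initializer(var)
  return var, init_op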
@deprecated(None, 'Please switch to tf.train.assert_global_step')
def assert_global_step(global_step_tensor):
training_util.assert_global_step(global_step_tensor)
def assert_or_get_global_step(graph=None, global_step_tensor=None):
"""Verifies that a global step tensor is valid or gets one if None is given.
If `global_step_tensor` is not None, check that it is a valid global step
tensor (using `assert_global_step`). Otherwise find a global step tensor using
`get_global_step` and return it.
Args:
graph: The graph to find the global step tensor for.
global_step_tensor: The tensor to check for suitability as a global step. If
None is given (the default), find a global step tensor.
Returns:
A tensor suitable as a global step, or `None` if none was provided and none
was found.
"""
if global_step_tensor is None:
# Get the global step tensor the same way the supervisor would.
global_step_tensor = get_global_step(graph)
else:
assert_global_step(global_step_tensor)
return global_step_tensor
@deprecated(None, 'Please switch to tf.train.get_global_step')
def get_global_step(graph=None):
return training_util.get_global_step(graph)
@deprecated(None, 'Please switch to tf.train.create_global_step')
def create_global_step(graph=None):
"""Create global step tensor in graph.
This API is deprecated. Use core framework training version instead.
Args:
graph: The graph in which to create the global step tensor. If missing, use
default graph.
Returns:
Global step tensor.
Raises:
ValueError: if global step tensor is already defined.
"""
return training_util.create_global_step(graph)
@deprecated(None, 'Please switch to tf.train.get_or_create_global_step')
def get_or_create_global_step(graph=None):
"""Returns and create (if necessary) the global step tensor.
Args:
graph: The graph in which to create the global step tensor. If missing, use
default graph.
Returns:
The global step tensor.
"""
return training_util.get_or_create_global_step(graph)
def local_variable(initial_value,
validate_shape=True,
name=None,
use_resource=None):
"""Create a variable with a value and add it to `GraphKeys.LOCAL_VARIABLES`.
Args:
initial_value: See variables.Variable.__init__.
validate_shape: See variables.Variable.__init__.
name: See variables.Variable.__init__.
use_resource: If `True` use a ResourceVariable instead of a Variable.
Returns:
New variable.
"""
return variable_scope.variable(
initial_value,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
validate_shape=validate_shape,
use_resource=use_resource,
name=name)
def global_variable(initial_value,
validate_shape=True,
name=None,
use_resource=None):
"""Create a variable with a value and add it to `GraphKeys.GLOBAL_VARIABLES`.
Args:
initial_value: See variables.Variable.__init__.
validate_shape: See variables.Variable.__init__.
name: See variables.Variable.__init__.
use_resource: If `True` use a ResourceVariable instead of a Variable.
Returns:
New variable.
"""
return variable_scope.variable(
initial_value,
trainable=False,
collections=[ops.GraphKeys.GLOBAL_VARIABLES],
validate_shape=validate_shape,
use_resource=use_resource,
name=name)
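# Illustrative usage sketch: a hedged example showing the two helpers above.
# Both variables are non-trainable, but they land in different collections.
# The variable names are hypothetical.
def _example_local_and_global_variable():
  local_count = local_variable(0, name='example_local_count')
  global_count = global_variable(0, name='example_global_count')
  return local_count, global_count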
@contrib_add_arg_scope
def variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
device=None,
partitioner=None,
custom_getter=None,
use_resource=None,
synchronization=variables.VariableSynchronization.AUTO,
aggregation=variables.VariableAggregation.NONE):
"""Gets an existing variable with these parameters or creates a new one.
Args:
name: the name of the new or existing variable.
shape: shape of the new or existing variable.
dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: initializer for the variable if one is created.
regularizer: a (Tensor -> Tensor or None) function; the result of applying
it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: A list of collection names to which the Variable will be added.
      If None, it defaults to `tf.GraphKeys.GLOBAL_VARIABLES`.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's device.
    device: Optional device to place the variable. It can be a string or a
function that is called to get the device for the variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
custom_getter: Callable that allows overwriting the internal get_variable
method and has to have the same signature.
use_resource: If `True` use a ResourceVariable instead of a Variable.
    synchronization: Indicates when a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing variable.
"""
collections = list(collections if collections is not None else
[ops.GraphKeys.GLOBAL_VARIABLES])
# Remove duplicates
collections = list(set(collections))
getter = variable_scope.get_variable
if custom_getter is not None:
getter = functools.partial(
custom_getter, reuse=variable_scope.get_variable_scope().reuse)
with ops.device(device or ''):
return getter(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation)
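# Illustrative usage sketch: a hedged example that creates a CPU-pinned float
# variable tracked both in the global variables collection and in an extra
# custom collection. The variable and collection names are hypothetical.
def _example_variable_usage():
  return variable(
      'example_weights',
      shape=[10, 10],
      collections=[ops.GraphKeys.GLOBAL_VARIABLES, 'example_collection'],
      device='/cpu:0')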
@contrib_add_arg_scope
def model_variable(name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
device=None,
partitioner=None,
custom_getter=None,
use_resource=None,
synchronization=variables.VariableSynchronization.AUTO,
aggregation=variables.VariableAggregation.NONE):
"""Gets an existing model variable with these parameters or creates a new one.
Args:
name: the name of the new or existing variable.
shape: shape of the new or existing variable.
dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: initializer for the variable if one is created.
regularizer: a (Tensor -> Tensor or None) function; the result of applying
it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: A list of collection names to which the Variable will be added.
Note that the variable is always also added to the
`GraphKeys.GLOBAL_VARIABLES` and `GraphKeys.MODEL_VARIABLES` collections.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's device.
    device: Optional device to place the variable. It can be a string or a
function that is called to get the device for the variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
custom_getter: Callable that allows overwriting the internal get_variable
method and has to have the same signature.
use_resource: If `True` use a ResourceVariable instead of a Variable.
    synchronization: Indicates when a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing variable.
"""
collections = list(collections or [])
collections += [ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.MODEL_VARIABLES]
var = variable(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
device=device,
partitioner=partitioner,
custom_getter=custom_getter,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation)
return var
def add_model_variable(var):
"""Adds a variable to the `GraphKeys.MODEL_VARIABLES` collection.
Args:
var: a variable.
"""
if var not in ops.get_collection(ops.GraphKeys.MODEL_VARIABLES):
ops.add_to_collection(ops.GraphKeys.MODEL_VARIABLES, var)
def get_variables(scope=None,
suffix=None,
collection=ops.GraphKeys.GLOBAL_VARIABLES):
"""Gets the list of variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return. Can be a
variable scope or a string.
suffix: an optional suffix for filtering the variables to return.
    collection: the collection to search in. Defaults to
`GraphKeys.GLOBAL_VARIABLES`.
Returns:
a list of variables in collection with scope and suffix.
"""
if isinstance(scope, variable_scope.VariableScope):
scope = scope.name
if suffix is not None:
if ':' not in suffix:
suffix += ':'
scope = (scope or '') + '.*' + suffix
return ops.get_collection(collection, scope)
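# Illustrative usage sketch: a hedged example of how scope and suffix
# filtering compose. The scope and variable names are hypothetical.
def _example_get_variables_usage():
  with variable_scope.variable_scope('example_scope'):
    variable('weights', shape=[2, 2])
    variable('biases', shape=[2])
  all_vars = get_variables('example_scope')  # Both variables.
  weight_vars = get_variables(scope='example_scope', suffix='weights')
  return all_vars, weight_vars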
def get_model_variables(scope=None, suffix=None):
"""Gets the list of model variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a list of variables in collection with scope and suffix.
"""
return get_variables(scope, suffix, ops.GraphKeys.MODEL_VARIABLES)
def get_local_variables(scope=None, suffix=None):
"""Gets the list of local variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a list of variables in collection with scope and suffix.
"""
return get_variables(scope, suffix, ops.GraphKeys.LOCAL_VARIABLES)
def get_trainable_variables(scope=None, suffix=None):
"""Gets the list of trainable variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a list of variables in the trainable collection with scope and suffix.
"""
return get_variables(scope, suffix, ops.GraphKeys.TRAINABLE_VARIABLES)
def get_variables_to_restore(include=None, exclude=None):
"""Gets the list of the variables to restore.
Args:
    include: an optional list/tuple of scope strings for filtering which
      variables from the VARIABLES collection to include. If None, all
      variables are included.
    exclude: an optional list/tuple of scope strings for filtering which
      variables from the VARIABLES collection to exclude. If None, no
      variables are excluded.
Returns:
a list of variables to restore.
Raises:
TypeError: include or exclude is provided but is not a list or a tuple.
"""
if include is None:
# Include all variables.
vars_to_include = get_variables()
else:
if not isinstance(include, (list, tuple)):
raise TypeError('include is provided but is not a list or a tuple.')
vars_to_include = []
for scope in include:
vars_to_include += get_variables(scope)
vars_to_exclude = set()
if exclude is not None:
if not isinstance(exclude, (list, tuple)):
raise TypeError('exclude is provided but is not a list or a tuple.')
for scope in exclude:
vars_to_exclude |= set(get_variables(scope))
# Exclude the variables in vars_to_exclude
return [v for v in vars_to_include if v not in vars_to_exclude]
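# Illustrative usage sketch: a hedged example of a common fine-tuning pattern,
# restoring the feature extractor but not the final logits layer. The scope
# names are hypothetical.
def _example_get_variables_to_restore_usage():
  return get_variables_to_restore(
      include=['feature_extractor'], exclude=['feature_extractor/logits'])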
def get_variables_by_suffix(suffix, scope=None):
"""Gets the list of variables that end with the given suffix.
Args:
suffix: suffix for filtering the variables to return.
scope: an optional scope for filtering the variables to return.
Returns:
    a copied list of variables with the given suffix and scope.
"""
return get_variables(scope=scope, suffix=suffix)
def get_variables_by_name(given_name, scope=None):
"""Gets the list of variables that were given that name.
Args:
given_name: name given to the variable without any scope.
scope: an optional scope for filtering the variables to return.
Returns:
a copied list of variables with the given name and scope.
"""
suffix = '/' + given_name + ':|^' + given_name + ':'
return get_variables(scope=scope, suffix=suffix)
def get_unique_variable(var_op_name):
"""Gets the variable uniquely identified by that var_op_name.
Args:
var_op_name: the full name of the variable op, including the scope.
Returns:
a tensorflow variable.
Raises:
ValueError: if no variable uniquely identified by the name exists.
"""
candidates = get_variables(scope=var_op_name)
if not candidates:
raise ValueError('Couldn\'t find variable %s' % var_op_name)
for candidate in candidates:
if candidate.op.name == var_op_name:
return candidate
raise ValueError('Variable %s does not uniquely identify a variable' %
var_op_name)
def assign_from_values(var_names_to_values):
"""Creates an assignment operation from a given mapping.
This function provides a mechanism for performing assignment of variables
to values in a way that does not fill the graph with large assignment values.
Args:
var_names_to_values: A map from variable names to values.
Returns:
assign_op: An `Operation` that assigns each of the given variables to the
requested values.
feed_dict: The feed dictionary to use when evaluating `assign_op`.
Raises:
ValueError: if any of the given variable names were not found.
"""
feed_dict = {}
assign_ops = []
for var_name in var_names_to_values:
var_value = var_names_to_values[var_name]
var = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, var_name)
if not var:
raise ValueError('Variable %s wasn\'t found' % var_name)
elif len(var) > 1:
# tf.compat.v1.get_collection is just a filter on the prefix: find the exact match:
found = False
for v in var:
if v.op.name == var_name:
var = v
found = True
break
if not found:
raise ValueError('Variable %s doesn\'t uniquely identify a variable' %
var_name)
else:
var = var[0]
# TODO(nsilberman): ensure placeholder and assign are on the same device.
# Assign a placeholder to the value that will be filled later.
placeholder_name = 'placeholder/' + var.op.name
placeholder_value = array_ops.placeholder(
dtype=var.dtype.base_dtype,
shape=var.get_shape(),
name=placeholder_name)
assign_ops.append(var.assign(placeholder_value))
feed_dict[placeholder_value] = var_value.reshape(var.get_shape())
assign_op = control_flow_ops.group(*assign_ops)
return assign_op, feed_dict
def assign_from_values_fn(var_names_to_values):
"""Returns a function that assigns specific variables from the given values.
This function provides a mechanism for performing assignment of variables
to values in a way that does not fill the graph with large assignment values.
Args:
var_names_to_values: A map from variable names to values.
Returns:
    A function that takes a single argument, a `tf.compat.v1.Session`, and
    applies the assignment operation.
Raises:
ValueError: if any of the given variable names were not found.
"""
assign_op, feed_dict = assign_from_values(var_names_to_values)
def callback(session):
return session.run(assign_op, feed_dict)
return callback
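# Illustrative usage sketch: a hedged example that builds an init function
# from a name-to-value mapping and runs it in an existing session. The
# variable name and shape are hypothetical.
def _example_assign_from_values_fn_usage(session):
  import numpy as np  # Local import to keep the sketch self-contained.
  variable('example_weights', shape=[2, 3], dtype=dtypes.float32)
  init_fn = assign_from_values_fn(
      {'example_weights': np.zeros([2, 3], dtype=np.float32)})
  # Feeds the value through a placeholder-backed assign op.
  init_fn(session)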
# pylint: disable=protected-access
# Currently variable_scope doesn't provide very good APIs to access
# all variables under scope and retrieve and check existing scopes.
def get_variable_full_name(var):
"""Returns the full name of a variable.
For normal Variables, this is the same as the var.op.name. For
sliced or PartitionedVariables, this name is the same for all the
slices/partitions. In both cases, this is normally the name used in
a checkpoint file.
Args:
var: A `Variable` object.
Returns:
A string that is the full name.
"""
if var._save_slice_info:
return var._save_slice_info.full_name
else:
return var.op.name
# TODO(nsilberman): add flag to load exponential moving averages instead
#
# TODO(sguada): Update docs in slim/g3doc/index.md to describe
# the new feature where the var_list dictionary can have values that
# are each a list of Variables.
def assign_from_checkpoint(model_path, var_list, ignore_missing_vars=False):
"""Creates an operation to assign specific variables from a checkpoint.
Args:
model_path: The full path to the model checkpoint. To get latest checkpoint
use `model_path = tf.train.latest_checkpoint(checkpoint_dir)`
var_list: A list of (possibly partitioned) `Variable` objects or a
dictionary mapping names in the checkpoint to the corresponding variables
or list of variables to initialize from that checkpoint value. For
partitioned Variables, the name in the checkpoint must be the full
      variable, not the name of the partitioned variable, e.g. "my_var" rather
than "my_var/part_4". If empty, returns no_op(), {}.
ignore_missing_vars: Boolean, if True ignore variables missing in the
checkpoint with a warning instead of failing.
Returns:
the restore_op and the feed_dict that need to be run to restore var_list.
Raises:
ValueError: If `ignore_missing_vars` is False and the checkpoint specified
at `model_path` is missing one of the variables in `var_list`.
"""
# Normalize var_list into a dictionary mapping names in the
# checkpoint to the list of variables to initialize from that
# checkpoint variable. Sliced (including partitioned) variables will
# end up under the same key.
grouped_vars = {}
if isinstance(var_list, (tuple, list)):
for var in var_list:
ckpt_name = get_variable_full_name(var)
if ckpt_name not in grouped_vars:
grouped_vars[ckpt_name] = []
grouped_vars[ckpt_name].append(var)
else:
for ckpt_name, value in var_list.items():
if isinstance(value, (tuple, list)):
grouped_vars[ckpt_name] = value
else:
grouped_vars[ckpt_name] = [value]
# Read each checkpoint entry. Create a placeholder variable and
# add the (possibly sliced) data from the checkpoint to the feed_dict.
reader = pywrap_tensorflow.NewCheckpointReader(model_path)
feed_dict = {}
assign_ops = []
for ckpt_name in grouped_vars:
if not reader.has_tensor(ckpt_name):
log_str = 'Checkpoint is missing variable [%s]' % ckpt_name
if ignore_missing_vars:
logging.warning(log_str)
continue
else:
raise ValueError(log_str)
ckpt_value = reader.get_tensor(ckpt_name)
for var in grouped_vars[ckpt_name]:
placeholder_tensor = array_ops.placeholder(
dtype=var.dtype.base_dtype,
shape=var.get_shape(),
name='placeholder/' + var.op.name)
assign_ops.append(var.assign(placeholder_tensor))
if not var._save_slice_info:
if var.get_shape() != ckpt_value.shape:
raise ValueError(
'Total size of new array must be unchanged for %s '
'lh_shape: [%s], rh_shape: [%s]' %
(ckpt_name, str(ckpt_value.shape), str(var.get_shape())))
feed_dict[placeholder_tensor] = ckpt_value.reshape(ckpt_value.shape)
else:
slice_dims = zip(var._save_slice_info.var_offset,
var._save_slice_info.var_shape)
slice_dims = [(start, start + size) for (start, size) in slice_dims]
slice_dims = [slice(*x) for x in slice_dims]
slice_value = ckpt_value[slice_dims]
slice_value = slice_value.reshape(var._save_slice_info.var_shape)
feed_dict[placeholder_tensor] = slice_value
assign_op = control_flow_ops.group(*assign_ops)
return assign_op, feed_dict
# pylint: enable=protected-access
def assign_from_checkpoint_fn(model_path,
var_list,
ignore_missing_vars=False,
reshape_variables=False):
"""Returns a function that assigns specific variables from a checkpoint.
If ignore_missing_vars is True and no variables are found in the checkpoint
it returns None.
Args:
model_path: The full path to the model checkpoint. To get latest checkpoint
use `model_path = tf.train.latest_checkpoint(checkpoint_dir)`
    var_list: A list of `Variable` objects or a dictionary mapping names in the
      checkpoint to the corresponding variables to initialize. Must not be
      empty or `None`; see the Raises section below.
ignore_missing_vars: Boolean, if True it would ignore variables missing in
the checkpoint with a warning instead of failing.
reshape_variables: Boolean, if True it would automatically reshape variables
      which are of different shape than the ones stored in the checkpoint but
which have the same number of elements.
Returns:
    A function that takes a single argument, a `tf.compat.v1.Session`, and
    applies the assignment operation. If no matching variables were found in
    the checkpoint then `None` is returned.
Raises:
ValueError: If var_list is empty.
"""
if not var_list:
raise ValueError('var_list cannot be empty')
if ignore_missing_vars:
reader = pywrap_tensorflow.NewCheckpointReader(model_path)
if isinstance(var_list, dict):
var_dict = var_list
else:
var_dict = {var.op.name: var for var in var_list}
available_vars = {}
for var in var_dict:
if reader.has_tensor(var):
available_vars[var] = var_dict[var]
else:
logging.warning('Variable %s missing in checkpoint %s', var, model_path)
var_list = available_vars
if var_list:
saver = tf_saver.Saver(
var_list,
reshape=reshape_variables,
write_version=saver_pb2.SaverDef.V1)
def callback(session):
saver.restore(session, model_path)
return callback
else:
logging.warning('No Variables to restore')
return None
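# Illustrative usage sketch: a hedged example that restores all variables
# except those under "logits" from a checkpoint before training. The
# checkpoint path and scope name are hypothetical placeholders supplied by the
# caller.
def _example_assign_from_checkpoint_fn_usage(checkpoint_path, session):
  vars_to_restore = get_variables_to_restore(exclude=['logits'])
  init_fn = assign_from_checkpoint_fn(
      checkpoint_path, vars_to_restore, ignore_missing_vars=True)
  if init_fn is not None:
    init_fn(session)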
class VariableDeviceChooser(object):
"""Device chooser for variables.
  When using a parameter server, it assigns variables to tasks in a
  round-robin fashion. When not using a parameter server, it allows GPU or CPU
  placement.
"""
def __init__(self,
num_tasks=0,
job_name='ps',
device_type='CPU',
device_index=0,
replica=None):
"""Initialize VariableDeviceChooser.
Usage:
To use with 2 parameter servers:
VariableDeviceChooser(2)
To use without parameter servers:
VariableDeviceChooser()
VariableDeviceChooser(device_type='GPU') # For GPU placement
Args:
num_tasks: number of tasks.
job_name: String, a name for the parameter server job.
      device_type: Optional device type string (e.g. "CPU" or "GPU").
      device_index: int. Optional device index. If left unspecified, device
        represents 'any' device_index.
      replica: Optional replica index.
    """
self._job_name = job_name
self._device_type = device_type
self._device_index = device_index
self._replica = replica
self._num_tasks = num_tasks
self._next_task_id = 0
def __call__(self, op):
device_spec = tf_device.DeviceSpec(
replica=self._replica,
device_type=self._device_type,
device_index=self._device_index)
if self._num_tasks > 0:
task_id = self._next_task_id
self._next_task_id = (self._next_task_id + 1) % self._num_tasks
device_spec.job = self._job_name
device_spec.task = task_id
return device_spec.to_string()
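# Illustrative usage sketch: a hedged example that spreads variables over two
# parameter-server tasks by passing the chooser as the `device` argument of
# the variable helpers above. The variable names are hypothetical.
def _example_variable_device_chooser_usage():
  chooser = VariableDeviceChooser(num_tasks=2)
  # Ops created for each variable query the chooser, which cycles over
  # /job:ps/task:0 and /job:ps/task:1.
  weights = model_variable('example_weights', shape=[4, 4], device=chooser)
  biases = model_variable('example_biases', shape=[4], device=chooser)
  return weights, biases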
def filter_variables(var_list,
include_patterns=None,
exclude_patterns=None,
reg_search=True):
"""Filter a list of variables using regular expressions.
First includes variables according to the list of include_patterns.
Afterwards, eliminates variables according to the list of exclude_patterns.
For example, one can obtain a list of variables with the weights of all
convolutional layers (depending on the network definition) by:
```python
variables = tf.contrib.framework.get_model_variables()
conv_weight_variables = tf.contrib.framework.filter_variables(
variables,
include_patterns=['Conv'],
exclude_patterns=['biases', 'Logits'])
```
Args:
var_list: list of variables.
    include_patterns: list of regular expressions to include. Defaults to None,
      which means all variables pass the include filter. Otherwise, a
      variable is included if it matches any of the include_patterns.
    exclude_patterns: list of regular expressions to exclude. Defaults to None,
      which means no variables are excluded. Otherwise, a
      variable is excluded if it matches any of the exclude_patterns.
reg_search: boolean. If True (default), performs re.search to find matches
(i.e. pattern can match any substring of the variable name). If False,
performs re.match (i.e. regexp should match from the beginning of the
variable name).
Returns:
filtered list of variables.
"""
if reg_search:
reg_exp_func = re.search
else:
reg_exp_func = re.match
# First include variables.
if include_patterns is None:
included_variables = list(var_list)
else:
included_variables = []
for var in var_list:
if any(reg_exp_func(ptrn, var.name) for ptrn in include_patterns):
included_variables.append(var)
# Afterwards, exclude variables.
if exclude_patterns is None:
filtered_variables = included_variables
else:
filtered_variables = []
for var in included_variables:
if not any(reg_exp_func(ptrn, var.name) for ptrn in exclude_patterns):
filtered_variables.append(var)
return filtered_variables
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/framework/python/ops/variables.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""arg_scope tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.framework.python.ops import arg_scoped_arguments
from tensorflow.python.platform import test
@add_arg_scope
def func1(*args, **kwargs):
return (args, kwargs)
@add_arg_scope
def func2(*args, **kwargs):
return (args, kwargs)
@add_arg_scope
def func3(args, a=None, b=1, c=2):
"""Some cool doc string."""
return (args, a, b, c)
@add_arg_scope
def func4(x='x', y='y'):
if x:
pass
if y:
pass
def _key_op(op):
return getattr(op, '_key_op', str(op))
class ArgScopeTest(test.TestCase):
def testEmptyArgScope(self):
with self.cached_session():
with arg_scope([]) as sc:
self.assertEqual(sc, {})
def testClearArgScope(self):
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
key_op = _key_op(func1)
func1_scope = {key_op: func1_kwargs.copy()}
with self.cached_session():
with arg_scope([func1], a=1, b=None, c=[1]) as sc1:
self.assertEqual(sc1, func1_scope)
with arg_scope({}) as sc2:
self.assertEqual(sc2, {})
with arg_scope([]) as current_arg_scope:
self.assertEqual(current_arg_scope, func1_scope)
def testNonDecorated(self):
def my_func(t, a=None):
return (t, a)
with self.assertRaises(ValueError):
with arg_scope([my_func], a=1):
pass
def testUnexpectedArg(self):
with self.assertRaises(TypeError):
with arg_scope([func3], d=1):
func3(1)
def testCurrentArgScope(self):
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
key_op = _key_op(func1)
current_scope = {key_op: func1_kwargs.copy()}
with self.cached_session():
with arg_scope([func1], a=1, b=None, c=[1]) as scope:
self.assertDictEqual(scope, current_scope)
def testArgScopedArguments(self):
func3_kwargs = ('a', 'b', 'c')
self.assertEquals(arg_scoped_arguments(func3), func3_kwargs)
def testCurrentArgScopeNested(self):
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
func2_kwargs = {'b': 2, 'd': [2]}
key = _key_op
current_scope = {
key(func1): func1_kwargs.copy(),
key(func2): func2_kwargs.copy()
}
with self.cached_session():
with arg_scope([func1], a=1, b=None, c=[1]):
with arg_scope([func2], b=2, d=[2]) as scope:
self.assertDictEqual(scope, current_scope)
def testReuseArgScope(self):
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
key_op = _key_op(func1)
current_scope = {key_op: func1_kwargs.copy()}
with self.cached_session():
with arg_scope([func1], a=1, b=None, c=[1]) as scope1:
pass
with arg_scope(scope1) as scope:
self.assertDictEqual(scope, current_scope)
def testReuseArgScopeNested(self):
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
func2_kwargs = {'b': 2, 'd': [2]}
key = _key_op
current_scope1 = {key(func1): func1_kwargs.copy()}
current_scope2 = {
key(func1): func1_kwargs.copy(),
key(func2): func2_kwargs.copy()
}
with self.cached_session():
with arg_scope([func1], a=1, b=None, c=[1]) as scope1:
with arg_scope([func2], b=2, d=[2]) as scope2:
pass
with arg_scope(scope1):
with arg_scope([]) as current_arg_scope:
self.assertDictEqual(current_arg_scope, current_scope1)
with arg_scope(scope2):
with arg_scope([]) as current_arg_scope:
self.assertDictEqual(current_arg_scope, current_scope2)
def testSimpleArgScope(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
with self.cached_session():
with arg_scope([func1], a=1, b=None, c=[1]):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
def testSimpleArgScopeWithTuple(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
with self.cached_session():
with arg_scope((func1,), a=1, b=None, c=[1]):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
def testOverwriteArgScope(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': 2, 'c': [1]}
with arg_scope([func1], a=1, b=None, c=[1]):
args, kwargs = func1(0, b=2)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
def testNestedArgScope(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
with arg_scope([func1], a=1, b=None, c=[1]):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
func1_kwargs['b'] = 2
with arg_scope([func1], b=2):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
def testNestedArgScopeObjectCreatedOutsideScopeOverridesArgScope(self):
def get_scope_object():
with arg_scope([func1], a=1, b=None, c=[1]) as sc:
return sc
scope_object = get_scope_object()
with arg_scope([func1], b=2, d=10):
with arg_scope(scope_object):
args, kwargs = func1(0)
self.assertTupleEqual(args, (0,))
self.assertDictEqual(kwargs, {'a': 1, 'b': None, 'c': [1]})
def testArgScopeObjectCreatedWithinScopeInheritsArgScope(self):
def get_scope_object():
with arg_scope([func1], a=1, b=None, c=[1]) as sc:
return sc
with arg_scope([func1], b=2, d=10):
with arg_scope(get_scope_object()):
args, kwargs = func1(0)
self.assertTupleEqual(args, (0,))
self.assertDictEqual(kwargs, {'a': 1, 'b': None, 'c': [1], 'd': 10})
def testSharedArgScope(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
with arg_scope([func1, func2], a=1, b=None, c=[1]):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
args, kwargs = func2(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
def testSharedArgScopeTuple(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
with arg_scope((func1, func2), a=1, b=None, c=[1]):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
args, kwargs = func2(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
def testPartiallySharedArgScope(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
func2_args = (1,)
func2_kwargs = {'a': 1, 'b': None, 'd': [2]}
with arg_scope([func1, func2], a=1, b=None):
with arg_scope([func1], c=[1]):
with arg_scope([func2], d=[2]):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
args, kwargs = func2(1)
self.assertTupleEqual(args, func2_args)
self.assertDictEqual(kwargs, func2_kwargs)
def testAddArgScopeRaceCondition(self):
func4_kwargs = ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h')
for i in range(4):
# redefine the function with different args
@add_arg_scope
def func4(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8):
pass
self.assertTupleEqual(arg_scoped_arguments(func4), func4_kwargs)
def testDocString(self):
self.assertEqual(func3.__doc__, 'Some cool doc string.')
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/framework/python/ops/arg_scope_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to provide simpler and prettier logging."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
__all__ = ["print_op"]
def _get_tensor_repr(t,
print_tensor_name=True,
print_tensor_type=True,
print_shape=True,
summarize_indicator_vector=True):
"""Return a list of Tensors that summarize the given tensor t."""
tensor_list = []
if print_tensor_name and isinstance(t, ops.Tensor):
tensor_list.append(constant_op.constant("Name: " + t.name))
if print_tensor_type:
if isinstance(t, ops.Tensor):
t_type_str = "Type: Tensor ({})".format(t.dtype.name)
elif isinstance(t, sparse_tensor.SparseTensor):
t_type_str = "Type: SparseTensor ({})".format(t.dtype.name)
elif isinstance(t, tensor_array_ops.TensorArray):
t_type_str = "Type: TensorArray ({})".format(t.dtype.name)
elif isinstance(t, variables.Variable):
t_type_str = "Type: Variable ({})".format(t.dtype.name)
else:
raise ValueError("t must be a Tensor, SparseTensor, TensorArray or "
"Variable.")
tensor_list.append(constant_op.constant(t_type_str))
if print_shape:
if isinstance(t, sparse_tensor.SparseTensor):
tensor_list.append(constant_op.constant("Shape:"))
tensor_list.append(t.dense_shape)
elif isinstance(t, ops.Tensor):
tensor_list.append(constant_op.constant("Shape: " + str(t.get_shape(
).dims)))
elif isinstance(t, tensor_array_ops.TensorArray):
tensor_list.append(constant_op.constant("Size:"))
tensor_list.append(t.size())
if summarize_indicator_vector and t.dtype == dtypes.bool:
int_tensor = math_ops.cast(t, dtypes.uint8)
tensor_list.append(constant_op.constant("First True in Boolean tensor at:"))
tensor_list.append(math_ops.argmax(int_tensor, 0))
if isinstance(t, sparse_tensor.SparseTensor):
tensor_list.append(constant_op.constant("Sparse indices:"))
tensor_list.append(t.indices)
tensor_list.append(constant_op.constant("Sparse values:"))
tensor_list.append(t.values)
elif isinstance(t, ops.Tensor):
tensor_list.append(constant_op.constant("Value:"))
tensor_list.append(t)
elif isinstance(t, tensor_array_ops.TensorArray):
tensor_list.append(constant_op.constant("Value:"))
tensor_list.append(t.stack())
return tensor_list
def print_op(input_,
data=None,
message=None,
first_n=None,
summarize=20,
print_tensor_name=True,
print_tensor_type=True,
print_shape=True,
summarize_indicator_vector=True,
name=None):
"""Creates a print op that will print when a tensor is accessed.
  Wraps the tensor passed in so that whenever that tensor is accessed,
  the message `message` is printed, along with the current value of
  `input_` and an optional list of other tensors.
Args:
input_: A Tensor/SparseTensor/TensorArray to print when it is evaluated.
data: A list of other tensors to print.
message: A string message to print as a prefix.
first_n: Only log `first_n` number of times. Negative numbers log always;
this is the default.
summarize: Print this number of elements in the tensor.
print_tensor_name: Print the tensor name.
print_tensor_type: Print the tensor type.
print_shape: Print the tensor's shape.
summarize_indicator_vector: Whether to print the index of the first true
value in an indicator vector (a Boolean tensor).
name: The name to give this op.
Returns:
A Print op. The Print op returns `input_`.
Raises:
ValueError: If the tensor `input_` is not a Tensor, SparseTensor or
TensorArray.
"""
message = message or ""
if input_ is None:
raise ValueError("input_ must be of type "
"Tensor, SparseTensor or TensorArray")
tensor_list = _get_tensor_repr(input_, print_tensor_name, print_tensor_type,
print_shape, summarize_indicator_vector)
if data is not None:
for t in data:
tensor_list.extend(_get_tensor_repr(t, print_tensor_name,
print_tensor_type, print_shape,
summarize_indicator_vector))
if isinstance(input_, ops.Tensor) or isinstance(input_, variables.Variable):
input_ = logging_ops.Print(input_, tensor_list, message, first_n, summarize,
name)
elif isinstance(input_, sparse_tensor.SparseTensor):
p = logging_ops.Print(
constant_op.constant([]), tensor_list, message, first_n, summarize,
name)
with ops.control_dependencies([p]):
input_ = sparse_tensor.SparseTensor(
array_ops.identity(input_.indices),
array_ops.identity(input_.values),
array_ops.identity(input_.dense_shape))
elif isinstance(input_, tensor_array_ops.TensorArray):
p = logging_ops.Print(
constant_op.constant([]), tensor_list, message, first_n, summarize,
name)
with ops.control_dependencies([p]):
input_ = tensor_array_ops.TensorArray(dtype=input_.dtype,
handle=input_.handle,
flow=input_.flow)
else:
raise ValueError("input_ must be of type "
"Tensor, SparseTensor or TensorArray")
return input_
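# Illustrative usage sketch: a hedged example that wraps a tensor so that
# evaluating it also prints its name, type, shape and value. The values are
# arbitrary.
def _example_print_op_usage():
  values = constant_op.constant([1.0, 2.0, 3.0])
  values = print_op(values, message="values: ")
  return values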
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/framework/python/ops/prettyprint_ops.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module containing TensorFlow ops whose API may change in the future."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# TODO(ptucker): Add these to tf.contrib.variables?
# pylint: disable=wildcard-import
from tensorflow.contrib.framework.python.ops.arg_scope import *
from tensorflow.contrib.framework.python.ops.checkpoint_ops import *
from tensorflow.contrib.framework.python.ops.ops import *
from tensorflow.contrib.framework.python.ops.prettyprint_ops import *
from tensorflow.contrib.framework.python.ops.script_ops import *
from tensorflow.contrib.framework.python.ops.sort_ops import *
from tensorflow.contrib.framework.python.ops.variables import *
# pylint: enable=wildcard-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/framework/python/ops/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for the op to generate vocab remapping."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.contrib import framework as contrib_framework
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import saver
FLAGS = flags.FLAGS
_TESTDATA_PATH = 'contrib/framework/testdata'
class LoadMulticlassBiasTest(test.TestCase):
"""Tests for the load_linear_multiclass_bias_initializer functionality."""
def setUp(self):
ops.reset_default_graph()
dim = 1
num = 3
with ops.name_scope('some_scope'):
# Basically from 0 to dim*num-1.
flat_data = math_ops.linspace(0.0, dim * num - 1, dim * num)
bias = variables.Variable(
array_ops.reshape(flat_data, (num, dim)), name='bias')
save = saver.Saver([bias])
with self.cached_session() as sess:
variables.global_variables_initializer().run()
self.bundle_file = os.path.join(test.get_temp_dir(), 'bias_checkpoint')
save.save(sess, self.bundle_file)
self.new_class_vocab_file = os.path.join(
test.test_src_dir_path(_TESTDATA_PATH), 'keyword_new.txt')
self.old_class_vocab_file = os.path.join(
test.test_src_dir_path(_TESTDATA_PATH), 'keyword.txt')
self.init_val = 42
def _init_val_initializer(shape, dtype=None, partition_info=None):
del dtype, partition_info # Unused by this unit-testing initializer.
return array_ops.tile(
constant_op.constant([[self.init_val]], dtype=dtypes.float32), shape)
self.initializer = _init_val_initializer
def test_load_linear_multiclass_bias_initializer(self):
"""Tests for the bias initializer wrapper."""
bias_loading_initializer = (
contrib_framework.load_linear_multiclass_bias_initializer(
new_class_vocab_file=self.new_class_vocab_file,
old_class_vocab_file=self.old_class_vocab_file,
new_class_vocab_size=4,
bias_tensor_name='some_scope/bias',
ckpt_path=[self.bundle_file],
num_class_oov_buckets=1,
initializer=self.initializer))
expected_remapped_bias_vector = np.reshape(
[2, 0, self.init_val, 1, self.init_val], [5, 1])
# The new bias vector is of size [4 class vocab + 1 class OOV, 1].
remapped_bias_vector = variable_scope.get_variable(
name='bias/obtained_bias_vector',
shape=[5, 1],
initializer=bias_loading_initializer,
partitioner=partitioned_variables.fixed_size_partitioner(3))
with self.cached_session():
variables.global_variables_initializer().run()
self.assertAllClose(expected_remapped_bias_vector,
remapped_bias_vector.as_tensor().eval())
class LoadVariableSlotTest(test.TestCase):
"""Tests for the load_variable_slot_initializer functionality."""
def setUp(self):
ops.reset_default_graph()
dim = 1
num = 3
with ops.name_scope('some_scope'):
# Basically from 0 to dim*num-1.
flat_data = math_ops.linspace(0.0, dim * num - 1, dim * num)
accum = variables.Variable(
array_ops.reshape(flat_data, (num, dim)), name='accum')
save = saver.Saver([accum])
with self.cached_session() as sess:
variables.global_variables_initializer().run()
self.bundle_file = os.path.join(test.get_temp_dir(), 'accum_checkpoint')
save.save(sess, self.bundle_file)
self.new_class_vocab_file = os.path.join(
test.test_src_dir_path(_TESTDATA_PATH), 'keyword_new.txt')
self.old_class_vocab_file = os.path.join(
test.test_src_dir_path(_TESTDATA_PATH), 'keyword.txt')
self.init_val = 42
def _init_val_initializer(shape, dtype=None, partition_info=None):
del dtype, partition_info # Unused by this unit-testing initializer.
return array_ops.tile(
constant_op.constant([[self.init_val]], dtype=dtypes.float32), shape)
self.initializer = _init_val_initializer
def test_load_variable_slot_initializer(self):
"""Tests for the slot initializer wrapper."""
# We have an initializer for each of two partitioned variables, which will
# be [3, 1] and [2, 1]. The partitioning information is passed here in
# initializer construction, as opposed to through a variable scope during
# variable creation.
variable_slot_initializer_part_0 = (
contrib_framework.load_variable_slot_initializer(
new_row_vocab_file=self.new_class_vocab_file,
old_row_vocab_file=self.old_class_vocab_file,
new_row_vocab_size=4,
new_col_vocab_size=1,
primary_partition_info=variable_scope._PartitionInfo(
full_shape=[5, 1], var_offset=[0, 0]),
old_tensor_name='some_scope/accum',
ckpt_path=[self.bundle_file],
num_row_oov_buckets=1,
initializer=self.initializer))
variable_slot_initializer_part_1 = (
contrib_framework.load_variable_slot_initializer(
new_row_vocab_file=self.new_class_vocab_file,
old_row_vocab_file=self.old_class_vocab_file,
new_row_vocab_size=4,
new_col_vocab_size=1,
primary_partition_info=variable_scope._PartitionInfo(
full_shape=[5, 1], var_offset=[3, 0]),
old_tensor_name='some_scope/accum',
ckpt_path=[self.bundle_file],
num_row_oov_buckets=1,
initializer=self.initializer))
expected_remapped_accum_vector_part_0 = np.reshape([2, 0, self.init_val],
[3, 1])
expected_remapped_accum_vector_part_1 = np.reshape([1, self.init_val],
[2, 1])
# Since there is no variable scope here, partition_info will be None, so
# if variable_slot_initializer_part_0 and variable_slot_initializer_part_1
# were instead instances of load_and_remap_matrix_initializer, the part_0
# obtained vector would still be [2, 0, self.init_val], but the part_1
# obtained vector would be [2, 0], since the partition_info would default to
# assuming a single partition.
remapped_accum_vector_part_0 = variable_scope.get_variable(
name='accum/obtained_accum_vector_part_0',
shape=[3, 1],
initializer=variable_slot_initializer_part_0)
remapped_accum_vector_part_1 = variable_scope.get_variable(
name='accum/obtained_accum_vector_part_1',
shape=[2, 1],
initializer=variable_slot_initializer_part_1)
with self.cached_session():
variables.global_variables_initializer().run()
self.assertAllClose(expected_remapped_accum_vector_part_0,
remapped_accum_vector_part_0.eval())
self.assertAllClose(expected_remapped_accum_vector_part_1,
remapped_accum_vector_part_1.eval())
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/framework/python/ops/checkpoint_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for sorting tensors.
@@argsort
@@sort
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import sort_ops
sort = sort_ops.sort
argsort = sort_ops.argsort
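# Hedged usage sketch (an illustrative addition, not part of the original
# module): demonstrates the two re-exports above. It runs only when this
# module is executed directly, so importing the module is unaffected.
if __name__ == '__main__':
  from tensorflow.python.client import session as _session
  from tensorflow.python.framework import constant_op as _constant_op

  _values = _constant_op.constant([3.0, 1.0, 2.0])
  _sorted_values = sort(_values)   # -> [1.0, 2.0, 3.0]
  _order = argsort(_values)        # -> [1, 2, 0]
  with _session.Session() as _sess:
    print(_sess.run([_sorted_values, _order]))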
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/framework/python/ops/sort_ops.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions used to construct graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
__all__ = ['get_graph_from_inputs',
'get_name_scope']
def get_graph_from_inputs(op_input_list, graph=None):
"""Returns the appropriate graph to use for the given inputs.
1. If `graph` is provided, we validate that all inputs in `op_input_list` are
from the same graph.
2. Otherwise, we attempt to select a graph from the first Operation- or
Tensor-valued input in `op_input_list`, and validate that all other
such inputs are in the same graph.
3. If the graph was not specified and it could not be inferred from
`op_input_list`, we attempt to use the default graph.
Args:
op_input_list: A list of inputs to an operation, which may include `Tensor`,
`Operation`, and other objects that may be converted to a graph element.
graph: (Optional) The explicit graph to use.
Raises:
TypeError: If `op_input_list` is not a list or tuple, or if graph is not a
Graph.
ValueError: If a graph is explicitly passed and not all inputs are from it,
or if the inputs are from multiple graphs, or we could not find a graph
and there was no default graph.
Returns:
The appropriate graph to use for the given inputs.
"""
# pylint: disable=protected-access
return ops._get_graph_from_inputs(op_input_list, graph)
def get_name_scope():
"""Returns the current name scope of the default graph.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.contrib.framework.get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope.
"""
return ops.get_default_graph().get_name_scope()
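# Hedged usage sketch (an illustrative addition, not part of the original
# module): `get_graph_from_inputs` returns the graph that owns the given
# tensors, even when a different graph is currently the default. It runs only
# when this module is executed directly.
if __name__ == '__main__':
  from tensorflow.python.framework import constant_op as _constant_op

  _g = ops.Graph()
  with _g.as_default():
    _inputs = [_constant_op.constant(1.0), _constant_op.constant(2.0)]
  # Both tensors were created in `_g`, so `_g` is inferred and returned.
  assert get_graph_from_inputs(_inputs) is _g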
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/framework/python/ops/ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for generating and loading vocab remappings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import init_ops
from tensorflow.python.training import checkpoint_ops
# pylint: disable=protected-access,line-too-long
load_and_remap_matrix_initializer = checkpoint_ops._load_and_remap_matrix_initializer
# pylint: enable=line-too-long
load_embedding_initializer = checkpoint_ops._load_embedding_initializer
# pylint: enable=protected-access
def load_linear_multiclass_bias_initializer(ckpt_path,
bias_tensor_name,
new_class_vocab_size,
old_class_vocab_file,
new_class_vocab_file,
num_class_oov_buckets=0,
initializer=None,
max_rows_in_memory=-1):
"""Loads pre-trained multi-class biases for linear models from checkpoint.
Wrapper around `load_and_remap_matrix_initializer()` specialized for loading
multi-class bias and remapping according to the provided vocab files. See docs
for `load_and_remap_matrix_initializer()` for more details. In this case, the
provided row_vocab is the class vocabulary, and the expected shape is
`[new_class_vocab_size, 1]`.
Args:
ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
from which the old matrix `Tensor` will be loaded.
bias_tensor_name: Tensor name to load from in the checkpoints.
new_class_vocab_size: Number of entries in the new class vocab.
old_class_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old class vocabulary file.
new_class_vocab_file: A scalar `Tensor` of type `string` containing the
path to the new class vocabulary file.
num_class_oov_buckets: `int` specifying the number of out-of-vocabulary
buckets to use for the classes. Must be >= 0.
initializer: Initializer function that accepts a 1-D tensor as the arg to
specify the shape of the returned tensor. If `None`, defaults to using
`zeros_initializer()`.
max_rows_in_memory: `int` specifying the maximum number of rows to load from
the checkpoint at once. If less than or equal to 0, the entire matrix will
be loaded into memory. Setting this arg trades increased disk reads for
lower memory usage.
Returns:
A variable initializer function.
"""
# Linear multi-class biases should be zero-initialized.
if initializer is None:
initializer = init_ops.zeros_initializer()
return load_and_remap_matrix_initializer(
ckpt_path=ckpt_path,
old_tensor_name=bias_tensor_name,
new_row_vocab_size=new_class_vocab_size,
new_col_vocab_size=1,
old_row_vocab_file=old_class_vocab_file,
new_row_vocab_file=new_class_vocab_file,
old_col_vocab_file=None,
new_col_vocab_file=None,
num_row_oov_buckets=num_class_oov_buckets,
num_col_oov_buckets=0,
initializer=initializer,
max_rows_in_memory=max_rows_in_memory)
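# Hedged usage sketch (an illustrative comment, not part of the original
# module). The checkpoint path, tensor name, and vocab files below are
# hypothetical; in practice they must point at a real checkpoint and real
# class-vocabulary files:
#
#   bias_init = load_linear_multiclass_bias_initializer(
#       ckpt_path=['/tmp/old_model/model.ckpt'],
#       bias_tensor_name='linear/bias',
#       new_class_vocab_size=100,
#       old_class_vocab_file='/tmp/old_classes.txt',
#       new_class_vocab_file='/tmp/new_classes.txt',
#       num_class_oov_buckets=1)
#
# `bias_init` would then initialize a bias variable of shape
# [new_class_vocab_size + num_class_oov_buckets, 1], i.e. [101, 1] here.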
def load_variable_slot_initializer(ckpt_path,
old_tensor_name,
primary_partition_info,
new_row_vocab_size,
new_col_vocab_size,
old_row_vocab_file=None,
new_row_vocab_file=None,
old_col_vocab_file=None,
new_col_vocab_file=None,
num_row_oov_buckets=0,
num_col_oov_buckets=0,
initializer=None,
max_rows_in_memory=-1):
"""Loads pre-trained multi-class slots for linear models from checkpoint.
Wrapper around `load_and_remap_matrix_initializer()` specialized for loading
multi-class slots (such as optimizer accumulators) and remapping them
according to the provided vocab files. See docs for
`load_and_remap_matrix_initializer()` for more details. Takes in a
`variable_scope._PartitionInfo` representing the slot's primary `Variable`'s
partitioning. This is necessary since accumulator `Variable` creation ignores
primary scoping and partitioning information.
Args:
ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
from which the old matrix `Tensor` will be loaded.
old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
primary_partition_info: A `variable_scope._PartitionInfo` containing this
slot's primary `Variable`'s partitioning information. This is used to
calculate the offset and override the partition_info passed to the call to
_initialize.
new_row_vocab_size: `int` specifying the number of entries in
`new_row_vocab_file`. If no row remapping is needed (no row vocab
provided), this should be equal to the number of rows to load from the old
matrix (which can theoretically be smaller than the number of rows in the
old matrix).
new_col_vocab_size: `int` specifying the number of entries in
`new_col_vocab_file`. If no column remapping is needed (no column vocab
provided), this should be equal to the number of columns in the old
matrix.
old_row_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old row vocabulary file. Can be None, which represents no
remapping on the row axis.
new_row_vocab_file: A scalar `Tensor` of type `string` containing the path
to the new row vocabulary file. Can be None, which represents no remapping
on the row axis.
old_col_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old column vocabulary file. Can be None, which represents no
remapping on the column axis.
new_col_vocab_file: A scalar `Tensor` of type `string` containing the path
to the new column vocabulary file. Can be None, which represents no
remapping on the column axis.
num_row_oov_buckets: `int` specifying the number of out-of-vocabulary rows
to append. Must be >= 0.
num_col_oov_buckets: `int` specifying the number of out-of-vocabulary
columns to append. Must be >= 0.
initializer: Initializer function to initialize missing values. Accepts a
1-D tensor as the arg to specify the shape of the returned tensor. If
`None`, defaults to using `zeros_initializer()`.
max_rows_in_memory: `int` specifying the maximum number of rows to load from
the checkpoint at once. If less than or equal to 0, the entire matrix will
be loaded into memory. Setting this arg trades increased disk reads for
lower memory usage.
Returns:
A variable initializer function that should be used to initialize a
(potentially partitioned) `Variable` whose complete shape is
`[new_row_vocab_size + num_row_oov_buckets, new_col_vocab_size +
num_col_oov_buckets]`.
Raises:
TypeError: If `initializer` is specified but not callable.
"""
initializer_fn = load_and_remap_matrix_initializer(
ckpt_path=ckpt_path,
old_tensor_name=old_tensor_name,
new_row_vocab_size=new_row_vocab_size,
new_col_vocab_size=new_col_vocab_size,
old_row_vocab_file=old_row_vocab_file,
new_row_vocab_file=new_row_vocab_file,
old_col_vocab_file=old_col_vocab_file,
new_col_vocab_file=new_col_vocab_file,
num_row_oov_buckets=num_row_oov_buckets,
num_col_oov_buckets=num_col_oov_buckets,
initializer=initializer,
max_rows_in_memory=max_rows_in_memory)
def _initializer(shape, dtype=dtypes.float32, partition_info=None):
del partition_info # Unused by this override.
return initializer_fn(shape, dtype, partition_info=primary_partition_info)
return _initializer
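# Hedged usage sketch (an illustrative comment, not part of the original
# module). All names, paths, and shapes are hypothetical; `variable_scope`
# refers to tensorflow.python.ops.variable_scope, and
# `primary_partition_info` mirrors the partitioning of the slot's primary
# variable:
#
#   slot_init = load_variable_slot_initializer(
#       ckpt_path=['/tmp/old_model/model.ckpt'],
#       old_tensor_name='linear/weights/Adagrad',
#       primary_partition_info=variable_scope._PartitionInfo(
#           full_shape=[101, 10], var_offset=[0, 0]),
#       new_row_vocab_size=100,
#       new_col_vocab_size=10,
#       old_row_vocab_file='/tmp/old_rows.txt',
#       new_row_vocab_file='/tmp/new_rows.txt',
#       num_row_oov_buckets=1)
#
# The returned initializer ignores the partition_info passed at call time and
# always uses `primary_partition_info` (see `_initializer` above).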
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/framework/python/ops/checkpoint_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tensor_util tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import ops as ops_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class OpsTest(test.TestCase):
def testGetGraphFromEmptyInputs(self):
with ops.Graph().as_default() as g0:
self.assertIs(g0, ops_lib.get_graph_from_inputs([]))
def testGetGraphFromValidInputs(self):
g0 = ops.Graph()
with g0.as_default():
values = [constant_op.constant(0.0), constant_op.constant(1.0)]
self.assertIs(g0, ops_lib.get_graph_from_inputs(values))
self.assertIs(g0, ops_lib.get_graph_from_inputs(values, g0))
with ops.Graph().as_default():
self.assertIs(g0, ops_lib.get_graph_from_inputs(values))
self.assertIs(g0, ops_lib.get_graph_from_inputs(values, g0))
def testGetGraphFromInvalidInputs(self):
g0 = ops.Graph()
with g0.as_default():
values = [constant_op.constant(0.0), constant_op.constant(1.0)]
g1 = ops.Graph()
with self.assertRaisesRegexp(ValueError, "not from the passed-in graph"):
ops_lib.get_graph_from_inputs(values, g1)
with g1.as_default():
values.append(constant_op.constant(2.0))
with self.assertRaisesRegexp(ValueError, "must be from the same graph"):
ops_lib.get_graph_from_inputs(values)
with self.assertRaisesRegexp(ValueError, "not from the passed-in graph"):
ops_lib.get_graph_from_inputs(values, g0)
with self.assertRaisesRegexp(ValueError, "not from the passed-in graph"):
ops_lib.get_graph_from_inputs(values, g1)
def testGetNameScope(self):
with ops.name_scope("scope1"):
with ops.name_scope("scope2"):
with ops.name_scope("scope3"):
self.assertEqual("scope1/scope2/scope3", ops_lib.get_name_scope())
self.assertEqual("scope1/scope2", ops_lib.get_name_scope())
self.assertEqual("scope1", ops_lib.get_name_scope())
self.assertEqual("", ops_lib.get_name_scope())
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/framework/python/ops/ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the arg_scope used for scoping layers arguments.
Allows one to define models much more compactly by eliminating boilerplate
code. This is accomplished through the use of argument scoping (arg_scope).
Example of how to use tf.contrib.framework.arg_scope:
```
from third_party.tensorflow.contrib.layers.python import layers
arg_scope = tf.contrib.framework.arg_scope
with arg_scope([layers.conv2d], padding='SAME',
initializer=layers.variance_scaling_initializer(),
regularizer=layers.l2_regularizer(0.05)):
net = layers.conv2d(inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
net = layers.conv2d(net, 256, [5, 5], scope='conv2')
```
The first call to conv2d will behave as follows:
layers.conv2d(inputs, 64, [11, 11], 4, padding='VALID',
initializer=layers.variance_scaling_initializer(),
regularizer=layers.l2_regularizer(0.05), scope='conv1')
The second call to conv2d will also use the arg_scope's default for padding:
layers.conv2d(inputs, 256, [5, 5], padding='SAME',
initializer=layers.variance_scaling_initializer(),
regularizer=layers.l2_regularizer(0.05), scope='conv2')
Example of how to reuse an arg_scope:
```
with arg_scope([layers.conv2d], padding='SAME',
initializer=layers.variance_scaling_initializer(),
regularizer=layers.l2_regularizer(0.05)) as sc:
net = layers.conv2d(net, 256, [5, 5], scope='conv1')
....
with arg_scope(sc):
net = layers.conv2d(net, 256, [5, 5], scope='conv2')
```
Example of how to use tf.contrib.framework.add_arg_scope to enable your
function to be called within an arg_scope later:
@tf.contrib.framework.add_arg_scope
def conv2d(*args, **kwargs):
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
__all__ = [
'arg_scope', 'add_arg_scope', 'current_arg_scope', 'has_arg_scope',
'arg_scoped_arguments', 'arg_scope_func_key'
]
_ARGSTACK = [{}]
_DECORATED_OPS = {}
def _get_arg_stack():
if _ARGSTACK:
return _ARGSTACK
else:
_ARGSTACK.append({})
return _ARGSTACK
def current_arg_scope():
stack = _get_arg_stack()
return stack[-1]
def arg_scope_func_key(op):
return getattr(op, '_key_op', str(op))
def _name_op(op):
return (op.__module__, op.__name__)
def _kwarg_names(func):
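  # The last len(func.__defaults__) parameters are the ones with default
  # values; their names are sliced out of co_varnames, which lists the
  # function's parameters before its local variables.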
kwargs_length = len(func.__defaults__) if func.__defaults__ else 0
return func.__code__.co_varnames[-kwargs_length:func.__code__.co_argcount]
def _add_op(op):
key_op = arg_scope_func_key(op)
_DECORATED_OPS[key_op] = _kwarg_names(op)
@tf_contextlib.contextmanager
def arg_scope(list_ops_or_scope, **kwargs):
"""Stores the default arguments for the given set of list_ops.
For usage, please see examples at top of the file.
Args:
list_ops_or_scope: List or tuple of operations to set argument scope for or
a dictionary containing the current scope. When list_ops_or_scope is a
dict, kwargs must be empty. When list_ops_or_scope is a list or tuple,
      then every op in it needs to be decorated with @add_arg_scope to work.
**kwargs: keyword=value that will define the defaults for each op in
list_ops. All the ops need to accept the given set of arguments.
Yields:
the current_scope, which is a dictionary of {op: {arg: value}}
Raises:
TypeError: if list_ops is not a list or a tuple.
    ValueError: if any op in list_ops has not been decorated with @add_arg_scope.
"""
if isinstance(list_ops_or_scope, dict):
# Assumes that list_ops_or_scope is a scope that is being reused.
if kwargs:
      raise ValueError('When attempting to re-use a scope by supplying a '
                       'dictionary, kwargs must be empty.')
current_scope = list_ops_or_scope.copy()
try:
_get_arg_stack().append(current_scope)
yield current_scope
finally:
_get_arg_stack().pop()
else:
# Assumes that list_ops_or_scope is a list/tuple of ops with kwargs.
if not isinstance(list_ops_or_scope, (list, tuple)):
raise TypeError('list_ops_or_scope must either be a list/tuple or reused '
'scope (i.e. dict)')
try:
current_scope = current_arg_scope().copy()
for op in list_ops_or_scope:
key = arg_scope_func_key(op)
if not has_arg_scope(op):
          raise ValueError('%s is not decorated with @add_arg_scope' %
                           (_name_op(op),))
if key in current_scope:
current_kwargs = current_scope[key].copy()
current_kwargs.update(kwargs)
current_scope[key] = current_kwargs
else:
current_scope[key] = kwargs.copy()
_get_arg_stack().append(current_scope)
yield current_scope
finally:
_get_arg_stack().pop()
def add_arg_scope(func):
"""Decorates a function with args so it can be used within an arg_scope.
Args:
func: function to decorate.
Returns:
    The decorated function func_with_args().
"""
def func_with_args(*args, **kwargs):
current_scope = current_arg_scope()
current_args = kwargs
key_func = arg_scope_func_key(func)
if key_func in current_scope:
current_args = current_scope[key_func].copy()
current_args.update(kwargs)
return func(*args, **current_args)
_add_op(func)
setattr(func_with_args, '_key_op', arg_scope_func_key(func))
return tf_decorator.make_decorator(func, func_with_args)
def has_arg_scope(func):
"""Checks whether a func has been decorated with @add_arg_scope or not.
Args:
func: function to check.
Returns:
a boolean.
"""
return arg_scope_func_key(func) in _DECORATED_OPS
def arg_scoped_arguments(func):
"""Returns the list kwargs that arg_scope can set for a func.
Args:
func: function which has been decorated with @add_arg_scope.
Returns:
a list of kwargs names.
"""
assert has_arg_scope(func)
return _DECORATED_OPS[arg_scope_func_key(func)]
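# Hedged usage sketch (an illustrative addition, not part of the original
# module): a plain function decorated with @add_arg_scope picks up defaults
# set via arg_scope. It runs only when this module is executed directly.
if __name__ == '__main__':

  @add_arg_scope
  def _scale(x, factor=1.0):
    return x * factor

  with arg_scope([_scale], factor=3.0):
    # Inside the scope, `factor` defaults to 3.0 unless overridden per call.
    assert _scale(2.0) == 6.0
    assert _scale(2.0, factor=5.0) == 10.0
  # Outside the scope, the function's own default applies again.
  assert _scale(2.0) == 2.0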
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/framework/python/ops/arg_scope.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""variables tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.framework.python.ops import variables as variables_lib2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import device_setter
from tensorflow.python.training import saver as saver_lib
class LocalVariableTest(test.TestCase):
def test_local_variable(self):
with self.cached_session() as sess:
self.assertEquals([], variables_lib.local_variables())
value0 = 42
variables_lib2.local_variable(value0)
value1 = 43
variables_lib2.local_variable(value1)
variables = variables_lib.local_variables()
self.assertEquals(2, len(variables))
self.assertRaises(errors_impl.OpError, sess.run, variables)
variables_lib.variables_initializer(variables).run()
self.assertAllEqual(set([value0, value1]), set(sess.run(variables)))
def testLocalVariableNameAndShape(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.local_variable([1, 1, 1, 1, 1], name='a')
self.assertEquals(a.op.name, 'A/a')
self.assertListEqual(a.get_shape().as_list(), [5])
self.assertListEqual([a], variables_lib2.get_local_variables())
def testLocalVariableNotInAllVariables(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.local_variable(0)
self.assertFalse(a in variables_lib.global_variables())
self.assertTrue(a in variables_lib.local_variables())
def testLocalVariableNotInVariablesToRestore(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.local_variable(0)
self.assertFalse(a in variables_lib2.get_variables_to_restore())
self.assertTrue(a in variables_lib.local_variables())
def testGetVariablesDontReturnsTransients(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
variables_lib2.local_variable(0)
with variable_scope.variable_scope('B'):
variables_lib2.local_variable(0)
self.assertEquals([], variables_lib2.get_variables('A'))
self.assertEquals([], variables_lib2.get_variables('B'))
def testGetLocalVariablesReturnsTransients(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.local_variable(0)
with variable_scope.variable_scope('B'):
b = variables_lib2.local_variable(0)
self.assertEquals([a], variables_lib2.get_local_variables('A'))
self.assertEquals([b], variables_lib2.get_local_variables('B'))
def testInitializedVariableValue(self):
with self.cached_session() as sess:
a = variables_lib2.local_variable([0, 0, 0, 0, 0], name='a')
sess.run(variables_lib.local_variables_initializer())
self.assertAllEqual(a.eval(), [0] * 5)
def testResourceVariable(self):
a = variables_lib2.local_variable(0)
b = variables_lib2.local_variable(0, use_resource=True)
self.assertTrue(isinstance(a, variables_lib.Variable))
self.assertFalse(isinstance(a, resource_variable_ops.ResourceVariable))
self.assertTrue(isinstance(b, resource_variable_ops.ResourceVariable))
class GlobalVariableTest(test.TestCase):
def test_global_variable(self):
with self.cached_session() as sess:
self.assertEquals([], variables_lib.global_variables())
value0 = 42
variables_lib2.global_variable(value0)
value1 = 43
variables_lib2.global_variable(value1)
variables = variables_lib.global_variables()
self.assertEquals(2, len(variables))
with self.assertRaisesOpError(
'Attempting to use uninitialized value Variable'):
sess.run(variables)
variables_lib.variables_initializer(variables).run()
self.assertAllEqual(set([value0, value1]), set(sess.run(variables)))
def testVariableNameAndShape(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.global_variable([1, 1, 1, 1, 1], name='a')
self.assertEquals(a.op.name, 'A/a')
self.assertListEqual(a.get_shape().as_list(), [5])
self.assertListEqual([a], variables_lib.global_variables())
def testGlobalVariableNotInLocalVariables(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.global_variable(0)
self.assertFalse(a in variables_lib.local_variables())
self.assertTrue(a in variables_lib.global_variables())
def testGlobalVariableInVariablesToRestore(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.global_variable(0)
self.assertFalse(a in variables_lib.local_variables())
self.assertTrue(a in variables_lib2.get_variables_to_restore())
def testGetVariablesReturnsThem(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.global_variable(0)
with variable_scope.variable_scope('B'):
b = variables_lib2.global_variable(0)
self.assertEquals([a], variables_lib2.get_variables('A'))
self.assertEquals([b], variables_lib2.get_variables('B'))
def testGetLocalVariablesDontReturnsThem(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
variables_lib2.global_variable(0)
with variable_scope.variable_scope('B'):
variables_lib2.global_variable(0)
self.assertEquals([], variables_lib2.get_local_variables('A'))
self.assertEquals([], variables_lib2.get_local_variables('B'))
def testInitializedVariableValue(self):
with self.cached_session() as sess:
a = variables_lib2.global_variable([0, 0, 0, 0, 0], name='a')
sess.run(variables_lib.global_variables_initializer())
self.assertAllEqual(a.eval(), [0] * 5)
def testResourceVariable(self):
a = variables_lib2.global_variable(0)
b = variables_lib2.global_variable(0, use_resource=True)
self.assertTrue(isinstance(a, variables_lib.Variable))
self.assertFalse(isinstance(a, resource_variable_ops.ResourceVariable))
self.assertTrue(isinstance(b, resource_variable_ops.ResourceVariable))
class GlobalStepTest(test.TestCase):
def _assert_global_step(self, global_step, expected_dtype=dtypes.int64):
self.assertEquals('%s:0' % ops.GraphKeys.GLOBAL_STEP, global_step.name)
self.assertEquals(expected_dtype, global_step.dtype.base_dtype)
self.assertEquals([], global_step.get_shape().as_list())
def test_invalid_dtype(self):
with ops.Graph().as_default() as g:
self.assertEquals(None, variables_lib2.get_global_step())
variables_lib.VariableV1(
0.0,
trainable=False,
dtype=dtypes.float32,
name=ops.GraphKeys.GLOBAL_STEP)
self.assertRaisesRegexp(TypeError, 'does not have integer type',
variables_lib2.get_global_step)
self.assertRaisesRegexp(TypeError, 'does not have integer type',
variables_lib2.get_global_step, g)
def test_invalid_shape(self):
with ops.Graph().as_default() as g:
self.assertEquals(None, variables_lib2.get_global_step())
variables_lib.VariableV1(
[0],
trainable=False,
dtype=dtypes.int32,
name=ops.GraphKeys.GLOBAL_STEP)
self.assertRaisesRegexp(TypeError, 'not scalar',
variables_lib2.get_global_step)
self.assertRaisesRegexp(TypeError, 'not scalar',
variables_lib2.get_global_step, g)
def test_create_global_step(self):
self.assertEquals(None, variables_lib2.get_global_step())
with ops.Graph().as_default() as g:
global_step = variables_lib2.create_global_step()
self._assert_global_step(global_step)
self.assertRaisesRegexp(ValueError, 'already exists',
variables_lib2.create_global_step)
self.assertRaisesRegexp(ValueError, 'already exists',
variables_lib2.create_global_step, g)
self._assert_global_step(variables_lib2.create_global_step(ops.Graph()))
def test_get_global_step(self):
with ops.Graph().as_default() as g:
self.assertEquals(None, variables_lib2.get_global_step())
variables_lib.VariableV1(
0,
trainable=False,
dtype=dtypes.int32,
name=ops.GraphKeys.GLOBAL_STEP)
self._assert_global_step(
variables_lib2.get_global_step(), expected_dtype=dtypes.int32)
self._assert_global_step(
variables_lib2.get_global_step(g), expected_dtype=dtypes.int32)
def test_get_or_create_global_step(self):
with ops.Graph().as_default() as g:
self.assertEquals(None, variables_lib2.get_global_step())
self._assert_global_step(variables_lib2.get_or_create_global_step())
self._assert_global_step(variables_lib2.get_or_create_global_step(g))
class VariablesTest(test.TestCase):
def testCreateVariable(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
self.assertEquals(a.op.name, 'A/a')
self.assertListEqual(a.get_shape().as_list(), [5])
self.assertTrue(a in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
self.assertFalse(a in ops.get_collection(ops.GraphKeys.MODEL_VARIABLES))
self.assertFalse(a in variables_lib.local_variables())
def testGetVariables(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
with variable_scope.variable_scope('B'):
b = variables_lib2.variable('a', [5])
self.assertEquals([a, b], variables_lib2.get_variables())
self.assertEquals([a], variables_lib2.get_variables('A'))
self.assertEquals([b], variables_lib2.get_variables('B'))
def testGetVariablesWithScope(self):
with self.cached_session():
with variable_scope.variable_scope('A') as var_scope:
a = variables_lib2.variable('a', [5])
b = variables_lib2.variable('b', [5])
self.assertSetEqual(
set([a, b]), set(variables_lib2.get_variables(var_scope)))
def testGetVariablesSuffix(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
with variable_scope.variable_scope('A'):
b = variables_lib2.variable('b', [5])
self.assertEquals([a], variables_lib2.get_variables(suffix='a'))
self.assertEquals([b], variables_lib2.get_variables(suffix='b'))
def testGetVariableWithSingleVar(self):
with self.cached_session():
with variable_scope.variable_scope('parent'):
a = variables_lib2.variable('child', [5])
self.assertEquals(a, variables_lib2.get_unique_variable('parent/child'))
def testGetVariableWithDistractors(self):
with self.cached_session():
with variable_scope.variable_scope('parent'):
a = variables_lib2.variable('child', [5])
with variable_scope.variable_scope('child'):
variables_lib2.variable('grandchild1', [7])
variables_lib2.variable('grandchild2', [9])
self.assertEquals(a, variables_lib2.get_unique_variable('parent/child'))
def testGetVariableThrowsExceptionWithNoMatch(self):
var_name = 'cant_find_me'
with self.cached_session():
with self.assertRaises(ValueError):
variables_lib2.get_unique_variable(var_name)
def testGetThrowsExceptionWithChildrenButNoMatch(self):
var_name = 'parent/child'
with self.cached_session():
with variable_scope.variable_scope(var_name):
variables_lib2.variable('grandchild1', [7])
variables_lib2.variable('grandchild2', [9])
with self.assertRaises(ValueError):
variables_lib2.get_unique_variable(var_name)
def testGetVariablesToRestore(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
with variable_scope.variable_scope('B'):
b = variables_lib2.variable('a', [5])
self.assertEquals([a, b], variables_lib2.get_variables_to_restore())
def testIncludeGetVariablesToRestore(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
with variable_scope.variable_scope('B'):
b = variables_lib2.variable('a', [5])
self.assertEquals([a, b], variables_lib2.get_variables())
self.assertEquals([a], variables_lib2.get_variables_to_restore(['A']))
def testExcludeGetVariablesToRestore(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
with variable_scope.variable_scope('B'):
b = variables_lib2.variable('a', [5])
self.assertEquals([a, b], variables_lib2.get_variables())
self.assertEquals(
[a], variables_lib2.get_variables_to_restore(exclude=['B']))
def testWrongIncludeGetVariablesToRestore(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
with variable_scope.variable_scope('B'):
b = variables_lib2.variable('a', [5])
self.assertEquals([a, b], variables_lib2.get_variables())
self.assertEquals([], variables_lib2.get_variables_to_restore(['a']))
def testGetMixedVariablesToRestore(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
b = variables_lib2.variable('b', [5])
with variable_scope.variable_scope('B'):
c = variables_lib2.variable('c', [5])
d = variables_lib2.variable('d', [5])
self.assertEquals([a, b, c, d], variables_lib2.get_variables())
self.assertEquals(
[a, c],
variables_lib2.get_variables_to_restore(include=['A/a', 'B/c']))
def testExcludeGetMixedVariablesToRestore(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
b = variables_lib2.variable('b', [5])
with variable_scope.variable_scope('B'):
c = variables_lib2.variable('c', [5])
d = variables_lib2.variable('d', [5])
self.assertEquals([a, b, c, d], variables_lib2.get_variables())
self.assertEquals(
[b, d],
variables_lib2.get_variables_to_restore(exclude=['A/a', 'B/c']))
def testReuseVariable(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [])
with variable_scope.variable_scope('A', reuse=True):
b = variables_lib2.variable('a', [])
self.assertEquals(a, b)
self.assertListEqual([a], variables_lib2.get_variables())
def testVariableWithRegularizer(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [], regularizer=nn_ops.l2_loss)
loss = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertDeviceEqual(loss.device, a.device)
def testVariableWithRegularizerColocate(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable(
'a', [], device='gpu:0', regularizer=nn_ops.l2_loss)
loss = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertDeviceEqual(loss.device, a.device)
def testVariableWithDevice(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [], device='cpu:0')
b = variables_lib2.variable('b', [], device='cpu:1')
self.assertDeviceEqual(a.device, 'cpu:0')
self.assertDeviceEqual(b.device, 'cpu:1')
def testVariableWithDeviceFromScope(self):
with self.cached_session():
with ops.device('/cpu:0'):
a = variables_lib2.variable('a', [])
b = variables_lib2.variable('b', [], device='cpu:1')
self.assertDeviceEqual(a.device, 'cpu:0')
self.assertDeviceEqual(b.device, 'cpu:1')
def testVariableWithDeviceFunction(self):
class DevFn(object):
def __init__(self):
self.counter = -1
def __call__(self, op):
self.counter += 1
return 'cpu:%d' % self.counter
with self.cached_session():
with arg_scope([variables_lib2.variable], device=DevFn()):
a = variables_lib2.variable('a', [])
b = variables_lib2.variable('b', [])
c = variables_lib2.variable('c', [], device='cpu:12')
d = variables_lib2.variable('d', [])
with ops.device('cpu:99'):
e_init = constant_op.constant(12)
e = variables_lib2.variable('e', initializer=e_init)
self.assertDeviceEqual(a.device, 'cpu:0')
self.assertEqual(a.initial_value.op.colocation_groups(),
a.op.colocation_groups())
self.assertDeviceEqual(b.device, 'cpu:1')
self.assertEqual(b.initial_value.op.colocation_groups(),
b.op.colocation_groups())
self.assertDeviceEqual(c.device, 'cpu:12')
self.assertEqual(c.initial_value.op.colocation_groups(),
c.op.colocation_groups())
self.assertDeviceEqual(d.device, 'cpu:2')
self.assertEqual(d.initial_value.op.colocation_groups(),
d.op.colocation_groups())
self.assertDeviceEqual(e.device, 'cpu:3')
self.assertDeviceEqual(e.initial_value.device, 'cpu:99')
def testVariableWithReplicaDeviceSetter(self):
with self.cached_session():
with ops.device(device_setter.replica_device_setter(ps_tasks=2)):
a = variables_lib2.variable('a', [])
b = variables_lib2.variable('b', [])
c = variables_lib2.variable('c', [], device='cpu:12')
d = variables_lib2.variable('d', [])
with ops.device('cpu:99'):
e_init = constant_op.constant(12)
e = variables_lib2.variable('e', initializer=e_init)
# The values below highlight how the replica_device_setter puts initial
# values on the worker job, and how it merges explicit devices.
self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0')
self.assertEqual(a.initial_value.op.colocation_groups(),
a.op.colocation_groups())
self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0')
self.assertEqual(b.initial_value.op.colocation_groups(),
b.op.colocation_groups())
self.assertDeviceEqual(c.device, '/job:ps/task:0/cpu:12')
self.assertEqual(c.initial_value.op.colocation_groups(),
c.op.colocation_groups())
self.assertDeviceEqual(d.device, '/job:ps/task:1/cpu:0')
self.assertEqual(d.initial_value.op.colocation_groups(),
d.op.colocation_groups())
self.assertDeviceEqual(e.device, '/job:ps/task:0/cpu:0')
self.assertDeviceEqual(e.initial_value.device, '/job:worker/cpu:99')
def testVariableWithVariableDeviceChooser(self):
with ops.Graph().as_default():
device_fn = variables_lib2.VariableDeviceChooser(num_tasks=2)
with arg_scope([variables_lib2.variable], device=device_fn):
a = variables_lib2.variable('a', [])
b = variables_lib2.variable('b', [])
c = variables_lib2.variable('c', [], device='cpu:12')
d = variables_lib2.variable('d', [])
with ops.device('cpu:99'):
e_init = constant_op.constant(12)
e = variables_lib2.variable('e', initializer=e_init)
# The values below highlight how the VariableDeviceChooser puts initial
# values on the same device as the variable job.
self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0')
self.assertEqual(a.initial_value.op.colocation_groups(),
a.op.colocation_groups())
self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0')
self.assertEqual(b.initial_value.op.colocation_groups(),
b.op.colocation_groups())
self.assertDeviceEqual(c.device, '/cpu:12')
self.assertEqual(c.initial_value.op.colocation_groups(),
c.op.colocation_groups())
self.assertDeviceEqual(d.device, '/job:ps/task:0/cpu:0')
self.assertEqual(d.initial_value.op.colocation_groups(),
d.op.colocation_groups())
self.assertDeviceEqual(e.device, '/job:ps/task:1/cpu:0')
self.assertDeviceEqual(e.initial_value.device, '/cpu:99')
def testVariableWithVariableDeviceChooserWithReplica(self):
with ops.Graph().as_default():
device_fn = variables_lib2.VariableDeviceChooser(replica=3, num_tasks=2)
with arg_scope([variables_lib2.variable], device=device_fn):
a = variables_lib2.variable('a', [])
b = variables_lib2.variable('b', [])
c = variables_lib2.variable('c', [], device='cpu:12')
d = variables_lib2.variable('d', [])
with ops.device('cpu:99'):
e_init = constant_op.constant(12)
e = variables_lib2.variable('e', initializer=e_init)
# The values below highlight how the VariableDeviceChooser puts initial
# values on the same device as the variable job.
self.assertDeviceEqual(a.device, '/job:ps/replica:3/task:0/cpu:0')
self.assertEqual(a.initial_value.op.colocation_groups(),
a.op.colocation_groups())
self.assertDeviceEqual(b.device, '/job:ps/replica:3/task:1/cpu:0')
self.assertEqual(b.initial_value.op.colocation_groups(),
b.op.colocation_groups())
self.assertDeviceEqual(c.device, '/cpu:12')
self.assertEqual(c.initial_value.op.colocation_groups(),
c.op.colocation_groups())
self.assertDeviceEqual(d.device, '/job:ps/replica:3/task:0/cpu:0')
self.assertEqual(d.initial_value.op.colocation_groups(),
d.op.colocation_groups())
self.assertDeviceEqual(e.device, '/job:ps/replica:3/task:1/cpu:0')
self.assertDeviceEqual(e.initial_value.device, '/cpu:99')
def testVariableGPUPlacement(self):
with ops.Graph().as_default():
device_fn = variables_lib2.VariableDeviceChooser(device_type='GPU')
with arg_scope([variables_lib2.variable], device=device_fn):
a = variables_lib2.variable('a', [])
b = variables_lib2.variable('b', [])
c = variables_lib2.variable('c', [], device='cpu:12')
d = variables_lib2.variable('d', [])
with ops.device('cpu:99'):
e_init = constant_op.constant(12)
e = variables_lib2.variable('e', initializer=e_init)
# The values below highlight how the VariableDeviceChooser puts initial
# values on the same device as the variable job.
self.assertDeviceEqual(a.device, '/device:GPU:0')
self.assertEqual(a.initial_value.op.colocation_groups(),
a.op.colocation_groups())
self.assertDeviceEqual(b.device, '/device:GPU:0')
self.assertEqual(b.initial_value.op.colocation_groups(),
b.op.colocation_groups())
self.assertDeviceEqual(c.device, '/cpu:12')
self.assertEqual(c.initial_value.op.colocation_groups(),
c.op.colocation_groups())
self.assertDeviceEqual(d.device, '/device:GPU:0')
self.assertEqual(d.initial_value.op.colocation_groups(),
d.op.colocation_groups())
self.assertDeviceEqual(e.device, '/device:GPU:0')
self.assertDeviceEqual(e.initial_value.device, '/cpu:99')
class ModelVariablesTest(test.TestCase):
def testNameAndShape(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.model_variable('a', [5])
self.assertEquals(a.op.name, 'A/a')
self.assertListEqual(a.get_shape().as_list(), [5])
self.assertListEqual([a], variables_lib2.get_model_variables('A'))
def testNotInLocalVariables(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.model_variable('a', [5])
self.assertTrue(a in variables_lib.global_variables())
self.assertTrue(a in ops.get_collection(ops.GraphKeys.MODEL_VARIABLES))
self.assertFalse(a in variables_lib.local_variables())
def testGetVariablesReturns(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.model_variable('a', [5])
with variable_scope.variable_scope('B'):
b = variables_lib2.model_variable('a', [5])
self.assertEquals([a], variables_lib2.get_variables('A'))
self.assertEquals([b], variables_lib2.get_variables('B'))
def testGetModelVariables(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.model_variable('a', [5])
with variable_scope.variable_scope('B'):
b = variables_lib2.model_variable('a', [5])
self.assertEquals([a], variables_lib2.get_model_variables('A'))
self.assertEquals([b], variables_lib2.get_model_variables('B'))
def testGetTrainableVariables(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
variables_lib2.local_variable([5])
a = variables_lib.VariableV1([5])
with variable_scope.variable_scope('B'):
variables_lib2.local_variable([5])
b = variables_lib.VariableV1([5])
self.assertEquals([a], variables_lib2.get_trainable_variables('A'))
self.assertEquals([b], variables_lib2.get_trainable_variables('B'))
def testGetLocalVariables(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
_ = variables_lib2.model_variable('a', [5])
with variable_scope.variable_scope('B'):
_ = variables_lib2.model_variable('a', [5])
self.assertEquals([], variables_lib2.get_local_variables('A'))
self.assertEquals([], variables_lib2.get_local_variables('B'))
def testInitializedVariableValue(self):
with self.cached_session() as sess:
a = variables_lib2.model_variable(
'a', [5], initializer=init_ops.ones_initializer())
sess.run(variables_lib.global_variables_initializer())
self.assertAllEqual(a.eval(), [1] * 5)
def testDeviceFn(self):
class DevFn(object):
def __init__(self):
self.counter = -1
def __call__(self, op):
self.counter += 1
return '/cpu:%d' % self.counter
with ops.Graph().as_default():
with arg_scope([variables_lib2.model_variable], device=DevFn()):
a = variables_lib2.model_variable('a', [5])
b = variables_lib2.model_variable('b', [20])
self.assertDeviceEqual(a.device, '/cpu:0')
self.assertEqual(a.initial_value.op.colocation_groups(),
a.op.colocation_groups())
self.assertDeviceEqual(b.device, '/cpu:1')
self.assertEqual(b.initial_value.op.colocation_groups(),
b.op.colocation_groups())
def testVariableWithVariableDeviceChooser(self):
with ops.Graph().as_default():
device_fn = variables_lib2.VariableDeviceChooser()
with arg_scope([variables_lib2.model_variable], device=device_fn):
a = variables_lib2.model_variable('a', [5])
b = variables_lib2.model_variable('b', [20])
self.assertDeviceEqual(a.device, 'cpu:0')
self.assertEqual(a.initial_value.op.colocation_groups(),
a.op.colocation_groups())
self.assertDeviceEqual(b.device, 'cpu:0')
      self.assertEqual(b.initial_value.op.colocation_groups(),
                       b.op.colocation_groups())
class GetVariablesCollections(test.TestCase):
def testVariableCollection(self):
with self.cached_session():
a = variables_lib2.variable('a', [], collections='A')
b = variables_lib2.variable('b', [], collections='B')
self.assertEquals(a, ops.get_collection('A')[0])
self.assertEquals(b, ops.get_collection('B')[0])
def testVariableCollections(self):
with self.cached_session():
a = variables_lib2.variable('a', [], collections=['A', 'C'])
b = variables_lib2.variable('b', [], collections=['B', 'C'])
self.assertEquals(a, ops.get_collection('A')[0])
self.assertEquals(b, ops.get_collection('B')[0])
self.assertListEqual([a, b], ops.get_collection('C'))
def testVariableCollectionsWithArgScope(self):
with self.cached_session():
with arg_scope([variables_lib2.variable], collections='A'):
a = variables_lib2.variable('a', [])
b = variables_lib2.variable('b', [])
self.assertListEqual([a, b], ops.get_collection('A'))
def testVariableCollectionsWithArgScopeNested(self):
with self.cached_session():
with arg_scope([variables_lib2.variable], collections='A'):
a = variables_lib2.variable('a', [])
with arg_scope([variables_lib2.variable], collections='B'):
b = variables_lib2.variable('b', [])
self.assertEquals(a, ops.get_collection('A')[0])
self.assertEquals(b, ops.get_collection('B')[0])
def testVariableCollectionsWithArgScopeNonNested(self):
with self.cached_session():
with arg_scope([variables_lib2.variable], collections='A'):
a = variables_lib2.variable('a', [])
with arg_scope([variables_lib2.variable], collections='B'):
b = variables_lib2.variable('b', [])
variables_lib2.variable('c', [])
self.assertListEqual([a], ops.get_collection('A'))
self.assertListEqual([b], ops.get_collection('B'))
def testVariableRestoreWithArgScopeNested(self):
with self.cached_session():
a = variables_lib2.variable('a', [])
with arg_scope(
[variables_lib2.variable], trainable=False, collections=['A', 'B']):
b = variables_lib2.variable('b', [])
c = variables_lib2.variable('c', [], trainable=False)
self.assertEquals([a, c], variables_lib2.get_variables_to_restore())
self.assertEquals([a], variables_lib.trainable_variables())
self.assertEquals([b], ops.get_collection('A'))
self.assertEquals([b], ops.get_collection('B'))
class GetVariablesBySuffixTest(test.TestCase):
def testGetVariableGivenNameScoped(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
b = variables_lib2.variable('b', [5])
self.assertEquals([a], variables_lib2.get_variables_by_suffix('a'))
self.assertEquals([b], variables_lib2.get_variables_by_suffix('b'))
def testGetVariableWithScope(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
fooa = variables_lib2.variable('fooa', [5])
with variable_scope.variable_scope('B'):
a2 = variables_lib2.variable('a', [5])
matched_variables = variables_lib2.get_variables_by_suffix('a')
self.assertEquals([a, fooa, a2], matched_variables)
matched_variables = variables_lib2.get_variables_by_suffix('/a')
self.assertEquals([a, a2], matched_variables)
matched_variables = variables_lib2.get_variables_by_suffix('a', scope='A')
self.assertEquals([a, fooa], matched_variables)
def testGetVariableWithoutScope(self):
with self.cached_session():
a = variables_lib2.variable('a', [5])
fooa = variables_lib2.variable('fooa', [5])
b_a = variables_lib2.variable('B/a', [5])
matched_variables = variables_lib2.get_variables_by_suffix('a')
self.assertEquals([a, fooa, b_a], matched_variables)
matched_variables = variables_lib2.get_variables_by_suffix('fooa')
self.assertEquals([fooa], matched_variables)
class GetVariablesByNameTest(test.TestCase):
def testGetVariableGivenNameScoped(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
b = variables_lib2.variable('b', [5])
self.assertEquals([a], variables_lib2.get_variables_by_name('a'))
self.assertEquals([b], variables_lib2.get_variables_by_name('b'))
def testGetVariableWithScope(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
fooa = variables_lib2.variable('fooa', [5])
with variable_scope.variable_scope('B'):
a2 = variables_lib2.variable('a', [5])
matched_variables = variables_lib2.get_variables_by_name('a')
self.assertEquals([a, a2], matched_variables)
matched_variables = variables_lib2.get_variables_by_name('fooa')
self.assertEquals([fooa], matched_variables)
matched_variables = variables_lib2.get_variables_by_name('/a')
self.assertEquals([], matched_variables)
matched_variables = variables_lib2.get_variables_by_name('a', scope='A')
self.assertEquals([a], matched_variables)
def testGetVariableWithoutScope(self):
with self.cached_session():
a = variables_lib2.variable('a', [5])
fooa = variables_lib2.variable('fooa', [5])
b_a = variables_lib2.variable('B/a', [5])
matched_variables = variables_lib2.get_variables_by_name('a')
self.assertEquals([a, b_a], matched_variables)
matched_variables = variables_lib2.get_variables_by_name('fooa')
self.assertEquals([fooa], matched_variables)
class GetVariableFullNameTest(test.TestCase):
def testVariable(self):
my_var0 = variables_lib2.variable('my_var0', shape=[])
full_name = variables_lib2.get_variable_full_name(my_var0)
self.assertEquals(full_name, my_var0.op.name)
def testPartitionedVariable(self):
input_full_name = 'my_var0'
partitioner = partitioned_variables.variable_axis_size_partitioner(2)
my_var0 = variables_lib2.variable(
'my_var0', shape=[2, 2], partitioner=partitioner)
for part_var in list(my_var0):
computed_full_name = variables_lib2.get_variable_full_name(part_var)
self.assertEquals(input_full_name, computed_full_name)
class AssignFromValuesTest(test.TestCase):
def testNoScopes(self):
init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))
init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))
with self.cached_session() as sess:
initializer = init_ops.truncated_normal_initializer(stddev=.1)
var0 = variables_lib2.variable(
'my_var0', shape=[1, 3, 1], initializer=initializer)
var1 = variables_lib2.variable(
'my_var1', shape=[2, 1, 2], initializer=initializer)
var_names_to_values = {'my_var0': init_value0, 'my_var1': init_value1}
assign_op, feed_dict = variables_lib2.assign_from_values(
var_names_to_values)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
sess.run(assign_op, feed_dict)
# Request and test the variable values:
var0, var1 = sess.run([var0, var1])
self.assertAllEqual(init_value0, var0)
self.assertAllEqual(init_value1, var1)
def testWithScopes(self):
init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))
init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))
with self.cached_session() as sess:
initializer = init_ops.truncated_normal_initializer(stddev=.1)
with variable_scope.variable_scope('my_model/my_layer0'):
var0 = variables_lib2.variable(
'my_var0', shape=[1, 3, 1], initializer=initializer)
with variable_scope.variable_scope('my_model/my_layer1'):
var1 = variables_lib2.variable(
'my_var1', shape=[2, 1, 2], initializer=initializer)
var_names_to_values = {
'my_model/my_layer0/my_var0': init_value0,
'my_model/my_layer1/my_var1': init_value1
}
assign_op, feed_dict = variables_lib2.assign_from_values(
var_names_to_values)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
sess.run(assign_op, feed_dict)
# Request and test the variable values:
var0, var1 = sess.run([var0, var1])
self.assertAllEqual(init_value0, var0)
self.assertAllEqual(init_value1, var1)
class AssignFromValuesFnTest(test.TestCase):
def testNoScopes(self):
init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))
init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))
with self.cached_session() as sess:
initializer = init_ops.truncated_normal_initializer(stddev=.1)
var0 = variables_lib2.variable(
'my_var0', shape=[1, 3, 1], initializer=initializer)
var1 = variables_lib2.variable(
'my_var1', shape=[2, 1, 2], initializer=initializer)
var_names_to_values = {'my_var0': init_value0, 'my_var1': init_value1}
init_fn = variables_lib2.assign_from_values_fn(var_names_to_values)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
init_fn(sess)
# Request and test the variable values:
var0, var1 = sess.run([var0, var1])
self.assertAllEqual(init_value0, var0)
self.assertAllEqual(init_value1, var1)
def testWithScopes(self):
init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))
init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))
with self.cached_session() as sess:
initializer = init_ops.truncated_normal_initializer(stddev=.1)
with variable_scope.variable_scope('my_model/my_layer0'):
var0 = variables_lib2.variable(
'my_var0', shape=[1, 3, 1], initializer=initializer)
with variable_scope.variable_scope('my_model/my_layer1'):
var1 = variables_lib2.variable(
'my_var1', shape=[2, 1, 2], initializer=initializer)
var_names_to_values = {
'my_model/my_layer0/my_var0': init_value0,
'my_model/my_layer1/my_var1': init_value1
}
init_fn = variables_lib2.assign_from_values_fn(var_names_to_values)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
init_fn(sess)
# Request and test the variable values:
var0, var1 = sess.run([var0, var1])
self.assertAllEqual(init_value0, var0)
self.assertAllEqual(init_value1, var1)
class AssignFromCheckpointTest(test.TestCase):
def create_checkpoint_from_values(self,
var_names_to_values,
checkpoint_dir,
global_step=None):
"""Creates a checkpoint from a mapping of name to values in model_dir.
Args:
var_names_to_values: a map from variable names to values.
checkpoint_dir: the directory where the checkpoint will be saved.
global_step: the global step used to save the checkpoint.
Returns:
the model_path to the checkpoint.
"""
var_list = []
with session.Session('', graph=ops.Graph()) as sess:
# Create a set of variables to save in the checkpoint.
for var_name in var_names_to_values:
var_value = var_names_to_values[var_name]
var_list.append(variables_lib.VariableV1(var_value, name=var_name))
saver = saver_lib.Saver(var_list)
init_op = variables_lib.variables_initializer(var_list)
sess.run(init_op)
# Save the initialized values in the file at 'checkpoint_dir'
return saver.save(sess, checkpoint_dir, global_step=global_step)
def testLoadExistingVariables(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(), 'load_existing_variables'))
init_value0 = 10.0
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = variables_lib2.variable('my_var0', shape=[])
var1 = variables_lib2.variable('my_var1', shape=[])
vars_to_restore = {'v0': var0, 'v1': var1}
op, feed_dict = variables_lib2.assign_from_checkpoint(
model_path, vars_to_restore)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
sess.run(op, feed_dict)
# Request and test the variable values:
self.assertEqual(init_value0, var0.eval())
self.assertEqual(init_value1, var1.eval())
# Tests restoring PartitionedVariables and tests using a dictionary
# of lists as the assign_from_checkpoint() var_list param.
def testLoadPartitionedVariables(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(), 'load_partitioned_variables'))
init_value0 = np.array([[10.0, 11.0], [12.0, 13.0]])
init_value1 = np.array([20.0]) # Partitioned into 1 part, edge case.
var_names_to_values = {'var0': init_value0, 'var1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
# var0 and var1 are PartitionedVariables.
partitioner = partitioned_variables.variable_axis_size_partitioner(2)
var0 = variables_lib2.variable(
'var0', shape=init_value0.shape, partitioner=partitioner)
var0full = variables_lib2.variable('var0full', shape=init_value0.shape)
var1 = variables_lib2.variable(
'var1', shape=init_value1.shape, partitioner=partitioner)
# Convert var0 and var1 into a list of underlying variables.
vars_to_restore = {'var0': list(var0) + [var0full], 'var1': list(var1)}
op, feed_dict = variables_lib2.assign_from_checkpoint(
model_path, vars_to_restore)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
sess.run(op, feed_dict)
      # Request and test the variable values. PartitionedVariables can't be
      # evaluated directly, so we wrap them in an identity.
self.assertTrue(
np.array_equal(init_value0,
array_ops.identity(var0).eval()))
self.assertTrue(np.array_equal(init_value0, var0full.eval()))
self.assertTrue(
np.array_equal(init_value1,
array_ops.identity(var1).eval()))
def testRaisesValueErrorIfAVariableIsntFound(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(),
'raises_value_error_if_var_isnt_found'))
init_value0 = 10.0
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
with self.cached_session():
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = variables_lib2.variable('my_var0', shape=[])
var1 = variables_lib2.variable('my_var1', shape=[])
vars_to_restore = {'v0_fake': var0, 'v1': var1}
with self.assertRaises(ValueError):
variables_lib2.assign_from_checkpoint(model_path, vars_to_restore)
def testInitFromCheckpointWithScopes(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(),
'init_from_checkpoint_with_scopes'))
init_value0 = np.asarray(
[1.0, 3.0, 9.0], dtype=np.float32).reshape((1, 3, 1))
init_value1 = np.asarray(
[2.0, 4.0, 6.0, 8.0], dtype=np.float32).reshape((2, 1, 2))
var_names_to_values = {'layer0/v0': init_value0, 'layer1/v1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
with variable_scope.variable_scope('my_model/my_layer0'):
var0 = variables_lib2.variable('my_var0', shape=init_value0.shape)
with variable_scope.variable_scope('my_model/my_layer1'):
var1 = variables_lib2.variable('my_var1', shape=init_value1.shape)
vars_to_restore = {'layer0/v0': var0, 'layer1/v1': var1}
op, feed_dict = variables_lib2.assign_from_checkpoint(
model_path, vars_to_restore)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
sess.run(op, feed_dict)
# Request and test the variable values:
self.assertAllEqual(init_value0, var0.eval())
self.assertAllEqual(init_value1, var1.eval())
class AssignFromCheckpointFnTest(test.TestCase):
def create_checkpoint_from_values(self,
var_names_to_values,
checkpoint_dir,
global_step=None):
"""Creates a checkpoint from a mapping of name to values in model_dir.
Args:
var_names_to_values: a map from variable names to values.
checkpoint_dir: the directory where the checkpoint will be saved.
global_step: the global step used to save the checkpoint.
Returns:
the model_path to the checkpoint.
"""
var_list = []
with session.Session('', graph=ops.Graph()) as sess:
# Create a set of variables to save in the checkpoint.
for var_name in var_names_to_values:
var_value = var_names_to_values[var_name]
var_list.append(variables_lib.VariableV1(var_value, name=var_name))
saver = saver_lib.Saver(var_list)
init_op = variables_lib.variables_initializer(var_list)
sess.run(init_op)
# Save the initialized values in the file at 'checkpoint_dir'
return saver.save(sess, checkpoint_dir, global_step=global_step)
def testLoadExistingVariables(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(), 'load_existing_variables'))
if gfile.Exists(model_dir):
gfile.DeleteRecursively(model_dir)
init_value0 = 10.0
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = variables_lib2.variable('my_var0', shape=[])
var1 = variables_lib2.variable('my_var1', shape=[])
vars_to_restore = {'v0': var0, 'v1': var1}
init_fn = variables_lib2.assign_from_checkpoint_fn(
model_path, vars_to_restore)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
init_fn(sess)
# Request and test the variable values:
self.assertEqual(init_value0, var0.eval())
self.assertEqual(init_value1, var1.eval())
def testLoadExistingVariablesDifferentShapeDefaultDoesNotAllowReshape(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(),
'load_existing_vars_no_reshape'))
if gfile.Exists(model_dir):
gfile.DeleteRecursively(model_dir)
init_value0 = [[10.0, 11.0]]
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = variables_lib2.variable('my_var0', shape=[2, 1])
var1 = variables_lib2.variable('my_var1', shape=[])
vars_to_restore = {'v0': var0, 'v1': var1}
init_fn = variables_lib2.assign_from_checkpoint_fn(
model_path, vars_to_restore)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
with self.assertRaises(errors_impl.InvalidArgumentError):
init_fn(sess)
def testLoadExistingVariablesDifferentShapeAllowReshape(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(
self.get_temp_dir(),
'load_existing_variables_different_shape_allow_reshape'))
if gfile.Exists(model_dir):
gfile.DeleteRecursively(model_dir)
init_value0 = [[10.0, 11.0]]
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = variables_lib2.variable('my_var0', shape=[2, 1])
var1 = variables_lib2.variable('my_var1', shape=[])
vars_to_restore = {'v0': var0, 'v1': var1}
init_fn = variables_lib2.assign_from_checkpoint_fn(
model_path, vars_to_restore, reshape_variables=True)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
init_fn(sess)
# Request and test the variable values:
self.assertAllEqual(np.transpose(np.array(init_value0)), var0.eval())
self.assertEqual(init_value1, var1.eval())
def testNotFoundError(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(), 'not_found_error'))
if gfile.Exists(model_dir):
gfile.DeleteRecursively(model_dir)
init_value0 = 10.0
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = variables_lib2.variable('my_var0', shape=[])
var1 = variables_lib2.variable('my_var1', shape=[])
var2 = variables_lib2.variable('my_var2', shape=[])
vars_to_restore = {'v0': var0, 'v1': var1, 'v2': var2}
init_fn = variables_lib2.assign_from_checkpoint_fn(
model_path, vars_to_restore)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
with self.assertRaises(errors_impl.NotFoundError):
init_fn(sess)
def testMissingVariablesList(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(), 'missing_variables_list'))
if gfile.Exists(model_dir):
gfile.DeleteRecursively(model_dir)
init_value0 = 10.0
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = variables_lib2.variable('v0', shape=[])
var1 = variables_lib2.variable('v1', shape=[])
var2 = variables_lib2.variable('v2', shape=[])
vars_to_restore = [var0, var1, var2]
init_fn = variables_lib2.assign_from_checkpoint_fn(
model_path, vars_to_restore, ignore_missing_vars=True)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
init_fn(sess)
# Request and test the variable values:
self.assertEqual(init_value0, var0.eval())
self.assertEqual(init_value1, var1.eval())
def testMissingVariablesDict(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(), 'missing_variables_dict'))
if gfile.Exists(model_dir):
gfile.DeleteRecursively(model_dir)
init_value0 = 10.0
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = variables_lib2.variable('my_var0', shape=[])
var1 = variables_lib2.variable('my_var1', shape=[])
var2 = variables_lib2.variable('my_var2', shape=[])
vars_to_restore = {'v0': var0, 'v1': var1, 'v2': var2}
init_fn = variables_lib2.assign_from_checkpoint_fn(
model_path, vars_to_restore, ignore_missing_vars=True)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
init_fn(sess)
# Request and test the variable values:
self.assertEqual(init_value0, var0.eval())
self.assertEqual(init_value1, var1.eval())
class ZeroInitializerOpTest(test.TestCase):
def _testZeroInitializer(self, shape, initializer, use_init):
var = variables_lib.VariableV1(initializer)
var_zero = variables_lib2.zero_initializer(var)
with self.cached_session() as sess:
with self.assertRaisesOpError('Attempting to use uninitialized value'):
var.eval()
if use_init:
sess.run(var.initializer)
with self.assertRaisesOpError('input is already initialized'):
var_zero.eval()
self.assertAllClose(np.ones(shape), var.eval())
else:
var_zero.eval()
self.assertAllClose(np.zeros(shape), var.eval())
def testZeroInitializer(self):
for dtype in (dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64):
for use_init in (False, True):
self._testZeroInitializer([10, 20], array_ops.ones(
[10, 20], dtype=dtype), use_init)
class ZeroVarInitializerOpTest(test.TestCase):
def _testZeroVarInitializer(self, shape, initializer, use_init):
var = resource_variable_ops.ResourceVariable(initializer)
var_zero = variables_lib2.zero_initializer(var)
with self.cached_session() as sess:
with self.assertRaisesOpError('Error while reading resource variable'):
var.eval()
if use_init:
sess.run(var.initializer)
with self.assertRaisesOpError('input is already initialized'):
var_zero.eval()
self.assertAllClose(np.ones(shape), var.eval())
else:
var_zero.eval()
self.assertAllClose(np.zeros(shape), var.eval())
def testZeroVarInitializer(self):
for dtype in (dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64):
for use_init in (False, True):
self._testZeroVarInitializer([10, 20],
array_ops.ones([10, 20], dtype=dtype),
use_init)
class FilterVariablesTest(test.TestCase):
def setUp(self):
g = ops.Graph()
with g.as_default():
var_list = []
var_list.append(variables_lib.VariableV1(0, name='conv1/weights'))
var_list.append(variables_lib.VariableV1(0, name='conv1/biases'))
var_list.append(variables_lib.VariableV1(0, name='conv2/weights'))
var_list.append(variables_lib.VariableV1(0, name='conv2/biases'))
var_list.append(variables_lib.VariableV1(0, name='clfs/weights'))
var_list.append(variables_lib.VariableV1(0, name='clfs/biases'))
self._var_list = var_list
def _test_filter_variables(self,
expected_var_names,
include_patterns=None,
exclude_patterns=None,
reg_search=True):
filtered_var_list = variables_lib2.filter_variables(
self._var_list,
include_patterns=include_patterns,
exclude_patterns=exclude_patterns,
reg_search=reg_search)
filtered_var_names = [var.op.name for var in filtered_var_list]
for name in filtered_var_names:
self.assertIn(name, expected_var_names)
for name in expected_var_names:
self.assertIn(name, filtered_var_names)
self.assertEqual(len(filtered_var_names), len(expected_var_names))
def testNoFiltering(self):
self._test_filter_variables(expected_var_names=[
'conv1/weights', 'conv1/biases', 'conv2/weights', 'conv2/biases',
'clfs/weights', 'clfs/biases'
])
def testIncludeBiases(self):
self._test_filter_variables(
expected_var_names=['conv1/biases', 'conv2/biases', 'clfs/biases'],
include_patterns=['biases'])
def testExcludeWeights(self):
self._test_filter_variables(
expected_var_names=['conv1/biases', 'conv2/biases', 'clfs/biases'],
exclude_patterns=['weights'])
def testExcludeWeightsAndConv1(self):
self._test_filter_variables(
expected_var_names=['conv2/biases', 'clfs/biases'],
exclude_patterns=['weights', 'conv1'])
def testTwoIncludePatternsEnsureNoVariablesTwiceInFilteredList(self):
self._test_filter_variables(
expected_var_names=[
'conv1/weights', 'conv1/biases', 'conv2/weights', 'clfs/weights'
],
include_patterns=['conv1', 'weights'])
def testIncludeConv1ExcludeBiases(self):
self._test_filter_variables(
expected_var_names=['conv1/weights'],
include_patterns=['conv1'],
exclude_patterns=['biases'])
def testRegMatchIncludeBiases(self):
self._test_filter_variables(
expected_var_names=['conv1/biases', 'conv2/biases', 'clfs/biases'],
include_patterns=['.*biases'],
reg_search=False)
def testRegMatchIncludeBiasesWithIncompleteRegExpHasNoMatches(self):
self._test_filter_variables(
expected_var_names=[], include_patterns=['biases'], reg_search=False)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/framework/python/ops/variables_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Audio processing and decoding ops.
@@decode_wav
@@encode_wav
@@audio_spectrogram
@@mfcc
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_audio_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__, [])
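# Illustrative usage sketch (not part of the original module): decoding a WAV
# file and computing a spectrogram with the ops documented above, in a TF 1.x
# session. The path '/tmp/example.wav' and the window/stride values are
# hypothetical choices, not defaults of these ops.
def _audio_ops_usage_example():
  """Decodes a (hypothetical) WAV file and prints its spectrogram shape."""
  from tensorflow.python.client import session
  from tensorflow.python.ops import io_ops

  wav_bytes = io_ops.read_file('/tmp/example.wav')  # hypothetical input file
  decoded = decode_wav(wav_bytes, desired_channels=1)
  spectrogram = audio_spectrogram(
      decoded.audio, window_size=1024, stride=512, magnitude_squared=True)
  with session.Session() as sess:
    print(sess.run(spectrogram).shape)  # [channels, frames, fft_bins]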
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/framework/python/ops/audio_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration and ODE solvers.
See the
[Contrib Integrate](https://tensorflow.org/api_guides/python/contrib.integrate)
guide.
@@odeint
@@odeint_fixed
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.integrate.python.ops.odes import *
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/integrate/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ODE solvers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.integrate.python.ops import odes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class OdeIntTest(test.TestCase):
def setUp(self):
super(OdeIntTest, self).setUp()
    # simple defaults (solution is a sine wave)
matrix = constant_op.constant([[0, 1], [-1, 0]], dtype=dtypes.float64)
self.func = lambda y, t: math_ops.matmul(matrix, y)
self.y0 = np.array([[1.0], [0.0]])
def test_odeint_exp(self):
# Test odeint by an exponential function:
# dy / dt = y, y(0) = 1.0.
# Its analytical solution is y = exp(t).
func = lambda y, t: y
y0 = constant_op.constant(1.0, dtype=dtypes.float64)
t = np.linspace(0.0, 1.0, 11)
y_solved = odes.odeint(func, y0, t)
self.assertIn('odeint', y_solved.name)
self.assertEqual(y_solved.get_shape(), tensor_shape.TensorShape([11]))
with self.cached_session() as sess:
y_solved = sess.run(y_solved)
y_true = np.exp(t)
self.assertAllClose(y_true, y_solved)
def test_odeint_complex(self):
# Test a complex, linear ODE:
# dy / dt = k * y, y(0) = 1.0.
# Its analytical solution is y = exp(k * t).
k = 1j - 0.1
func = lambda y, t: k * y
t = np.linspace(0.0, 1.0, 11)
y_solved = odes.odeint(func, 1.0 + 0.0j, t)
with self.cached_session() as sess:
y_solved = sess.run(y_solved)
y_true = np.exp(k * t)
self.assertAllClose(y_true, y_solved)
def test_odeint_riccati(self):
    # The Riccati equation is:
# dy / dt = (y - t) ** 2 + 1.0, y(0) = 0.5.
# Its analytical solution is y = 1.0 / (2.0 - t) + t.
func = lambda t, y: (y - t)**2 + 1.0
t = np.linspace(0.0, 1.0, 11)
y_solved = odes.odeint(func, np.float64(0.5), t)
with self.cached_session() as sess:
y_solved = sess.run(y_solved)
y_true = 1.0 / (2.0 - t) + t
self.assertAllClose(y_true, y_solved)
def test_odeint_2d_linear(self):
# Solve the 2D linear differential equation:
# dy1 / dt = 3.0 * y1 + 4.0 * y2,
# dy2 / dt = -4.0 * y1 + 3.0 * y2,
# y1(0) = 0.0,
# y2(0) = 1.0.
# Its analytical solution is
# y1 = sin(4.0 * t) * exp(3.0 * t),
# y2 = cos(4.0 * t) * exp(3.0 * t).
matrix = constant_op.constant(
[[3.0, 4.0], [-4.0, 3.0]], dtype=dtypes.float64)
func = lambda y, t: math_ops.matmul(matrix, y)
y0 = constant_op.constant([[0.0], [1.0]], dtype=dtypes.float64)
t = np.linspace(0.0, 1.0, 11)
y_solved = odes.odeint(func, y0, t)
with self.cached_session() as sess:
y_solved = sess.run(y_solved)
y_true = np.zeros((len(t), 2, 1))
y_true[:, 0, 0] = np.sin(4.0 * t) * np.exp(3.0 * t)
y_true[:, 1, 0] = np.cos(4.0 * t) * np.exp(3.0 * t)
self.assertAllClose(y_true, y_solved, atol=1e-5)
def test_odeint_higher_rank(self):
func = lambda y, t: y
y0 = constant_op.constant(1.0, dtype=dtypes.float64)
t = np.linspace(0.0, 1.0, 11)
for shape in [(), (1,), (1, 1)]:
expected_shape = (len(t),) + shape
y_solved = odes.odeint(func, array_ops.reshape(y0, shape), t)
self.assertEqual(y_solved.get_shape(),
tensor_shape.TensorShape(expected_shape))
with self.cached_session() as sess:
y_solved = sess.run(y_solved)
      self.assertEqual(y_solved.shape, expected_shape)
def test_odeint_all_dtypes(self):
func = lambda y, t: y
t = np.linspace(0.0, 1.0, 11)
for y0_dtype in [
dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128
]:
for t_dtype in [dtypes.float32, dtypes.float64]:
y0 = math_ops.cast(1.0, y0_dtype)
y_solved = odes.odeint(func, y0, math_ops.cast(t, t_dtype))
with self.cached_session() as sess:
y_solved = sess.run(y_solved)
expected = np.asarray(np.exp(t))
self.assertAllClose(y_solved, expected, rtol=1e-5)
self.assertEqual(dtypes.as_dtype(y_solved.dtype), y0_dtype)
def test_odeint_required_dtypes(self):
with self.assertRaisesRegexp(TypeError, '`y0` must have a floating point'):
odes.odeint(self.func, math_ops.cast(self.y0, dtypes.int32), [0, 1])
with self.assertRaisesRegexp(TypeError, '`t` must have a floating point'):
odes.odeint(self.func, self.y0, math_ops.cast([0, 1], dtypes.int32))
def test_odeint_runtime_errors(self):
with self.assertRaisesRegexp(ValueError, 'cannot supply `options` without'):
odes.odeint(self.func, self.y0, [0, 1], options={'first_step': 1.0})
y = odes.odeint(
self.func,
self.y0, [0, 1],
method='dopri5',
options={'max_num_steps': 0})
with self.cached_session() as sess:
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
'max_num_steps'):
sess.run(y)
y = odes.odeint(self.func, self.y0, [1, 0])
with self.cached_session() as sess:
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
'monotonic increasing'):
sess.run(y)
def test_odeint_different_times(self):
# integrate steps should be independent of interpolation times
times0 = np.linspace(0, 10, num=11, dtype=float)
times1 = np.linspace(0, 10, num=101, dtype=float)
with self.cached_session() as sess:
y_solved_0, info_0 = sess.run(
odes.odeint(self.func, self.y0, times0, full_output=True))
y_solved_1, info_1 = sess.run(
odes.odeint(self.func, self.y0, times1, full_output=True))
self.assertAllClose(y_solved_0, y_solved_1[::10])
self.assertEqual(info_0['num_func_evals'], info_1['num_func_evals'])
self.assertAllEqual(info_0['integrate_points'], info_1['integrate_points'])
self.assertAllEqual(info_0['error_ratio'], info_1['error_ratio'])
def test_odeint_5th_order_accuracy(self):
t = [0, 20]
kwargs = dict(
full_output=True, method='dopri5', options=dict(max_num_steps=2000))
with self.cached_session() as sess:
_, info_0 = sess.run(
odes.odeint(self.func, self.y0, t, rtol=0, atol=1e-6, **kwargs))
_, info_1 = sess.run(
odes.odeint(self.func, self.y0, t, rtol=0, atol=1e-9, **kwargs))
self.assertAllClose(
info_0['integrate_points'].size * 1000**0.2,
float(info_1['integrate_points'].size),
rtol=0.01)
class StepSizeTest(test.TestCase):
def test_error_ratio_one(self):
new_step = odes._optimal_step_size(
last_step=constant_op.constant(1.0),
error_ratio=constant_op.constant(1.0))
with self.cached_session() as sess:
new_step = sess.run(new_step)
self.assertAllClose(new_step, 0.9)
def test_ifactor(self):
new_step = odes._optimal_step_size(
last_step=constant_op.constant(1.0),
error_ratio=constant_op.constant(0.0))
with self.cached_session() as sess:
new_step = sess.run(new_step)
self.assertAllClose(new_step, 10.0)
def test_dfactor(self):
new_step = odes._optimal_step_size(
last_step=constant_op.constant(1.0),
error_ratio=constant_op.constant(1e6))
with self.cached_session() as sess:
new_step = sess.run(new_step)
self.assertAllClose(new_step, 0.2)
class InterpolationTest(test.TestCase):
def test_5th_order_polynomial(self):
# this should be an exact fit
f = lambda x: x**4 + x**3 - 2 * x**2 + 4 * x + 5
f_prime = lambda x: 4 * x**3 + 3 * x**2 - 4 * x + 4
coeffs = odes._interp_fit(
f(0.0), f(10.0), f(5.0), f_prime(0.0), f_prime(10.0), 10.0)
times = np.linspace(0, 10, dtype=np.float32)
y_fit = array_ops.stack(
[odes._interp_evaluate(coeffs, 0.0, 10.0, t) for t in times])
y_expected = f(times)
with self.cached_session() as sess:
y_actual = sess.run(y_fit)
self.assertAllClose(y_expected, y_actual)
# attempt interpolation outside bounds
y_invalid = odes._interp_evaluate(coeffs, 0.0, 10.0, 100.0)
with self.cached_session() as sess:
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(y_invalid)
class OdeIntFixedTest(test.TestCase):
def _test_integrate_sine(self, method, t, dt=None):
def evol_func(y, t):
del t
return array_ops.stack([y[1], -y[0]])
y0 = [0., 1.]
y_grid = odes.odeint_fixed(evol_func, y0, t, dt, method=method)
with self.cached_session() as sess:
y_grid_array = sess.run(y_grid)
np.testing.assert_allclose(
y_grid_array[:, 0], np.sin(t), rtol=1e-2, atol=1e-2)
def _test_integrate_gaussian(self, method, t, dt=None):
def evol_func(y, t):
return -math_ops.cast(t, dtype=y.dtype) * y[0]
y0 = [1.]
y_grid = odes.odeint_fixed(evol_func, y0, t, dt, method=method)
with self.cached_session() as sess:
y_grid_array = sess.run(y_grid)
np.testing.assert_allclose(
y_grid_array[:, 0], np.exp(-t**2 / 2), rtol=1e-2, atol=1e-2)
def _test_integrate_sine_all(self, method):
uniform_time_grid = np.linspace(0., 10., 200)
non_uniform_time_grid = np.asarray([0.0, 0.4, 4.7, 5.2, 7.0])
uniform_dt = 0.02
non_uniform_dt = np.asarray([0.01, 0.001, 0.05, 0.03])
self._test_integrate_sine(method, uniform_time_grid)
self._test_integrate_sine(method, non_uniform_time_grid, uniform_dt)
self._test_integrate_sine(method, non_uniform_time_grid, non_uniform_dt)
def _test_integrate_gaussian_all(self, method):
uniform_time_grid = np.linspace(0., 2., 100)
non_uniform_time_grid = np.asarray([0.0, 0.1, 0.7, 1.2, 2.0])
uniform_dt = 0.01
non_uniform_dt = np.asarray([0.01, 0.001, 0.1, 0.03])
self._test_integrate_gaussian(method, uniform_time_grid)
self._test_integrate_gaussian(method, non_uniform_time_grid, uniform_dt)
self._test_integrate_gaussian(method, non_uniform_time_grid, non_uniform_dt)
def _test_everything(self, method):
self._test_integrate_sine_all(method)
self._test_integrate_gaussian_all(method)
def test_midpoint(self):
self._test_everything('midpoint')
def test_rk4(self):
self._test_everything('rk4')
def test_dt_size_exceptions(self):
times = np.linspace(0., 2., 100)
dt = np.ones(99) * 0.01
dt_wrong_length = np.asarray([0.01, 0.001, 0.1, 0.03])
dt_wrong_dim = np.expand_dims(np.linspace(0., 2., 99), axis=0)
times_wrong_dim = np.expand_dims(np.linspace(0., 2., 100), axis=0)
with self.assertRaises(ValueError):
self._test_integrate_gaussian('midpoint', times, dt_wrong_length)
with self.assertRaises(ValueError):
self._test_integrate_gaussian('midpoint', times, dt_wrong_dim)
with self.assertRaises(ValueError):
self._test_integrate_gaussian('midpoint', times_wrong_dim, dt)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/integrate/python/ops/odes_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ODE solvers for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
_ButcherTableau = collections.namedtuple('_ButcherTableau',
'alpha beta c_sol c_mid c_error')
# Parameters from Shampine (1986), section 4.
_DORMAND_PRINCE_TABLEAU = _ButcherTableau(
alpha=[1 / 5, 3 / 10, 4 / 5, 8 / 9, 1., 1.],
beta=[
[1 / 5],
[3 / 40, 9 / 40],
[44 / 45, -56 / 15, 32 / 9],
[19372 / 6561, -25360 / 2187, 64448 / 6561, -212 / 729],
[9017 / 3168, -355 / 33, 46732 / 5247, 49 / 176, -5103 / 18656],
[35 / 384, 0, 500 / 1113, 125 / 192, -2187 / 6784, 11 / 84],
],
c_sol=[35 / 384, 0, 500 / 1113, 125 / 192, -2187 / 6784, 11 / 84, 0],
c_mid=[
6025192743 / 30085553152 / 2, 0, 51252292925 / 65400821598 / 2,
-2691868925 / 45128329728 / 2, 187940372067 / 1594534317056 / 2,
-1776094331 / 19743644256 / 2, 11237099 / 235043384 / 2
],
c_error=[
1951 / 21600 - 35 / 384,
0,
22642 / 50085 - 500 / 1113,
451 / 720 - 125 / 192,
-12231 / 42400 - -2187 / 6784,
649 / 6300 - 11 / 84,
1 / 60,
],)
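# Illustrative consistency check (not part of the original module): for an
# explicit Runge-Kutta tableau, each row of `beta` should sum to the matching
# `alpha` entry and the 5th-order solution weights `c_sol` should sum to 1.
# This is a minimal sketch of those standard conditions, using plain Python
# floats; it is not called anywhere in the solver.
def _check_tableau_consistency(tableau=_DORMAND_PRINCE_TABLEAU):
  """Sanity-checks standard explicit Runge-Kutta consistency conditions."""
  for alpha_i, beta_row in zip(tableau.alpha, tableau.beta):
    assert abs(sum(beta_row) - alpha_i) < 1e-10, 'beta rows must sum to alpha'
  assert abs(sum(tableau.c_sol) - 1.0) < 1e-10, 'c_sol weights must sum to 1'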
def _possibly_nonzero(x):
return isinstance(x, ops.Tensor) or x != 0
def _scaled_dot_product(scale, xs, ys, name=None):
"""Calculate a scaled, vector inner product between lists of Tensors."""
with ops.name_scope(name, 'scaled_dot_product', [scale, xs, ys]) as scope:
# Some of the parameters in our Butcher tableau include zeros. Using
# _possibly_nonzero lets us avoid wasted computation.
return math_ops.add_n(
[(scale * x) * y for x, y in zip(xs, ys)
if _possibly_nonzero(x) and _possibly_nonzero(y)],
name=scope)
def _dot_product(xs, ys, name=None):
"""Calculate the vector inner product between two lists of Tensors."""
with ops.name_scope(name, 'dot_product', [xs, ys]) as scope:
return math_ops.add_n([x * y for x, y in zip(xs, ys)], name=scope)
def _runge_kutta_step(func,
y0,
f0,
t0,
dt,
tableau=_DORMAND_PRINCE_TABLEAU,
name=None):
"""Take an arbitrary Runge-Kutta step and estimate error.
Args:
func: Function to evaluate like `func(y, t)` to compute the time derivative
of `y`.
y0: Tensor initial value for the state.
f0: Tensor initial value for the derivative, computed from `func(y0, t0)`.
t0: float64 scalar Tensor giving the initial time.
dt: float64 scalar Tensor giving the size of the desired time step.
tableau: optional _ButcherTableau describing how to take the Runge-Kutta
step.
name: optional name for the operation.
Returns:
Tuple `(y1, f1, y1_error, k)` giving the estimated function value after
the Runge-Kutta step at `t1 = t0 + dt`, the derivative of the state at `t1`,
estimated error at `t1`, and a list of Runge-Kutta coefficients `k` used for
calculating these terms.
"""
with ops.name_scope(name, 'runge_kutta_step', [y0, f0, t0, dt]) as scope:
y0 = ops.convert_to_tensor(y0, name='y0')
f0 = ops.convert_to_tensor(f0, name='f0')
t0 = ops.convert_to_tensor(t0, name='t0')
dt = ops.convert_to_tensor(dt, name='dt')
dt_cast = math_ops.cast(dt, y0.dtype)
k = [f0]
for alpha_i, beta_i in zip(tableau.alpha, tableau.beta):
ti = t0 + alpha_i * dt
yi = y0 + _scaled_dot_product(dt_cast, beta_i, k)
k.append(func(yi, ti))
if not (tableau.c_sol[-1] == 0 and tableau.c_sol[:-1] == tableau.beta[-1]):
# This property (true for Dormand-Prince) lets us save a few FLOPs.
yi = y0 + _scaled_dot_product(dt_cast, tableau.c_sol, k)
y1 = array_ops.identity(yi, name='%s/y1' % scope)
f1 = array_ops.identity(k[-1], name='%s/f1' % scope)
y1_error = _scaled_dot_product(
dt_cast, tableau.c_error, k, name='%s/y1_error' % scope)
return (y1, f1, y1_error, k)
def _interp_fit(y0, y1, y_mid, f0, f1, dt):
"""Fit coefficients for 4th order polynomial interpolation.
Args:
y0: function value at the start of the interval.
y1: function value at the end of the interval.
y_mid: function value at the mid-point of the interval.
f0: derivative value at the start of the interval.
f1: derivative value at the end of the interval.
dt: width of the interval.
Returns:
List of coefficients `[a, b, c, d, e]` for interpolating with the polynomial
`p = a * x ** 4 + b * x ** 3 + c * x ** 2 + d * x + e` for values of `x`
between 0 (start of interval) and 1 (end of interval).
"""
# a, b, c, d, e = sympy.symbols('a b c d e')
# x, dt, y0, y1, y_mid, f0, f1 = sympy.symbols('x dt y0 y1 y_mid f0 f1')
# p = a * x ** 4 + b * x ** 3 + c * x ** 2 + d * x + e
# sympy.solve([p.subs(x, 0) - y0,
# p.subs(x, 1 / 2) - y_mid,
# p.subs(x, 1) - y1,
# (p.diff(x) / dt).subs(x, 0) - f0,
# (p.diff(x) / dt).subs(x, 1) - f1],
# [a, b, c, d, e])
# {a: -2.0*dt*f0 + 2.0*dt*f1 - 8.0*y0 - 8.0*y1 + 16.0*y_mid,
# b: 5.0*dt*f0 - 3.0*dt*f1 + 18.0*y0 + 14.0*y1 - 32.0*y_mid,
# c: -4.0*dt*f0 + dt*f1 - 11.0*y0 - 5.0*y1 + 16.0*y_mid,
# d: dt*f0,
# e: y0}
a = _dot_product([-2 * dt, 2 * dt, -8, -8, 16], [f0, f1, y0, y1, y_mid])
b = _dot_product([5 * dt, -3 * dt, 18, 14, -32], [f0, f1, y0, y1, y_mid])
c = _dot_product([-4 * dt, dt, -11, -5, 16], [f0, f1, y0, y1, y_mid])
d = dt * f0
e = y0
return [a, b, c, d, e]
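# Illustrative worked example (not part of the original module): the SymPy
# derivation in the comment above gives closed-form coefficients, and for a
# genuine quartic the fit is exact. A minimal sketch with plain floats,
# fitting f(x) = x**4 on a unit interval (dt = 1); it is not called anywhere
# in the solver.
def _interp_fit_example():
  """Checks the quartic fit reproduces f(x) = x**4 at 0, 1/2 and 1."""
  f = lambda x: x**4
  f_prime = lambda x: 4.0 * x**3
  y0, y_mid, y1 = f(0.0), f(0.5), f(1.0)
  f0, f1, dt = f_prime(0.0), f_prime(1.0), 1.0
  a = -2 * dt * f0 + 2 * dt * f1 - 8 * y0 - 8 * y1 + 16 * y_mid
  b = 5 * dt * f0 - 3 * dt * f1 + 18 * y0 + 14 * y1 - 32 * y_mid
  c = -4 * dt * f0 + dt * f1 - 11 * y0 - 5 * y1 + 16 * y_mid
  d, e = dt * f0, y0
  p = lambda x: a * x**4 + b * x**3 + c * x**2 + d * x + e
  assert abs(p(0.0) - y0) < 1e-12 and abs(p(1.0) - y1) < 1e-12
  assert abs(p(0.5) - y_mid) < 1e-12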
def _interp_fit_rk(y0, y1, k, dt, tableau=_DORMAND_PRINCE_TABLEAU):
"""Fit an interpolating polynomial to the results of a Runge-Kutta step."""
with ops.name_scope('interp_fit_rk'):
dt = math_ops.cast(dt, y0.dtype)
y_mid = y0 + _scaled_dot_product(dt, tableau.c_mid, k)
f0 = k[0]
f1 = k[-1]
return _interp_fit(y0, y1, y_mid, f0, f1, dt)
def _interp_evaluate(coefficients, t0, t1, t):
"""Evaluate polynomial interpolation at the given time point.
Args:
coefficients: list of Tensor coefficients as created by `interp_fit`.
t0: scalar float64 Tensor giving the start of the interval.
t1: scalar float64 Tensor giving the end of the interval.
t: scalar float64 Tensor giving the desired interpolation point.
Returns:
Polynomial interpolation of the coefficients at time `t`.
"""
with ops.name_scope('interp_evaluate'):
t0 = ops.convert_to_tensor(t0)
t1 = ops.convert_to_tensor(t1)
t = ops.convert_to_tensor(t)
dtype = coefficients[0].dtype
assert_op = control_flow_ops.Assert(
(t0 <= t) & (t <= t1),
['invalid interpolation, fails `t0 <= t <= t1`:', t0, t, t1])
with ops.control_dependencies([assert_op]):
x = math_ops.cast((t - t0) / (t1 - t0), dtype)
xs = [constant_op.constant(1, dtype), x]
for _ in range(2, len(coefficients)):
xs.append(xs[-1] * x)
return _dot_product(coefficients, reversed(xs))
def _optimal_step_size(last_step,
error_ratio,
safety=0.9,
ifactor=10.0,
dfactor=0.2,
order=5,
name=None):
"""Calculate the optimal size for the next Runge-Kutta step."""
with ops.name_scope(name, 'optimal_step_size', [last_step,
error_ratio]) as scope:
error_ratio = math_ops.cast(error_ratio, last_step.dtype)
exponent = math_ops.cast(1 / order, last_step.dtype)
# this looks more complex than necessary, but importantly it keeps
# error_ratio in the numerator so we can't divide by zero:
factor = math_ops.maximum(1 / ifactor,
math_ops.minimum(error_ratio**exponent / safety,
1 / dfactor))
return math_ops.div(last_step, factor, name=scope)
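# Illustrative numeric sketch (not part of the original module): the clipped
# update factor above, written with plain floats. With the defaults
# (safety=0.9, ifactor=10, dfactor=0.2, order=5) an error ratio of 1 shrinks
# the step to 0.9x, a vanishing ratio grows it by at most 10x, and a huge
# ratio shrinks it by at most 5x, matching StepSizeTest in odes_test.py.
def _optimal_step_size_example():
  """Evaluates the step-size formula for a few representative error ratios."""
  def next_step(last_step, error_ratio, safety=0.9, ifactor=10.0, dfactor=0.2,
                order=5):
    factor = max(1.0 / ifactor,
                 min(error_ratio**(1.0 / order) / safety, 1.0 / dfactor))
    return last_step / factor
  assert abs(next_step(1.0, 1.0) - 0.9) < 1e-12
  assert abs(next_step(1.0, 0.0) - 10.0) < 1e-12
  assert abs(next_step(1.0, 1e6) - 0.2) < 1e-12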
def _abs_square(x):
if x.dtype.is_complex:
return math_ops.square(math_ops.real(x)) + math_ops.square(math_ops.imag(x))
else:
return math_ops.square(x)
def _ta_append(tensor_array, value):
"""Append a value to the end of a tf.TensorArray."""
return tensor_array.write(tensor_array.size(), value)
class _RungeKuttaState(
collections.namedtuple('_RungeKuttaState',
'y1, f1, t0, t1, dt, interp_coeff')):
"""Saved state of the Runge Kutta solver.
Attributes:
y1: Tensor giving the function value at the end of the last time step.
f1: Tensor giving derivative at the end of the last time step.
t0: scalar float64 Tensor giving start of the last time step.
t1: scalar float64 Tensor giving end of the last time step.
dt: scalar float64 Tensor giving the size for the next time step.
    interp_coeff: list of Tensors giving coefficients for polynomial
interpolation between `t0` and `t1`.
"""
class _History(
collections.namedtuple('_History', 'integrate_points, error_ratio')):
"""Saved integration history for use in `info_dict`.
Attributes:
integrate_points: tf.TensorArray storing integrating time points.
error_ratio: tf.TensorArray storing computed error ratios at each
integration step.
"""
def _assert_increasing(t):
assert_increasing = control_flow_ops.Assert(
math_ops.reduce_all(t[1:] > t[:-1]), ['`t` must be monotonic increasing'])
return ops.control_dependencies([assert_increasing])
def _check_input_types(y0, t, dt=None):
if not (y0.dtype.is_floating or y0.dtype.is_complex):
raise TypeError('`y0` must have a floating point or complex floating '
'point dtype')
if not t.dtype.is_floating:
raise TypeError('`t` must have a floating point dtype')
if dt is not None and not dt.dtype.is_floating:
raise TypeError('`dt` must have a floating point dtype')
def _check_input_sizes(t, dt):
if len(t.get_shape().as_list()) > 1:
raise ValueError('t must be a 1D tensor')
if len(dt.get_shape().as_list()) > 1:
    raise ValueError('dt must be a 1D tensor')
if t.get_shape()[0] != dt.get_shape()[0] + 1:
raise ValueError('t and dt have incompatible lengths, must be N and N-1')
def _dopri5(func,
y0,
t,
rtol,
atol,
full_output=False,
first_step=None,
safety=0.9,
ifactor=10.0,
dfactor=0.2,
max_num_steps=1000,
name=None):
"""Solve an ODE for `odeint` using method='dopri5'."""
if first_step is None:
# at some point, we might want to switch to picking the step size
# automatically
first_step = 1.0
with ops.name_scope(name, 'dopri5', [
y0, t, rtol, atol, safety, ifactor, dfactor, max_num_steps
]) as scope:
first_step = ops.convert_to_tensor(
first_step, dtype=t.dtype, name='first_step')
safety = ops.convert_to_tensor(safety, dtype=t.dtype, name='safety')
ifactor = ops.convert_to_tensor(ifactor, dtype=t.dtype, name='ifactor')
dfactor = ops.convert_to_tensor(dfactor, dtype=t.dtype, name='dfactor')
max_num_steps = ops.convert_to_tensor(
max_num_steps, dtype=dtypes.int32, name='max_num_steps')
def adaptive_runge_kutta_step(rk_state, history, n_steps):
"""Take an adaptive Runge-Kutta step to integrate the ODE."""
y0, f0, _, t0, dt, interp_coeff = rk_state
with ops.name_scope('assertions'):
check_underflow = control_flow_ops.Assert(t0 + dt > t0,
['underflow in dt', dt])
check_max_num_steps = control_flow_ops.Assert(
n_steps < max_num_steps, ['max_num_steps exceeded'])
check_numerics = control_flow_ops.Assert(
math_ops.reduce_all(math_ops.is_finite(abs(y0))),
['non-finite values in state `y`', y0])
with ops.control_dependencies(
[check_underflow, check_max_num_steps, check_numerics]):
y1, f1, y1_error, k = _runge_kutta_step(func, y0, f0, t0, dt)
with ops.name_scope('error_ratio'):
        # We use the same approach as the dopri5 Fortran code.
error_tol = atol + rtol * math_ops.maximum(abs(y0), abs(y1))
tensor_error_ratio = _abs_square(y1_error) / _abs_square(error_tol)
# Could also use reduce_maximum here.
error_ratio = math_ops.sqrt(math_ops.reduce_mean(tensor_error_ratio))
accept_step = error_ratio <= 1
with ops.name_scope('update/rk_state'):
# If we don't accept the step, the _RungeKuttaState will be useless
# (covering a time-interval of size 0), but that's OK, because in such
# cases we always immediately take another Runge-Kutta step.
y_next = control_flow_ops.cond(accept_step, lambda: y1, lambda: y0)
f_next = control_flow_ops.cond(accept_step, lambda: f1, lambda: f0)
t_next = control_flow_ops.cond(accept_step, lambda: t0 + dt, lambda: t0)
interp_coeff = control_flow_ops.cond(
accept_step, lambda: _interp_fit_rk(y0, y1, k, dt),
lambda: interp_coeff)
dt_next = _optimal_step_size(dt, error_ratio, safety, ifactor, dfactor)
rk_state = _RungeKuttaState(y_next, f_next, t0, t_next, dt_next,
interp_coeff)
with ops.name_scope('update/history'):
history = _History(
_ta_append(history.integrate_points, t0 + dt),
_ta_append(history.error_ratio, error_ratio))
return rk_state, history, n_steps + 1
def interpolate(solution, history, rk_state, i):
"""Interpolate through the next time point, integrating as necessary."""
with ops.name_scope('interpolate'):
rk_state, history, _ = control_flow_ops.while_loop(
lambda rk_state, *_: t[i] > rk_state.t1,
adaptive_runge_kutta_step, (rk_state, history, 0),
name='integrate_loop')
y = _interp_evaluate(rk_state.interp_coeff, rk_state.t0, rk_state.t1,
t[i])
solution = solution.write(i, y)
return solution, history, rk_state, i + 1
with _assert_increasing(t):
num_times = array_ops.size(t)
solution = tensor_array_ops.TensorArray(
y0.dtype, size=num_times).write(0, y0)
history = _History(
integrate_points=tensor_array_ops.TensorArray(
t.dtype, size=0, dynamic_size=True),
error_ratio=tensor_array_ops.TensorArray(
rtol.dtype, size=0, dynamic_size=True))
rk_state = _RungeKuttaState(
y0, func(y0, t[0]), t[0], t[0], first_step, interp_coeff=[y0] * 5)
solution, history, _, _ = control_flow_ops.while_loop(
lambda _, __, ___, i: i < num_times,
interpolate, (solution, history, rk_state, 1),
name='interpolate_loop')
y = solution.stack(name=scope)
y.set_shape(t.get_shape().concatenate(y0.get_shape()))
if not full_output:
return y
else:
integrate_points = history.integrate_points.stack()
info_dict = {
'num_func_evals': 6 * array_ops.size(integrate_points) + 1,
'integrate_points': integrate_points,
'error_ratio': history.error_ratio.stack()
}
return (y, info_dict)
def odeint(func,
y0,
t,
rtol=1e-6,
atol=1e-12,
method=None,
options=None,
full_output=False,
name=None):
"""Integrate a system of ordinary differential equations.
Solves the initial value problem for a non-stiff system of first order ODEs:
```
dy/dt = func(y, t), y(t[0]) = y0
```
where y is a Tensor of any shape.
For example:
```
# solve `dy/dt = -y`, corresponding to exponential decay
tf.contrib.integrate.odeint(lambda y, _: -y, 1.0, [0, 1, 2])
=> [1, exp(-1), exp(-2)]
```
Output dtypes and numerical precision are based on the dtypes of the inputs
`y0` and `t`.
  Currently, this implements 5th order Runge-Kutta with adaptive step size
  control and dense output, using the Dormand-Prince method. It is similar to
  the 'dopri5' method of `scipy.integrate.ode` and MATLAB's `ode45`.
Based on: Shampine, Lawrence F. (1986), "Some Practical Runge-Kutta Formulas",
Mathematics of Computation, American Mathematical Society, 46 (173): 135-150,
doi:10.2307/2008219
Args:
func: Function that maps a Tensor holding the state `y` and a scalar Tensor
`t` into a Tensor of state derivatives with respect to time.
y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
have any floating point or complex dtype.
t: 1-D Tensor holding a sequence of time points for which to solve for
`y`. The initial time point should be the first element of this sequence,
and each time must be larger than the previous time. May have any floating
point dtype. If not provided as a Tensor, converted to a Tensor with
float64 dtype.
rtol: optional float64 Tensor specifying an upper bound on relative error,
per element of `y`.
atol: optional float64 Tensor specifying an upper bound on absolute error,
per element of `y`.
method: optional string indicating the integration method to use. Currently,
the only valid option is `'dopri5'`.
options: optional dict of configuring options for the indicated integration
method. Can only be provided if a `method` is explicitly set. For
`'dopri5'`, valid options include:
* first_step: an initial guess for the size of the first integration
(current default: 1.0, but may later be changed to use heuristics based
on the gradient).
* safety: safety factor for adaptive step control, generally a constant
in the range 0.8-1 (default: 0.9).
* ifactor: maximum factor by which the adaptive step may be increased
(default: 10.0).
    * dfactor: maximum factor by which the adaptive step may be decreased
(default: 0.2).
* max_num_steps: integer maximum number of integrate steps between time
points in `t` (default: 1000).
full_output: optional boolean. If True, `odeint` returns a tuple
`(y, info_dict)` describing the integration process.
name: Optional name for this operation.
Returns:
y: (N+1)-D tensor, where the first dimension corresponds to different
time points. Contains the solved value of y for each desired time point in
`t`, with the initial value `y0` being the first element along the first
dimension.
info_dict: only if `full_output == True`. A dict with the following values:
* num_func_evals: integer Tensor counting the number of function
evaluations.
* integrate_points: 1D float64 Tensor with the upper bound of each
integration time step.
* error_ratio: 1D float Tensor with the estimated ratio of the integration
      error to the error tolerance at each integration step. A ratio greater
than 1 corresponds to rejected steps.
Raises:
ValueError: if an invalid `method` is provided.
TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
an invalid dtype.
"""
if method is not None and method != 'dopri5':
raise ValueError('invalid method: %r' % method)
if options is None:
options = {}
elif method is None:
raise ValueError('cannot supply `options` without specifying `method`')
with ops.name_scope(name, 'odeint', [y0, t, rtol, atol]) as scope:
# TODO(shoyer): use nest.flatten (like tf.while_loop) to allow `y0` to be an
# arbitrarily nested tuple. This will help performance and usability by
# avoiding the need to pack/unpack in user functions.
y0 = ops.convert_to_tensor(y0, name='y0')
t = ops.convert_to_tensor(t, preferred_dtype=dtypes.float64, name='t')
_check_input_types(y0, t)
error_dtype = abs(y0).dtype
rtol = ops.convert_to_tensor(rtol, dtype=error_dtype, name='rtol')
atol = ops.convert_to_tensor(atol, dtype=error_dtype, name='atol')
return _dopri5(
func,
y0,
t,
rtol=rtol,
atol=atol,
full_output=full_output,
name=scope,
**options)
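# Illustrative usage sketch (not part of the original module), assuming a
# TensorFlow 1.15 graph/session environment: solving the exponential decay ODE
# from the docstring above and inspecting the info_dict returned when
# full_output=True. It is not called anywhere in the solver.
def _odeint_usage_example():
  """Solves dy/dt = -y and checks the result against exp(-t)."""
  import numpy as np  # assumed available, as in the accompanying tests
  from tensorflow.python.client import session

  t = np.linspace(0.0, 2.0, 5)
  y_and_info = odeint(lambda y, _: -y, 1.0, t, full_output=True)
  with session.Session() as sess:
    y_solved, info = sess.run(y_and_info)
  np.testing.assert_allclose(y_solved, np.exp(-t), rtol=1e-4)
  return info['num_func_evals']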
@six.add_metaclass(abc.ABCMeta)
class _FixedGridIntegrator(object):
"""Base class for fixed-grid ODE integrators."""
def integrate(self, evol_func, y0, time_grid, dt_grid, steps_on_intervals):
"""Returns integrated values of differential equation on the `time grid`.
Numerically integrates differential equation defined via time derivative
evaluator `evol_func` using fixed time steps specified in dt_grid.
Args:
evol_func: Callable, evaluates time derivative of y at a given time.
      y0: N-D Tensor holding the initial values of the solution.
      time_grid: 1-D Tensor holding the time points at which the solution
        will be recorded; must have a floating dtype.
      dt_grid: 1-D Tensor holding the fixed time steps to be used on the
        time_grid intervals. Must have a floating dtype and one fewer element
        than time_grid.
      steps_on_intervals: 1-D Tensor of integer dtype, must have the same size
        as dt_grid. Specifies the number of steps needed for every interval.
        Assumes steps_on_intervals * dt_grid == time intervals.
Returns:
(N+1)-D tensor, where the first dimension corresponds to different
time points. Contains the solved value of y for each desired time point in
`t`, with the initial value `y0` being the first element along the first
dimension.
"""
iteration_func = self._make_iteration_func(evol_func, dt_grid)
integrate_interval = self._make_interval_integrator(iteration_func,
steps_on_intervals)
num_times = array_ops.size(time_grid)
current_time = time_grid[0]
solution_array = tensor_array_ops.TensorArray(y0.dtype, num_times)
solution_array = solution_array.write(0, y0)
solution_array, _, _, _ = control_flow_ops.while_loop(
lambda _, __, ___, i: i < num_times,
integrate_interval,
(solution_array, y0, current_time, 1)
)
solution_array = solution_array.stack()
solution_array.set_shape(time_grid.get_shape().concatenate(y0.get_shape()))
return solution_array
def _make_iteration_func(self, evol_func, dt_grid):
"""Returns a function that builds operations of a single time step."""
def iteration_func(y, t, dt_step, interval_step):
"""Performs a single time step advance."""
dt = dt_grid[interval_step - 1]
dy = self._step_func(evol_func, t, dt, y)
dy = math_ops.cast(dy, dtype=y.dtype)
return y + dy, t + dt, dt_step + 1, interval_step
return iteration_func
def _make_interval_integrator(self, iteration_func, interval_sizes):
"""Returns a function that builds operations for interval integration."""
def integrate_interval(solution_array, y, t, interval_num):
"""Integrates y with fixed time step on interval `interval_num`."""
y, t, _, _ = control_flow_ops.while_loop(
lambda _, __, j, interval_num: j < interval_sizes[interval_num - 1],
iteration_func,
(y, t, 0, interval_num)
)
return solution_array.write(interval_num, y), y, t, interval_num + 1
return integrate_interval
@abc.abstractmethod
def _step_func(self, evol_func, t, dt, y):
pass
class _MidpointFixedGridIntegrator(_FixedGridIntegrator):
"""Fixed grid integrator implementing midpoint scheme."""
def _step_func(self, evol_func, t, dt, y):
dt_cast = math_ops.cast(dt, y.dtype)
# yn1 = yn + h * f(tn + h/2, yn + f(tn, yn) * h/2)
return dt_cast * evol_func(y + evol_func(y, t) * dt_cast / 2, t + dt / 2)
class _RK4FixedGridIntegrator(_FixedGridIntegrator):
"""Fixed grid integrator implementing RK4 scheme."""
def _step_func(self, evol_func, t, dt, y):
k1 = evol_func(y, t)
half_step = t + dt / 2
dt_cast = math_ops.cast(dt, y.dtype)
k2 = evol_func(y + dt_cast * k1 / 2, half_step)
k3 = evol_func(y + dt_cast * k2 / 2, half_step)
k4 = evol_func(y + dt_cast * k3, t + dt)
return math_ops.add_n([k1, 2 * k2, 2 * k3, k4]) * (dt_cast / 6)
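# Illustrative worked example (not part of the original module): one classical
# RK4 step with plain floats, applied to dy/dt = -y with y(0) = 1 and step
# h = 0.1. The result matches exp(-0.1) to roughly 5th-order local accuracy.
# It is not called anywhere in the solver.
def _rk4_single_step_example():
  """Takes one scalar RK4 step and compares it with the exact solution."""
  import math
  f = lambda y, t: -y
  y, t, h = 1.0, 0.0, 0.1
  k1 = f(y, t)
  k2 = f(y + h * k1 / 2, t + h / 2)
  k3 = f(y + h * k2 / 2, t + h / 2)
  k4 = f(y + h * k3, t + h)
  y_next = y + (k1 + 2 * k2 + 2 * k3 + k4) * (h / 6)
  assert abs(y_next - math.exp(-0.1)) < 1e-7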
def odeint_fixed(func, y0, t, dt=None, method='rk4', name=None):
"""ODE integration on a fixed grid (with no step size control).
Useful in certain scenarios to avoid the overhead of adaptive step size
control, e.g. when differentiation of the integration result is desired and/or
the time grid is known a priori to be sufficient.
Args:
func: Function that maps a Tensor holding the state `y` and a scalar Tensor
`t` into a Tensor of state derivatives with respect to time.
y0: N-D Tensor giving starting value of `y` at time point `t[0]`.
t: 1-D Tensor holding a sequence of time points for which to solve for
`y`. The initial time point should be the first element of this sequence,
and each time must be larger than the previous time. May have any floating
point dtype.
    dt: 0-D or 1-D Tensor providing a time step suggestion to be used on the
      time integration intervals in `t`. A 1-D Tensor should provide values
      for all intervals and must have one fewer element than `t`. If given a
      0-D Tensor, the value is interpreted as a time step suggestion to use
      for all intervals. If passed None, the time step is set to
      t[1:] - t[:-1]. Defaults to None. The actual step size is obtained by
      ensuring an integer number of steps per interval, potentially reducing
      the time step.
method: One of 'midpoint' or 'rk4'.
name: Optional name for the resulting operation.
Returns:
y: (N+1)-D tensor, where the first dimension corresponds to different
time points. Contains the solved value of y for each desired time point in
`t`, with the initial value `y0` being the first element along the first
dimension.
Raises:
ValueError: Upon caller errors.
"""
with ops.name_scope(name, 'odeint_fixed', [y0, t, dt]):
t = ops.convert_to_tensor(t, preferred_dtype=dtypes.float64, name='t')
y0 = ops.convert_to_tensor(y0, name='y0')
intervals = t[1:] - t[:-1]
if dt is None:
dt = intervals
dt = ops.convert_to_tensor(dt, preferred_dtype=dtypes.float64, name='dt')
steps_on_intervals = math_ops.ceil(intervals / dt)
dt = intervals / steps_on_intervals
steps_on_intervals = math_ops.cast(steps_on_intervals, dtype=dtypes.int32)
_check_input_types(y0, t, dt)
_check_input_sizes(t, dt)
with _assert_increasing(t):
with ops.name_scope(method):
if method == 'midpoint':
return _MidpointFixedGridIntegrator().integrate(func, y0, t, dt,
steps_on_intervals)
elif method == 'rk4':
return _RK4FixedGridIntegrator().integrate(func, y0, t, dt,
steps_on_intervals)
else:
raise ValueError('method not supported: {!s}'.format(method))
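# Illustrative usage sketch (not part of the original module), assuming a
# TensorFlow 1.15 graph/session environment: fixed-grid RK4 integration of
# dy/dt = -y on a uniform time grid. It is not called anywhere in the solver.
def _odeint_fixed_usage_example():
  """Integrates exponential decay on a fixed grid and checks the result."""
  import numpy as np  # assumed available, as in the accompanying tests
  from tensorflow.python.client import session

  t = np.linspace(0.0, 1.0, 11)
  y = odeint_fixed(lambda y, _: -y, [1.0], t, dt=0.01, method='rk4')
  with session.Session() as sess:
    y_solved = sess.run(y)
  np.testing.assert_allclose(y_solved[:, 0], np.exp(-t), rtol=1e-4)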
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/integrate/python/ops/odes.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""IgniteDataset that allows to get data from Apache Ignite.
Apache Ignite is a memory-centric distributed database, caching, and
processing platform for transactional, analytical, and streaming workloads,
delivering in-memory speeds at petabyte scale. This contrib package
contains an integration between Apache Ignite and TensorFlow. The
integration is based on the tf.data API on the TensorFlow side and the Binary
Client Protocol on the Apache Ignite side. It allows Apache Ignite to be used
as a data source for neural network training, inference, and all other
computations supported by TensorFlow. Ignite Dataset is based on Apache
Ignite Binary Client Protocol:
https://apacheignite.readme.io/v2.6/docs/binary-client-protocol.
@@IgniteDataset
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.ignite.python.ops.ignite_dataset_ops import IgniteDataset
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
"IgniteDataset",
]
remove_undocumented(__name__)
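# Illustrative usage sketch (not part of the original module): iterating over
# the rows of an Ignite cache in a TF 1.x session, mirroring the test under
# python/tests. The cache name "TEST_CACHE" and the port are hypothetical; a
# running Ignite node is assumed (see start_ignite.sh in the tests).
def _ignite_dataset_usage_example():
  """Prints every element of a (hypothetical) Ignite cache."""
  from tensorflow import compat
  from tensorflow.python.client import session
  from tensorflow.python.framework import errors

  dataset = IgniteDataset(cache_name="TEST_CACHE", port=10800)  # assumptions
  next_element = compat.v1.data.make_one_shot_iterator(dataset).get_next()
  with session.Session() as sess:
    try:
      while True:
        print(sess.run(next_element))
    except errors.OutOfRangeError:
      pass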
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/ignite/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for IgniteDataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow import compat
from tensorflow.contrib.ignite import IgniteDataset
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
class IgniteDatasetTest(test.TestCase):
"""The Apache Ignite servers have to setup before the test and tear down
after the test manually. The docker engine has to be installed.
To setup Apache Ignite servers:
$ bash start_ignite.sh
To tear down Apache Ignite servers:
$ bash stop_ignite.sh
"""
def test_ignite_dataset_with_plain_client(self):
"""Test Ignite Dataset with plain client.
"""
self._clear_env()
ds = IgniteDataset(cache_name="SQL_PUBLIC_TEST_CACHE", port=42300)
self._check_dataset(ds)
def _clear_env(self):
"""Clears environment variables used by Ignite Dataset.
"""
if "IGNITE_DATASET_USERNAME" in os.environ:
del os.environ["IGNITE_DATASET_USERNAME"]
if "IGNITE_DATASET_PASSWORD" in os.environ:
del os.environ["IGNITE_DATASET_PASSWORD"]
if "IGNITE_DATASET_CERTFILE" in os.environ:
del os.environ["IGNITE_DATASET_CERTFILE"]
if "IGNITE_DATASET_CERT_PASSWORD" in os.environ:
del os.environ["IGNITE_DATASET_CERT_PASSWORD"]
def _check_dataset(self, dataset):
"""Checks that dataset provides correct data."""
self.assertEqual(dtypes.int64, dataset.output_types["key"])
self.assertEqual(dtypes.string, dataset.output_types["val"]["NAME"])
self.assertEqual(dtypes.int64, dataset.output_types["val"]["VAL"])
it = compat.v1.data.make_one_shot_iterator(dataset)
ne = it.get_next()
with session.Session() as sess:
rows = [sess.run(ne), sess.run(ne), sess.run(ne)]
with self.assertRaises(errors.OutOfRangeError):
sess.run(ne)
self.assertEqual({"key": 1, "val": {"NAME": b"TEST1", "VAL": 42}}, rows[0])
self.assertEqual({"key": 2, "val": {"NAME": b"TEST2", "VAL": 43}}, rows[1])
self.assertEqual({"key": 3, "val": {"NAME": b"TEST3", "VAL": 44}}, rows[2])
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/ignite/python/tests/ignite_dataset_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for IGFS."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.contrib.ignite.python.ops.igfs_ops # pylint: disable=unused-import
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
class IGFSTest(test.TestCase):
"""The Apache Ignite servers have to setup before the test and tear down
after the test manually. The docker engine has to be installed.
To setup Apache Ignite servers:
$ bash start_ignite.sh
To tear down Apache Ignite servers:
$ bash stop_ignite.sh
"""
def test_create_file(self):
"""Test create file.
"""
# Setup and check preconditions.
file_name = "igfs:///test_create_file/1"
self.assertFalse(gfile.Exists(file_name))
# Create file.
with gfile.Open(file_name, mode="w") as w:
w.write("")
# Check that file was created.
self.assertTrue(gfile.Exists(file_name))
def test_write_read_file(self):
"""Test write/read file.
"""
# Setup and check preconditions.
file_name = "igfs:///test_write_read_file/1"
rows = 10000
self.assertFalse(gfile.Exists(file_name))
# Write data.
with gfile.Open(file_name, mode="w") as w:
for i in range(rows):
w.write("This is row\n")
# Read data.
with gfile.Open(file_name, mode="r") as r:
lines = r.readlines()
# Check that data is equal.
self.assertEqual(rows, len(lines))
for i in range(rows):
self.assertEqual("This is row\n", lines[i])
def test_delete_recursively(self):
"""Test delete recursively.
"""
# Setup and check preconditions.
dir_name = "igfs:///test_delete_recursively/"
file_name = "igfs:///test_delete_recursively/1"
self.assertFalse(gfile.Exists(dir_name))
self.assertFalse(gfile.Exists(file_name))
gfile.MkDir(dir_name)
with gfile.Open(file_name, mode="w") as w:
w.write("")
self.assertTrue(gfile.Exists(dir_name))
self.assertTrue(gfile.Exists(file_name))
# Delete directory recursively.
gfile.DeleteRecursively(dir_name)
# Check that directory was deleted.
self.assertFalse(gfile.Exists(dir_name))
self.assertFalse(gfile.Exists(file_name))
def test_copy(self):
"""Test copy.
"""
# Setup and check preconditions.
src_file_name = "igfs:///test_copy/1"
dst_file_name = "igfs:///test_copy/2"
self.assertFalse(gfile.Exists(src_file_name))
self.assertFalse(gfile.Exists(dst_file_name))
with gfile.Open(src_file_name, mode="w") as w:
w.write("42")
self.assertTrue(gfile.Exists(src_file_name))
self.assertFalse(gfile.Exists(dst_file_name))
# Copy file.
gfile.Copy(src_file_name, dst_file_name)
# Check that files are identical.
self.assertTrue(gfile.Exists(src_file_name))
self.assertTrue(gfile.Exists(dst_file_name))
with gfile.Open(dst_file_name, mode="r") as r:
data = r.read()
self.assertEqual("42", data)
def test_is_directory(self):
"""Test is directory.
"""
# Setup and check preconditions.
dir_name = "igfs:///test_is_directory/1"
file_name = "igfs:///test_is_directory/2"
with gfile.Open(file_name, mode="w") as w:
w.write("")
gfile.MkDir(dir_name)
# Check that directory is a directory.
self.assertTrue(gfile.IsDirectory(dir_name))
# Check that file is not a directory.
self.assertFalse(gfile.IsDirectory(file_name))
def test_list_directory(self):
"""Test list directory.
"""
# Setup and check preconditions.
dir_name = "igfs:///test_list_directory/"
file_names = [
"igfs:///test_list_directory/1", "igfs:///test_list_directory/2/3"
]
ch_dir_names = [
"igfs:///test_list_directory/4",
]
for file_name in file_names:
with gfile.Open(file_name, mode="w") as w:
w.write("")
for ch_dir_name in ch_dir_names:
gfile.MkDir(ch_dir_name)
ls_expected_result = file_names + ch_dir_names
# Get list of files in directory.
ls_result = gfile.ListDirectory(dir_name)
# Check that list of files is correct.
self.assertEqual(len(ls_expected_result), len(ls_result))
for e in ["1", "2", "4"]:
self.assertTrue(e in ls_result)
def test_make_dirs(self):
"""Test make dirs.
"""
# Setup and check preconditions.
dir_name = "igfs:///test_make_dirs/"
self.assertFalse(gfile.Exists(dir_name))
# Make directory.
gfile.MkDir(dir_name)
# Check that directory was created.
self.assertTrue(gfile.Exists(dir_name))
def test_remove(self):
"""Test remove.
"""
# Setup and check preconditions.
file_name = "igfs:///test_remove/1"
self.assertFalse(gfile.Exists(file_name))
with gfile.Open(file_name, mode="w") as w:
w.write("")
self.assertTrue(gfile.Exists(file_name))
# Remove file.
gfile.Remove(file_name)
# Check that file was removed.
self.assertFalse(gfile.Exists(file_name))
def test_rename_file(self):
"""Test rename file.
"""
# Setup and check preconditions.
src_file_name = "igfs:///test_rename_file/1"
dst_file_name = "igfs:///test_rename_file/2"
with gfile.Open(src_file_name, mode="w") as w:
w.write("42")
self.assertTrue(gfile.Exists(src_file_name))
# Rename file.
gfile.Rename(src_file_name, dst_file_name)
# Check that only new name of file is available.
self.assertFalse(gfile.Exists(src_file_name))
self.assertTrue(gfile.Exists(dst_file_name))
with gfile.Open(dst_file_name, mode="r") as r:
data = r.read()
self.assertEqual("42", data)
def test_rename_dir(self):
"""Test rename dir.
"""
# Setup and check preconditions.
src_dir_name = "igfs:///test_rename_dir/1"
dst_dir_name = "igfs:///test_rename_dir/2"
gfile.MkDir(src_dir_name)
# Rename directory.
gfile.Rename(src_dir_name, dst_dir_name)
# Check that only new name of directory is available.
self.assertFalse(gfile.Exists(src_dir_name))
self.assertTrue(gfile.Exists(dst_dir_name))
self.assertTrue(gfile.IsDirectory(dst_dir_name))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/ignite/python/tests/igfs_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ignite File System for checkpointing and communication with TensorBoard.
Apache Ignite is a memory-centric distributed database, caching, and
processing platform for transactional, analytical, and streaming workloads,
delivering in-memory speeds at petabyte scale. In addition to database
functionality Apache Ignite provides a distributed file system called
IGFS (https://ignite.apache.org/features/igfs.html). IGFS delivers
functionality similar to Hadoop HDFS, but entirely in-memory. In fact, in
addition to its own APIs, IGFS implements the Hadoop FileSystem API and can be
transparently
plugged into Hadoop or Spark deployments. This contrib package contains an
integration between IGFS and TensorFlow.
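A minimal usage sketch (assumes a running Apache Ignite node with IGFS
enabled; the path below is illustrative):
  import tensorflow.contrib.ignite.python.ops.igfs_ops  # registers igfs://
  from tensorflow.python.platform import gfile
  with gfile.Open("igfs:///tmp/checkpoint.txt", mode="w") as w:
    w.write("42")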
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.ignite.python.ops import ignite_op_loader # pylint: disable=unused-import
from tensorflow.python.framework import load_library
from tensorflow.python.platform import resource_loader
file_system_library = os.path.join(resource_loader.get_data_files_path(),
"../../_ignite_ops.so")
load_library.load_file_system_library(file_system_library)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/ignite/python/ops/igfs_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ignite Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import socket
import ssl
import struct
import six
from tensorflow.contrib.ignite.python.ops import gen_dataset_ops
from tensorflow.contrib.ignite.python.ops import ignite_op_loader # pylint: disable=unused-import
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import deprecation
@six.add_metaclass(abc.ABCMeta)
class Readable(object):
"""Abstract class that exposes methods to do reading-related operations."""
@abc.abstractmethod
def __init__(self):
pass
def read_byte(self):
"""Reads and returnes byte."""
return self._read("b", 1)
def read_short(self):
"""Reads and returns short (2 bytes, little-endian)."""
return self._read("h", 2)
def read_int(self):
"""Reads and returns int (4 bytes, little-endian)."""
return self._read("i", 4)
def read_long(self):
"""Reads and returns long (8 bytes, little-endian)."""
return self._read("q", 8)
def skip(self, length):
"""Skips the specified number of bytes."""
self.read_data(length)
@abc.abstractmethod
def read_data(self, length):
"""Reads the specified number of bytes and returns them as a buffer."""
return None
def _read(self, data_type, length):
"""Reads, unpacks and returns specified type (little-endian)."""
data_buffer = self.read_data(length)
return struct.unpack("<" + data_type, data_buffer)[0]
class DataBuffer(Readable):
"""DataBuffer class that exposes methods to read data from a byte buffer."""
def __init__(self, data_buffer):
"""Constructs a new instance based on the specified byte buffer.
Args:
data_buffer: Buffer to be read.
"""
Readable.__init__(self)
self.buffer = data_buffer
self.ptr = 0
def read_data(self, length):
"""Reads the specified number of bytes and returns them as a buffer."""
data_buffer = self.buffer[self.ptr:][:length]
self.ptr += length
return data_buffer
class TcpClient(Readable):
"""TcpClient class that exposes methods to read data from a socket."""
def __init__(self, host, port, certfile=None, keyfile=None, password=None):
"""Constructs a new instance based on the specified host and port.
Args:
host: Host to be connected.
port: Port to be connected.
certfile: File in PEM format containing the certificate as well as any
number of CA certificates needed to establish the certificate's
authenticity.
keyfile: File containing the private key (otherwise the private key will
be taken from certfile as well).
password: Password to be used if the private key is encrypted and a
password is necessary.
Raises:
ValueError: If the wrong combination of arguments is provided.
"""
Readable.__init__(self)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if certfile is not None:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(certfile, keyfile, password)
self.sock = context.wrap_socket(self.sock)
else:
if keyfile is not None:
raise ValueError("SSL is disabled, keyfile must not be specified "
"(to enable SSL specify certfile)")
if password is not None:
raise ValueError("SSL is disabled, password must not be specified "
"(to enable SSL specify certfile)")
self.host = host
self.port = port
def __enter__(self):
"""Connects to host and port specified in the constructor."""
self.sock.connect((self.host, self.port))
return self
def __exit__(self, t, v, traceback):
"""Disconnects the socket."""
self.sock.close()
def write_byte(self, v):
"""Writes the specified byte."""
self._write(v, "b")
def write_short(self, v):
"""Writes the specified short (2 bytes, little-endian)."""
self._write(v, "h")
def write_int(self, v):
"""Writes the specified short (4 bytes, little-endian)."""
self._write(v, "i")
def write_long(self, v):
"""Writes the specified int (8 bytes, little-endian)."""
self._write(v, "q")
def write_string(self, v):
"""Writes the specified string."""
self.sock.sendall(v.encode("UTF-8"))
def read_data(self, length):
"""Reads the specified number of bytes and returns them as a buffer."""
data_buffer = None
rem = length
while rem > 0:
buf = self.sock.recv(rem)
rem = rem - len(buf)
if data_buffer is None:
data_buffer = buf
else:
data_buffer += buf
return data_buffer
def _write(self, value, data_type):
"""Packs and writes data using the specified type (little-endian)."""
data_buffer = struct.pack("<" + data_type, value)
self.sock.sendall(data_buffer)
class BinaryType(object):
"""BinaryType class that encapsulated type id, type name and fields."""
def __init__(self, type_id, type_name, fields):
"""Constructs a new instance of BinaryType."""
self.type_id = type_id
self.type_name = type_name
self.fields = fields
class BinaryField(object):
"""BinaryField class that encapsulated field name, type id and field id."""
def __init__(self, field_name, type_id, field_id):
"""Constructs a new instance of BinaryField."""
self.field_name = field_name
self.type_id = type_id
self.field_id = field_id
# Binary types defined in Apache Ignite Thin client and supported by
# TensorFlow on Apache Ignite, see
# https://apacheignite.readme.io/v2.6/docs/binary-client-protocol.
# True means that type is a vector, False means type is scalar.
types = {
1: (dtypes.uint8, False),
2: (dtypes.int16, False),
3: (dtypes.int32, False),
4: (dtypes.int64, False),
5: (dtypes.float32, False),
6: (dtypes.float64, False),
7: (dtypes.uint16, False),
8: (dtypes.bool, False),
9: (dtypes.string, False),
12: (dtypes.uint8, True),
13: (dtypes.int16, True),
14: (dtypes.int32, True),
15: (dtypes.int64, True),
16: (dtypes.float32, True),
17: (dtypes.float64, True),
18: (dtypes.uint16, True),
19: (dtypes.bool, True),
20: (dtypes.string, True)
}
class TypeTreeNode(object):
"""TypeTreeNode class exposes methods to format object tree structure data."""
def __init__(self, name, type_id, fields=None, permutation=None):
"""Constructs a new instance of TypeTreeNode.
Args:
name: Name of the object tree node.
type_id: Type id of the object tree node.
fields: List of fields (children of the object tree node).
permutation: Permutation that should be applied to order object children.
"""
self.name = name
self.type_id = type_id
self.fields = fields
self.permutation = permutation
def to_output_classes(self):
"""Formats the tree object as required by `Dataset.output_classes`."""
if self.fields is None:
return ops.Tensor
output_classes = {}
for field in self.fields:
output_classes[field.name] = field.to_output_classes()
return output_classes
def to_output_shapes(self):
"""Formats the tree object as required by `Dataset.output_shapes`."""
if self.fields is None:
if self.type_id in types:
object_type = types[self.type_id]
is_array = object_type[1]
if is_array:
return tensor_shape.TensorShape([None])
return tensor_shape.TensorShape([])
raise ValueError("Unsupported type [type_id=%d]" % self.type_id)
output_shapes = {}
for field in self.fields:
output_shapes[field.name] = field.to_output_shapes()
return output_shapes
def to_output_types(self):
"""Formats the tree object as required by `Dataset.output_types`."""
if self.fields is None:
if self.type_id in types:
object_type = types[self.type_id]
return object_type[0]
raise ValueError("Unsupported type [type_id=%d]" % self.type_id)
else:
output_types = {}
for field in self.fields:
output_types[field.name] = field.to_output_types()
return output_types
def to_flat(self):
"""Returns a list of node types."""
return self.to_flat_rec([])
def to_permutation(self):
"""Returns a permutation that should be applied to order object leaves."""
correct_order_dict = {}
self.traversal_rec(correct_order_dict, 0)
object_order = []
self.traversal_permutation_rec(object_order)
return [correct_order_dict[o] for o in object_order]
def to_flat_rec(self, flat):
"""Formats a list of leaf node types in pre-order."""
if self.fields is None:
flat.append(self.type_id)
else:
for field in self.fields:
field.to_flat_rec(flat)
return flat
def traversal_permutation_rec(self, permutation):
"""Collects nodes in accordance with permutation."""
if self.fields is None:
permutation.append(self)
else:
for idx in self.permutation:
field = self.fields[idx]
field.traversal_permutation_rec(permutation)
def traversal_rec(self, d, i):
"""Collects nodes in pre-order traversal."""
if self.fields is None:
d[self] = i
i += 1
else:
for field in self.fields:
i = field.traversal_rec(d, i)
return i
class IgniteClient(TcpClient):
"""IgniteClient enables working with Apache Ignite using a thin client.
  This client works under the assumption that all objects in the cache
  have the same structure (homogeneous objects) and that the cache contains
  at least one object.
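  A minimal usage sketch (host, port and cache name are illustrative):
    with IgniteClient("localhost", 10800) as client:
      client.handshake()
      cache_type = client.get_cache_type("SQL_PUBLIC_TEST_CACHE")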
"""
def __init__(self,
host,
port,
username=None,
password=None,
certfile=None,
keyfile=None,
cert_password=None):
"""Constructs a new instance of IgniteClient.
Args:
host: Apache Ignite Thin client host to be connected.
port: Apache Ignite Thin client port to be connected.
username: Apache Ignite Thin Client authentication username.
password: Apache Ignite Thin Client authentication password.
certfile: File in PEM format containing the certificate as well as any
number of CA certificates needed to establish the certificate's
authenticity.
keyfile: File containing the private key (otherwise the private key will
be taken from certfile as well).
cert_password: Password to be used if the private key is encrypted and a
password is necessary.
"""
TcpClient.__init__(self, host, port, certfile, keyfile, cert_password)
self.username = username
self.password = password
def handshake(self):
"""Makes a handshake after connect and before any other calls."""
msg_len = 8
if self.username is None:
msg_len += 1
else:
msg_len += 5 + len(self.username)
if self.password is None:
msg_len += 1
else:
msg_len += 5 + len(self.password)
self.write_int(msg_len) # Message length
self.write_byte(1) # Handshake operation
self.write_short(1) # Version (1.1.0)
self.write_short(1)
self.write_short(0)
self.write_byte(2) # Thin client
if self.username is None: # Username
self.write_byte(101)
else:
self.write_byte(9)
self.write_int(len(self.username))
self.write_string(self.username)
if self.password is None: # Password
self.write_byte(101)
else:
self.write_byte(9)
self.write_int(len(self.password))
self.write_string(self.password)
self.read_int() # Result length
res = self.read_byte()
if res != 1:
serv_ver_major = self.read_short()
serv_ver_minor = self.read_short()
serv_ver_patch = self.read_short()
err_msg = self._parse_string()
if err_msg is None:
raise RuntimeError(
"Handshake Error [result=%d, version=%d.%d.%d]" %
(res, serv_ver_major, serv_ver_minor, serv_ver_patch))
else:
raise RuntimeError(
"Handshake Error [result=%d, version=%d.%d.%d, message='%s']" %
(res, serv_ver_major, serv_ver_minor, serv_ver_patch, err_msg))
def get_cache_type(self, cache_name):
"""Collects type information about objects stored in the specified cache."""
cache_name_hash = self._java_hash_code(cache_name)
self.write_int(25) # Message length
self.write_short(2000) # Operation code
self.write_long(0) # Request ID
self.write_int(cache_name_hash) # Cache name
self.write_byte(0) # Flags
self.write_byte(101) # Filter (NULL)
self.write_int(1) # Cursor page size
self.write_int(-1) # Partition to query
self.write_byte(0) # Local flag
result_length = self.read_int()
self.read_long() # Request id
status = self.read_int()
if status != 0:
err_msg = self._parse_string()
if err_msg is None:
raise RuntimeError("Scan Query Error [status=%s]" % status)
else:
raise RuntimeError(
"Scan Query Error [status=%s, message='%s']" % (status, err_msg))
self.read_long() # Cursor id
row_count = self.read_int()
if row_count == 0:
raise RuntimeError("Scan Query returned empty result, so it's "
"impossible to derive the cache type")
payload = DataBuffer(self.read_data(result_length - 25))
self.read_byte() # Next page
res = TypeTreeNode("root", 0, [
self._collect_types("key", payload),
self._collect_types("val", payload)
], [0, 1])
return res
def _java_hash_code(self, s):
"""Computes hash code of the specified string using Java code."""
h = 0
for c in s:
h = (31 * h + ord(c)) & 0xFFFFFFFF
return ((h + 0x80000000) & 0xFFFFFFFF) - 0x80000000
def _collect_types(self, field_name, data):
"""Extracts type information from the specified object."""
type_id = data.read_byte()
# Byte scalar.
if type_id == 1:
data.skip(1)
return TypeTreeNode(field_name, type_id)
# Short scalar.
if type_id == 2:
data.skip(2)
return TypeTreeNode(field_name, type_id)
# Integer scalar.
if type_id == 3:
data.skip(4)
return TypeTreeNode(field_name, type_id)
# Long scalar.
if type_id == 4:
data.skip(8)
return TypeTreeNode(field_name, type_id)
# Float scalar.
if type_id == 5:
data.skip(4)
return TypeTreeNode(field_name, type_id)
# Double scalar.
if type_id == 6:
data.skip(8)
return TypeTreeNode(field_name, type_id)
# Char scalar.
if type_id == 7:
data.skip(2)
return TypeTreeNode(field_name, type_id)
# Bool scalar.
if type_id == 8:
data.skip(1)
return TypeTreeNode(field_name, type_id)
# String scalar.
if type_id == 9:
length = data.read_int()
data.skip(length)
return TypeTreeNode(field_name, type_id)
# UUID scalar.
if type_id == 10:
data.skip(16)
return TypeTreeNode(field_name, type_id)
# Date scalar.
if type_id == 11:
data.skip(8)
return TypeTreeNode(field_name, type_id)
# Byte array.
if type_id == 12:
length = data.read_int()
data.skip(length)
return TypeTreeNode(field_name, type_id)
# Short array.
if type_id == 13:
length = data.read_int()
data.skip(length * 2)
return TypeTreeNode(field_name, type_id)
# Integer array.
if type_id == 14:
length = data.read_int()
data.skip(length * 4)
return TypeTreeNode(field_name, type_id)
# Long array.
if type_id == 15:
length = data.read_int()
data.skip(length * 8)
return TypeTreeNode(field_name, type_id)
# Float array.
if type_id == 16:
length = data.read_int()
data.skip(length * 4)
return TypeTreeNode(field_name, type_id)
# Double array.
if type_id == 17:
length = data.read_int()
data.skip(length * 8)
return TypeTreeNode(field_name, type_id)
# Char array.
if type_id == 18:
length = data.read_int()
data.skip(length * 2)
return TypeTreeNode(field_name, type_id)
# Bool array.
if type_id == 19:
length = data.read_int()
data.skip(length)
return TypeTreeNode(field_name, type_id)
# String array.
if type_id == 20:
length = data.read_int()
for _ in range(length):
header = data.read_byte()
if header == 9:
str_length = data.read_int()
data.skip(str_length)
elif header == 101:
pass
else:
raise RuntimeError(
"Unknown binary type when expected string [type_id=%d]" % header)
return TypeTreeNode(field_name, type_id)
# UUID array.
if type_id == 21:
length = data.read_int()
data.skip(length * 16) # TODO(dmitrievanthony): support NULL values.
return TypeTreeNode(field_name, type_id)
# Date array.
if type_id == 22:
length = data.read_int()
data.skip(length * 8)
return TypeTreeNode(field_name, type_id)
# Wrapped Binary Object.
if type_id == 27:
length = data.read_int()
inner_data = data.read_data(length)
data.read_int() # Offset
return self._collect_types(field_name, DataBuffer(inner_data))
# Complex Object.
if type_id == 103:
data.read_byte() # Object version
data.read_short() # Object flags
obj_type_id = data.read_int()
data.read_int() # Object hash code
obj_length = data.read_int()
data.read_int() # Object schema id
obj_schema_offset = data.read_int()
obj_type = self._get_type(obj_type_id)
children = []
for obj_field in obj_type.fields:
child = self._collect_types(obj_field.field_name, data)
children.append(child)
children_sorted = sorted(children, key=lambda child: child.name)
permutation = [children_sorted.index(child) for child in children]
children = children_sorted
data.skip(obj_length - obj_schema_offset)
return TypeTreeNode(field_name, type_id, children, permutation)
raise RuntimeError("Unknown binary type [type_id=%d]" % type_id)
def _get_type(self, type_id):
"""Queries Apache Ignite information about type by type id."""
self.write_int(14) # Message length
self.write_short(3002) # Operation code
self.write_long(0) # Request ID
self.write_int(type_id) # Type ID
self.read_int() # Result length
self.read_long() # Request id
status = self.read_int()
if status != 0:
err_msg = self._parse_string()
      if err_msg is None:
        raise RuntimeError("Get Binary Type Error [status=%d]" % status)
      else:
        raise RuntimeError("Get Binary Type Error [status=%d, message='%s']" %
                           (status, err_msg))
binary_type_exists = self.read_byte()
if binary_type_exists == 0:
raise RuntimeError("Binary type not found [type_id=%d] " % type_id)
binary_type_id = self.read_int()
binary_type_name = self._parse_string()
self._parse_string() # Affinity field name
fields = []
for _ in range(self.read_int()):
field_name = self._parse_string()
field_type_id = self.read_int()
field_id = self.read_int()
field = BinaryField(field_name, field_type_id, field_id)
fields.append(field)
is_enum = self.read_byte()
if is_enum == 1:
raise RuntimeError("Enum fields are not supported yet")
schema_cnt = self.read_int()
for _ in range(schema_cnt):
self.read_int() # Schema id
field_cnt = self.read_int()
self.skip(field_cnt * 4)
return BinaryType(binary_type_id, binary_type_name, fields)
def _parse_string(self):
"""Parses string."""
header = self.read_byte()
if header == 9:
length = self.read_int()
return self.read_data(length).decode("utf-8")
if header == 101:
return None
raise RuntimeError(
"Unknown binary type when expected string [type_id=%d]" % header)
class IgniteDataset(dataset_ops.DatasetSource):
"""Apache Ignite is a memory-centric distributed database.
It acts as a caching and processing platform for transactional, analytical,
and streaming workloads, delivering in-memory speeds at petabyte scale.
This contrib package contains an integration between Apache Ignite and
  TensorFlow. The integration is based on tf.data on the TensorFlow side and
  the Binary Client Protocol on the Apache Ignite side. It allows Apache
  Ignite to be used as a data source for neural network training, inference,
  and all other computations supported by TensorFlow. Ignite Dataset is based
  on the Apache Ignite Binary Client Protocol.
"""
@deprecation.deprecated(
None,
"tf.contrib.ignite will be removed in 2.0, the support for Apache Ignite "
"will continue to be provided through the tensorflow/io GitHub project.")
def __init__(self,
cache_name,
host="localhost",
port=10800,
local=False,
part=-1,
page_size=100,
username=None,
password=None,
certfile=None,
keyfile=None,
cert_password=None):
"""Create a IgniteDataset.
Args:
cache_name: Cache name to be used as datasource.
host: Apache Ignite Thin Client host to be connected.
port: Apache Ignite Thin Client port to be connected.
      local: Local flag; if True, only data local to the connected node is
        queried.
      part: Number of the partition to be queried (-1 means all partitions).
page_size: Apache Ignite Thin Client page size.
username: Apache Ignite Thin Client authentication username.
password: Apache Ignite Thin Client authentication password.
certfile: File in PEM format containing the certificate as well as any
number of CA certificates needed to establish the certificate's
authenticity.
keyfile: File containing the private key (otherwise the private key will
be taken from certfile as well).
cert_password: Password to be used if the private key is encrypted and a
password is necessary.
"""
with IgniteClient(host, port, username, password, certfile, keyfile,
cert_password) as client:
client.handshake()
self.cache_type = client.get_cache_type(cache_name)
self.cache_name = ops.convert_to_tensor(
cache_name, dtype=dtypes.string, name="cache_name")
self.host = ops.convert_to_tensor(host, dtype=dtypes.string, name="host")
self.port = ops.convert_to_tensor(port, dtype=dtypes.int32, name="port")
self.local = ops.convert_to_tensor(local, dtype=dtypes.bool, name="local")
self.part = ops.convert_to_tensor(part, dtype=dtypes.int32, name="part")
self.page_size = ops.convert_to_tensor(
page_size, dtype=dtypes.int32, name="page_size")
self.schema = ops.convert_to_tensor(
self.cache_type.to_flat(), dtype=dtypes.int32, name="schema")
self.permutation = ops.convert_to_tensor(
self.cache_type.to_permutation(),
dtype=dtypes.int32,
name="permutation")
self._element_spec = structure.convert_legacy_structure(
self.cache_type.to_output_types(), self.cache_type.to_output_shapes(),
self.cache_type.to_output_classes())
super(IgniteDataset, self).__init__(self._as_variant_tensor())
def _as_variant_tensor(self):
return gen_dataset_ops.ignite_dataset(self.cache_name, self.host, self.port,
self.local, self.part, self.page_size,
self.schema, self.permutation)
@property
def element_spec(self):
return self._element_spec
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/ignite/python/ops/ignite_dataset_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python helper for loading IGFS ops and kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
_dataset_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("../../_ignite_ops.so"))
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/ignite/python/ops/igfs_op_loader.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python helper for loading Ignite ops and kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
_dataset_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("../../_ignite_ops.so"))
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/ignite/python/ops/ignite_op_loader.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/specs/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder for TensorFlow models specified using specs_ops.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import exec_
from tensorflow.contrib.specs.python import params_ops
from tensorflow.contrib.specs.python import specs_lib
from tensorflow.contrib.specs.python import specs_ops
from tensorflow.python.util import tf_inspect
def eval_params(params, environment=None):
"""Evaluates a parameter specification and returns the environment.
Args:
params: parameter assignments as a string
environment: a dictionary of input bindings
Returns:
Environment with additional bindings created by
executing `params`
Raises:
Exception: other exceptions raised during execution of `params`
"""
specs_lib.check_keywords(params)
bindings = {}
if environment:
bindings.update(environment)
exec_(params, vars(params_ops), bindings) # pylint: disable=exec-used
return bindings
def eval_spec(spec, environment=None):
"""Evaluates a spec and returns the environment.
This function allows you to use a spec to obtain multiple bindings
  in an environment. That is useful if you use the spec language to
  specify multiple components of a larger network, for example:
  "left = Cr(64, [5, 5]); right = Fc(64)". Usually, you will want to use
  `create_net` or `create_net_fun` below.
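  For example, reusing the spec above (a sketch):
    env = eval_spec("left = Cr(64, [5, 5]); right = Fc(64)", {})
    # env["left"] and env["right"] are composable objects that can be
    # applied to an input via their `funcall` method.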
Args:
spec: specification as a string
environment: a dictionary of input bindings
Returns:
Environment with additional bindings created by spec.
Raises:
Exception: other exceptions raised during execution of `spec`
"""
specs_lib.check_keywords(spec)
bindings = {}
if environment:
bindings.update(environment)
exec_(spec, vars(specs_ops), bindings) # pylint: disable=exec-used
return bindings
def create_net_fun(spec, environment=None):
"""Evaluates a spec and returns the binding of `net`.
Specs are written in a DSL based on function composition. A spec
like `net = Cr(64, [3, 3])` assigns an object that represents a
single argument function capable of creating a network to
the variable `net`.
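  For example (a sketch; the spec and `inputs` tensor are illustrative):
    net_fn = create_net_fun("net = Cr(64, [3, 3])")
    outputs = net_fn(inputs)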
Args:
spec: specification as a string, ending with a `net = ...` statement
environment: a dictionary of input bindings
Returns:
A callable that instantiates the `net` binding.
Raises:
ValueError: spec failed to create a `net`
Exception: other exceptions raised during execution of `spec`
"""
bindings = eval_spec(spec, environment)
net = bindings.get("net", None)
if net is None:
raise ValueError("spec failed to create 'net': %s" % (spec,))
return net.funcall
def create_net(spec, inputs, environment=None):
"""Evaluates a spec and creates a network instance given the inputs.
Args:
spec: specification as a string, ending with a `net = ...` statement
inputs: input that `net` is applied to
environment: a dictionary of input bindings
Returns:
    The result of applying the `net` binding to `inputs`.
Raises:
ValueError: spec failed to create a `net`
Exception: other exceptions raised during execution of `spec`
"""
return create_net_fun(spec, environment)(inputs)
class LocalImport(object):
"""A class that allows us to temporarily import something.
Attributes:
    frame: the frame in which the context manager was invoked
names: a dictionary containing the new bindings
old: variable bindings that have been shadowed by the import
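  For example (mirroring how the module-level `ops` object below is used):
    with LocalImport(specs_ops):
      pass  # names from specs_ops are temporarily bound in the caller's globals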
"""
def __init__(self, names):
"""Create a context manager that binds the names in values.
Args:
names: A dictionary or module containing the bindings.
"""
if not isinstance(names, dict):
names = vars(names)
self.names = names
def __enter__(self):
self.frame = tf_inspect.currentframe()
bindings = self.frame.f_back.f_globals
self.old = {k: bindings.get(k, None) for k in self.names.keys()}
bindings.update(self.names)
def __exit__(self, some_type, value, traceback):
del some_type, value, traceback
bindings = self.frame.f_back.f_globals
bindings.update(self.old)
for k, v in self.old.items():
if v is None:
del bindings[k]
del self.frame
ops = LocalImport(specs_ops)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/specs/python/specs.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implement the "specs" DSL for describing deep networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import operator
import re
from six import exec_
QUOTED = re.compile(r"""
"([^"\\]|\\.)*" |
'([^'\\]|\\.)*'
""", re.VERBOSE)
KEYWORDS = re.compile(r"""\b(import|while|def|exec)\b""")
debug_ = False
def check_keywords(spec):
"""Check for common Python keywords in spec.
This function discourages the use of complex constructs
in TensorFlow specs; it doesn't completely prohibit them
(if necessary, we could check the AST).
Args:
spec: spec string
Raises:
ValueError: raised if spec contains a prohibited keyword.
"""
spec = re.sub(QUOTED, "", spec)
match = re.search(KEYWORDS, spec)
if match:
raise ValueError("keyword '%s' found in spec" % match.group(1))
def get_positional(args, kw, kw_overrides=False):
"""Interpolates keyword arguments into argument lists.
If `kw` contains keywords of the form "_0", "_1", etc., these
are positionally interpolated into the argument list.
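  For example (values are illustrative):
    get_positional([1], {"_1": 2, "scale": 3}) == ([1, 2], {"scale": 3})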
Args:
args: argument list
kw: keyword dictionary
    kw_overrides: if True, keyword values override existing positional values
Returns:
(new_args, new_kw), new argument lists and keyword dictionaries
with values interpolated.
"""
new_kw = {k: v for k, v in kw.items() if k[0] != "_"}
if len(new_kw) == len(kw):
return args, kw
new_args = list(args)
for key, value in kw.items():
if key[0] != "_": continue
index = int(key[1:])
while len(new_args) <= index:
new_args += [None]
if kw_overrides or new_args[index] is None:
new_args[index] = value
return new_args, new_kw
class Composable(object):
"""A composable function.
This defines the operators common to all composable objects.
  Currently defines composition (via "|") and repeated application
(via "**"), and maps addition ("+") and multiplication ("*")
as "(f + g)(x) = f(x) + g(x)".
"""
def __or__(self, f):
return Composition(self, f)
def __add__(self, g):
return Operator(operator.add, self, g)
def __mul__(self, g):
return Operator(operator.mul, self, g)
def __pow__(self, n):
assert n >= 0
if n == 0:
return Function(lambda x, *args, **kw: x)
result = self
for _ in range(n-1):
result = Composition(result, self)
return result
class Callable(Composable):
"""A composable function that simply defers to a callable function.
"""
def __init__(self, f):
self.f = f
def funcall(self, x):
return self.f(x)
class Operator(Composable):
"""A wrapper for an operator.
This takes an operator and an argument list and returns
the result of applying the operator to the results of applying
the functions in the argument list.
"""
def __init__(self, op, *args):
self.op = op
self.funs = args
def funcall(self, x):
outputs = [f.funcall(x) for f in self.funs]
return self.op(*outputs)
class Function(Composable):
"""A composable function wrapper for a regular Python function.
This overloads the regular __call__ operator for currying, i.e.,
arguments passed to __call__ are remembered for the eventual
function application.
  The final function application happens via the `funcall` method.
"""
def __init__(self, f, *args, **kw):
if not callable(f):
raise ValueError("%s: is not callable" % f)
self.f = f
self.args = list(args)
self.kw = kw
def __call__(self, *args, **kw):
new_args = list(args) + self.args
new_kw = self.kw.copy()
new_kw.update(kw)
return Function(self.f, *new_args, **new_kw)
  # TODO(tmb) The `funcall` method may be renamed to `function`.
def funcall(self, x):
args, kw = get_positional(self.args, self.kw)
if debug_:
print("DEBUG:", self.f, x, args, kw)
return self.f(x, *args, **kw)
class Composition(Composable):
"""A function composition.
This simply composes its two argument functions when
  applied to a final argument via `funcall`.
"""
def __init__(self, f, g):
self.f = f
self.g = g
def funcall(self, x):
return self.g.funcall(self.f.funcall(x))
# These are DSL names, not Python names
# pylint: disable=invalid-name, exec-used
def External(module_name, function_name):
"""Import a function from an external module.
Note that the `module_name` must be a module name
that works with the usual import mechanisms. Shorthands
like "tf.nn" will not work.
Args:
module_name: name of the module
function_name: name of the function within the module
Returns:
Function-wrapped value of symbol.
"""
module = importlib.import_module(module_name)
return Function(vars(module)[function_name])
def Import(statements):
"""Import a function by exec.
Args:
statements: Python statements
Returns:
Function-wrapped value of `f`.
Raises:
ValueError: the statements didn't define a value for "f"
"""
environ = {}
exec_(statements, environ)
if "f" not in environ:
raise ValueError("failed to define \"f\": %s", statements)
f = environ["f"]
return Function(f)
# pylint: enable=invalid-name, exec-used
def debug(mode=True):
"""Turn on/off debugging mode.
Debugging mode prints more information about the construction
of a network.
Args:
mode: True if turned on, False otherwise
"""
global debug_
debug_ = mode
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/specs/python/specs_lib.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operators for concise TensorFlow parameter specifications.
This module is used as an environment for evaluating expressions
in the "params" DSL.
Specifications are intended to assign simple numerical
values. Examples:
--params "n=64; d=5" --spec "(Cr(n) | Mp([2, 2])) ** d | Fm"
The random parameter primitives are useful for running large numbers
of experiments with randomly distributed parameters:
--params "n=Li(5,500); d=Ui(1,5)" --spec "(Cr(n) | Mp([2, 2])) ** d | Fm"
Internally, this might be implemented as follows:
params = specs.create_params(FLAGS.params, {})
logging.info(repr(params))
net = specs.create_net(FLAGS.spec, inputs, params)
Note that separating the specifications into parameters and network
creation allows us to log the random parameter values easily.
The implementation of this will change soon in order to support
hyperparameter tuning with steering. Instead of returning a number,
the primitives below will return a class instance that is then
used to generate a random number by the framework.
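A minimal sketch of evaluating a params string directly (names and ranges are
illustrative; `eval_params` lives in the companion `specs` module):
  bindings = specs.eval_params("n = Li(5, 500); d = Ui(1, 5)", {})
  # bindings["n"] is a log-uniform integer in [5, 500] and bindings["d"] a
  # uniform integer in [1, 5].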
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Lint disabled because these are operators in the DSL, not regular
# Python functions.
# pylint: disable=invalid-name
# pylint: disable=wildcard-import,unused-wildcard-import,redefining-builtin
# pylint: disable=redefined-builtin,g-importing-member,no-member
# make available all math expressions
import math
from math import *
import random
# pylint: enable=wildcard-import,unused-wildcard-import,redefining-builtin
# pylint: enable=redefined-builtin,g-importing-member,no-member
def Uf(lo=0.0, hi=1.0):
"""Uniformly distributed floating number."""
return random.uniform(lo, hi)
def Ui(lo, hi):
"""Uniformly distributed integer, inclusive limits."""
return random.randint(lo, hi)
def Lf(lo, hi):
"""Log-uniform distributed floatint point number."""
return math.exp(random.uniform(math.log(lo), math.log(hi)))
def Li(lo, hi):
"""Log-uniform distributed integer, inclusive limits."""
return int(math.floor(math.exp(random.uniform(math.log(lo),
math.log(hi+1-1e-5)))))
def Nt(mu, sigma, limit=3.0):
"""Normally distributed floating point number with truncation."""
return min(max(random.gauss(mu, sigma), mu-limit*sigma), mu+limit*sigma)
# pylint: enable=invalid-name
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/specs/python/params_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for specs-related summarization functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.specs.python import specs
from tensorflow.contrib.specs.python import summaries
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _rand(*size):
return np.random.uniform(size=size).astype("f")
class SummariesTest(test.TestCase):
def testStructure(self):
with self.cached_session():
inputs_shape = (1, 18, 19, 5)
inputs = constant_op.constant(_rand(*inputs_shape))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
self.assertEqual(
summaries.tf_spec_structure(
spec, input_shape=inputs_shape),
"_ variablev2 conv variablev2 biasadd relu")
def testStructureFromTensor(self):
with self.cached_session():
inputs = constant_op.constant(_rand(1, 18, 19, 5))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
self.assertEqual(
summaries.tf_spec_structure(spec, inputs),
"_ variablev2 conv variablev2 biasadd relu")
def testPrint(self):
with self.cached_session():
inputs = constant_op.constant(_rand(1, 18, 19, 5))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
summaries.tf_spec_print(spec, inputs)
def testSummary(self):
with self.cached_session():
inputs = constant_op.constant(_rand(1, 18, 19, 5))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
summaries.tf_spec_summary(spec, inputs)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/specs/python/summaries_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing specs specifications."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.specs import python
from tensorflow.contrib.specs.python import summaries
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.math_ops # pylint: disable=unused-import
from tensorflow.python.platform import test
specs = python
def _rand(*size):
return np.random.uniform(size=size).astype("f")
class SpecsTest(test.TestCase):
def testSimpleConv(self):
with self.cached_session():
inputs = constant_op.constant(_rand(1, 18, 19, 5))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
self.assertEqual(outputs.get_shape().as_list(), [1, 18, 19, 64])
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
self.assertEqual(
summaries.tf_spec_structure(spec, inputs),
"_ variablev2 conv variablev2 biasadd relu")
def testUnary(self):
# This is just a quick and dirty check that these ops exist
# and work as unary ops.
with self.cached_session():
inputs = constant_op.constant(_rand(17, 55))
spec = "net = Do(0.5) | Bn | Unit(1) | Relu | Sig | Tanh | Smax"
outputs = specs.create_net(spec, inputs)
self.assertEqual(outputs.get_shape().as_list(), [17, 55])
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (17, 55))
def testAdd(self):
with self.cached_session():
inputs = constant_op.constant(_rand(17, 55))
spec = "net = Fs(10) + Fr(10)"
outputs = specs.create_net(spec, inputs)
self.assertEqual(outputs.get_shape().as_list(), [17, 10])
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (17, 10))
self.assertRegexpMatches(
summaries.tf_spec_structure(spec, inputs),
"_ variablev2 dot variablev2 biasadd sig "
"<> variablev2 dot variablev2 biasadd relu add(v2)?")
def testMpPower(self):
with self.cached_session():
inputs = constant_op.constant(_rand(1, 64, 64, 5))
spec = "M2 = Mp([2, 2]); net = M2**3"
outputs = specs.create_net(spec, inputs)
self.assertEqual(outputs.get_shape().as_list(), [1, 8, 8, 5])
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 8, 8, 5))
self.assertEqual(
summaries.tf_spec_structure(spec, inputs),
"_ maxpool maxpool maxpool")
def testAbbrevPower(self):
with self.cached_session():
inputs = constant_op.constant(_rand(1, 64, 64, 5))
spec = "C3 = Cr([3, 3]); M2 = Mp([2, 2]); net = (C3(5) | M2)**3"
outputs = specs.create_net(spec, inputs)
self.assertEqual(outputs.get_shape().as_list(), [1, 8, 8, 5])
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 8, 8, 5))
self.assertEqual(
summaries.tf_spec_structure(spec, inputs),
"_ variablev2 conv variablev2 biasadd relu maxpool"
" variablev2 conv variablev2"
" biasadd relu maxpool variablev2 conv variablev2"
" biasadd relu maxpool")
def testAbbrevPower2(self):
with self.cached_session():
inputs = constant_op.constant(_rand(1, 64, 64, 5))
spec = "C3 = Cr(_1=[3, 3]); M2 = Mp([2, 2]);"
spec += "net = (C3(_0=5) | M2)**3"
outputs = specs.create_net(spec, inputs)
self.assertEqual(outputs.get_shape().as_list(), [1, 8, 8, 5])
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 8, 8, 5))
self.assertEqual(
summaries.tf_spec_structure(spec, inputs),
"_ variablev2 conv variablev2 biasadd relu maxpool"
" variablev2 conv variablev2 biasadd relu"
" maxpool variablev2 conv variablev2 biasadd relu"
" maxpool")
def testConc(self):
with self.cached_session():
inputs = constant_op.constant(_rand(10, 20))
spec = "net = Conc(1, Fs(20), Fs(10))"
outputs = specs.create_net(spec, inputs)
self.assertEqual(outputs.get_shape().as_list(), [10, 30])
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (10, 30))
self.assertEqual(
summaries.tf_spec_structure(spec, inputs),
"_ variablev2 dot variablev2 biasadd sig "
"<> variablev2 dot variablev2 biasadd sig _ concatv2")
def testImport(self):
with self.cached_session():
inputs = constant_op.constant(_rand(10, 20))
spec = ("S = Import('from tensorflow.python.ops" +
" import math_ops; f = math_ops.sigmoid')")
spec += "; net = S | S"
outputs = specs.create_net(spec, inputs)
self.assertEqual(outputs.get_shape().as_list(), [10, 20])
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (10, 20))
self.assertEqual(summaries.tf_spec_structure(spec, inputs), "_ sig sig")
def testKeywordRestriction(self):
with self.cached_session():
inputs = constant_op.constant(_rand(10, 20))
spec = "import re; net = Conc(1, Fs(20), Fs(10))"
self.assertRaises(ValueError, lambda: specs.create_net(spec, inputs))
def testParams(self):
params = "x = 3; y = Ui(-10, 10); z = Lf(1, 100); q = Nt(0.0, 1.0)"
bindings = specs.eval_params(params, {})
self.assertTrue("x" in bindings)
self.assertEqual(bindings["x"], 3)
self.assertTrue("y" in bindings)
self.assertTrue("z" in bindings)
self.assertTrue("q" in bindings)
  # TODO: This test is disabled because it relies on overly clever name
  # injection; the original author should simplify it before re-enabling.
def DISABLED_testSpecsOps(self):
# pylint: disable=undefined-variable
with self.assertRaises(NameError):
_ = Cr
with specs.ops:
self.assertIsNotNone(Cr)
self.assertTrue(callable(Cr(64, [3, 3])))
with self.assertRaises(NameError):
_ = Cr
  # TODO: This test is disabled because it relies on overly clever name
  # injection; the original author should simplify it before re-enabling.
def DISABLED_testVar(self):
with self.cached_session() as sess:
with specs.ops:
# pylint: disable=undefined-variable
v = Var("test_var",
shape=[2, 2],
initializer=init_ops.constant_initializer(42.0))
inputs = constant_op.constant(_rand(10, 100))
outputs = v.funcall(inputs)
self.assertEqual(len(variables.global_variables()), 1)
sess.run([outputs.initializer])
outputs_value = outputs.eval()
self.assertEqual(outputs_value.shape, (2, 2))
self.assertEqual(outputs_value[1, 1], 42.0)
  # NOTE: relies on specs.ops injecting names into the enclosing scope;
  # disabled pending a fix by the original author.
def DISABLED_testShared(self):
with self.cached_session():
with specs.ops:
# pylint: disable=undefined-variable
f = Shared(Fr(100))
g = f | f | f | f
inputs = constant_op.constant(_rand(10, 100))
_ = g.funcall(inputs)
self.assertEqual(len(variables.global_variables()), 2)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/specs/python/specs_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Init file, giving convenient access to all specs ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,g-importing-member,redefined-builtin
from tensorflow.contrib.specs.python.params_ops import *
from tensorflow.contrib.specs.python.specs import *
from tensorflow.contrib.specs.python.specs_lib import *
from tensorflow.contrib.specs.python.specs_ops import *
from tensorflow.contrib.specs.python.summaries import *
# pylint: enable=wildcard-import,redefined-builtin
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/specs/python/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operators for concise TensorFlow network models.
This module is used as an environment for evaluating expressions
in the "specs" DSL.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.specs.python import specs_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
# The following assignments don't appear to follow Google naming
# conventions, but that's because these are functions defined by
# higher-order function application, not "constants" and because they
# are the commands of the DSL.
# pylint: disable=invalid-name
class Idx(specs_lib.Composable):
"""Implements the identity function in network specifications."""
def funcall(self, x):
return x
class Conc(specs_lib.Composable):
"""Implements tensor concatenation in network specifications."""
def __init__(self, dim, *args):
"""Concatenates tensors along the given dimension.
Args:
dim: dimension along which concatenation takes place
*args: argument tensor functions to be concatenated
"""
self.dim = dim
self.funs = args
def funcall(self, x):
outputs = [f.funcall(x) for f in self.funs]
return array_ops.concat(outputs, self.dim)
External = specs_lib.External
Import = specs_lib.Import
Fun = specs_lib.Function
debug = specs_lib.debug
Print = Fun(logging_ops.Print)
Id = Fun(array_ops.identity)
# TODO(tmb) add Assert
# Two letter names for the most common layers.
# 2D Convolutional layers with nonlinearities (s/t/r/m/l)
# TODO(tmb) add Cbs, Fbs etc. for batch norms
Cx = Fun(layers.conv2d)
Cs = Fun(layers.conv2d, activation_fn=math_ops.sigmoid)
Ct = Fun(layers.conv2d, activation_fn=math_ops.tanh)
Cr = Fun(layers.conv2d, activation_fn=nn_ops.relu)
Cm = Fun(layers.conv2d, activation_fn=nn_ops.softmax)
Cl = Fun(layers.conv2d, activation_fn=None)
# Fully connected layers (tf.contrib.layers) with nonlinearities (s/t/r/m/l)
Fx = Fun(layers.fully_connected)
Fs = Fun(layers.fully_connected, activation_fn=math_ops.sigmoid)
Ft = Fun(layers.fully_connected, activation_fn=math_ops.tanh)
Fr = Fun(layers.fully_connected, activation_fn=nn_ops.relu)
Fm = Fun(layers.fully_connected, activation_fn=nn_ops.softmax)
Fl = Fun(layers.fully_connected, activation_fn=None)
# Pooling
Mp = Fun(layers.max_pool2d)
Ap = Fun(layers.avg_pool2d)
# Batch manipulations
Do = Fun(layers.dropout)
Bn = Fun(layers.batch_norm)
Lrn = Fun(nn.local_response_normalization)
Unit = Fun(layers.unit_norm)
# Shape changes
Flat = Fun(layers.flatten)
Reshape = Fun(array_ops.reshape)
Transpose = Fun(array_ops.transpose)
Squeeze = Fun(array_ops.squeeze)
Expand = Fun(array_ops.expand_dims)
# Nonlinearities (rarely needed on their own)
Relu = Fun(nn_ops.relu)
Sig = Fun(math_ops.sigmoid)
Tanh = Fun(math_ops.tanh)
Smax = Fun(nn_ops.softmax)
def Dws(n):
"""Depth-wise convolution + sigmoid (used after LSTM)."""
return Cs(n, [1, 1])
def Dwm(n):
"""Depth-wise convolution + softmax (used after LSTM)."""
return Cm(n, [1, 1])
# Sharing of Variables
def Var(name, *args, **kw):
"""Implements an operator that generates a variable.
This function is still experimental. Use it only
for generating a single variable instance for
each name.
Args:
name: Name of the variable.
*args: Other arguments to get_variable.
**kw: Other keywords for get_variable.
Returns:
A specs object for generating a variable.
"""
def var(_):
return variable_scope.get_variable(name, *args, **kw)
return specs_lib.Callable(var)
class Shared(specs_lib.Composable):
"""Wraps a scope with variable reuse around the subnetwork.
This function is still experimental.
Attributes:
f: The shared subnetwork.
name: A name for the shared scope.
used: A flag indicating whether the scope has already been used.
"""
shared_number = 1
def __init__(self, subnet, name=None, scope=None):
"""Create the Shared operator.
Use this as:
f = Shared(Cr(100, 3))
g = f | f | f
Ordinarily, you do not need to provide either a name or a scope.
Providing a name is useful if you want a well-defined namespace
for the variables (e.g., for saving a subnet).
Args:
subnet: Definition of the shared network.
name: Optional name for the shared context.
scope: Optional shared scope (must be a Scope, not a string).
Raises:
      ValueError: Scope is not of type tf.VariableScope, name is not
of type string, or both scope and name are given together.
"""
if scope is not None and not isinstance(scope,
variable_scope.VariableScope):
raise ValueError("scope must be None or a VariableScope")
    if name is not None and not isinstance(name, str):
raise ValueError("name must be None or a string")
if scope is not None and name is not None:
raise ValueError("cannot provide both a name and a scope")
if name is None:
name = "Shared_%d" % Shared.shared_number
Shared.shared_number += 1
self.subnet = subnet
self.name = name
self.scope = scope
def funcall(self, x):
"""Apply the shared operator to an input.
This wraps a variable scope around the creation of the subnet.
Args:
x: The input argument on which the subnet is invoked.
Returns:
The output tensor from invoking the subnet constructor.
"""
if self.scope is None:
with variable_scope.variable_scope(self.name, values=[x]) as scope:
self.scope = scope
return self.subnet.funcall(x)
else:
with variable_scope.variable_scope(self.scope, values=[x], reuse=True):
return self.subnet.funcall(x)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/specs/python/specs_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for summarizing and describing TensorFlow graphs.
This contains functions that generate string descriptions from
TensorFlow graphs, for debugging, testing, and model size
estimation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.specs.python import specs
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
# These are short abbreviations for common TensorFlow operations used
# in test cases with tf_structure to verify that specs_lib generates a
# graph structure with the right operations. Operations outside the
# scope of specs (e.g., Const and Placeholder) are just assigned "_"
# since they are not relevant to testing.
SHORT_NAMES_SRC = """
BiasAdd biasadd
Const _
Conv2D conv
MatMul dot
Placeholder _
Sigmoid sig
Variable var
""".split()
SHORT_NAMES = {
x: y
for x, y in zip(SHORT_NAMES_SRC[::2], SHORT_NAMES_SRC[1::2])
}
def _truncate_structure(x):
"""A helper function that disables recursion in tf_structure.
Some constructs (e.g., HorizontalLstm) are complex unrolled
structures and don't need to be represented in the output
of tf_structure or tf_print. This helper function defines
which tree branches should be pruned. This is a very imperfect
  way of dealing with unrolled LSTMs (since it truncates
useful information as well), but it's not worth doing something
better until the new fused and unrolled ops are ready.
Args:
x: a Tensor or Op
Returns:
A bool indicating whether the subtree should be pruned.
"""
if "/HorizontalLstm/" in x.name:
return True
return False
def tf_structure(x, include_shapes=False, finished=None):
"""A postfix expression summarizing the TF graph.
This is intended to be used as part of test cases to
check for gross differences in the structure of the graph.
  The resulting string is neither invertible nor unambiguous
and cannot be used to reconstruct the graph accurately.
Args:
x: a tf.Tensor or tf.Operation
include_shapes: include shapes in the output string
finished: a set of ops that have already been output
Returns:
A string representing the structure as a string of
postfix operations.
"""
if finished is None:
finished = set()
if isinstance(x, ops.Tensor):
shape = x.get_shape().as_list()
x = x.op
else:
shape = []
if x in finished:
return " <>"
finished |= {x}
result = ""
if not _truncate_structure(x):
for y in x.inputs:
result += tf_structure(y, include_shapes, finished)
if include_shapes:
result += " %s" % (shape,)
if x.type != "Identity":
name = SHORT_NAMES.get(x.type, x.type.lower())
result += " " + name
return result
def tf_print(x, depth=0, finished=None, printer=print):
"""A simple print function for a TensorFlow graph.
Args:
x: a tf.Tensor or tf.Operation
depth: current printing depth
finished: set of nodes already output
printer: print function to use
Returns:
Total number of parameters found in the
subtree.
"""
if finished is None:
finished = set()
if isinstance(x, ops.Tensor):
shape = x.get_shape().as_list()
x = x.op
else:
shape = ""
if x.type == "Identity":
x = x.inputs[0].op
if x in finished:
printer("%s<%s> %s %s" % (" " * depth, x.name, x.type, shape))
return
finished |= {x}
printer("%s%s %s %s" % (" " * depth, x.name, x.type, shape))
if not _truncate_structure(x):
for y in x.inputs:
tf_print(y, depth + 1, finished, printer=printer)
def tf_num_params(x):
"""Number of parameters in a TensorFlow subgraph.
Args:
x: root of the subgraph (Tensor, Operation)
Returns:
Total number of elements found in all Variables
in the subgraph.
"""
if isinstance(x, ops.Tensor):
shape = x.get_shape()
x = x.op
if x.type in ["Variable", "VariableV2"]:
return shape.num_elements()
totals = [tf_num_params(y) for y in x.inputs]
return sum(totals)
def tf_left_split(op):
"""Split the parameters of op for left recursion.
Args:
op: tf.Operation
Returns:
A tuple of the leftmost input tensor and a list of the
remaining arguments.
"""
if len(op.inputs) < 1:
return None, []
if op.type == "Concat":
return op.inputs[1], op.inputs[2:]
return op.inputs[0], op.inputs[1:]
def tf_parameter_iter(x):
"""Iterate over the left branches of a graph and yield sizes.
Args:
x: root of the subgraph (Tensor, Operation)
Yields:
A triple of name, number of params, and shape.
"""
  while True:
if isinstance(x, ops.Tensor):
shape = x.get_shape().as_list()
x = x.op
else:
shape = ""
left, right = tf_left_split(x)
totals = [tf_num_params(y) for y in right]
total = sum(totals)
yield x.name, total, shape
if left is None:
break
x = left
def _combine_filter(x):
"""A filter for combining successive layers with similar names."""
last_name = None
last_total = 0
last_shape = None
for name, total, shape in x:
name = re.sub("/.*", "", name)
if name == last_name:
last_total += total
continue
if last_name is not None:
yield last_name, last_total, last_shape
last_name = name
last_total = total
last_shape = shape
if last_name is not None:
yield last_name, last_total, last_shape
def tf_parameter_summary(x, printer=print, combine=True):
"""Summarize parameters by depth.
Args:
x: root of the subgraph (Tensor, Operation)
printer: print function for output
combine: combine layers by top-level scope
"""
seq = tf_parameter_iter(x)
if combine:
seq = _combine_filter(seq)
seq = reversed(list(seq))
for name, total, shape in seq:
printer("%10d %-20s %s" % (total, name, shape))
def tf_spec_structure(spec,
inputs=None,
input_shape=None,
input_type=dtypes.float32):
"""Return a postfix representation of the specification.
This is intended to be used as part of test cases to
check for gross differences in the structure of the graph.
  The resulting string is neither invertible nor unambiguous
and cannot be used to reconstruct the graph accurately.
Args:
spec: specification
inputs: input to the spec construction (usually a Tensor)
input_shape: tensor shape (in lieu of inputs)
input_type: type of the input tensor
Returns:
A string with a postfix representation of the
specification.
"""
if inputs is None:
inputs = array_ops.placeholder(input_type, input_shape)
outputs = specs.create_net(spec, inputs)
return str(tf_structure(outputs).strip())
def tf_spec_summary(spec,
inputs=None,
input_shape=None,
input_type=dtypes.float32):
"""Output a summary of the specification.
  This prints a list of left-most tensor operations and summarizes the
variables found in the right branches. This kind of representation
is particularly useful for networks that are generally structured
like pipelines.
Args:
spec: specification
inputs: input to the spec construction (usually a Tensor)
input_shape: optional shape of input
input_type: type of the input tensor
"""
if inputs is None:
inputs = array_ops.placeholder(input_type, input_shape)
outputs = specs.create_net(spec, inputs)
tf_parameter_summary(outputs)
def tf_spec_print(spec,
inputs=None,
input_shape=None,
input_type=dtypes.float32):
"""Print a tree representing the spec.
Args:
spec: specification
inputs: input to the spec construction (usually a Tensor)
input_shape: optional shape of input
input_type: type of the input tensor
"""
if inputs is None:
inputs = array_ops.placeholder(input_type, input_shape)
outputs = specs.create_net(spec, inputs)
tf_print(outputs)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/specs/python/summaries.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Shared representations for tree-based models in tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.decision_trees.proto import *
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/decision_trees/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops and modules related to factorization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.factorization.python.ops.clustering_ops import *
from tensorflow.contrib.factorization.python.ops.factorization_ops import *
from tensorflow.contrib.factorization.python.ops.gmm import *
from tensorflow.contrib.factorization.python.ops.gmm_ops import *
from tensorflow.contrib.factorization.python.ops.kmeans import *
from tensorflow.contrib.factorization.python.ops.wals import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'COSINE_DISTANCE',
'GMM',
'gmm',
'GmmAlgorithm',
'KMeans',
'KMEANS_PLUS_PLUS_INIT',
'KMeansClustering',
'RANDOM_INIT',
'SQUARED_EUCLIDEAN_DISTANCE',
'WALSMatrixFactorization',
'WALSModel',
]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/factorization/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The python module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/factorization/python/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for masked_matmul_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-bad-todo, g-import-not-at-top
import time
from tensorflow.contrib.factorization.python.ops import gen_factorization_ops
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class MaskedmatmulBenchmark(test.Benchmark):
"""Benchmark masked_matmul_ops."""
def _make_sparse_mask(self, mask_shape, nnz, sort=False):
"""Creates a sparse tensor to be used as a mask in masked_matmul.
Args:
mask_shape: int list, the shape of the mask.
nnz: int, the number of non-zero elements in the mask.
sort: boolean, whether to sort the indices of the mask (in lexicographic
order).
Returns:
A sparse tensor, with nnz indices, drawn uniformly at random.
"""
num_rows = mask_shape[0]
num_cols = mask_shape[1]
row_idx = random_ops.random_uniform(
[nnz], minval=0, maxval=num_rows, dtype=dtypes.int64)
col_idx = random_ops.random_uniform(
[nnz], minval=0, maxval=num_cols, dtype=dtypes.int64)
indices = array_ops.stack([row_idx, col_idx], axis=1)
values = array_ops.ones([nnz])
unordered_mask = sparse_tensor.SparseTensor(indices, values, mask_shape)
return sparse_ops.sparse_reorder(unordered_mask) if sort else unordered_mask
def _run_graph(self, a_shape, b_shape, nnz, num_iters, sort=False,
transpose_a=False, transpose_b=False):
"""Run the graph and return its average execution time.
Args:
a_shape: int list, the shape of the a matrix.
b_shape: int list, the shape of the b matrix.
nnz: int, the number of non-zero elements in the mask.
num_iters: int, the number of iterations to run (the output is the average
execution time, over num_iters).
sort: Boolean, whether to sort the indices in the mask.
transpose_a: boolean, whether to transpose the a matrix.
transpose_b: boolean, whether to transpose the b matrix.
Returns:
The average duration of the masked_matmul op in seconds.
"""
graph = ops.Graph()
with graph.as_default(), session_lib.Session(graph=graph) as session:
mask_shape = [a_shape[0], b_shape[1]]
a_shape = a_shape if not transpose_a else [a_shape[1], a_shape[0]]
b_shape = b_shape if not transpose_b else [b_shape[1], b_shape[0]]
a_var = variables.Variable(random_ops.random_normal(a_shape))
b_var = variables.Variable(random_ops.random_normal(b_shape))
mask_indices_ph = array_ops.placeholder(dtypes.int64, shape=[nnz, 2])
a_ph = array_ops.placeholder(dtypes.float32, shape=a_shape)
b_ph = array_ops.placeholder(dtypes.float32, shape=b_shape)
mask = self._make_sparse_mask(mask_shape, nnz, sort)
masked_prod = gen_factorization_ops.masked_matmul(
a_ph, b_ph, mask_indices_ph, transpose_a, transpose_b)
with ops.control_dependencies([masked_prod]):
result = control_flow_ops.no_op()
variables.global_variables_initializer().run()
avg_wall_time = 0
for _ in range(num_iters):
a, b, mask_indices = session.run([a_var, b_var, mask.indices])
feed_dict = {
mask_indices_ph: mask_indices,
a_ph: a,
b_ph: b
}
start_time = time.time()
session.run(result, feed_dict=feed_dict)
avg_wall_time += (time.time() - start_time)/num_iters
bench_name = (
"cpu nnz:{nnz} a_shape:{a_shape} b_shape:{b_shape} tr_a:{tr_a} "
"tr_b:{tr_b} sort:{sort}"
).format(
nnz=nnz,
a_shape=a_shape,
b_shape=b_shape,
tr_a=int(transpose_a),
tr_b=int(transpose_b),
sort=int(sort)
)
print(bench_name + " - %f secs" % avg_wall_time)
name = bench_name.replace(", ", "_").replace(":", "_").replace(" ", "_")
self.report_benchmark(
name=name,
iters=num_iters,
wall_time=avg_wall_time)
return avg_wall_time
# TODO(walidk): compare benchmarks to using existing tf ops.
def benchmark_matmul(self):
num_iters = 10
nnz = 100000
for transpose_a in [False, True]:
for transpose_b in [False, True]:
for dim in [200, 400, 800]:
for sort in [False, True]:
a_shape = [10000, dim]
b_shape = [dim, 10000]
self._run_graph(a_shape, b_shape, nnz, num_iters, sort, transpose_a,
transpose_b)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/factorization/python/kernel_tests/masked_matmul_benchmark.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for masked_matmul_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-bad-todo, g-import-not-at-top
import numpy as np
from tensorflow.contrib.factorization.python.ops import gen_factorization_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
def MakeMask():
inds = [[0, 0], [0, 2], [1, 1], [2, 0], [2, 3]] * 100
indices = np.array(inds).astype(np.int64)
shape = np.array([5, 4]).astype(np.int64)
return (indices, shape)
class MaskedProductOpsTest(test.TestCase):
def setUp(self):
a = [
[0.1, 0.2, 0.3],
[0.4, 0.5, 0.6],
[0.7, 0.8, 0.9],
[1.1, 1.2, 1.3],
[1.4, 1.5, 1.6],
]
b = [
[0.1, 0.4, 0.7, 1.1],
[0.2, 0.5, 0.8, 1.2],
[0.3, 0.6, 0.9, 1.3],
]
self._dot_products = np.array([0.14, 0.5, 0.77, 0.5, 2.9] * 100)
self._a = np.array(a).astype(np.float32)
self._b = np.array(b).astype(np.float32)
self._mask_ind, self._mask_shape = MakeMask()
def _runTestMaskedProduct(self, transpose_a, transpose_b):
with ops.Graph().as_default(), self.cached_session() as sess:
a = self._a if not transpose_a else array_ops.transpose(self._a)
b = self._b if not transpose_b else array_ops.transpose(self._b)
def AssertClose(sp_x, sp_y):
x_inds, x_vals, y_inds, y_vals = sess.run(
[sp_x.indices, sp_x.values,
sp_y.indices, sp_y.values])
self.assertAllClose(x_inds, y_inds)
self.assertAllClose(x_vals, y_vals)
values = gen_factorization_ops.masked_matmul(
a, b, self._mask_ind, transpose_a, transpose_b)
result = sparse_tensor.SparseTensor(
self._mask_ind, values, self._mask_shape)
true_result = sparse_tensor.SparseTensor(
self._mask_ind, self._dot_products, self._mask_shape)
AssertClose(result, true_result)
def _runTestEmptyMaskedProduct(self):
with ops.Graph().as_default(), self.cached_session() as sess:
empty_mask = constant_op.constant(0, shape=[0, 2], dtype=dtypes.int64)
values = gen_factorization_ops.masked_matmul(
self._a, self._b, empty_mask, False, False)
self.assertEqual(len(values.eval(session=sess)), 0)
def testMaskedProduct(self):
self._runTestMaskedProduct(False, False)
def testMaskedProductTransposeA(self):
self._runTestMaskedProduct(True, False)
def testMaskedProductTransposeB(self):
self._runTestMaskedProduct(False, True)
def testMaskedProductTransposeAAndB(self):
self._runTestMaskedProduct(True, True)
def testEmptyMaskedProduct(self):
self._runTestEmptyMaskedProduct()
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/factorization/python/kernel_tests/masked_matmul_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for clustering_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.factorization.python.ops import clustering_ops
from tensorflow.python.platform import test
class KmeansPlusPlusInitializationTest(test.TestCase):
# All but one input point are close to (101, 1). With uniform random sampling,
# it is highly improbable for (-1, -1) to be selected.
def setUp(self):
self._points = np.array([[100., 0.],
[101., 2.],
[102., 0.],
[100., 1.],
[100., 2.],
[101., 0.],
[101., 0.],
[101., 1.],
[102., 0.],
[-1., -1.]]).astype(np.float32)
def runTestWithSeed(self, seed):
with self.cached_session():
sampled_points = clustering_ops.kmeans_plus_plus_initialization(
self._points, 3, seed, (seed % 5) - 1)
self.assertAllClose(
sorted(sampled_points.eval().tolist()), [[-1., -1.],
[101., 1.],
[101., 1.]],
atol=1.0)
def testBasic(self):
for seed in range(100):
self.runTestWithSeed(seed)
class KMC2InitializationTest(test.TestCase):
def runTestWithSeed(self, seed):
with self.cached_session():
distances = np.zeros(1000).astype(np.float32)
distances[6] = 10e7
distances[4] = 10e3
sampled_point = clustering_ops.kmc2_chain_initialization(distances, seed)
      self.assertEqual(sampled_point.eval(), 6)
      distances[6] = 0.0
      sampled_point = clustering_ops.kmc2_chain_initialization(distances, seed)
      self.assertEqual(sampled_point.eval(), 4)
def testBasic(self):
for seed in range(100):
self.runTestWithSeed(seed)
class KMC2InitializationLargeTest(test.TestCase):
def setUp(self):
self._distances = np.zeros(1001)
self._distances[500] = 100.0
self._distances[1000] = 50.0
def testBasic(self):
with self.cached_session():
counts = {}
seed = 0
for i in range(50):
sample = clustering_ops.kmc2_chain_initialization(
self._distances, seed + i).eval()
counts[sample] = counts.get(sample, 0) + 1
      self.assertEqual(len(counts), 2)
self.assertTrue(500 in counts)
self.assertTrue(1000 in counts)
self.assertGreaterEqual(counts[500], 5)
self.assertGreaterEqual(counts[1000], 5)
class KMC2InitializationCornercaseTest(test.TestCase):
def setUp(self):
self._distances = np.zeros(10)
def runTestWithSeed(self, seed):
with self.cached_session():
sampled_point = clustering_ops.kmc2_chain_initialization(
self._distances, seed)
      self.assertEqual(sampled_point.eval(), 0)
def testBasic(self):
for seed in range(100):
self.runTestWithSeed(seed)
# A simple test that can be verified by hand.
class NearestCentersTest(test.TestCase):
def setUp(self):
self._points = np.array([[100., 0.],
[101., 2.],
[99., 2.],
[1., 1.]]).astype(np.float32)
self._centers = np.array([[100., 0.],
[99., 1.],
[50., 50.],
[0., 0.],
[1., 1.]]).astype(np.float32)
def testNearest1(self):
with self.cached_session():
[indices, distances] = clustering_ops.nearest_neighbors(self._points,
self._centers, 1)
self.assertAllClose(indices.eval(), [[0], [0], [1], [4]])
self.assertAllClose(distances.eval(), [[0.], [5.], [1.], [0.]])
def testNearest2(self):
with self.cached_session():
[indices, distances] = clustering_ops.nearest_neighbors(self._points,
self._centers, 2)
self.assertAllClose(indices.eval(), [[0, 1], [0, 1], [1, 0], [4, 3]])
self.assertAllClose(distances.eval(),
[[0., 2.], [5., 5.], [1., 5.], [0., 2.]])
# A test with large inputs.
class NearestCentersLargeTest(test.TestCase):
def setUp(self):
num_points = 1000
num_centers = 2000
num_dim = 100
max_k = 5
# Construct a small number of random points and later tile them.
points_per_tile = 10
assert num_points % points_per_tile == 0
points = np.random.standard_normal(
[points_per_tile, num_dim]).astype(np.float32)
# Construct random centers.
self._centers = np.random.standard_normal(
[num_centers, num_dim]).astype(np.float32)
# Exhaustively compute expected nearest neighbors.
def squared_distance(x, y):
return np.linalg.norm(x - y, ord=2)**2
nearest_neighbors = [
sorted([(squared_distance(point, self._centers[j]), j)
for j in range(num_centers)])[:max_k] for point in points
]
expected_nearest_neighbor_indices = np.array(
[[i for _, i in nn] for nn in nearest_neighbors])
expected_nearest_neighbor_squared_distances = np.array(
[[dist for dist, _ in nn] for nn in nearest_neighbors])
# Tile points and expected results to reach requested size (num_points)
(self._points, self._expected_nearest_neighbor_indices,
self._expected_nearest_neighbor_squared_distances) = (
np.tile(x, (int(num_points / points_per_tile), 1))
for x in (points, expected_nearest_neighbor_indices,
expected_nearest_neighbor_squared_distances))
def testNearest1(self):
with self.cached_session():
[indices, distances] = clustering_ops.nearest_neighbors(self._points,
self._centers, 1)
self.assertAllClose(indices.eval(),
self._expected_nearest_neighbor_indices[:, [0]])
self.assertAllClose(
distances.eval(),
self._expected_nearest_neighbor_squared_distances[:, [0]])
def testNearest5(self):
with self.cached_session():
[indices, distances] = clustering_ops.nearest_neighbors(self._points,
self._centers, 5)
self.assertAllClose(indices.eval(),
self._expected_nearest_neighbor_indices[:, 0:5])
self.assertAllClose(
distances.eval(),
self._expected_nearest_neighbor_squared_distances[:, 0:5])
if __name__ == "__main__":
np.random.seed(0)
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/factorization/python/kernel_tests/clustering_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for wals_solver_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.factorization.python.ops import gen_factorization_ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.platform import test
def SparseBlock3x3():
ind = np.array(
[[0, 0], [0, 2], [1, 1], [2, 0], [2, 1], [3, 2]]).astype(np.int64)
val = np.array([0.1, 0.2, 1.1, 2.0, 2.1, 3.2]).astype(np.float32)
shape = np.array([4, 3]).astype(np.int64)
return sparse_tensor.SparseTensor(ind, val, shape)
class WalsSolverOpsTest(test.TestCase):
def setUp(self):
self._column_factors = np.array([
[0.1, 0.2, 0.3],
[0.4, 0.5, 0.6],
[0.7, 0.8, 0.9],
]).astype(np.float32)
self._row_factors = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6],
[0.7, 0.8, 0.9],
[1.1, 1.2, 1.3]]).astype(np.float32)
self._column_weights = np.array([0.1, 0.2, 0.3]).astype(np.float32)
self._row_weights = np.array([0.1, 0.2, 0.3, 0.4]).astype(np.float32)
self._unobserved_weights = 0.1
def testWalsSolverLhs(self):
sparse_block = SparseBlock3x3()
with self.cached_session():
[lhs_tensor,
rhs_matrix] = gen_factorization_ops.wals_compute_partial_lhs_and_rhs(
self._column_factors, self._column_weights, self._unobserved_weights,
self._row_weights, sparse_block.indices, sparse_block.values,
[],
input_block_size=sparse_block.dense_shape[0],
input_is_transpose=False)
self.assertAllClose(lhs_tensor.eval(), [[
[0.014800, 0.017000, 0.019200],
[0.017000, 0.019600, 0.022200],
[0.019200, 0.022200, 0.025200],
], [
[0.0064000, 0.0080000, 0.0096000],
[0.0080000, 0.0100000, 0.0120000],
[0.0096000, 0.0120000, 0.0144000],
], [
[0.0099000, 0.0126000, 0.0153000],
[0.0126000, 0.0162000, 0.0198000],
[0.0153000, 0.0198000, 0.0243000],
], [
[0.058800, 0.067200, 0.075600],
[0.067200, 0.076800, 0.086400],
[0.075600, 0.086400, 0.097200],
]])
self.assertAllClose(rhs_matrix.eval(), [[0.019300, 0.023000, 0.026700],
[0.061600, 0.077000, 0.092400],
[0.160400, 0.220000, 0.279600],
[0.492800, 0.563200, 0.633600]])
def testWalsSolverLhsEntryWeights(self):
sparse_block = SparseBlock3x3()
with self.cached_session():
[lhs_tensor,
rhs_matrix] = gen_factorization_ops.wals_compute_partial_lhs_and_rhs(
self._column_factors, [], self._unobserved_weights,
[], sparse_block.indices, sparse_block.values,
[0.01, 0.03, 0.04, 0.03, 0.06, 0.12],
input_block_size=sparse_block.dense_shape[0],
input_is_transpose=False)
self.assertAllClose(lhs_tensor.eval(), [[
[0.014800, 0.017000, 0.019200],
[0.017000, 0.019600, 0.022200],
[0.019200, 0.022200, 0.025200],
], [
[0.0064000, 0.0080000, 0.0096000],
[0.0080000, 0.0100000, 0.0120000],
[0.0096000, 0.0120000, 0.0144000],
], [
[0.0099000, 0.0126000, 0.0153000],
[0.0126000, 0.0162000, 0.0198000],
[0.0153000, 0.0198000, 0.0243000],
], [
[0.058800, 0.067200, 0.075600],
[0.067200, 0.076800, 0.086400],
[0.075600, 0.086400, 0.097200],
]])
self.assertAllClose(rhs_matrix.eval(), [[0.019300, 0.023000, 0.026700],
[0.061600, 0.077000, 0.092400],
[0.160400, 0.220000, 0.279600],
[0.492800, 0.563200, 0.633600]])
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/factorization/python/kernel_tests/wals_solver_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Gaussian mixture model (GMM) clustering using tf.Learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib import framework
from tensorflow.contrib.factorization.python.ops import gmm_ops
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import logging_ops as logging
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
def _streaming_sum(scalar_tensor):
"""Create a sum metric and update op."""
sum_metric = framework.local_variable(constant_op.constant(0.0))
sum_update = sum_metric.assign_add(scalar_tensor)
return sum_metric, sum_update
class _InitializeClustersHook(session_run_hook.SessionRunHook):
"""Initializes clusters or waits for cluster initialization."""
def __init__(self, init_op, is_initialized_op, is_chief):
self._init_op = init_op
self._is_chief = is_chief
self._is_initialized_op = is_initialized_op
def after_create_session(self, session, _):
assert self._init_op.graph == ops.get_default_graph()
assert self._is_initialized_op.graph == self._init_op.graph
while True:
try:
if session.run(self._is_initialized_op):
break
elif self._is_chief:
session.run(self._init_op)
else:
time.sleep(1)
except RuntimeError as e:
logging.info(e)
class GMM(estimator.Estimator):
"""An estimator for GMM clustering."""
SCORES = 'scores'
LOG_LIKELIHOOD = 'loss'
ASSIGNMENTS = 'assignments'
def __init__(self,
num_clusters,
model_dir=None,
random_seed=0,
params='wmc',
initial_clusters='random',
covariance_type='full',
config=None):
"""Creates a model for running GMM training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
random_seed: Python integer. Seed for PRNG used to initialize centers.
params: Controls which parameters are updated in the training process.
Can contain any combination of "w" for weights, "m" for means,
and "c" for covars.
initial_clusters: specifies how to initialize the clusters for training.
See gmm_ops.gmm for the possible values.
covariance_type: one of "full", "diag".
config: See Estimator
"""
self._num_clusters = num_clusters
self._params = params
self._training_initial_clusters = initial_clusters
self._covariance_type = covariance_type
self._training_graph = None
self._random_seed = random_seed
super(GMM, self).__init__(
model_fn=self._model_builder(), model_dir=model_dir, config=config)
def predict_assignments(self, input_fn=None, batch_size=None, outputs=None):
"""See BaseEstimator.predict."""
results = self.predict(input_fn=input_fn,
batch_size=batch_size,
outputs=outputs)
for result in results:
yield result[GMM.ASSIGNMENTS]
def score(self, input_fn=None, batch_size=None, steps=None):
"""Predict total log-likelihood.
Args:
input_fn: see predict.
batch_size: see predict.
steps: see predict.
Returns:
Total log-likelihood.
"""
results = self.evaluate(input_fn=input_fn, batch_size=batch_size,
steps=steps)
return np.log(np.sum(np.exp(results[GMM.SCORES])))
def weights(self):
"""Returns the cluster weights."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_WEIGHT)
def clusters(self):
"""Returns cluster centers."""
clusters = checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
return np.squeeze(clusters, 1)
def covariances(self):
"""Returns the covariances."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE)
def _parse_tensor_or_dict(self, features):
if isinstance(features, dict):
return array_ops.concat([features[k] for k in sorted(features.keys())],
1)
return features
def _model_builder(self):
"""Creates a model function."""
def _model_fn(features, labels, mode, config):
"""Model function."""
assert labels is None, labels
(loss,
scores,
model_predictions,
training_op,
init_op,
is_initialized) = gmm_ops.gmm(self._parse_tensor_or_dict(features),
self._training_initial_clusters,
self._num_clusters, self._random_seed,
self._covariance_type,
self._params)
incr_step = state_ops.assign_add(training_util.get_global_step(), 1)
training_op = with_dependencies([training_op, incr_step], loss)
training_hooks = [_InitializeClustersHook(
init_op, is_initialized, config.is_chief)]
predictions = {
GMM.ASSIGNMENTS: model_predictions[0][0],
}
eval_metric_ops = {
GMM.SCORES: scores,
GMM.LOG_LIKELIHOOD: _streaming_sum(loss),
}
return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions,
eval_metric_ops=eval_metric_ops,
loss=loss, train_op=training_op,
training_hooks=training_hooks)
return _model_fn
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/factorization/python/ops/gmm.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for factorization_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.factorization.python.ops import factorization_ops
from tensorflow.contrib.factorization.python.ops import factorization_ops_test_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
INPUT_MATRIX = factorization_ops_test_utils.INPUT_MATRIX
np_matrix_to_tf_sparse = factorization_ops_test_utils.np_matrix_to_tf_sparse
class WalsModelTest(test.TestCase):
def sparse_input(self):
return np_matrix_to_tf_sparse(INPUT_MATRIX)
def count_rows(self, sp_input):
return math_ops.cast(
array_ops.shape(array_ops.unique(sp_input.indices[:, 0])[0])[0],
dtypes.float32)
def count_cols(self, sp_input):
return math_ops.cast(
array_ops.shape(array_ops.unique(sp_input.indices[:, 1])[0])[0],
dtypes.float32)
def calculate_loss_from_wals_model(self, wals_model, sp_inputs):
current_rows = embedding_ops.embedding_lookup(
wals_model.row_factors,
math_ops.range(wals_model._input_rows),
partition_strategy="div")
current_cols = embedding_ops.embedding_lookup(
wals_model.col_factors,
math_ops.range(wals_model._input_cols),
partition_strategy="div")
row_wts = embedding_ops.embedding_lookup(
wals_model._row_weights,
math_ops.range(wals_model._input_rows),
partition_strategy="div")
col_wts = embedding_ops.embedding_lookup(
wals_model._col_weights,
math_ops.range(wals_model._input_cols),
partition_strategy="div")
return factorization_ops_test_utils.calculate_loss(
sp_inputs, current_rows, current_cols, wals_model._regularization,
wals_model._unobserved_weight, row_wts, col_wts)
def setUp(self):
self.col_init = [
# shard 0
[
[-0.36444709, -0.39077035, -0.32528427], # pyformat line break
[1.19056475, 0.07231052, 2.11834812],
[0.93468881, -0.71099287, 1.91826844]
],
# shard 1
[[1.18160152, 1.52490723, -0.50015002],
[1.82574749, -0.57515913, -1.32810032]],
# shard 2
[[-0.15515432, -0.84675711, 0.13097958],
[-0.9246484, 0.69117504, 1.2036494]]
]
self.row_wts = [[0.1, 0.2, 0.3], [0.4, 0.5]]
self.col_wts = [[0.1, 0.2, 0.3], [0.4, 0.5], [0.6, 0.7]]
# Values of factor shards after running one iteration of row and column
# updates.
self._row_factors_0 = [
[0.097689, -0.219293, -0.020780], # pyformat line break
[0.50842, 0.64626, 0.22364],
[0.401159, -0.046558, -0.192854]
]
self._row_factors_1 = [[1.20597, -0.48025, 0.35582],
[1.5564, 1.2528, 1.0528]]
self._col_factors_0 = [
[2.4725, -1.2950, -1.9980], # pyformat line break
[0.44625, 1.50771, 1.27118],
[1.39801, -2.10134, 0.73572]
]
self._col_factors_1 = [[3.36509, -0.66595, -3.51208],
[0.57191, 1.59407, 1.33020]]
self._col_factors_2 = [[3.3459, -1.3341, -3.3008],
[0.57366, 1.83729, 1.26798]]
def _run_test_sum_weights(self, test_rows):
# test_rows: True to test row weights, False to test column weights.
num_rows = 5
num_cols = 5
unobserved_weight = 0.1
row_weights = [[8., 18., 28., 38., 48.]]
col_weights = [[90., 91., 92., 93., 94.]]
sparse_indices = [[0, 1], [2, 3], [4, 1]]
sparse_values = [666., 777., 888.]
unobserved = unobserved_weight * num_rows * num_cols
observed = 8. * 91. + 28. * 93. + 48. * 91.
# sparse_indices has three unique rows and two unique columns
observed *= num_rows / 3. if test_rows else num_cols / 2.
want_weight_sum = unobserved + observed
with ops.Graph().as_default(), self.cached_session() as sess:
wals_model = factorization_ops.WALSModel(
input_rows=num_rows,
input_cols=num_cols,
n_components=5,
unobserved_weight=unobserved_weight,
row_weights=row_weights,
col_weights=col_weights,
use_factors_weights_cache=False)
wals_model.initialize_op.run()
wals_model.worker_init.run()
update_factors = (wals_model.update_row_factors
if test_rows else wals_model.update_col_factors)
(_, _, _, _, sum_weights) = update_factors(
sp_input=sparse_tensor.SparseTensor(
indices=sparse_indices,
values=sparse_values,
dense_shape=[num_rows, num_cols]),
transpose_input=False)
got_weight_sum = sess.run(sum_weights)
self.assertNear(
got_weight_sum,
want_weight_sum,
err=.001,
msg="got weight sum [{}], want weight sum [{}]".format(
got_weight_sum, want_weight_sum))
def _run_test_process_input(self,
use_factors_weights_cache,
compute_loss=False):
with ops.Graph().as_default(), self.cached_session() as sess:
self._wals_inputs = self.sparse_input()
sp_feeder = array_ops.sparse_placeholder(dtypes.float32)
num_rows = 5
num_cols = 7
factor_dim = 3
wals_model = factorization_ops.WALSModel(
num_rows,
num_cols,
factor_dim,
num_row_shards=2,
num_col_shards=3,
regularization=0.01,
unobserved_weight=0.1,
col_init=self.col_init,
row_weights=self.row_wts,
col_weights=self.col_wts,
use_factors_weights_cache=use_factors_weights_cache)
wals_model.initialize_op.run()
wals_model.worker_init.run()
# Split input into multiple sparse tensors with scattered rows. Note that
# this split can be different than the factor sharding and the inputs can
# consist of non-consecutive rows. Each row needs to include all non-zero
# elements in that row.
sp_r0 = np_matrix_to_tf_sparse(INPUT_MATRIX, [0, 2]).eval()
sp_r1 = np_matrix_to_tf_sparse(INPUT_MATRIX, [1, 4], shuffle=True).eval()
sp_r2 = np_matrix_to_tf_sparse(INPUT_MATRIX, [3], shuffle=True).eval()
input_scattered_rows = [sp_r0, sp_r1, sp_r2]
# Test updating row factors.
# Here we feed in scattered rows of the input.
wals_model.row_update_prep_gramian_op.run()
wals_model.initialize_row_update_op.run()
(_, process_input_op, unregularized_loss, regularization,
_) = wals_model.update_row_factors(
sp_input=sp_feeder, transpose_input=False)
factor_loss = unregularized_loss + regularization
for inp in input_scattered_rows:
feed_dict = {sp_feeder: inp}
process_input_op.run(feed_dict=feed_dict)
row_factors = [x.eval() for x in wals_model.row_factors]
self.assertAllClose(row_factors[0], self._row_factors_0, atol=1e-3)
self.assertAllClose(row_factors[1], self._row_factors_1, atol=1e-3)
# Test row projection.
# Using the specified projection weights for the 2 row feature vectors.
# This is expected to reproduce the same row factors in the model as the
      # weights and feature vectors are identical to those used in model
# training.
projected_rows = wals_model.project_row_factors(
sp_input=sp_feeder,
transpose_input=False,
projection_weights=[0.2, 0.5])
# Don't specify the projection weight, so 1.0 will be used. The feature
# weights will be those specified in model.
projected_rows_no_weights = wals_model.project_row_factors(
sp_input=sp_feeder, transpose_input=False)
feed_dict = {
sp_feeder:
np_matrix_to_tf_sparse(INPUT_MATRIX, [1, 4], shuffle=False)
.eval()
}
self.assertAllClose(
projected_rows.eval(feed_dict=feed_dict),
[self._row_factors_0[1], self._row_factors_1[1]],
atol=1e-3)
self.assertAllClose(
projected_rows_no_weights.eval(feed_dict=feed_dict),
[[0.569082, 0.715088, 0.31777], [1.915879, 1.992677, 1.109057]],
atol=1e-3)
if compute_loss:
# Test loss computation after the row update
loss = sum(
sess.run(
factor_loss * self.count_rows(inp) / num_rows,
feed_dict={sp_feeder: inp}) for inp in input_scattered_rows)
true_loss = self.calculate_loss_from_wals_model(wals_model,
self._wals_inputs)
self.assertNear(
loss,
true_loss,
err=.001,
msg="After row update, computed loss [{}] does not match"
" true loss [{}]".format(loss, true_loss))
# Split input into multiple sparse tensors with scattered columns. Note
# that here the elements in the sparse tensors are not ordered and also
# do not need to consist of consecutive columns. However, each column
# needs to include all non-zero elements in that column.
sp_c0 = np_matrix_to_tf_sparse(INPUT_MATRIX, col_slices=[2, 0]).eval()
sp_c1 = np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[5, 3, 1], shuffle=True).eval()
sp_c2 = np_matrix_to_tf_sparse(INPUT_MATRIX, col_slices=[4, 6]).eval()
sp_c3 = np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[3, 6], shuffle=True).eval()
input_scattered_cols = [sp_c0, sp_c1, sp_c2, sp_c3]
input_scattered_cols_non_duplicate = [sp_c0, sp_c1, sp_c2]
# Test updating column factors.
# Here we feed in scattered columns of the input.
wals_model.col_update_prep_gramian_op.run()
wals_model.initialize_col_update_op.run()
(_, process_input_op, unregularized_loss, regularization,
_) = wals_model.update_col_factors(
sp_input=sp_feeder, transpose_input=False)
factor_loss = unregularized_loss + regularization
for inp in input_scattered_cols:
feed_dict = {sp_feeder: inp}
process_input_op.run(feed_dict=feed_dict)
col_factors = [x.eval() for x in wals_model.col_factors]
self.assertAllClose(col_factors[0], self._col_factors_0, atol=1e-3)
self.assertAllClose(col_factors[1], self._col_factors_1, atol=1e-3)
self.assertAllClose(col_factors[2], self._col_factors_2, atol=1e-3)
# Test column projection.
# Using the specified projection weights for the 3 column feature vectors.
# This is expected to reproduce the same column factors in the model as
      # the weights and feature vectors are identical to those used in model
# training.
projected_cols = wals_model.project_col_factors(
sp_input=sp_feeder,
transpose_input=False,
projection_weights=[0.6, 0.4, 0.2])
# Don't specify the projection weight, so 1.0 will be used. The feature
# weights will be those specified in model.
projected_cols_no_weights = wals_model.project_col_factors(
sp_input=sp_feeder, transpose_input=False)
feed_dict = {
sp_feeder:
np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[5, 3, 1], shuffle=False).eval()
}
self.assertAllClose(
projected_cols.eval(feed_dict=feed_dict), [
self._col_factors_2[0], self._col_factors_1[0],
self._col_factors_0[1]
],
atol=1e-3)
self.assertAllClose(
projected_cols_no_weights.eval(feed_dict=feed_dict),
[[3.471045, -1.250835, -3.598917], [3.585139, -0.487476, -3.852232],
[0.346433, 1.360644, 1.677121]],
atol=1e-3)
if compute_loss:
# Test loss computation after the column update.
loss = sum(
sess.run(
factor_loss * self.count_cols(inp) / num_cols,
feed_dict={sp_feeder: inp})
for inp in input_scattered_cols_non_duplicate)
true_loss = self.calculate_loss_from_wals_model(wals_model,
self._wals_inputs)
self.assertNear(
loss,
true_loss,
err=.001,
msg="After col update, computed loss [{}] does not match"
" true loss [{}]".format(loss, true_loss))
def _run_test_process_input_transposed(self,
use_factors_weights_cache,
compute_loss=False):
with ops.Graph().as_default(), self.cached_session() as sess:
self._wals_inputs = self.sparse_input()
sp_feeder = array_ops.sparse_placeholder(dtypes.float32)
num_rows = 5
num_cols = 7
factor_dim = 3
wals_model = factorization_ops.WALSModel(
num_rows,
num_cols,
factor_dim,
num_row_shards=2,
num_col_shards=3,
regularization=0.01,
unobserved_weight=0.1,
col_init=self.col_init,
row_weights=self.row_wts,
col_weights=self.col_wts,
use_factors_weights_cache=use_factors_weights_cache)
wals_model.initialize_op.run()
wals_model.worker_init.run()
# Split input into multiple SparseTensors with scattered rows.
# Here the inputs are transposed. But the same constraints as described in
# the previous non-transposed test case apply to these inputs (before they
# are transposed).
sp_r0_t = np_matrix_to_tf_sparse(
INPUT_MATRIX, [0, 3], transpose=True).eval()
sp_r1_t = np_matrix_to_tf_sparse(
INPUT_MATRIX, [4, 1], shuffle=True, transpose=True).eval()
sp_r2_t = np_matrix_to_tf_sparse(INPUT_MATRIX, [2], transpose=True).eval()
sp_r3_t = sp_r1_t
input_scattered_rows = [sp_r0_t, sp_r1_t, sp_r2_t, sp_r3_t]
input_scattered_rows_non_duplicate = [sp_r0_t, sp_r1_t, sp_r2_t]
# Test updating row factors.
# Here we feed in scattered rows of the input.
      # Note that the suffixes of the placeholder names follow the
      # lexicographical order of the test case names and then the order in
      # which they appear in the code.
wals_model.row_update_prep_gramian_op.run()
wals_model.initialize_row_update_op.run()
(_, process_input_op, unregularized_loss, regularization,
_) = wals_model.update_row_factors(
sp_input=sp_feeder, transpose_input=True)
factor_loss = unregularized_loss + regularization
for inp in input_scattered_rows:
feed_dict = {sp_feeder: inp}
process_input_op.run(feed_dict=feed_dict)
row_factors = [x.eval() for x in wals_model.row_factors]
self.assertAllClose(row_factors[0], self._row_factors_0, atol=1e-3)
self.assertAllClose(row_factors[1], self._row_factors_1, atol=1e-3)
# Test row projection.
# Using the specified projection weights for the 2 row feature vectors.
# This is expected to reproduce the same row factors in the model as the
# weights and feature vectors are identical to that used in model
# training.
projected_rows = wals_model.project_row_factors(
sp_input=sp_feeder,
transpose_input=True,
projection_weights=[0.5, 0.2])
# Don't specify the projection weight, so 1.0 will be used. The feature
      # weights will be those specified in the model.
projected_rows_no_weights = wals_model.project_row_factors(
sp_input=sp_feeder, transpose_input=True)
feed_dict = {
sp_feeder:
np_matrix_to_tf_sparse(
INPUT_MATRIX, [4, 1], shuffle=False, transpose=True).eval()
}
self.assertAllClose(
projected_rows.eval(feed_dict=feed_dict),
[self._row_factors_1[1], self._row_factors_0[1]],
atol=1e-3)
self.assertAllClose(
projected_rows_no_weights.eval(feed_dict=feed_dict),
[[1.915879, 1.992677, 1.109057], [0.569082, 0.715088, 0.31777]],
atol=1e-3)
if compute_loss:
# Test loss computation after the row update
loss = sum(
sess.run(
factor_loss * self.count_cols(inp) / num_rows,
feed_dict={sp_feeder: inp})
for inp in input_scattered_rows_non_duplicate)
true_loss = self.calculate_loss_from_wals_model(wals_model,
self._wals_inputs)
self.assertNear(
loss,
true_loss,
err=.001,
msg="After row update, computed loss [{}] does not match"
" true loss [{}]".format(loss, true_loss))
# Split input into multiple SparseTensors with scattered columns.
# Here the inputs are transposed. But the same constraints as described in
# the previous non-transposed test case apply to these inputs (before they
# are transposed).
sp_c0_t = np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[0, 1], transpose=True).eval()
sp_c1_t = np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[4, 2], transpose=True).eval()
sp_c2_t = np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[5], transpose=True, shuffle=True).eval()
sp_c3_t = np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[3, 6], transpose=True).eval()
sp_c4_t = sp_c2_t
input_scattered_cols = [sp_c0_t, sp_c1_t, sp_c2_t, sp_c3_t, sp_c4_t]
input_scattered_cols_non_duplicate = [sp_c0_t, sp_c1_t, sp_c2_t, sp_c3_t]
# Test updating column factors.
# Here we feed in scattered columns of the input.
wals_model.col_update_prep_gramian_op.run()
wals_model.initialize_col_update_op.run()
(_, process_input_op, unregularized_loss, regularization,
_) = wals_model.update_col_factors(
sp_input=sp_feeder, transpose_input=True)
factor_loss = unregularized_loss + regularization
for inp in input_scattered_cols:
feed_dict = {sp_feeder: inp}
process_input_op.run(feed_dict=feed_dict)
col_factors = [x.eval() for x in wals_model.col_factors]
self.assertAllClose(col_factors[0], self._col_factors_0, atol=1e-3)
self.assertAllClose(col_factors[1], self._col_factors_1, atol=1e-3)
self.assertAllClose(col_factors[2], self._col_factors_2, atol=1e-3)
# Test column projection.
# Using the specified projection weights for the 2 column feature vectors.
# This is expected to reproduce the same column factors in the model as
# the weights and feature vectors are identical to that used in model
# training.
projected_cols = wals_model.project_col_factors(
sp_input=sp_feeder,
transpose_input=True,
projection_weights=[0.4, 0.7])
# Don't specify the projection weight, so 1.0 will be used. The feature
      # weights will be those specified in the model.
projected_cols_no_weights = wals_model.project_col_factors(
sp_input=sp_feeder, transpose_input=True)
feed_dict = {sp_feeder: sp_c3_t}
self.assertAllClose(
projected_cols.eval(feed_dict=feed_dict),
[self._col_factors_1[0], self._col_factors_2[1]],
atol=1e-3)
self.assertAllClose(
projected_cols_no_weights.eval(feed_dict=feed_dict),
[[3.585139, -0.487476, -3.852232], [0.557937, 1.813907, 1.331171]],
atol=1e-3)
if compute_loss:
# Test loss computation after the col update
loss = sum(
sess.run(
factor_loss * self.count_rows(inp) / num_cols,
feed_dict={sp_feeder: inp})
for inp in input_scattered_cols_non_duplicate)
true_loss = self.calculate_loss_from_wals_model(wals_model,
self._wals_inputs)
self.assertNear(
loss,
true_loss,
err=.001,
msg="After col update, computed loss [{}] does not match"
" true loss [{}]".format(loss, true_loss))
  # Note that when row_weights and col_weights are 0, WALS gives results
  # identical to ALS (Alternating Least Squares). However, our implementation
  # does not handle the case of zero weights differently. Instead, when
  # row_weights and col_weights are set to None, we interpret that as the ALS
  # case and trigger the more efficient ALS updates.
# Here we test that those two give identical results.
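  # A minimal sketch of the two configurations being compared below (matrix
  # sizes are illustrative only):
  #   als = factorization_ops.WALSModel(5, 7, 3, row_weights=None,
  #                                     col_weights=None)  # efficient ALS path
  #   wals = factorization_ops.WALSModel(5, 7, 3, row_weights=0, col_weights=0)
  # After identical row and column updates, the factors of the two models
  # should agree.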
def _run_test_als(self, use_factors_weights_cache):
with ops.Graph().as_default(), self.cached_session():
self._wals_inputs = self.sparse_input()
col_init = np.random.rand(7, 3)
als_model = factorization_ops.WALSModel(
5,
7,
3,
col_init=col_init,
row_weights=None,
col_weights=None,
use_factors_weights_cache=use_factors_weights_cache)
als_model.initialize_op.run()
als_model.worker_init.run()
als_model.row_update_prep_gramian_op.run()
als_model.initialize_row_update_op.run()
process_input_op = als_model.update_row_factors(self._wals_inputs)[1]
process_input_op.run()
row_factors1 = [x.eval() for x in als_model.row_factors]
# Testing row projection. Projection weight doesn't matter in this case
# since the model is ALS special case.
als_projected_row_factors1 = als_model.project_row_factors(
self._wals_inputs).eval()
wals_model = factorization_ops.WALSModel(
5,
7,
3,
col_init=col_init,
row_weights=0,
col_weights=0,
use_factors_weights_cache=use_factors_weights_cache)
wals_model.initialize_op.run()
wals_model.worker_init.run()
wals_model.row_update_prep_gramian_op.run()
wals_model.initialize_row_update_op.run()
process_input_op = wals_model.update_row_factors(self._wals_inputs)[1]
process_input_op.run()
row_factors2 = [x.eval() for x in wals_model.row_factors]
for r1, r2 in zip(row_factors1, row_factors2):
self.assertAllClose(r1, r2, atol=1e-3)
self.assertAllClose(
als_projected_row_factors1,
[row for shard in row_factors2 for row in shard],
atol=1e-3)
# Here we test partial column updates.
sp_c = np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[2, 0], shuffle=True).eval()
sp_feeder = array_ops.sparse_placeholder(dtypes.float32)
feed_dict = {sp_feeder: sp_c}
als_model.col_update_prep_gramian_op.run()
als_model.initialize_col_update_op.run()
process_input_op = als_model.update_col_factors(sp_input=sp_feeder)[1]
process_input_op.run(feed_dict=feed_dict)
col_factors1 = [x.eval() for x in als_model.col_factors]
# Testing column projection. Projection weight doesn't matter in this case
# since the model is ALS special case.
als_projected_col_factors1 = als_model.project_col_factors(
np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[2, 0], shuffle=False)).eval()
feed_dict = {sp_feeder: sp_c}
wals_model.col_update_prep_gramian_op.run()
wals_model.initialize_col_update_op.run()
process_input_op = wals_model.update_col_factors(sp_input=sp_feeder)[1]
process_input_op.run(feed_dict=feed_dict)
col_factors2 = [x.eval() for x in wals_model.col_factors]
for c1, c2 in zip(col_factors1, col_factors2):
self.assertAllClose(c1, c2, rtol=5e-3, atol=1e-2)
self.assertAllClose(
als_projected_col_factors1, [col_factors2[0][2], col_factors2[0][0]],
atol=1e-2)
def _run_test_als_transposed(self, use_factors_weights_cache):
with ops.Graph().as_default(), self.cached_session():
self._wals_inputs = self.sparse_input()
col_init = np.random.rand(7, 3)
als_model = factorization_ops.WALSModel(
5,
7,
3,
col_init=col_init,
row_weights=None,
col_weights=None,
use_factors_weights_cache=use_factors_weights_cache)
als_model.initialize_op.run()
als_model.worker_init.run()
wals_model = factorization_ops.WALSModel(
5,
7,
3,
col_init=col_init,
row_weights=[0] * 5,
col_weights=[0] * 7,
use_factors_weights_cache=use_factors_weights_cache)
wals_model.initialize_op.run()
wals_model.worker_init.run()
sp_feeder = array_ops.sparse_placeholder(dtypes.float32)
# Here test partial row update with identical inputs but with transposed
# input for als.
sp_r_t = np_matrix_to_tf_sparse(
INPUT_MATRIX, [3, 1], transpose=True).eval()
sp_r = np_matrix_to_tf_sparse(INPUT_MATRIX, [3, 1]).eval()
feed_dict = {sp_feeder: sp_r_t}
als_model.row_update_prep_gramian_op.run()
als_model.initialize_row_update_op.run()
process_input_op = als_model.update_row_factors(
sp_input=sp_feeder, transpose_input=True)[1]
process_input_op.run(feed_dict=feed_dict)
# Only updated row 1 and row 3, so only compare these rows since others
# have randomly initialized values.
row_factors1 = [
als_model.row_factors[0].eval()[1], als_model.row_factors[0].eval()[3]
]
# Testing row projection. Projection weight doesn't matter in this case
# since the model is ALS special case. Note that the ordering of the
# returned results will be preserved as the input feature vectors
# ordering.
als_projected_row_factors1 = als_model.project_row_factors(
sp_input=sp_feeder, transpose_input=True).eval(feed_dict=feed_dict)
feed_dict = {sp_feeder: sp_r}
wals_model.row_update_prep_gramian_op.run()
wals_model.initialize_row_update_op.run()
process_input_op = wals_model.update_row_factors(sp_input=sp_feeder)[1]
process_input_op.run(feed_dict=feed_dict)
# Only updated row 1 and row 3, so only compare these rows since others
# have randomly initialized values.
row_factors2 = [
wals_model.row_factors[0].eval()[1],
wals_model.row_factors[0].eval()[3]
]
for r1, r2 in zip(row_factors1, row_factors2):
self.assertAllClose(r1, r2, atol=1e-3)
# Note that the ordering of the returned projection results is preserved
# as the input feature vectors ordering.
self.assertAllClose(
als_projected_row_factors1, [row_factors2[1], row_factors2[0]],
atol=1e-3)
def simple_train(self, model, inp, num_iterations):
"""Helper function to train model on inp for num_iterations."""
row_update_op = model.update_row_factors(sp_input=inp)[1]
col_update_op = model.update_col_factors(sp_input=inp)[1]
model.initialize_op.run()
model.worker_init.run()
for _ in xrange(num_iterations):
model.row_update_prep_gramian_op.run()
model.initialize_row_update_op.run()
row_update_op.run()
model.col_update_prep_gramian_op.run()
model.initialize_col_update_op.run()
col_update_op.run()
  # Trains an ALS model for a low-rank matrix and makes sure the product of
  # factors is close to the original input.
def _run_test_train_full_low_rank_als(self, use_factors_weights_cache):
rows = 15
cols = 11
dims = 3
with ops.Graph().as_default(), self.cached_session():
data = np.dot(np.random.rand(rows, 3), np.random.rand(
3, cols)).astype(np.float32) / 3.0
indices = [[i, j] for i in xrange(rows) for j in xrange(cols)]
values = data.reshape(-1)
inp = sparse_tensor.SparseTensor(indices, values, [rows, cols])
model = factorization_ops.WALSModel(
rows,
cols,
dims,
regularization=1e-5,
row_weights=None,
col_weights=None,
use_factors_weights_cache=use_factors_weights_cache)
self.simple_train(model, inp, 25)
row_factor = model.row_factors[0].eval()
col_factor = model.col_factors[0].eval()
self.assertAllClose(
data,
np.dot(row_factor, np.transpose(col_factor)),
rtol=0.01,
atol=0.01)
  # Trains a WALS model for a low-rank matrix and makes sure the product of
  # factors is close to the original input.
def _run_test_train_full_low_rank_wals(self, use_factors_weights_cache):
rows = 15
cols = 11
dims = 3
with ops.Graph().as_default(), self.cached_session():
data = np.dot(np.random.rand(rows, 3), np.random.rand(
3, cols)).astype(np.float32) / 3.0
indices = [[i, j] for i in xrange(rows) for j in xrange(cols)]
values = data.reshape(-1)
inp = sparse_tensor.SparseTensor(indices, values, [rows, cols])
model = factorization_ops.WALSModel(
rows,
cols,
dims,
regularization=1e-5,
row_weights=0,
col_weights=[0] * cols,
use_factors_weights_cache=use_factors_weights_cache)
self.simple_train(model, inp, 25)
row_factor = model.row_factors[0].eval()
col_factor = model.col_factors[0].eval()
self.assertAllClose(
data,
np.dot(row_factor, np.transpose(col_factor)),
rtol=0.01,
atol=0.01)
# Trains a WALS model for a partially observed low-rank matrix and makes
# sure the product of factors is reasonably close to the original input.
def _run_test_train_matrix_completion_wals(self, use_factors_weights_cache):
rows = 11
cols = 9
dims = 4
def keep_index(x):
return not (x[0] + x[1]) % 4
with ops.Graph().as_default(), self.cached_session():
row_wts = 0.1 + np.random.rand(rows)
col_wts = 0.1 + np.random.rand(cols)
data = np.dot(np.random.rand(rows, 3), np.random.rand(
3, cols)).astype(np.float32) / 3.0
indices = np.array(
list(
filter(keep_index,
[[i, j] for i in xrange(rows) for j in xrange(cols)])))
values = data[indices[:, 0], indices[:, 1]]
inp = sparse_tensor.SparseTensor(indices, values, [rows, cols])
model = factorization_ops.WALSModel(
rows,
cols,
dims,
unobserved_weight=0.01,
regularization=0.001,
row_weights=row_wts,
col_weights=col_wts,
use_factors_weights_cache=use_factors_weights_cache)
self.simple_train(model, inp, 25)
row_factor = model.row_factors[0].eval()
col_factor = model.col_factors[0].eval()
out = np.dot(row_factor, np.transpose(col_factor))
for i in xrange(rows):
for j in xrange(cols):
if keep_index([i, j]):
self.assertNear(
data[i][j], out[i][j], err=0.4, msg="%d, %d" % (i, j))
else:
self.assertNear(0, out[i][j], err=0.5, msg="%d, %d" % (i, j))
def test_process_input_with_cache(self):
self._run_test_process_input(True)
def test_process_input_without_cache(self):
self._run_test_process_input(False)
def test_process_input_transposed_with_cache(self):
self._run_test_process_input_transposed(True)
def test_process_input_transposed_without_cache(self):
self._run_test_process_input_transposed(False)
def test_als_with_cache(self):
self._run_test_als(True)
def test_als_without_cache(self):
self._run_test_als(False)
def test_als_transposed_with_cache(self):
self._run_test_als_transposed(True)
def test_als_transposed_without_cache(self):
self._run_test_als_transposed(False)
def test_train_full_low_rank_wals_with_cache(self):
self._run_test_train_full_low_rank_wals(True)
def test_train_full_low_rank_wals_without_cache(self):
self._run_test_train_full_low_rank_wals(False)
def test_train_matrix_completion_wals_with_cache(self):
self._run_test_train_matrix_completion_wals(True)
def test_train_matrix_completion_wals_without_cache(self):
self._run_test_train_matrix_completion_wals(False)
def test_loss_transposed_with_cache(self):
self._run_test_process_input_transposed(True, compute_loss=True)
def test_loss_transposed_without_cache(self):
self._run_test_process_input_transposed(False, compute_loss=True)
def test_loss_with_cache(self):
self._run_test_process_input(True, compute_loss=True)
def test_loss_without_cache(self):
self._run_test_process_input(False, compute_loss=True)
def test_sum_row_weights(self):
self._run_test_sum_weights(True)
def test_sum_col_weights(self):
self._run_test_sum_weights(False)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/factorization/python/ops/factorization_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for KMeans."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
import numpy as np
from sklearn.cluster import KMeans as SklearnKMeans
# pylint: disable=g-import-not-at-top
from tensorflow.contrib.factorization.python.ops import kmeans as kmeans_lib
from tensorflow.python.estimator import run_config
from tensorflow.python.feature_column import feature_column_lib as fc
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
FLAGS = flags.FLAGS
def normalize(x):
return x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))
def cosine_similarity(x, y):
return np.dot(normalize(x), np.transpose(normalize(y)))
def make_random_centers(num_centers, num_dims, center_norm=500):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * center_norm)
def make_random_points(centers, num_points, max_offset=20):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * max_offset)
return (centers[assignments] + offsets, assignments, np.add.reduce(
offsets * offsets, 1))
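# The helpers above are easiest to read with a small concrete example. The
# sketch below is illustrative only and is not used by any test in this file;
# it draws a few points around random centers and evaluates the
# cosine-similarity helper on them.
def _demo_point_generation_sketch():
  """Illustrative sketch of the random-point helpers defined above."""
  centers = make_random_centers(num_centers=3, num_dims=2)
  points, assignments, sq_offsets = make_random_points(centers, num_points=10)
  # Each point equals its assigned center plus a random offset; sq_offsets
  # holds the squared norm of that offset.
  sims = cosine_similarity(points, centers)  # shape (10, 3)
  return points, assignments, sq_offsets, sims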
class KMeansTestBase(test.TestCase):
def input_fn(self,
batch_size=None,
points=None,
randomize=None,
num_epochs=None):
"""Returns an input_fn that randomly selects batches from given points."""
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
if randomize is None:
randomize = (self.use_mini_batch and
self.mini_batch_steps_per_iteration <= 1)
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return input_lib.limit_epochs(x, num_epochs=num_epochs), None
if randomize:
indices = random_ops.random_uniform(
constant_op.constant([batch_size]),
minval=0,
maxval=num_points - 1,
dtype=dtypes.int32,
seed=10)
else:
# We need to cycle through the indices sequentially. We create a queue
# to maintain the list of indices.
q = data_flow_ops.FIFOQueue(num_points, dtypes.int32, ())
# Conditionally initialize the Queue.
def _init_q():
with ops.control_dependencies(
[q.enqueue_many(math_ops.range(num_points))]):
return control_flow_ops.no_op()
init_q = control_flow_ops.cond(q.size() <= 0, _init_q,
control_flow_ops.no_op)
with ops.control_dependencies([init_q]):
offsets = q.dequeue_many(batch_size)
with ops.control_dependencies([q.enqueue_many(offsets)]):
indices = array_ops.identity(offsets)
batch = array_ops.gather(x, indices)
return (input_lib.limit_epochs(batch, num_epochs=num_epochs), None)
return _fn
@staticmethod
def config(tf_random_seed):
return run_config.RunConfig().replace(tf_random_seed=tf_random_seed)
@property
def initial_clusters(self):
return kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT
@property
def batch_size(self):
return self.num_points
@property
def use_mini_batch(self):
return False
@property
def mini_batch_steps_per_iteration(self):
return 1
class KMeansTest(KMeansTestBase):
def setUp(self):
np.random.seed(3)
self.num_centers = 5
self.num_dims = 2
self.num_points = 1000
self.true_centers = make_random_centers(self.num_centers, self.num_dims)
self.points, _, self.scores = make_random_points(self.true_centers,
self.num_points)
self.true_score = np.add.reduce(self.scores)
def _kmeans(self, relative_tolerance=None):
return kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
random_seed=24,
relative_tolerance=relative_tolerance)
def test_clusters(self):
kmeans = self._kmeans()
kmeans.train(input_fn=self.input_fn(), steps=1)
clusters = kmeans.cluster_centers()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
kmeans = self._kmeans()
kmeans.train(input_fn=self.input_fn(), steps=1)
score1 = kmeans.score(input_fn=self.input_fn(batch_size=self.num_points))
steps = 10 * self.num_points // self.batch_size
kmeans.train(input_fn=self.input_fn(), steps=steps)
score2 = kmeans.score(input_fn=self.input_fn(batch_size=self.num_points))
self.assertTrue(score1 > score2)
self.assertNear(self.true_score, score2, self.true_score * 0.05)
def test_monitor(self):
if self.use_mini_batch:
# We don't test for use_mini_batch case since the loss value can be noisy.
return
kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(14),
random_seed=12,
relative_tolerance=1e-4)
kmeans.train(
input_fn=self.input_fn(),
# Force it to train until the relative tolerance monitor stops it.
steps=None)
score = kmeans.score(input_fn=self.input_fn(batch_size=self.num_points))
self.assertNear(self.true_score, score, self.true_score * 0.01)
def _infer_helper(self, kmeans, clusters, num_points):
points, true_assignments, true_offsets = make_random_points(
clusters, num_points)
input_fn = self.input_fn(batch_size=num_points, points=points, num_epochs=1)
# Test predict
assignments = list(kmeans.predict_cluster_index(input_fn))
self.assertAllEqual(assignments, true_assignments)
# Test score
score = kmeans.score(input_fn=lambda: (constant_op.constant(points), None))
self.assertNear(score, np.sum(true_offsets), 0.01 * score)
# Test transform
transform = list(kmeans.transform(input_fn))
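    # The expected distances below use the expansion
    # ||x - c||^2 = ||x||^2 - 2 * x.c + ||c||^2, clipped at zero to guard
    # against small negative values caused by floating-point round-off.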
true_transform = np.maximum(
0,
np.sum(np.square(points), axis=1, keepdims=True) -
2 * np.dot(points, np.transpose(clusters)) + np.transpose(
np.sum(np.square(clusters), axis=1, keepdims=True)))
self.assertAllClose(transform, true_transform, rtol=0.05, atol=10)
def test_infer(self):
kmeans = self._kmeans()
# Make a call to fit to initialize the cluster centers.
max_steps = 1
kmeans.train(input_fn=self.input_fn(), max_steps=max_steps)
clusters = kmeans.cluster_centers()
# Run inference on small datasets.
self._infer_helper(kmeans, clusters, 10)
self._infer_helper(kmeans, clusters, 1)
def _parse_feature_dict_helper(self, features, parsed_feature_dict):
# Perform a sanity check.
self.assertEqual(features.shape, parsed_feature_dict.shape)
self.assertEqual(features.dtype, parsed_feature_dict.dtype)
# Then check that running the tensor yields the original list of points.
with self.cached_session() as sess:
parsed_points = sess.run(parsed_feature_dict)
self.assertAllEqual(self.points, parsed_points)
def test_parse_features(self):
"""Tests the various behaviours of kmeans._parse_features_if_necessary."""
# No-op if a tensor is passed in.
features = constant_op.constant(self.points)
parsed_features = kmeans_lib._parse_features_if_necessary(features, None)
self.assertAllEqual(features, parsed_features)
# All values from a feature dict are transformed into a tensor.
feature_dict = {
'x': [[point[0]] for point in self.points],
'y': [[point[1]] for point in self.points]
}
parsed_feature_dict = kmeans_lib._parse_features_if_necessary(
feature_dict, None)
self._parse_feature_dict_helper(features, parsed_feature_dict)
# Only the feature_columns of a feature dict are transformed into a tensor.
feature_dict_with_extras = {
'foo': 'bar',
'x': [[point[0]] for point in self.points],
'baz': {'fizz': 'buzz'},
'y': [[point[1]] for point in self.points]
}
feature_columns = [fc.numeric_column(key='x'), fc.numeric_column(key='y')]
parsed_feature_dict = kmeans_lib._parse_features_if_necessary(
feature_dict_with_extras, feature_columns)
self._parse_feature_dict_helper(features, parsed_feature_dict)
class KMeansTestMultiStageInit(KMeansTestBase):
def test_random(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.train(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.cluster_centers()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_just_right(self):
points = np.array([[1, 2]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.train(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.cluster_centers()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_too_small(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
with self.assertRaisesOpError(AssertionError):
kmeans.train(
input_fn=self.input_fn(batch_size=4, points=points, randomize=False),
steps=1)
class MiniBatchKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansCosineDistanceTest(KMeansTestBase):
def setUp(self):
self.points = np.array(
[[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2], [0.1, 2.5], [0.2, 2],
[0.1, 3], [0.2, 4]],
dtype=np.float32)
self.num_points = self.points.shape[0]
self.true_centers = np.array(
[
normalize(
np.mean(normalize(self.points)[0:4, :], axis=0,
keepdims=True))[0],
normalize(
np.mean(normalize(self.points)[4:, :], axis=0,
keepdims=True))[0]
],
dtype=np.float32)
self.true_assignments = np.array([0] * 4 + [1] * 4)
self.true_score = len(self.points) - np.tensordot(
normalize(self.points), self.true_centers[self.true_assignments])
self.num_centers = 2
self.kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
def test_fit(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.train(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.cluster_centers())
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
def test_transform(self):
self.kmeans.train(input_fn=self.input_fn(), steps=10)
centers = normalize(self.kmeans.cluster_centers())
true_transform = 1 - cosine_similarity(self.points, centers)
transform = list(
self.kmeans.transform(
input_fn=self.input_fn(batch_size=self.num_points, num_epochs=1)))
self.assertAllClose(transform, true_transform, atol=1e-3)
def test_predict(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.train(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.cluster_centers())
assignments = list(
self.kmeans.predict_cluster_index(
input_fn=self.input_fn(num_epochs=1, batch_size=self.num_points)))
self.assertAllClose(
centers[assignments],
self.true_centers[self.true_assignments],
atol=1e-2)
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
score = self.kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points))
self.assertAllClose(score, self.true_score, atol=1e-2)
def test_predict_kmeans_plus_plus(self):
# Most points are concentrated near one center. KMeans++ is likely to find
# the less populated centers.
points = np.array(
[[2.5, 3.5], [2.5, 3.5], [-2, 3], [-2, 3], [-3, -3], [-3.1, -3.2],
[-2.8, -3.], [-2.9, -3.1], [-3., -3.1], [-3., -3.1], [-3.2, -3.],
[-3., -3.]],
dtype=np.float32)
true_centers = np.array(
[
normalize(
np.mean(normalize(points)[0:2, :], axis=0, keepdims=True))[0],
normalize(
np.mean(normalize(points)[2:4, :], axis=0, keepdims=True))[0],
normalize(np.mean(normalize(points)[4:, :], axis=0,
keepdims=True))[0]
],
dtype=np.float32)
true_assignments = [0] * 2 + [1] * 2 + [2] * 8
true_score = len(points) - np.tensordot(
normalize(points), true_centers[true_assignments])
kmeans = kmeans_lib.KMeansClustering(
3,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
kmeans.train(
input_fn=lambda: (constant_op.constant(points), None), steps=30)
centers = normalize(kmeans.cluster_centers())
self.assertAllClose(
sorted(centers.tolist()), sorted(true_centers.tolist()), atol=1e-2)
def _input_fn():
return (input_lib.limit_epochs(
constant_op.constant(points), num_epochs=1), None)
assignments = list(kmeans.predict_cluster_index(input_fn=_input_fn))
self.assertAllClose(
centers[assignments], true_centers[true_assignments], atol=1e-2)
score = kmeans.score(input_fn=lambda: (constant_op.constant(points), None))
self.assertAllClose(score, true_score, atol=1e-2)
class MiniBatchKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansBenchmark(benchmark.Benchmark):
"""Base class for benchmarks."""
def SetUp(self,
dimension=50,
num_clusters=50,
points_per_cluster=10000,
center_norm=500,
cluster_width=20):
np.random.seed(123456)
self.num_clusters = num_clusters
self.num_points = num_clusters * points_per_cluster
self.centers = make_random_centers(
self.num_clusters, dimension, center_norm=center_norm)
self.points, _, scores = make_random_points(
self.centers, self.num_points, max_offset=cluster_width)
self.score = float(np.sum(scores))
def _report(self, num_iters, start, end, scores):
print(scores)
self.report_benchmark(
iters=num_iters,
wall_time=(end - start) / num_iters,
extras={'true_sum_squared_distances': self.score,
'fit_scores': scores})
def _fit(self, num_iters=10):
pass
def benchmark_01_2dim_5center_500point(self):
self.SetUp(dimension=2, num_clusters=5, points_per_cluster=100)
self._fit()
def benchmark_02_20dim_20center_10kpoint(self):
self.SetUp(dimension=20, num_clusters=20, points_per_cluster=500)
self._fit()
def benchmark_03_100dim_50center_50kpoint(self):
self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000)
self._fit()
def benchmark_03_100dim_50center_50kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=50,
points_per_cluster=1000,
cluster_width=250)
self._fit()
def benchmark_04_100dim_500center_500kpoint(self):
self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000)
self._fit(num_iters=4)
def benchmark_05_100dim_500center_500kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=500,
points_per_cluster=1000,
cluster_width=250)
self._fit(num_iters=4)
class TensorflowKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting tensorflow KMeans: %d' % i)
tf_kmeans = kmeans_lib.KMeansClustering(
self.num_clusters,
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
kmeans_plus_plus_num_retries=int(math.log(self.num_clusters) + 2),
random_seed=i * 42,
relative_tolerance=1e-6,
config=self.config(3))
tf_kmeans.train(
input_fn=lambda: (constant_op.constant(self.points), None), steps=50)
_ = tf_kmeans.cluster_centers()
scores.append(
tf_kmeans.score(
input_fn=lambda: (constant_op.constant(self.points), None)))
self._report(num_iters, start, time.time(), scores)
class SklearnKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting sklearn KMeans: %d' % i)
sklearn_kmeans = SklearnKMeans(
n_clusters=self.num_clusters,
init='k-means++',
max_iter=50,
n_init=1,
tol=1e-4,
random_state=i * 42)
      sklearn_kmeans.fit(self.points)
scores.append(sklearn_kmeans.inertia_)
self._report(num_iters, start, time.time(), scores)
class KMeansTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(
capacity=10, dtypes=dtypes.float32, shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(
queue_runner.QueueRunner(queue, [enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
# Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
kmeans = kmeans_lib.KMeansClustering(5)
kmeans.train(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/factorization/python/ops/kmeans_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utils for factorization_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
INPUT_MATRIX = np.array(
[[0.1, 0.0, 0.2, 0.0, 0.4, 0.5, 0.0],
[0.0, 1.1, 0.0, 1.3, 1.4, 0.0, 1.6],
[2.0, 0.0, 0.0, 2.3, 0.0, 2.5, 0.0],
[3.0, 0.0, 3.2, 3.3, 0.0, 3.5, 0.0],
[0.0, 4.1, 0.0, 0.0, 4.4, 0.0, 4.6]]).astype(np.float32)
def remove_empty_rows_columns(np_matrix):
"""Simple util to remove empty rows and columns of a matrix.
Args:
np_matrix: A numpy array.
Returns:
A tuple consisting of:
mat: A numpy matrix obtained by removing empty rows and columns from
np_matrix.
nz_row_ids: A numpy array of the ids of non-empty rows, such that
nz_row_ids[i] is the old row index corresponding to new index i.
nz_col_ids: A numpy array of the ids of non-empty columns, such that
nz_col_ids[j] is the old column index corresponding to new index j.
"""
nz_row_ids = np.where(np.sum(np_matrix, axis=1) != 0)[0]
nz_col_ids = np.where(np.sum(np_matrix, axis=0) != 0)[0]
mat = np_matrix[np.ix_(nz_row_ids, nz_col_ids)]
return mat, nz_row_ids, nz_col_ids
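# A tiny illustrative example (not used by the tests) of what
# remove_empty_rows_columns does: the all-zero middle row and middle column of
# the toy matrix below are dropped, and the ids of the surviving rows and
# columns are returned.
def _demo_remove_empty_rows_columns():
  """Illustrative only; exercises remove_empty_rows_columns on a 3x3 matrix."""
  toy = np.array([[1.0, 0.0, 2.0],
                  [0.0, 0.0, 0.0],
                  [3.0, 0.0, 4.0]])
  mat, nz_row_ids, nz_col_ids = remove_empty_rows_columns(toy)
  # mat == [[1., 2.], [3., 4.]], nz_row_ids == [0, 2], nz_col_ids == [0, 2]
  return mat, nz_row_ids, nz_col_ids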
def np_matrix_to_tf_sparse(np_matrix,
row_slices=None,
col_slices=None,
transpose=False,
shuffle=False):
"""Simple util to slice non-zero np matrix elements as tf.SparseTensor."""
indices = np.nonzero(np_matrix)
# Only allow slices of whole rows or whole columns.
assert not (row_slices is not None and col_slices is not None)
if row_slices is not None:
selected_ind = np.concatenate(
[np.where(indices[0] == r)[0] for r in row_slices], 0)
indices = (indices[0][selected_ind], indices[1][selected_ind])
if col_slices is not None:
selected_ind = np.concatenate(
[np.where(indices[1] == c)[0] for c in col_slices], 0)
indices = (indices[0][selected_ind], indices[1][selected_ind])
if shuffle:
shuffled_ind = [x for x in range(len(indices[0]))]
random.shuffle(shuffled_ind)
indices = (indices[0][shuffled_ind], indices[1][shuffled_ind])
  if transpose:
    ind = np.concatenate((np.expand_dims(indices[1], 1),
                          np.expand_dims(indices[0], 1)), 1).astype(np.int64)
  else:
    ind = np.concatenate((np.expand_dims(indices[0], 1),
                          np.expand_dims(indices[1], 1)), 1).astype(np.int64)
  val = np_matrix[indices].astype(np.float32)
  if transpose:
    shape = np.array(
        [max(indices[1]) + 1, max(indices[0]) + 1]).astype(np.int64)
  else:
    shape = np.array(
        [max(indices[0]) + 1, max(indices[1]) + 1]).astype(np.int64)
return sparse_tensor.SparseTensor(ind, val, shape)
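# An illustrative sketch (not used by the tests) of slicing whole rows of
# INPUT_MATRIX into a SparseTensor with the helper above. Note that only one
# of row_slices/col_slices may be passed, as asserted in the helper.
def _demo_np_matrix_to_tf_sparse():
  """Illustrative only; builds a SparseTensor from rows 0 and 3."""
  sp = np_matrix_to_tf_sparse(INPUT_MATRIX, row_slices=[0, 3])
  # sp.indices holds the (row, col) positions of the non-zero entries of rows
  # 0 and 3, sp.values holds their values, and sp.dense_shape bounds them.
  return sp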
def calculate_loss(input_mat, row_factors, col_factors, regularization=None,
w0=1., row_weights=None, col_weights=None):
"""Calculates the loss of a given factorization.
  This uses a non-distributed method, different from the one implemented in
  the WALS model. The weight of an observed entry (i, j) (i.e. such that
  input_mat[i, j] is non-zero) is (w0 + row_weights[i] * col_weights[j]).
Args:
input_mat: The input matrix, a SparseTensor of rank 2.
row_factors: The row factors, a dense Tensor of rank 2.
col_factors: The col factors, a dense Tensor of rank 2.
regularization: the regularization coefficient, a scalar.
w0: the weight of unobserved entries. A scalar.
row_weights: A dense tensor of rank 1.
col_weights: A dense tensor of rank 1.
Returns:
The total loss.
"""
wr = (array_ops.expand_dims(row_weights, 1) if row_weights is not None
else constant_op.constant(1.))
wc = (array_ops.expand_dims(col_weights, 0) if col_weights is not None
else constant_op.constant(1.))
reg = (regularization if regularization is not None
else constant_op.constant(0.))
row_indices, col_indices = array_ops.split(input_mat.indices,
axis=1,
num_or_size_splits=2)
gathered_row_factors = array_ops.gather(row_factors, row_indices)
gathered_col_factors = array_ops.gather(col_factors, col_indices)
sp_approx_vals = array_ops.squeeze(math_ops.matmul(
gathered_row_factors, gathered_col_factors, adjoint_b=True))
sp_approx = sparse_tensor.SparseTensor(
indices=input_mat.indices,
values=sp_approx_vals,
dense_shape=input_mat.dense_shape)
sp_approx_sq = math_ops.square(sp_approx)
row_norm = math_ops.reduce_sum(math_ops.square(row_factors))
col_norm = math_ops.reduce_sum(math_ops.square(col_factors))
row_col_norm = math_ops.reduce_sum(math_ops.square(math_ops.matmul(
row_factors, col_factors, transpose_b=True)))
resid = sparse_ops.sparse_add(input_mat, sp_approx * (-1))
resid_sq = math_ops.square(resid)
loss = w0 * (
sparse_ops.sparse_reduce_sum(resid_sq) -
sparse_ops.sparse_reduce_sum(sp_approx_sq)
)
loss += (sparse_ops.sparse_reduce_sum(wr * (resid_sq * wc)) +
w0 * row_col_norm + reg * (row_norm + col_norm))
return loss.eval()
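# A small illustrative sketch (not part of these utils and not used by the
# tests) showing how calculate_loss can be evaluated for random factors of
# INPUT_MATRIX. A TF1-style graph and session are assumed, since calculate_loss
# calls loss.eval() and therefore needs a default session.
def _demo_calculate_loss():
  """Illustrative only; evaluates the WALS loss for random factors."""
  from tensorflow.python.client import session
  from tensorflow.python.framework import ops
  with ops.Graph().as_default(), session.Session():
    sp_input = np_matrix_to_tf_sparse(INPUT_MATRIX)
    row_factors = constant_op.constant(
        np.random.rand(5, 3).astype(np.float32))
    col_factors = constant_op.constant(
        np.random.rand(7, 3).astype(np.float32))
    row_weights = constant_op.constant(
        np.random.rand(5).astype(np.float32))
    col_weights = constant_op.constant(
        np.random.rand(7).astype(np.float32))
    return calculate_loss(sp_input, row_factors, col_factors,
                          regularization=0.01, w0=0.1,
                          row_weights=row_weights, col_weights=col_weights)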
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/factorization/python/ops/factorization_ops_test_utils.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Clustering Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_clustering_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.embedding_ops import embedding_lookup
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_clustering_ops import *
# pylint: enable=wildcard-import
# Euclidean distance between vectors U and V is defined as \\(||U - V||_F\\)
# which is the square root of the sum of the squared element-wise differences.
SQUARED_EUCLIDEAN_DISTANCE = 'squared_euclidean'
# Cosine distance between vectors U and V is defined as
# \\(1 - (U \dot V) / (||U||_F ||V||_F)\\)
COSINE_DISTANCE = 'cosine'
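# For example, with U = [3, 4] and V = [4, 3]:
#   squared Euclidean distance: (3 - 4)^2 + (4 - 3)^2 = 2
#   cosine distance: 1 - (3 * 4 + 4 * 3) / (5 * 5) = 1 - 24 / 25 = 0.04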
RANDOM_INIT = 'random'
KMEANS_PLUS_PLUS_INIT = 'kmeans_plus_plus'
KMC2_INIT = 'kmc2'
# The name of the variable holding the cluster centers. Used by the Estimator.
CLUSTERS_VAR_NAME = 'clusters'
class KMeans(object):
"""Creates the graph for k-means clustering."""
def __init__(self,
inputs,
num_clusters,
initial_clusters=RANDOM_INIT,
distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=False,
mini_batch_steps_per_iteration=1,
random_seed=0,
kmeans_plus_plus_num_retries=2,
kmc2_chain_length=200):
"""Creates an object for generating KMeans clustering graph.
This class implements the following variants of K-means algorithm:
If use_mini_batch is False, it runs standard full batch K-means. Each step
runs a single iteration of K-Means. This step can be run sharded across
multiple workers by passing a list of sharded inputs to this class. Note
however that a single step needs to process the full input at once.
If use_mini_batch is True, it runs a generalization of the mini-batch
K-means algorithm. It runs multiple iterations, where each iteration is
composed of mini_batch_steps_per_iteration steps. Two copies of cluster
centers are maintained: one that is updated at the end of each iteration,
and one that is updated every step. The first copy is used to compute
cluster allocations for each step, and for inference, while the second copy
is the one updated each step using the mini-batch update rule. After each
    iteration is complete, this second copy is copied back to the first copy.
Note that for use_mini_batch=True, when mini_batch_steps_per_iteration=1,
the algorithm reduces to the standard mini-batch algorithm. Also by setting
mini_batch_steps_per_iteration = num_inputs / batch_size, the algorithm
becomes an asynchronous version of the full-batch algorithm. Note however
that there is no guarantee by this implementation that each input is seen
exactly once per iteration. Also, different updates are applied
asynchronously without locking. So this asynchronous version may not behave
exactly like a full-batch version.
Args:
inputs: An input tensor or list of input tensors. It is assumed that the
data points have been previously randomly permuted.
num_clusters: An integer tensor specifying the number of clusters. This
argument is ignored if initial_clusters is a tensor or numpy array.
initial_clusters: Specifies the clusters used during initialization. One
of the following:
- a tensor or numpy array with the initial cluster centers.
- a function f(inputs, k) that returns up to k centers from `inputs`.
- "random": Choose centers randomly from `inputs`.
- "kmeans_plus_plus": Use kmeans++ to choose centers from `inputs`.
- "kmc2": Use the fast k-MC2 algorithm to choose centers from `inputs`.
In the last three cases, one batch of `inputs` may not yield
`num_clusters` centers, in which case initialization will require
multiple batches until enough centers are chosen. In the case of
"random" or "kmeans_plus_plus", if the input size is <= `num_clusters`
then the entire batch is chosen to be cluster centers.
distance_metric: Distance metric used for clustering. Supported options:
"squared_euclidean", "cosine".
use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume
full batch.
mini_batch_steps_per_iteration: Number of steps after which the updated
cluster centers are synced back to a master copy.
random_seed: Seed for PRNG used to initialize seeds.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample O(log(num_to_sample)) additional points.
kmc2_chain_length: Determines how many candidate points are used by the
        k-MC2 algorithm to produce one new cluster center. If a (mini-)batch
        contains fewer points, one new cluster center is generated from the
(mini-)batch.
Raises:
ValueError: An invalid argument was passed to initial_clusters or
distance_metric.
"""
if isinstance(initial_clusters, str) and initial_clusters not in [
RANDOM_INIT, KMEANS_PLUS_PLUS_INIT, KMC2_INIT
]:
raise ValueError(
"Unsupported initialization algorithm '%s'" % initial_clusters)
if distance_metric not in [SQUARED_EUCLIDEAN_DISTANCE, COSINE_DISTANCE]:
raise ValueError("Unsupported distance metric '%s'" % distance_metric)
self._inputs = inputs if isinstance(inputs, list) else [inputs]
self._num_clusters = num_clusters
self._initial_clusters = initial_clusters
self._distance_metric = distance_metric
self._use_mini_batch = use_mini_batch
self._mini_batch_steps_per_iteration = int(mini_batch_steps_per_iteration)
self._random_seed = random_seed
self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
self._kmc2_chain_length = kmc2_chain_length
@classmethod
def _distance_graph(cls, inputs, clusters, distance_metric):
"""Computes distance between each input and each cluster center.
Args:
inputs: list of input Tensors.
clusters: cluster Tensor.
distance_metric: distance metric used for clustering
Returns:
list of Tensors, where each element corresponds to each element in inputs.
The value is the distance of each row to all the cluster centers.
Currently only Euclidean distance and cosine distance are supported.
"""
assert isinstance(inputs, list)
if distance_metric == SQUARED_EUCLIDEAN_DISTANCE:
return cls._compute_euclidean_distance(inputs, clusters)
elif distance_metric == COSINE_DISTANCE:
return cls._compute_cosine_distance(
inputs, clusters, inputs_normalized=True)
else:
assert False, str(distance_metric)
@classmethod
def _compute_euclidean_distance(cls, inputs, clusters):
"""Computes Euclidean distance between each input and each cluster center.
Args:
inputs: list of input Tensors.
clusters: cluster Tensor.
Returns:
list of Tensors, where each element corresponds to each element in inputs.
The value is the distance of each row to all the cluster centers.
"""
output = []
for inp in inputs:
with ops.colocate_with(inp, ignore_existing=True):
# Computes Euclidean distance. Note the first and third terms are
# broadcast additions.
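        # With inp of shape [n, d] and clusters of shape [k, d], the three
        # terms below have shapes [n, 1], [n, k] and [1, k] respectively, and
        # broadcast together to an [n, k] matrix of squared distances.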
squared_distance = (
math_ops.reduce_sum(math_ops.square(inp), 1, keepdims=True) -
2 * math_ops.matmul(inp, clusters, transpose_b=True) +
array_ops.transpose(
math_ops.reduce_sum(
math_ops.square(clusters), 1, keepdims=True)))
output.append(squared_distance)
return output
@classmethod
def _compute_cosine_distance(cls, inputs, clusters, inputs_normalized=True):
"""Computes cosine distance between each input and each cluster center.
Args:
inputs: list of input Tensor.
clusters: cluster Tensor
inputs_normalized: if True, it assumes that inp and clusters are
normalized and computes the dot product which is equivalent to the cosine
distance. Else it L2 normalizes the inputs first.
Returns:
list of Tensors, where each element corresponds to each element in inp.
The value is the distance of each row to all the cluster centers.
"""
output = []
if not inputs_normalized:
with ops.colocate_with(clusters, ignore_existing=True):
clusters = nn_impl.l2_normalize(clusters, dim=1)
for inp in inputs:
with ops.colocate_with(inp, ignore_existing=True):
if not inputs_normalized:
inp = nn_impl.l2_normalize(inp, dim=1)
output.append(1 - math_ops.matmul(inp, clusters, transpose_b=True))
return output
def _infer_graph(self, inputs, clusters):
"""Maps input to closest cluster and the score.
Args:
inputs: list of input Tensors.
clusters: Tensor of cluster centers.
Returns:
List of tuple, where each value in tuple corresponds to a value in inp.
The tuple has following three elements:
all_scores: distance of each input to each cluster center.
score: distance of each input to closest cluster center.
cluster_idx: index of cluster center closest to the corresponding input.
"""
assert isinstance(inputs, list)
# Pairwise distances are used only by transform(). In all other cases, this
# sub-graph is not evaluated.
scores = self._distance_graph(inputs, clusters, self._distance_metric)
output = []
if (self._distance_metric == COSINE_DISTANCE and
not self._clusters_l2_normalized()):
      # For normalized vectors x and y, the squared Euclidean distance is
      # 2 * the cosine distance. We are using this fact and reusing the
# nearest_neighbors op.
# TODO(ands): Support COSINE distance in nearest_neighbors and remove
# this.
with ops.colocate_with(clusters, ignore_existing=True):
clusters = nn_impl.l2_normalize(clusters, dim=1)
for inp, score in zip(inputs, scores):
with ops.colocate_with(inp, ignore_existing=True):
(indices, distances) = gen_clustering_ops.nearest_neighbors(
inp, clusters, 1)
if self._distance_metric == COSINE_DISTANCE:
distances *= 0.5
output.append((score, array_ops.squeeze(distances, [-1]),
array_ops.squeeze(indices, [-1])))
return zip(*output)
def _clusters_l2_normalized(self):
"""Returns True if clusters centers are kept normalized."""
return (self._distance_metric == COSINE_DISTANCE and
(not self._use_mini_batch or
self._mini_batch_steps_per_iteration > 1))
def _create_variables(self, num_clusters):
"""Creates variables.
Args:
num_clusters: an integer Tensor providing the number of clusters.
Returns:
Tuple with following elements:
- cluster_centers: a Tensor for storing cluster centers
- cluster_centers_initialized: bool Variable indicating whether clusters
are initialized.
- cluster_counts: a Tensor for storing counts of points assigned to this
cluster. This is used by mini-batch training.
- cluster_centers_updated: Tensor representing copy of cluster centers
that are updated every step.
- update_in_steps: numbers of steps left before we sync
cluster_centers_updated back to cluster_centers.
"""
init_value = array_ops.constant([], dtype=dtypes.float32)
cluster_centers = variable_scope.variable(
init_value, name=CLUSTERS_VAR_NAME, validate_shape=False)
cluster_centers_initialized = variable_scope.variable(
False, dtype=dtypes.bool, name='initialized')
if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:
# Copy of cluster centers actively updated each step according to
# mini-batch update rule.
cluster_centers_updated = variable_scope.variable(
init_value, name='clusters_updated', validate_shape=False)
# How many steps till we copy the updated clusters to cluster_centers.
update_in_steps = variable_scope.variable(
self._mini_batch_steps_per_iteration,
dtype=dtypes.int64,
name='update_in_steps')
# Count of points assigned to cluster_centers_updated.
cluster_counts = variable_scope.variable(
array_ops.zeros([num_clusters], dtype=dtypes.int64))
else:
cluster_centers_updated = cluster_centers
update_in_steps = None
cluster_counts = (
variable_scope.variable(
array_ops.ones([num_clusters], dtype=dtypes.int64))
if self._use_mini_batch else None)
return (cluster_centers, cluster_centers_initialized, cluster_counts,
cluster_centers_updated, update_in_steps)
@classmethod
def _l2_normalize_data(cls, inputs):
"""Normalized the input data."""
output = []
for inp in inputs:
with ops.colocate_with(inp, ignore_existing=True):
output.append(nn_impl.l2_normalize(inp, dim=1))
return output
def training_graph(self):
"""Generate a training graph for kmeans algorithm.
This returns, among other things, an op that chooses initial centers
(init_op), a boolean variable that is set to True when the initial centers
are chosen (cluster_centers_initialized), and an op to perform either an
entire Lloyd iteration or a mini-batch of a Lloyd iteration (training_op).
The caller should use these components as follows. A single worker should
execute init_op multiple times until cluster_centers_initialized becomes
True. Then multiple workers may execute training_op any number of times.
Returns:
A tuple consisting of:
all_scores: A matrix (or list of matrices) of dimensions (num_input,
num_clusters) where the value is the distance of an input vector and a
cluster center.
cluster_idx: A vector (or list of vectors). Each element in the vector
corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
scores: Similar to cluster_idx but specifies the distance to the
assigned cluster instead.
cluster_centers_initialized: scalar indicating whether clusters have been
initialized.
init_op: an op to initialize the clusters.
training_op: an op that runs an iteration of training.
"""
# Implementation of kmeans.
if (isinstance(self._initial_clusters, str) or
callable(self._initial_clusters)):
initial_clusters = self._initial_clusters
num_clusters = ops.convert_to_tensor(self._num_clusters)
else:
initial_clusters = ops.convert_to_tensor(self._initial_clusters)
num_clusters = array_ops.shape(initial_clusters)[0]
inputs = self._inputs
(cluster_centers_var, cluster_centers_initialized, total_counts,
cluster_centers_updated,
update_in_steps) = self._create_variables(num_clusters)
init_op = _InitializeClustersOpFactory(
self._inputs, num_clusters, initial_clusters, self._distance_metric,
self._random_seed, self._kmeans_plus_plus_num_retries,
self._kmc2_chain_length, cluster_centers_var, cluster_centers_updated,
cluster_centers_initialized).op()
cluster_centers = cluster_centers_var
if self._distance_metric == COSINE_DISTANCE:
inputs = self._l2_normalize_data(inputs)
if not self._clusters_l2_normalized():
cluster_centers = nn_impl.l2_normalize(cluster_centers, dim=1)
all_scores, scores, cluster_idx = self._infer_graph(inputs, cluster_centers)
if self._use_mini_batch:
sync_updates_op = self._mini_batch_sync_updates_op(
update_in_steps, cluster_centers_var, cluster_centers_updated,
total_counts)
assert sync_updates_op is not None
with ops.control_dependencies([sync_updates_op]):
training_op = self._mini_batch_training_op(
inputs, cluster_idx, cluster_centers_updated, total_counts)
else:
assert cluster_centers == cluster_centers_var
training_op = self._full_batch_training_op(
inputs, num_clusters, cluster_idx, cluster_centers_var)
return (all_scores, cluster_idx, scores, cluster_centers_initialized,
init_op, training_op)
def _mini_batch_sync_updates_op(self, update_in_steps, cluster_centers_var,
cluster_centers_updated, total_counts):
if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:
assert update_in_steps is not None
with ops.colocate_with(update_in_steps, ignore_existing=True):
def _f():
          # Note that there is a race condition here, so we do a best-effort
          # update here. We reset update_in_steps first so that other workers
# don't duplicate the updates. Also we update cluster_center_vars
# before resetting total_counts to avoid large updates to
# cluster_centers_updated based on partially updated
# cluster_center_vars.
with ops.control_dependencies([
state_ops.assign(update_in_steps,
self._mini_batch_steps_per_iteration - 1)
]):
with ops.colocate_with(
cluster_centers_updated, ignore_existing=True):
if self._distance_metric == COSINE_DISTANCE:
cluster_centers = nn_impl.l2_normalize(
cluster_centers_updated, dim=1)
else:
cluster_centers = cluster_centers_updated
with ops.colocate_with(cluster_centers_var, ignore_existing=True):
with ops.control_dependencies(
[state_ops.assign(cluster_centers_var, cluster_centers)]):
with ops.colocate_with(None, ignore_existing=True):
with ops.control_dependencies([
state_ops.assign(total_counts,
array_ops.zeros_like(total_counts))
]):
return array_ops.identity(update_in_steps)
return control_flow_ops.cond(
update_in_steps <= 0, _f,
lambda: state_ops.assign_sub(update_in_steps, 1))
else:
return control_flow_ops.no_op()
def _mini_batch_training_op(self, inputs, cluster_idx_list, cluster_centers,
total_counts):
"""Creates an op for training for mini batch case.
Args:
inputs: list of input Tensors.
cluster_idx_list: A vector (or list of vectors). Each element in the
vector corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
cluster_centers: Tensor Ref of cluster centers.
total_counts: Tensor Ref of cluster counts.
Returns:
An op for doing an update of mini-batch k-means.
"""
update_ops = []
for inp, cluster_idx in zip(inputs, cluster_idx_list):
with ops.colocate_with(inp, ignore_existing=True):
assert total_counts is not None
cluster_idx = array_ops.reshape(cluster_idx, [-1])
# Dedupe the unique ids of cluster_centers being updated so that updates
# can be locally aggregated.
unique_ids, unique_idx = array_ops.unique(cluster_idx)
num_unique_cluster_idx = array_ops.size(unique_ids)
# Fetch the old values of counts and cluster_centers.
with ops.colocate_with(total_counts, ignore_existing=True):
old_counts = array_ops.gather(total_counts, unique_ids)
# TODO(agarwal): This colocation seems to run into problems. Fix it.
with ops.colocate_with(cluster_centers, ignore_existing=True):
old_cluster_centers = array_ops.gather(cluster_centers, unique_ids)
# Locally aggregate the increment to counts.
count_updates = math_ops.unsorted_segment_sum(
array_ops.ones_like(unique_idx, dtype=total_counts.dtype),
unique_idx, num_unique_cluster_idx)
# Locally compute the sum of inputs mapped to each id.
# For a cluster with old cluster value x, old count n, and with data
# d_1,...d_k newly assigned to it, we recompute the new value as
# \\(x += (sum_i(d_i) - k * x) / (n + k)\\).
# Compute \\(sum_i(d_i)\\), see comment above.
cluster_center_updates = math_ops.unsorted_segment_sum(
inp, unique_idx, num_unique_cluster_idx)
# Shape to enable broadcasting count_updates and learning_rate to inp.
# It extends the shape with 1's to match the rank of inp.
broadcast_shape = array_ops.concat([
array_ops.reshape(num_unique_cluster_idx, [1]),
array_ops.ones(
array_ops.reshape(array_ops.rank(inp) - 1, [1]),
dtype=dtypes.int32)
], 0)
# Subtract k * x, see comment above.
cluster_center_updates -= math_ops.cast(
array_ops.reshape(count_updates, broadcast_shape),
inp.dtype) * old_cluster_centers
learning_rate = math_ops.reciprocal(
math_ops.cast(old_counts + count_updates, inp.dtype))
learning_rate = array_ops.reshape(learning_rate, broadcast_shape)
# scale by 1 / (n + k), see comment above.
cluster_center_updates *= learning_rate
# Apply the updates.
update_counts = state_ops.scatter_add(total_counts, unique_ids,
count_updates)
update_cluster_centers = state_ops.scatter_add(
cluster_centers, unique_ids, cluster_center_updates)
update_ops.extend([update_counts, update_cluster_centers])
return control_flow_ops.group(*update_ops)
def _full_batch_training_op(self, inputs, num_clusters, cluster_idx_list,
cluster_centers):
"""Creates an op for training for full batch case.
Args:
inputs: list of input Tensors.
num_clusters: an integer Tensor providing the number of clusters.
cluster_idx_list: A vector (or list of vectors). Each element in the
vector corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
cluster_centers: Tensor Ref of cluster centers.
Returns:
      An op for doing a single update of full-batch k-means.
"""
cluster_sums = []
cluster_counts = []
epsilon = constant_op.constant(1e-6, dtype=inputs[0].dtype)
for inp, cluster_idx in zip(inputs, cluster_idx_list):
with ops.colocate_with(inp, ignore_existing=True):
cluster_sums.append(
math_ops.unsorted_segment_sum(inp, cluster_idx, num_clusters))
cluster_counts.append(
math_ops.unsorted_segment_sum(
array_ops.reshape(
array_ops.ones(
array_ops.reshape(array_ops.shape(inp)[0], [-1])),
[-1, 1]), cluster_idx, num_clusters))
with ops.colocate_with(cluster_centers, ignore_existing=True):
new_clusters_centers = math_ops.add_n(cluster_sums) / (
math_ops.cast(math_ops.add_n(cluster_counts), cluster_sums[0].dtype) +
epsilon)
if self._clusters_l2_normalized():
new_clusters_centers = nn_impl.l2_normalize(new_clusters_centers, dim=1)
return state_ops.assign(cluster_centers, new_clusters_centers)
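# A minimal, self-contained NumPy sketch (not used by the ops above) of the
# incremental update rule documented in KMeans._mini_batch_training_op: for a
# center with old value x and old count n that receives k new points
# d_1, ..., d_k, the update x += (sum_i(d_i) - k * x) / (n + k) is exactly the
# running mean of all points assigned to that center so far. The helper name
# and the sample sizes below are illustrative only.
def _mini_batch_update_sketch():
  import numpy as np  # Local import: this sketch is self-contained.
  rng = np.random.RandomState(0)
  n, k, dims = 7, 3, 2
  old_points = rng.randn(n, dims)  # Points previously assigned to the center.
  new_points = rng.randn(k, dims)  # Points assigned in this mini-batch.
  x = old_points.mean(axis=0)  # Old center value: mean of the old points.
  # Incremental update, mirroring the comments in _mini_batch_training_op.
  x_updated = x + (new_points.sum(axis=0) - k * x) / (n + k)
  # Direct recomputation: mean over all points seen so far.
  x_direct = np.concatenate([old_points, new_points], axis=0).mean(axis=0)
  np.testing.assert_allclose(x_updated, x_direct)
  return x_updated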
class _InitializeClustersOpFactory(object):
"""Internal class to create the op to initialize the clusters.
The op performs this algorithm (see constructor args):
num_remaining = num_clusters - length(cluster_centers)
if num_remaining == 0:
assert that cluster_centers_initialized is true
else:
assert that num_remaining > 0
new_centers = choose up to num_remaining initial centers
l2-normalize new_centers if using cosine distance
all_centers = concat(cluster_centers, new_centers)
cluster_centers := all_centers
if there is a cluster_centers_updated variable:
cluster_centers_updated := cluster_centers
num_now_remaining = num_clusters - length(cluster_centers)
if num_now_remaining == 0:
cluster_centers_initialized := true
"""
# TODO(ccolby): Refactor this class so that kmc2 isn't so much a special case.
def __init__(self, inputs, num_clusters, initial_clusters, distance_metric,
random_seed, kmeans_plus_plus_num_retries, kmc2_chain_length,
cluster_centers, cluster_centers_updated,
cluster_centers_initialized):
"""Creates an op factory.
Args:
inputs: See KMeans constructor.
num_clusters: An integer Tensor providing the number of clusters.
initial_clusters: See KMeans constructor.
distance_metric: See KMeans constructor.
random_seed: See KMeans constructor.
kmeans_plus_plus_num_retries: See KMeans constructor.
kmc2_chain_length: See KMeans constructor.
cluster_centers: The TF variable holding the initial centers. It may
already contain some centers when the op is executed.
cluster_centers_updated: A second TF variable to hold a copy of the
initial centers, used for full-batch mode. In mini-batch mode,
cluster_centers_updated is the same variable as cluster_centers.
cluster_centers_initialized: A boolean TF variable that will be set
to true when all the initial centers have been chosen.
"""
# All of these instance variables are constants.
self._inputs = inputs
self._num_clusters = num_clusters
self._initial_clusters = initial_clusters
self._distance_metric = distance_metric
self._random_seed = random_seed
self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
self._kmc2_chain_length = kmc2_chain_length
self._cluster_centers = cluster_centers
self._cluster_centers_updated = cluster_centers_updated
self._cluster_centers_initialized = cluster_centers_initialized
self._num_selected = array_ops.shape(self._cluster_centers)[0]
self._num_remaining = self._num_clusters - self._num_selected
self._num_data = math_ops.add_n(
[array_ops.shape(i)[0] for i in self._inputs])
def _random(self):
indices = random_ops.random_uniform(
array_ops.reshape(self._num_remaining, [-1]),
minval=0,
maxval=math_ops.cast(self._num_data, dtypes.int64),
seed=self._random_seed,
dtype=dtypes.int64)
return embedding_lookup(self._inputs, indices, partition_strategy='div')
def _kmeans_plus_plus(self):
# Points from only the first shard are used for initializing centers.
# TODO(ands): Use all points.
inp = self._inputs[0]
if self._distance_metric == COSINE_DISTANCE:
inp = nn_impl.l2_normalize(inp, dim=1)
return gen_clustering_ops.kmeans_plus_plus_initialization(
inp,
math_ops.cast(self._num_remaining, dtypes.int64),
self._random_seed,
self._kmeans_plus_plus_num_retries)
def _kmc2_multiple_centers(self):
"""Adds new initial cluster centers using the k-MC2 algorithm.
In each call to the op, the provided batch is split into subsets based on
the specified `kmc2_chain_length`. On each subset, a single Markov chain of
    the k-MC2 algorithm is used to add *one* new cluster center. If there
    are fewer than `kmc2_chain_length` points in the subset, a single center is
added using one Markov chain on the full input. It is assumed that the
provided batch has previously been randomly permuted. Otherwise, k-MC2 may
return suboptimal centers.
Returns:
An op that adds new cluster centers.
"""
# The op only operates on the first shard of data.
first_shard = self._inputs[0]
# Number of points in the input that can be used.
batch_size = array_ops.shape(first_shard)[0]
# Maximum number of subsets such that the size of each subset is at least
# `kmc2_chain_length`. Final subsets may be larger.
max_to_sample = math_ops.cast(
batch_size / self._kmc2_chain_length, dtype=dtypes.int32)
# We sample at least one new center and at most all remaining centers.
num_to_sample = math_ops.maximum(
math_ops.minimum(self._num_remaining, max_to_sample), 1)
def _cond(i, _):
"""Stopping condition for the while loop."""
return math_ops.less(i, num_to_sample)
def _body(i, _):
"""Body that adds a single new center based on a subset."""
def _sample_random():
"""Returns a random point as a cluster center."""
# By assumption the batch is reshuffled and _sample_random is always
# called for i=0. Hence, we simply return the first point.
new_center = array_ops.reshape(first_shard[0], [1, -1])
if self._distance_metric == COSINE_DISTANCE:
new_center = nn_impl.l2_normalize(new_center, dim=1)
return new_center
def _sample_kmc2_chain():
"""Returns previous centers as well as a new center sampled using k-MC2.
"""
# Extract the subset from the underlying batch.
start = i * self._kmc2_chain_length
end = start + self._kmc2_chain_length
subset = first_shard[start:end]
# Compute the distances from points in the subset to previous centers.
_, distances = gen_clustering_ops.nearest_neighbors(
subset, self._cluster_centers, 1)
# Sample index of new center using k-MC2 Markov chain.
new_center_index = gen_clustering_ops.kmc2_chain_initialization(
array_ops.squeeze(distances), self._random_seed)
# Extract actual new center.
newly_sampled_center = array_ops.reshape(subset[new_center_index],
[1, -1])
# Return concatenation with previously sampled centers.
if self._distance_metric == COSINE_DISTANCE:
newly_sampled_center = nn_impl.l2_normalize(
newly_sampled_center, dim=1)
return array_ops.concat([self._cluster_centers, newly_sampled_center],
0)
# Obtain a random point if there are no previously sampled centers.
# Otherwise, construct a k-MC2 Markov chain.
new_centers = control_flow_ops.cond(
math_ops.equal(self._num_selected, 0), _sample_random,
_sample_kmc2_chain)
# Assign new cluster centers to underlying variable.
assigned_centers = state_ops.assign(
self._cluster_centers, new_centers, validate_shape=False)
if self._cluster_centers_updated is not self._cluster_centers:
assigned_centers = state_ops.assign(
self._cluster_centers_updated,
assigned_centers,
validate_shape=False)
return i + 1, self._num_clusters - array_ops.shape(assigned_centers)[0]
# Add num_to_sample new data points.
_, num_remaining = control_flow_ops.while_loop(_cond, _body, [0, 0])
return num_remaining
def _greedy_batch_sampler(self, sampler):
# If the input dataset size is smaller than the number of centers
# remaining, choose the entire input dataset as centers. This can happen
# with mini-batch. Otherwise, sample the batch according to the provided
# sampler.
return control_flow_ops.cond(self._num_data <= self._num_remaining,
lambda: array_ops.concat(self._inputs, 0),
sampler)
def _single_batch_sampler(self, sampler):
# Enforce that there are at least as many data points as centers
# remaining. This gives the provided sampler the chance to select all
# remaining centers from a single batch.
with ops.control_dependencies(
[check_ops.assert_greater_equal(self._num_data, self._num_remaining)]):
return sampler()
def _choose_initial_centers(self):
if isinstance(self._initial_clusters, str):
if self._initial_clusters == RANDOM_INIT:
return self._greedy_batch_sampler(self._random)
else: # self._initial_clusters == KMEANS_PLUS_PLUS_INIT
return self._single_batch_sampler(self._kmeans_plus_plus)
elif callable(self._initial_clusters):
return self._initial_clusters(self._inputs, self._num_remaining)
else:
with ops.control_dependencies([
check_ops.assert_equal(self._num_remaining,
array_ops.shape(self._initial_clusters)[0])
]):
return self._initial_clusters
def _add_new_centers(self):
"""Adds some centers and returns the number of centers remaining."""
new_centers = self._choose_initial_centers()
if self._distance_metric == COSINE_DISTANCE:
new_centers = nn_impl.l2_normalize(new_centers, dim=1)
# If cluster_centers is empty, it doesn't have the right shape for concat.
all_centers = control_flow_ops.cond(
math_ops.equal(self._num_selected, 0), lambda: new_centers,
lambda: array_ops.concat([self._cluster_centers, new_centers], 0))
# TODO(ccolby): De-dupe all_centers?
a = state_ops.assign(
self._cluster_centers, all_centers, validate_shape=False)
if self._cluster_centers_updated is not self._cluster_centers:
a = state_ops.assign(
self._cluster_centers_updated, a, validate_shape=False)
return self._num_clusters - array_ops.shape(a)[0]
def _initialize(self):
with ops.control_dependencies([
check_ops.assert_positive(self._num_remaining),
]):
if self._initial_clusters == KMC2_INIT:
num_now_remaining = self._kmc2_multiple_centers()
else:
num_now_remaining = self._add_new_centers()
return control_flow_ops.cond(
math_ops.equal(num_now_remaining, 0),
lambda: state_ops.assign(self._cluster_centers_initialized, True),
control_flow_ops.no_op)
def op(self):
"""Returns the cluster initializer op."""
return control_flow_ops.cond(
math_ops.equal(self._num_remaining, 0),
lambda: check_ops.assert_equal(self._cluster_centers_initialized, True),
self._initialize)
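# A minimal sketch (not part of this module's API) of how the ops produced by
# KMeans.training_graph() are typically driven, following the protocol in the
# training_graph and _InitializeClustersOpFactory docstrings: run init_op until
# cluster_centers_initialized is True, then run training_op repeatedly. It
# assumes a TF 1.x session environment; `points` is expected to be a float32
# Tensor (or list of Tensors) of input data, and the argument names and
# defaults below are illustrative.
def _training_graph_driver_sketch(points, num_clusters=10, num_steps=100):
  import tensorflow.compat.v1 as tf  # Local import: sketch only.
  kmeans = KMeans(points, num_clusters, use_mini_batch=True)
  (_, cluster_idx, _, initialized, init_op, train_op) = kmeans.training_graph()
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    while not sess.run(initialized):  # A single worker drives initialization.
      sess.run(init_op)
    for _ in range(num_steps):  # Then any worker may run training steps.
      sess.run(train_op)
    return sess.run(cluster_idx)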
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/factorization/python/ops/clustering_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gaussian mixture models Operations."""
# TODO(xavigonzalvo): Factor out covariance matrix operations to make
# code reusable for different types (e.g. diag).
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.embedding_ops import embedding_lookup
# Machine epsilon.
MEPS = np.finfo(float).eps
FULL_COVARIANCE = 'full'
DIAG_COVARIANCE = 'diag'
def _covariance(x, diag):
"""Defines the covariance operation of a matrix.
Args:
x: a matrix Tensor. Dimension 0 should contain the number of examples.
diag: if True, it computes the diagonal covariance.
Returns:
    A Tensor representing the covariance of x. In the case of a diagonal
    covariance matrix, only the diagonal values are returned.
"""
num_points = math_ops.cast(array_ops.shape(x)[0], dtypes.float32)
x -= math_ops.reduce_mean(x, 0, keepdims=True)
if diag:
cov = math_ops.reduce_sum(
math_ops.square(x), 0, keepdims=True) / (num_points - 1)
else:
cov = math_ops.matmul(x, x, transpose_a=True) / (num_points - 1)
return cov
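# A minimal NumPy sketch (not used by this module) checking the formula in
# _covariance above: center the data, then take x^T x / (n - 1) for the full
# covariance, or the column-wise sum of squares over (n - 1) for the diagonal.
# The sample sizes below are illustrative.
def _covariance_numpy_sketch():
  rng = np.random.RandomState(0)
  x = rng.randn(100, 3)
  centered = x - x.mean(axis=0, keepdims=True)
  full = centered.T.dot(centered) / (x.shape[0] - 1)
  diag = (centered ** 2).sum(axis=0) / (x.shape[0] - 1)
  np.testing.assert_allclose(full, np.cov(x, rowvar=False))
  np.testing.assert_allclose(diag, np.diag(np.cov(x, rowvar=False)))
  return full, diag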
def _init_clusters_random(data, num_clusters, random_seed):
"""Does random initialization of clusters.
Args:
data: a list of Tensors with a matrix of data, each row is an example.
num_clusters: an integer with the number of clusters.
    random_seed: Seed for the PRNG used to sample the initial clusters.
Returns:
A Tensor with num_clusters random rows of data.
"""
assert isinstance(data, list)
num_data = math_ops.add_n([array_ops.shape(inp)[0] for inp in data])
with ops.control_dependencies(
[check_ops.assert_less_equal(num_clusters, num_data)]):
indices = random_ops.random_uniform(
[num_clusters],
minval=0,
maxval=math_ops.cast(num_data, dtypes.int64),
seed=random_seed,
dtype=dtypes.int64)
indices %= math_ops.cast(num_data, dtypes.int64)
clusters_init = embedding_lookup(data, indices, partition_strategy='div')
return clusters_init
class GmmAlgorithm(object):
"""Tensorflow Gaussian mixture model clustering class."""
CLUSTERS_WEIGHT = 'alphas'
CLUSTERS_VARIABLE = 'clusters'
CLUSTERS_COVS_VARIABLE = 'clusters_covs'
def __init__(self,
data,
num_classes,
initial_means=None,
params='wmc',
covariance_type=FULL_COVARIANCE,
random_seed=0):
"""Constructor.
Args:
data: a list of Tensors with data, each row is a new example.
num_classes: number of clusters.
initial_means: a Tensor with a matrix of means. If None, means are
computed by sampling randomly.
params: Controls which parameters are updated in the training
process. Can contain any combination of "w" for weights, "m" for
means, and "c" for covariances.
covariance_type: one of "full", "diag".
      random_seed: Seed for the PRNG used to sample the initial means.
Raises:
Exception if covariance type is unknown.
"""
self._params = params
self._random_seed = random_seed
self._covariance_type = covariance_type
if self._covariance_type not in [DIAG_COVARIANCE, FULL_COVARIANCE]:
raise Exception( # pylint: disable=g-doc-exception
'programmer error: Invalid covariance type: %s' %
self._covariance_type)
# Create sharded variables for multiple shards. The following
# lists are indexed by shard.
# Probability per example in a class.
num_shards = len(data)
self._probs = [None] * num_shards
# Prior probability.
self._prior_probs = [None] * num_shards
# Membership weights w_{ik} where "i" is the i-th example and "k"
# is the k-th mixture.
self._w = [None] * num_shards
# Number of examples in a class.
self._points_in_k = [None] * num_shards
first_shard = data[0]
self._dimensions = array_ops.shape(first_shard)[1]
self._num_classes = num_classes
# Small value to guarantee that covariances are invertible.
self._min_var = array_ops.diag(
array_ops.ones(array_ops.stack([self._dimensions]))) * 1e-3
self._create_variables()
self._initialize_variables(data, initial_means)
# Operations of partial statistics for the computation of the means.
self._w_mul_x = []
# Operations of partial statistics for the computation of the covariances.
self._w_mul_x2 = []
self._define_graph(data)
def _create_variables(self):
"""Initializes GMM algorithm."""
init_value = array_ops.constant([], dtype=dtypes.float32)
self._means = variables.VariableV1(init_value,
name=self.CLUSTERS_VARIABLE,
validate_shape=False)
self._covs = variables.VariableV1(
init_value, name=self.CLUSTERS_COVS_VARIABLE, validate_shape=False)
# Mixture weights, representing the probability that a randomly
    # selected unobserved data point (in EM terms) was generated by component k.
self._alpha = variable_scope.variable(
array_ops.tile([1.0 / self._num_classes], [self._num_classes]),
name=self.CLUSTERS_WEIGHT,
validate_shape=False)
self._cluster_centers_initialized = variables.VariableV1(False,
dtype=dtypes.bool,
name='initialized')
def _initialize_variables(self, data, initial_means=None):
"""Initializes variables.
Args:
data: a list of Tensors with data, each row is a new example.
initial_means: a Tensor with a matrix of means.
"""
first_shard = data[0]
# Initialize means: num_classes X 1 X dimensions.
if initial_means is not None:
means = array_ops.expand_dims(initial_means, 1)
else:
# Sample data randomly
means = array_ops.expand_dims(
_init_clusters_random(data, self._num_classes, self._random_seed), 1)
# Initialize covariances.
if self._covariance_type == FULL_COVARIANCE:
cov = _covariance(first_shard, False) + self._min_var
# A matrix per class, num_classes X dimensions X dimensions
covs = array_ops.tile(
array_ops.expand_dims(cov, 0), [self._num_classes, 1, 1])
elif self._covariance_type == DIAG_COVARIANCE:
cov = _covariance(first_shard, True) + self._min_var
# A diagonal per row, num_classes X dimensions.
covs = array_ops.tile(
array_ops.expand_dims(array_ops.diag_part(cov), 0),
[self._num_classes, 1])
with ops.colocate_with(self._cluster_centers_initialized):
initialized = control_flow_ops.with_dependencies(
[means, covs],
array_ops.identity(self._cluster_centers_initialized))
self._init_ops = []
with ops.colocate_with(self._means):
init_means = state_ops.assign(self._means, means, validate_shape=False)
init_means = control_flow_ops.with_dependencies(
[init_means],
state_ops.assign(self._cluster_centers_initialized, True))
self._init_ops.append(control_flow_ops.cond(initialized,
control_flow_ops.no_op,
lambda: init_means).op)
with ops.colocate_with(self._covs):
init_covs = state_ops.assign(self._covs, covs, validate_shape=False)
init_covs = control_flow_ops.with_dependencies(
[init_covs],
state_ops.assign(self._cluster_centers_initialized, True))
self._init_ops.append(control_flow_ops.cond(initialized,
control_flow_ops.no_op,
lambda: init_covs).op)
def init_ops(self):
"""Returns the initialization operation."""
return control_flow_ops.group(*self._init_ops)
def training_ops(self):
"""Returns the training operation."""
return control_flow_ops.group(*self._train_ops)
def is_initialized(self):
"""Returns a boolean operation for initialized variables."""
return self._cluster_centers_initialized
def alphas(self):
return self._alpha
def clusters(self):
"""Returns the clusters with dimensions num_classes X 1 X num_dimensions."""
return self._means
def covariances(self):
"""Returns the covariances matrices."""
return self._covs
def assignments(self):
"""Returns a list of Tensors with the matrix of assignments per shard."""
ret = []
for w in self._w:
ret.append(math_ops.argmax(w, 1))
return ret
def scores(self):
"""Returns the per-sample likelihood fo the data.
Returns:
Log probabilities of each data point.
"""
return self._scores
def log_likelihood_op(self):
"""Returns the log-likelihood operation."""
return self._log_likelihood_op
def _define_graph(self, data):
"""Define graph for a single iteration.
Args:
data: a list of Tensors defining the training data.
"""
for shard_id, shard in enumerate(data):
self._num_examples = array_ops.shape(shard)[0]
shard = array_ops.expand_dims(shard, 0)
self._define_log_prob_operation(shard_id, shard)
self._define_prior_log_prob_operation(shard_id)
self._define_expectation_operation(shard_id)
self._define_partial_maximization_operation(shard_id, shard)
self._define_maximization_operation(len(data))
self._define_loglikelihood_operation()
self._define_score_samples()
def _define_full_covariance_probs(self, shard_id, shard):
"""Defines the full covariance probabilities per example in a class.
Updates a matrix with dimension num_examples X num_classes.
Args:
shard_id: id of the current shard.
shard: current data shard, 1 X num_examples X dimensions.
"""
diff = shard - self._means
cholesky = linalg_ops.cholesky(self._covs + self._min_var)
log_det_covs = 2.0 * math_ops.reduce_sum(
math_ops.log(array_ops.matrix_diag_part(cholesky)), 1)
x_mu_cov = math_ops.square(
linalg_ops.matrix_triangular_solve(
cholesky, array_ops.transpose(
diff, perm=[0, 2, 1]), lower=True))
diag_m = array_ops.transpose(math_ops.reduce_sum(x_mu_cov, 1))
self._probs[shard_id] = (
-0.5 * (diag_m + math_ops.cast(self._dimensions, dtypes.float32) *
math_ops.log(2 * np.pi) + log_det_covs))
def _define_diag_covariance_probs(self, shard_id, shard):
"""Defines the diagonal covariance probabilities per example in a class.
Args:
shard_id: id of the current shard.
shard: current data shard, 1 X num_examples X dimensions.
Returns a matrix num_examples * num_classes.
"""
# num_classes X 1
# TODO(xavigonzalvo): look into alternatives to log for
# reparametrization of variance parameters.
det_expanded = math_ops.reduce_sum(
math_ops.log(self._covs + 1e-3), 1, keepdims=True)
x2 = math_ops.squared_difference(shard, self._means)
cov_expanded = array_ops.expand_dims(1.0 / (self._covs + 1e-3), 2)
# num_classes X num_examples
x2_cov = math_ops.matmul(x2, cov_expanded)
x2_cov = array_ops.transpose(array_ops.squeeze(x2_cov, [2]))
self._probs[shard_id] = -0.5 * (
math_ops.cast(self._dimensions, dtypes.float32) *
math_ops.log(2.0 * np.pi) +
array_ops.transpose(det_expanded) + x2_cov)
def _define_log_prob_operation(self, shard_id, shard):
"""Probability per example in a class.
Updates a matrix with dimension num_examples X num_classes.
Args:
shard_id: id of the current shard.
shard: current data shard, 1 X num_examples X dimensions.
"""
# TODO(xavigonzalvo): Use the pdf defined in
# third_party/tensorflow/contrib/distributions/python/ops/gaussian.py
if self._covariance_type == FULL_COVARIANCE:
self._define_full_covariance_probs(shard_id, shard)
elif self._covariance_type == DIAG_COVARIANCE:
self._define_diag_covariance_probs(shard_id, shard)
self._probs[shard_id] += math_ops.log(self._alpha)
def _define_prior_log_prob_operation(self, shard_id):
"""Computes the prior probability of all samples.
Updates a vector where each item is the prior probability of an
input example.
Args:
shard_id: id of current shard_id.
"""
self._prior_probs[shard_id] = math_ops.reduce_logsumexp(
self._probs[shard_id], axis=1, keepdims=True)
def _define_expectation_operation(self, shard_id):
# Shape broadcasting.
probs = array_ops.expand_dims(self._probs[shard_id], 0)
# Membership weights are computed as:
    # $$w_{ik} = \frac{\alpha_k f(\mathbf{y_i}|\mathbf{\theta}_k)}{\sum_{m=1}^{K}\alpha_m f(\mathbf{y_i}|\mathbf{\theta}_m)}$$
# where "i" is the i-th example, "k" is the k-th mixture, theta are
# the model parameters and y_i the observations.
# These are defined for each shard.
self._w[shard_id] = array_ops.reshape(
math_ops.exp(probs - self._prior_probs[shard_id]),
array_ops.stack([self._num_examples, self._num_classes]))
def _define_partial_maximization_operation(self, shard_id, shard):
"""Computes the partial statistics of the means and covariances.
Args:
shard_id: current shard id.
shard: current data shard, 1 X num_examples X dimensions.
"""
    # Soft assignment of each data point to each of the clusters.
self._points_in_k[shard_id] = math_ops.reduce_sum(
self._w[shard_id], 0, keepdims=True)
# Partial means.
w_mul_x = array_ops.expand_dims(
math_ops.matmul(
self._w[shard_id], array_ops.squeeze(shard, [0]), transpose_a=True),
1)
self._w_mul_x.append(w_mul_x)
# Partial covariances.
x = array_ops.concat([shard for _ in range(self._num_classes)], 0)
x_trans = array_ops.transpose(x, perm=[0, 2, 1])
x_mul_w = array_ops.concat([
array_ops.expand_dims(x_trans[k, :, :] * self._w[shard_id][:, k], 0)
for k in range(self._num_classes)
], 0)
self._w_mul_x2.append(math_ops.matmul(x_mul_w, x))
def _define_maximization_operation(self, num_batches):
"""Maximization operations."""
# TODO(xavigonzalvo): some of these operations could be moved to C++.
# Compute the effective number of data points assigned to component k.
with ops.control_dependencies(self._w):
points_in_k = array_ops.squeeze(
math_ops.add_n(self._points_in_k), axis=[0])
# Update alpha.
if 'w' in self._params:
final_points_in_k = points_in_k / num_batches
num_examples = math_ops.cast(math_ops.reduce_sum(final_points_in_k),
dtypes.float32)
self._alpha_op = self._alpha.assign(final_points_in_k /
(num_examples + MEPS))
else:
self._alpha_op = control_flow_ops.no_op()
self._train_ops = [self._alpha_op]
# Update means.
points_in_k_expanded = array_ops.reshape(points_in_k,
[self._num_classes, 1, 1])
if 'm' in self._params:
self._means_op = self._means.assign(
math_ops.div(
math_ops.add_n(self._w_mul_x), points_in_k_expanded + MEPS))
else:
self._means_op = control_flow_ops.no_op()
# means are (num_classes x 1 x dims)
# Update covariances.
with ops.control_dependencies([self._means_op]):
b = math_ops.add_n(self._w_mul_x2) / (points_in_k_expanded + MEPS)
new_covs = []
for k in range(self._num_classes):
mean = self._means.value()[k, :, :]
square_mean = math_ops.matmul(mean, mean, transpose_a=True)
new_cov = b[k, :, :] - square_mean + self._min_var
if self._covariance_type == FULL_COVARIANCE:
new_covs.append(array_ops.expand_dims(new_cov, 0))
elif self._covariance_type == DIAG_COVARIANCE:
new_covs.append(
array_ops.expand_dims(array_ops.diag_part(new_cov), 0))
new_covs = array_ops.concat(new_covs, 0)
if 'c' in self._params:
      # Train operations don't need to take care of the means
      # because the covariances already depend on them.
with ops.control_dependencies([self._means_op, new_covs]):
self._train_ops.append(
state_ops.assign(
self._covs, new_covs, validate_shape=False))
def _define_loglikelihood_operation(self):
"""Defines the total log-likelihood of current iteration."""
op = []
for prior_probs in self._prior_probs:
op.append(math_ops.reduce_logsumexp(prior_probs))
self._log_likelihood_op = math_ops.reduce_logsumexp(op)
def _define_score_samples(self):
"""Defines the likelihood of each data sample."""
op = []
for shard_id, prior_probs in enumerate(self._prior_probs):
op.append(prior_probs + math_ops.log(self._w[shard_id]))
self._scores = array_ops.squeeze(
math_ops.reduce_logsumexp(op, axis=2, keepdims=True), axis=0)
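# A minimal NumPy sketch (not used by this module) of the E-step implemented
# above for the diagonal-covariance case: per-class Gaussian log-densities plus
# log(alpha), as in _define_diag_covariance_probs/_define_log_prob_operation,
# normalized per example as in _define_expectation_operation to obtain the
# membership weights w_{ik}. The variance floor used above is omitted and a
# numerically naive logsumexp is used; shapes and values are illustrative.
def _diag_gmm_e_step_sketch():
  rng = np.random.RandomState(0)
  num_examples, dims, num_classes = 5, 2, 3
  x = rng.randn(num_examples, dims)
  means = rng.randn(num_classes, dims)
  variances = rng.rand(num_classes, dims) + 0.5
  alpha = np.full(num_classes, 1.0 / num_classes)
  # log N(x_i; mu_k, diag(var_k)), shape (num_examples, num_classes).
  diff2 = (x[:, None, :] - means[None, :, :]) ** 2
  log_probs = -0.5 * (dims * np.log(2.0 * np.pi) +
                      np.log(variances).sum(axis=1)[None, :] +
                      (diff2 / variances[None, :, :]).sum(axis=2))
  log_probs += np.log(alpha)[None, :]
  # w_ik = exp(log_probs_ik - logsumexp_k(log_probs_ik)); rows sum to one.
  log_norm = np.log(np.exp(log_probs).sum(axis=1, keepdims=True))
  w = np.exp(log_probs - log_norm)
  assert np.allclose(w.sum(axis=1), 1.0)
  return w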
def gmm(inp,
initial_clusters,
num_clusters,
random_seed,
covariance_type=FULL_COVARIANCE,
params='wmc'):
"""Creates the graph for Gaussian mixture model (GMM) clustering.
Args:
    inp: An input tensor or list of input tensors.
    initial_clusters: Specifies the clusters used during
      initialization. Can be a tensor or numpy array, or a function
      that generates the clusters. Can also be "random" to specify
      that clusters should be chosen randomly from input data. Note: the type
      is kept flexible to be consistent with skflow.
num_clusters: number of clusters.
random_seed: Python integer. Seed for PRNG used to initialize centers.
covariance_type: one of "diag", "full".
params: Controls which parameters are updated in the training
process. Can contain any combination of "w" for weights, "m" for
means, and "c" for covars.
  Returns:
    Note: tuple of lists returned to be consistent with skflow
    A tuple consisting of:
      loss: An op that computes the total log-likelihood for the current
        iteration.
      scores: Log probabilities of each data point.
      assignments: A list containing a vector (or list of vectors). Each
        element in the vector corresponds to an input row in 'inp' and
        specifies the cluster id corresponding to the input.
      training_op: an op that runs an iteration of training.
      init_op: an op that runs the initialization.
      is_initialized: a boolean variable reporting whether the model has been
        initialized.
  """
initial_means = None
if initial_clusters != 'random' and not isinstance(initial_clusters,
ops.Tensor):
initial_means = constant_op.constant(initial_clusters, dtype=dtypes.float32)
# Implementation of GMM.
inp = inp if isinstance(inp, list) else [inp]
gmm_tool = GmmAlgorithm(inp, num_clusters, initial_means, params,
covariance_type, random_seed)
assignments = gmm_tool.assignments()
scores = gmm_tool.scores()
loss = gmm_tool.log_likelihood_op()
return (loss, scores, [assignments], gmm_tool.training_ops(),
gmm_tool.init_ops(), gmm_tool.is_initialized())
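# A minimal usage sketch for gmm() above (not part of this module's API). It
# assumes a TF 1.x session environment; the data, cluster count and iteration
# count below are illustrative.
def _gmm_usage_sketch(num_iterations=20):
  import tensorflow.compat.v1 as tf  # Local import: sketch only.
  points = np.random.RandomState(0).randn(200, 2).astype(np.float32)
  (loss, _, assignments, training_op, init_op, is_initialized) = gmm(
      tf.convert_to_tensor(points), 'random', num_clusters=3, random_seed=0)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    while not sess.run(is_initialized):  # The chief drives initialization.
      sess.run(init_op)
    for _ in range(num_iterations):  # Each run is one EM iteration.
      sess.run(training_op)
    # Total log-likelihood and the per-point cluster assignments.
    return sess.run([loss, assignments[0][0]])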
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/factorization/python/ops/gmm_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A canned Estimator for k-means clustering."""
# TODO(ccolby): Move clustering_ops.py into this file and streamline the code.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.contrib.factorization.python.ops import clustering_ops
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.export import export_output
from tensorflow.python.feature_column import feature_column_lib as fc
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
class _LossRelativeChangeHook(session_run_hook.SessionRunHook):
"""Stops when the change in loss goes below a tolerance."""
def __init__(self, loss_tensor, tolerance):
"""Creates a _LossRelativeChangeHook.
Args:
loss_tensor: A scalar tensor of the loss value.
tolerance: A relative tolerance of loss change between iterations.
"""
self._loss_tensor = loss_tensor
self._tolerance = tolerance
self._prev_loss = None
def before_run(self, run_context):
del run_context # unused
return session_run_hook.SessionRunArgs(self._loss_tensor)
def after_run(self, run_context, run_values):
loss = run_values.results
assert loss is not None
if self._prev_loss:
relative_change = (
abs(loss - self._prev_loss) / (1 + abs(self._prev_loss)))
if relative_change < self._tolerance:
run_context.request_stop()
self._prev_loss = loss
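# A worked example (not used by this module) of the stopping rule applied in
# _LossRelativeChangeHook above: training stops once
# |loss - prev_loss| / (1 + |prev_loss|) drops below the tolerance. The numbers
# below are illustrative.
def _relative_change_sketch(prev_loss=100.0, loss=99.9995, tolerance=1e-5):
  relative_change = abs(loss - prev_loss) / (1 + abs(prev_loss))
  return relative_change < tolerance  # ~4.95e-6 < 1e-5, so training would stop.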
class _InitializeClustersHook(session_run_hook.SessionRunHook):
"""Initializes the cluster centers.
The chief repeatedly invokes an initialization op until all cluster centers
are initialized. The workers wait for the initialization phase to complete.
"""
def __init__(self, init_op, is_initialized_var, is_chief):
"""Creates an _InitializeClustersHook.
Args:
init_op: An op that, when run, will choose some initial cluster centers.
This op may need to be run multiple times to choose all the centers.
is_initialized_var: A boolean variable reporting whether all initial
centers have been chosen.
is_chief: A boolean specifying whether this task is the chief.
"""
self._init_op = init_op
self._is_initialized_var = is_initialized_var
self._is_chief = is_chief
def after_create_session(self, session, coord):
del coord # unused
assert self._init_op.graph is ops.get_default_graph()
assert self._is_initialized_var.graph is self._init_op.graph
while True:
try:
if session.run(self._is_initialized_var):
break
elif self._is_chief:
session.run(self._init_op)
else:
time.sleep(1)
except RuntimeError as e:
logging.info(e)
def _parse_features_if_necessary(features, feature_columns):
"""Helper function to convert the input points into a usable format.
Args:
features: The input features.
    feature_columns: An optional iterable containing all the feature columns
used by the model. All items in the set should be feature column instances
that can be passed to `tf.compat.v1.feature_column.input_layer`. If this
is None, all features will be used.
Returns:
If `features` is a dict of `k` features (optionally filtered by
`feature_columns`), each of which is a vector of `n` scalars, the return
value is a Tensor of shape `(n, k)` representing `n` input points, where the
items in the `k` dimension are sorted lexicographically by `features` key.
If `features` is not a dict, it is returned unmodified.
"""
if not isinstance(features, dict):
return features
if feature_columns:
return fc.input_layer(features, feature_columns)
keys = sorted(features.keys())
with ops.colocate_with(features[keys[0]]):
return array_ops.concat([features[k] for k in keys], axis=1)
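# A minimal NumPy sketch (not used by this module) of the contract documented
# in _parse_features_if_necessary above: a dict of `k` vector features, each of
# length `n`, becomes an `(n, k)` matrix whose columns follow the lexicographic
# order of the feature keys. The feature names below are illustrative.
def _features_dict_sketch():
  import numpy as np  # Local import: sketch only.
  features = {'weight': np.array([1.0, 2.0, 3.0]),
              'height': np.array([10.0, 20.0, 30.0])}
  keys = sorted(features)  # ['height', 'weight'].
  points = np.stack([features[k] for k in keys], axis=1)  # Shape (3, 2).
  return points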
class _ModelFn(object):
"""Model function for the estimator."""
def __init__(self, num_clusters, initial_clusters, distance_metric,
random_seed, use_mini_batch, mini_batch_steps_per_iteration,
kmeans_plus_plus_num_retries, relative_tolerance,
feature_columns):
self._num_clusters = num_clusters
self._initial_clusters = initial_clusters
self._distance_metric = distance_metric
self._random_seed = random_seed
self._use_mini_batch = use_mini_batch
self._mini_batch_steps_per_iteration = mini_batch_steps_per_iteration
self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
self._relative_tolerance = relative_tolerance
self._feature_columns = feature_columns
def model_fn(self, features, mode, config):
"""Model function for the estimator.
Note that this does not take a `labels` arg. This works, but `input_fn` must
return either `features` or, equivalently, `(features, None)`.
Args:
features: The input points. See `tf.estimator.Estimator`.
mode: See `tf.estimator.Estimator`.
config: See `tf.estimator.Estimator`.
Returns:
A `tf.estimator.EstimatorSpec` (see `tf.estimator.Estimator`) specifying
this behavior:
* `train_op`: Execute one mini-batch or full-batch run of Lloyd's
algorithm.
* `loss`: The sum of the squared distances from each input point to its
closest center.
* `eval_metric_ops`: Maps `SCORE` to `loss`.
* `predictions`: Maps `ALL_DISTANCES` to the distance from each input
point to each cluster center; maps `CLUSTER_INDEX` to the index of
the closest cluster center for each input point.
"""
# input_points is a single Tensor. Therefore, the sharding functionality
# in clustering_ops is unused, and some of the values below are lists of a
# single item.
input_points = _parse_features_if_necessary(features, self._feature_columns)
# Let N = the number of input_points.
# all_distances: A list of one matrix of shape (N, num_clusters). Each value
# is the distance from an input point to a cluster center.
# model_predictions: A list of one vector of shape (N). Each value is the
# cluster id of an input point.
# losses: Similar to cluster_idx but provides the distance to the cluster
# center.
# is_initialized: scalar indicating whether the initial cluster centers
# have been chosen; see init_op.
# init_op: an op to choose the initial cluster centers. A single worker
# repeatedly executes init_op until is_initialized becomes True.
# training_op: an op that runs an iteration of training, either an entire
# Lloyd iteration or a mini-batch of a Lloyd iteration. Multiple workers
# may execute this op, but only after is_initialized becomes True.
(all_distances, model_predictions, losses, is_initialized, init_op,
training_op) = clustering_ops.KMeans(
inputs=input_points,
num_clusters=self._num_clusters,
initial_clusters=self._initial_clusters,
distance_metric=self._distance_metric,
use_mini_batch=self._use_mini_batch,
mini_batch_steps_per_iteration=self._mini_batch_steps_per_iteration,
random_seed=self._random_seed,
kmeans_plus_plus_num_retries=self._kmeans_plus_plus_num_retries
).training_graph()
loss = math_ops.reduce_sum(losses)
summary.scalar('loss/raw', loss)
incr_step = state_ops.assign_add(training_util.get_global_step(), 1)
training_op = control_flow_ops.with_dependencies([training_op, incr_step],
loss)
training_hooks = [
_InitializeClustersHook(init_op, is_initialized, config.is_chief)
]
if self._relative_tolerance is not None:
training_hooks.append(
_LossRelativeChangeHook(loss, self._relative_tolerance))
export_outputs = {
KMeansClustering.ALL_DISTANCES:
export_output.PredictOutput(all_distances[0]),
KMeansClustering.CLUSTER_INDEX:
export_output.PredictOutput(model_predictions[0]),
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
export_output.PredictOutput(model_predictions[0])
}
return model_fn_lib.EstimatorSpec(
mode=mode,
predictions={
KMeansClustering.ALL_DISTANCES: all_distances[0],
KMeansClustering.CLUSTER_INDEX: model_predictions[0],
},
loss=loss,
train_op=training_op,
eval_metric_ops={KMeansClustering.SCORE: metrics.mean(loss)},
training_hooks=training_hooks,
export_outputs=export_outputs)
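# A minimal NumPy sketch (not used by this module) of the loss reported by the
# model function above: the sum, over all input points, of the squared distance
# from each point to its closest cluster center. The points and centers below
# are illustrative.
def _kmeans_loss_sketch():
  import numpy as np  # Local import: sketch only.
  points = np.array([[0.0, 0.0], [1.0, 1.0], [9.0, 9.0]])
  centers = np.array([[0.0, 0.0], [10.0, 10.0]])
  # Squared distances of shape (num_points, num_centers), as in ALL_DISTANCES.
  squared = ((points[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2)
  return squared.min(axis=1).sum()  # 0 + 2 + 2 = 4.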
# TODO(agarwal,ands): support sharded input.
class KMeansClustering(estimator.Estimator):
"""An Estimator for K-Means clustering.
Example:
```
import numpy as np
import tensorflow as tf
num_points = 100
dimensions = 2
points = np.random.uniform(0, 1000, [num_points, dimensions])
def input_fn():
return tf.compat.v1.train.limit_epochs(
tf.convert_to_tensor(points, dtype=tf.float32), num_epochs=1)
num_clusters = 5
kmeans = tf.contrib.factorization.KMeansClustering(
num_clusters=num_clusters, use_mini_batch=False)
# train
num_iterations = 10
previous_centers = None
  for _ in range(num_iterations):
kmeans.train(input_fn)
cluster_centers = kmeans.cluster_centers()
if previous_centers is not None:
      print('delta:', cluster_centers - previous_centers)
previous_centers = cluster_centers
    print('score:', kmeans.score(input_fn))
  print('cluster centers:', cluster_centers)
# map the input points to their clusters
cluster_indices = list(kmeans.predict_cluster_index(input_fn))
for i, point in enumerate(points):
cluster_index = cluster_indices[i]
center = cluster_centers[cluster_index]
    print('point:', point, 'is in cluster', cluster_index, 'centered at', center)
```
The `SavedModel` saved by the `export_savedmodel` method does not include the
  cluster centers. However, the cluster centers may be retrieved from the
  latest checkpoint saved during training. Specifically,
```
kmeans.cluster_centers()
```
is equivalent to
```
tf.train.load_variable(
kmeans.model_dir, KMeansClustering.CLUSTER_CENTERS_VAR_NAME)
```
"""
# Valid values for the distance_metric constructor argument.
SQUARED_EUCLIDEAN_DISTANCE = clustering_ops.SQUARED_EUCLIDEAN_DISTANCE
COSINE_DISTANCE = clustering_ops.COSINE_DISTANCE
# Values for initial_clusters constructor argument.
RANDOM_INIT = clustering_ops.RANDOM_INIT
KMEANS_PLUS_PLUS_INIT = clustering_ops.KMEANS_PLUS_PLUS_INIT
# Metric returned by evaluate(): The sum of the squared distances from each
# input point to its closest center.
SCORE = 'score'
# Keys returned by predict().
# ALL_DISTANCES: The distance from each input point to each cluster center.
# CLUSTER_INDEX: The index of the closest cluster center for each input point.
CLUSTER_INDEX = 'cluster_index'
ALL_DISTANCES = 'all_distances'
# Variable name used by cluster_centers().
CLUSTER_CENTERS_VAR_NAME = clustering_ops.CLUSTERS_VAR_NAME
def __init__(self,
num_clusters,
model_dir=None,
initial_clusters=RANDOM_INIT,
distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
random_seed=0,
use_mini_batch=True,
mini_batch_steps_per_iteration=1,
kmeans_plus_plus_num_retries=2,
relative_tolerance=None,
config=None,
feature_columns=None):
"""Creates an Estimator for running KMeans training and inference.
This Estimator implements the following variants of the K-means algorithm:
If `use_mini_batch` is False, it runs standard full batch K-means. Each
training step runs a single iteration of K-Means and must process the full
input at once. To run in this mode, the `input_fn` passed to `train` must
return the entire input dataset.
If `use_mini_batch` is True, it runs a generalization of the mini-batch
K-means algorithm. It runs multiple iterations, where each iteration is
composed of `mini_batch_steps_per_iteration` steps. Each training step
accumulates the contribution from one mini-batch into temporary storage.
Every `mini_batch_steps_per_iteration` steps, the cluster centers are
updated and the temporary storage cleared for the next iteration. Note
that:
* If `mini_batch_steps_per_iteration=1`, the algorithm reduces to the
standard K-means mini-batch algorithm.
* If `mini_batch_steps_per_iteration = num_inputs / batch_size`, the
algorithm becomes an asynchronous version of the full-batch algorithm.
      However, this implementation does not guarantee that each input is seen
      exactly once per iteration. Also, different updates are applied
      asynchronously without locking, so this asynchronous version may not
      behave exactly like a full-batch version.
Args:
num_clusters: An integer tensor specifying the number of clusters. This
argument is ignored if `initial_clusters` is a tensor or numpy array.
model_dir: The directory to save the model results and log files.
initial_clusters: Specifies how the initial cluster centers are chosen.
One of the following: * a tensor or numpy array with the initial cluster
centers. * a callable `f(inputs, k)` that selects and returns up to
`k` centers from an input batch. `f` is free to return any number of
centers from `0` to `k`. It will be invoked on successive input
batches as necessary until all `num_clusters` centers are chosen.
* `KMeansClustering.RANDOM_INIT`: Choose centers randomly from an input
batch. If the batch size is less than `num_clusters` then the entire
batch is chosen to be initial cluster centers and the remaining
centers are chosen from successive input batches.
* `KMeansClustering.KMEANS_PLUS_PLUS_INIT`: Use kmeans++ to choose
centers from the first input batch. If the batch size is less than
`num_clusters`, a TensorFlow runtime error occurs.
distance_metric: The distance metric used for clustering. One of:
        * `KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE`: The squared Euclidean
          distance between vectors `u` and `v`, defined as \\(||u - v||_2^2\\),
          i.e. the sum of the squares of the element-wise differences.
* `KMeansClustering.COSINE_DISTANCE`: Cosine distance between vectors
`u` and `v` is defined as \\(1 - (u . v) / (||u||_2 ||v||_2)\\).
random_seed: Python integer. Seed for PRNG used to initialize centers.
use_mini_batch: A boolean specifying whether to use the mini-batch k-means
algorithm. See explanation above.
mini_batch_steps_per_iteration: The number of steps after which the
updated cluster centers are synced back to a master copy. Used only if
`use_mini_batch=True`. See explanation above.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample `O(log(num_to_sample))` additional points. Used only if
`initial_clusters=KMeansClustering.KMEANS_PLUS_PLUS_INIT`.
relative_tolerance: A relative tolerance of change in the loss between
iterations. Stops learning if the loss changes less than this amount.
This may not work correctly if `use_mini_batch=True`.
config: See `tf.estimator.Estimator`.
      feature_columns: An optional iterable containing all the feature columns
used by the model. All items in the set should be feature column
instances that can be passed to
`tf.compat.v1.feature_column.input_layer`. If this is None, all features
will be used.
Raises:
ValueError: An invalid argument was passed to `initial_clusters` or
`distance_metric`.
"""
if isinstance(initial_clusters, str) and initial_clusters not in [
KMeansClustering.RANDOM_INIT, KMeansClustering.KMEANS_PLUS_PLUS_INIT
]:
raise ValueError("Unsupported initialization algorithm '%s'" %
initial_clusters)
if distance_metric not in [
KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
KMeansClustering.COSINE_DISTANCE
]:
raise ValueError("Unsupported distance metric '%s'" % distance_metric)
super(KMeansClustering, self).__init__(
model_fn=_ModelFn(num_clusters, initial_clusters, distance_metric,
random_seed, use_mini_batch,
mini_batch_steps_per_iteration,
kmeans_plus_plus_num_retries, relative_tolerance,
feature_columns).model_fn,
model_dir=model_dir,
config=config)
def _predict_one_key(self, input_fn, predict_key):
for result in self.predict(input_fn=input_fn, predict_keys=[predict_key]):
yield result[predict_key]
def predict_cluster_index(self, input_fn):
"""Finds the index of the closest cluster center to each input point.
Args:
input_fn: Input points. See `tf.estimator.Estimator.predict`.
Yields:
The index of the closest cluster center for each input point.
"""
for index in self._predict_one_key(input_fn,
KMeansClustering.CLUSTER_INDEX):
yield index
def score(self, input_fn):
"""Returns the sum of squared distances to nearest clusters.
Note that this function is different from the corresponding one in sklearn
which returns the negative sum.
Args:
input_fn: Input points. See `tf.estimator.Estimator.evaluate`. Only one
batch is retrieved.
Returns:
The sum of the squared distance from each point in the first batch of
inputs to its nearest cluster center.
"""
return self.evaluate(input_fn=input_fn, steps=1)[KMeansClustering.SCORE]
def transform(self, input_fn):
"""Transforms each input point to its distances to all cluster centers.
Note that if `distance_metric=KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE`,
this
function returns the squared Euclidean distance while the corresponding
sklearn function returns the Euclidean distance.
Args:
input_fn: Input points. See `tf.estimator.Estimator.predict`.
Yields:
The distances from each input point to each cluster center.
"""
for distances in self._predict_one_key(input_fn,
KMeansClustering.ALL_DISTANCES):
yield distances
def cluster_centers(self):
"""Returns the cluster centers."""
return self.get_variable_value(KMeansClustering.CLUSTER_CENTERS_VAR_NAME)
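# A minimal configuration sketch (not part of this module's API) following the
# KMeansClustering.__init__ docstring above: with mini_batch_steps_per_iteration
# close to num_points / batch_size, the mini-batch algorithm approximates an
# asynchronous full-batch sweep. The sizes below are illustrative.
def _mini_batch_kmeans_sketch(num_points=10000, batch_size=500):
  steps_per_iteration = max(num_points // batch_size, 1)  # Steps per sweep.
  return KMeansClustering(
      num_clusters=5,
      use_mini_batch=True,
      mini_batch_steps_per_iteration=steps_per_iteration,
      relative_tolerance=1e-4)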
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/factorization/python/ops/kmeans.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for matrix factorization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.factorization.python.ops import gen_factorization_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import resource_loader
from tensorflow.python.util.compat import collections_abc
_factorization_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_factorization_ops.so"))
class WALSModel(object):
r"""A model for Weighted Alternating Least Squares matrix factorization.
It minimizes the following loss function over U, V:
$$
\|\sqrt W \odot (A - U V^T)\|_F^2 + \lambda (\|U\|_F^2 + \|V\|_F^2)
$$
where,
A: input matrix,
W: weight matrix. Note that the (element-wise) square root of the weights
is used in the objective function.
U, V: row_factors and column_factors matrices,
  \\(\lambda\\): regularization.
Also we assume that W is of the following special form:
\\( W_{ij} = W_0 + R_i * C_j \\) if \\(A_{ij} \ne 0\\),
\\(W_{ij} = W_0\\) otherwise.
where,
\\(W_0\\): unobserved_weight,
\\(R_i\\): row_weights,
\\(C_j\\): col_weights.
  Note that the current implementation supports two operation modes: the default
  mode assumes that row_factors and col_factors individually fit into the memory
  of each worker, in which case they are cached. When this condition cannot be
  met, setting use_factors_weights_cache to False allows larger problem sizes
  with a slight performance penalty, as the worker caches are not created and
  the relevant weight and factor values are instead looked up from parameter
  servers at each step.
Loss computation: The loss can be computed efficiently by decomposing it into
a sparse term and a Gramian term, see wals.md.
The loss is returned by the update_{col, row}_factors(sp_input), and is
normalized as follows:
_, _, unregularized_loss, regularization, sum_weights =
update_row_factors(sp_input)
if sp_input contains the rows \\({A_i, i \in I}\\), and the input matrix A
has n total rows, then the minibatch loss = unregularized_loss +
regularization is
$$
(\|\sqrt W_I \odot (A_I - U_I V^T)\|_F^2 + \lambda \|U_I\|_F^2) * n / |I| +
\lambda \|V\|_F^2
$$
The sum_weights tensor contains the normalized sum of weights
\\(sum(W_I) * n / |I|\\).
A typical usage example (pseudocode):
with tf.Graph().as_default():
# Set up the model object.
model = tf.contrib.factorization.WALSModel(....)
# To be run only once as part of session initialization. In distributed
# training setting, this should only be run by the chief trainer and all
# other trainers should block until this is done.
model_init_op = model.initialize_op
      # To be run once per worker after the session is available, before
      # the prep_gramian_op for the row (or column) update can be run.
worker_init_op = model.worker_init
# To be run once per iteration sweep before the row(column) update
      # initialize ops can be run. Note that in distributed training
      # settings, this should only be run by the chief trainer. All other
# trainers need to block until this is done.
row_update_prep_gramian_op = model.row_update_prep_gramian_op
col_update_prep_gramian_op = model.col_update_prep_gramian_op
# To be run once per worker per iteration sweep. Must be run before
# any actual update ops can be run.
init_row_update_op = model.initialize_row_update_op
init_col_update_op = model.initialize_col_update_op
# Ops to update row(column). This can either take the entire sparse
# tensor or slices of sparse tensor. For distributed trainer, each
# trainer handles just part of the matrix.
_, row_update_op, unreg_row_loss, row_reg, _ = model.update_row_factors(
sp_input=matrix_slices_from_queue_for_worker_shard)
row_loss = unreg_row_loss + row_reg
_, col_update_op, unreg_col_loss, col_reg, _ = model.update_col_factors(
sp_input=transposed_matrix_slices_from_queue_for_worker_shard,
transpose_input=True)
col_loss = unreg_col_loss + col_reg
...
# model_init_op is passed to Supervisor. Chief trainer runs it. Other
# trainers wait.
sv = tf.compat.v1.train.Supervisor(is_chief=is_chief,
...,
init_op=tf.group(..., model_init_op, ...), ...)
...
with sv.managed_session(...) as sess:
# All workers/trainers run it after session becomes available.
worker_init_op.run(session=sess)
...
while i in iterations:
# All trainers need to sync up here.
while not_all_ready:
wait
# Row update sweep.
if is_chief:
row_update_prep_gramian_op.run(session=sess)
else:
wait_for_chief
        # All workers run update initialization.
init_row_update_op.run(session=sess)
# Go through the matrix.
reset_matrix_slices_queue_for_worker_shard
while_matrix_slices:
row_update_op.run(session=sess)
# All trainers need to sync up here.
while not_all_ready:
wait
# Column update sweep.
if is_chief:
col_update_prep_gramian_op.run(session=sess)
else:
wait_for_chief
        # All workers run update initialization.
init_col_update_op.run(session=sess)
# Go through the matrix.
reset_transposed_matrix_slices_queue_for_worker_shard
while_transposed_matrix_slices:
col_update_op.run(session=sess)
"""
def __init__(self,
input_rows,
input_cols,
n_components,
unobserved_weight=0.1,
regularization=None,
row_init="random",
col_init="random",
num_row_shards=1,
num_col_shards=1,
row_weights=1,
col_weights=1,
use_factors_weights_cache=True,
use_gramian_cache=True,
use_scoped_vars=False):
"""Creates model for WALS matrix factorization.
Args:
input_rows: total number of rows for input matrix.
input_cols: total number of cols for input matrix.
n_components: number of dimensions to use for the factors.
unobserved_weight: weight given to unobserved entries of matrix.
regularization: weight of L2 regularization term. If None, no
regularization is done.
row_init: initializer for row factor. Can be a tensor or numpy constant.
If set to "random", the value is initialized randomly.
col_init: initializer for column factor. See row_init for details.
num_row_shards: number of shards to use for row factors.
num_col_shards: number of shards to use for column factors.
row_weights: Must be in one of the following three formats: None, a list
of lists of non-negative real numbers (or equivalent iterables) or a
single non-negative real number.
- When set to None, w_ij = unobserved_weight, which simplifies to ALS.
Note that col_weights must also be set to "None" in this case.
- If it is a list of lists of non-negative real numbers, it needs to be
in the form of [[w_0, w_1, ...], [w_k, ... ], [...]], with the number of
inner lists matching the number of row factor shards and the elements in
each inner list being the weights for the rows of the corresponding row
factor shard. In this case, w_ij = unobserved_weight +
row_weights[i] * col_weights[j].
- If this is a single non-negative real number, this value is used for
all row weights and \\(w_ij\\) = unobserved_weight + row_weights *
col_weights[j].
Note that it is allowed to have row_weights as a list while col_weights is
a single number, or vice versa.
col_weights: See row_weights.
use_factors_weights_cache: When True, the factors and weights will be
cached on the workers before the updates start. Defaults to True. Note
that the weights cache is initialized through `worker_init`, and the
row/col factors cache is initialized through
`initialize_{col/row}_update_op`. In the case where the weights are
computed outside and set before the training iterations start, it is
important to ensure the `worker_init` op is run afterwards for the
weights cache to take effect.
use_gramian_cache: When True, the Gramians will be cached on the workers
before the updates start. Defaults to True.
use_scoped_vars: When True, the factor and weight vars will also be nested
in a tf.name_scope.
"""
self._input_rows = input_rows
self._input_cols = input_cols
self._num_row_shards = num_row_shards
self._num_col_shards = num_col_shards
self._n_components = n_components
self._unobserved_weight = unobserved_weight
self._regularization = regularization
self._regularization_matrix = (
regularization * linalg_ops.eye(self._n_components)
if regularization is not None else None)
assert (row_weights is None) == (col_weights is None)
self._use_factors_weights_cache = use_factors_weights_cache
self._use_gramian_cache = use_gramian_cache
if use_scoped_vars:
with ops.name_scope("row_weights"):
self._row_weights = WALSModel._create_weights(
row_weights, self._input_rows, self._num_row_shards, "row_weights")
with ops.name_scope("col_weights"):
self._col_weights = WALSModel._create_weights(
col_weights, self._input_cols, self._num_col_shards, "col_weights")
with ops.name_scope("row_factors"):
self._row_factors = self._create_factors(
self._input_rows, self._n_components, self._num_row_shards,
row_init, "row_factors")
with ops.name_scope("col_factors"):
self._col_factors = self._create_factors(
self._input_cols, self._n_components, self._num_col_shards,
col_init, "col_factors")
else:
self._row_weights = WALSModel._create_weights(
row_weights, self._input_rows, self._num_row_shards, "row_weights")
self._col_weights = WALSModel._create_weights(
col_weights, self._input_cols, self._num_col_shards, "col_weights")
self._row_factors = self._create_factors(
self._input_rows, self._n_components, self._num_row_shards, row_init,
"row_factors")
self._col_factors = self._create_factors(
self._input_cols, self._n_components, self._num_col_shards, col_init,
"col_factors")
self._row_gramian = self._create_gramian(self._n_components, "row_gramian")
self._col_gramian = self._create_gramian(self._n_components, "col_gramian")
with ops.name_scope("row_prepare_gramian"):
self._row_update_prep_gramian = self._prepare_gramian(
self._col_factors, self._col_gramian)
with ops.name_scope("col_prepare_gramian"):
self._col_update_prep_gramian = self._prepare_gramian(
self._row_factors, self._row_gramian)
with ops.name_scope("transient_vars"):
self._create_transient_vars()
@property
def row_factors(self):
"""Returns a list of tensors corresponding to row factor shards."""
return self._row_factors
@property
def col_factors(self):
"""Returns a list of tensors corresponding to column factor shards."""
return self._col_factors
@property
def row_weights(self):
"""Returns a list of tensors corresponding to row weight shards."""
return self._row_weights
@property
def col_weights(self):
"""Returns a list of tensors corresponding to col weight shards."""
return self._col_weights
@property
def initialize_op(self):
"""Returns an op for initializing tensorflow variables."""
all_vars = self._row_factors + self._col_factors
all_vars.extend([self._row_gramian, self._col_gramian])
if self._row_weights is not None:
assert self._col_weights is not None
all_vars.extend(self._row_weights + self._col_weights)
return variables.variables_initializer(all_vars)
@classmethod
def _shard_sizes(cls, dims, num_shards):
"""Helper function to split dims values into num_shards."""
shard_size, residual = divmod(dims, num_shards)
return [shard_size + 1] * residual + [shard_size] * (num_shards - residual)
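# Illustrative example (assumed values): _shard_sizes(10, 3) computes
# divmod(10, 3) == (3, 1) and returns [4, 3, 3]; the first `residual` shards
# each receive one extra row.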
@classmethod
def _create_factors(cls, rows, cols, num_shards, init, name):
"""Helper function to create row and column factors."""
if callable(init):
init = init()
if isinstance(init, list):
assert len(init) == num_shards
elif isinstance(init, str) and init == "random":
pass
elif num_shards == 1:
init = [init]
sharded_matrix = []
sizes = cls._shard_sizes(rows, num_shards)
assert len(sizes) == num_shards
def make_initializer(i, size):
def initializer():
if init == "random":
return random_ops.random_normal([size, cols])
else:
return init[i]
return initializer
for i, size in enumerate(sizes):
var_name = "%s_shard_%d" % (name, i)
var_init = make_initializer(i, size)
sharded_matrix.append(
variable_scope.variable(
var_init, dtype=dtypes.float32, name=var_name))
return sharded_matrix
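# Illustrative example (assumed values): _create_factors(10, 3, 2, "random",
# "row_factors") creates variables "row_factors_shard_0" and
# "row_factors_shard_1", each of shape [5, 3] and initialized from a standard
# normal; passing a list of two such matrices instead initializes each shard
# from the corresponding list entry.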
@classmethod
def _create_weights(cls, wt_init, num_wts, num_shards, name):
"""Helper function to create sharded weight vector.
Args:
wt_init: init value for the weights. If None, weights are not created. This
can be one of: None, a list of non-negative real numbers (or equivalent
iterables), or a single non-negative real number.
num_wts: total size of all the weight shards
num_shards: number of shards for the weights
name: name for the new Variables.
Returns:
A list of weight shard Tensors.
Raises:
ValueError: If wt_init is not the right format.
"""
if wt_init is None:
return None
init_mode = "list"
if isinstance(wt_init, collections_abc.Iterable):
if num_shards == 1 and len(wt_init) == num_wts:
wt_init = [wt_init]
assert len(wt_init) == num_shards
elif isinstance(wt_init, numbers.Real) and wt_init >= 0:
init_mode = "scalar"
else:
raise ValueError(
"Invalid weight initialization argument. Must be one of these: "
"None, a real non-negative real number, or a list of lists of "
"non-negative real numbers (or equivalent iterables) corresponding "
"to sharded factors.")
sizes = cls._shard_sizes(num_wts, num_shards)
assert len(sizes) == num_shards
def make_wt_initializer(i, size):
def initializer():
if init_mode == "scalar":
return wt_init * array_ops.ones([size])
else:
return wt_init[i]
return initializer
sharded_weight = []
for i, size in enumerate(sizes):
var_name = "%s_shard_%d" % (name, i)
var_init = make_wt_initializer(i, size)
sharded_weight.append(
variable_scope.variable(
var_init, dtype=dtypes.float32, name=var_name))
return sharded_weight
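# Illustrative example (assumed values): _create_weights(0.5, num_wts=10,
# num_shards=2, name="row_weights") creates "row_weights_shard_0" and
# "row_weights_shard_1", each of shape [5] and filled with 0.5; a list input
# must instead provide one iterable of weights per shard.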
@staticmethod
def _create_gramian(n_components, name):
"""Helper function to create the gramian variable.
Args:
n_components: number of dimensions of the factors from which the gramian
will be calculated.
name: name for the new Variables.
Returns:
A gramian Tensor with shape of [n_components, n_components].
"""
return variable_scope.variable(
array_ops.zeros([n_components, n_components]),
dtype=dtypes.float32,
name=name)
@staticmethod
def _transient_var(name):
"""Helper function to create a Variable."""
return variable_scope.variable(
1.0,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
validate_shape=False,
name=name)
def _prepare_gramian(self, factors, gramian):
"""Helper function to create ops to prepare/calculate gramian.
Args:
factors: Variable or list of Variable representing (sharded) factors.
Used to compute the updated corresponding gramian value.
gramian: Variable storing the gramian calculated from the factors.
Returns:
An op that updates the gramian with the calculated value from the factors.
"""
partial_gramians = []
for f in factors:
with ops.colocate_with(f):
partial_gramians.append(math_ops.matmul(f, f, transpose_a=True))
with ops.colocate_with(gramian):
prep_gramian = state_ops.assign(gramian,
math_ops.add_n(partial_gramians)).op
return prep_gramian
def _cached_copy(self, var, name, pass_through=False):
"""Helper function to create a worker cached copy of a Variable.
This assigns the var (either a single Variable or a list of Variables) to
local transient cache Variable(s). Note that if var is a list of Variables,
the assignment is done sequentially to minimize the memory overheads.
Also note that if pass_through is set to True, this does not create new
Variables but simply returns the input.
Args:
var: A Variable or a list of Variables to cache.
name: name of cached Variable.
pass_through: when set to True, the var is simply returned as-is and no
cache Variables are actually created.
Returns:
Tuple consisting of following three entries:
cache: the new transient Variable or list of transient Variables
corresponding one-to-one with var.
cache_init: op to initialize the Variable or the list of Variables.
cache_reset: op to reset the Variable or the list of Variables to some
default value.
"""
if var is None:
return None, None, None
elif pass_through:
cache = var
cache_init = control_flow_ops.no_op()
cache_reset = control_flow_ops.no_op()
elif isinstance(var, variables.Variable):
cache = WALSModel._transient_var(name=name)
with ops.colocate_with(cache):
cache_init = state_ops.assign(cache, var, validate_shape=False)
cache_reset = state_ops.assign(cache, 1.0, validate_shape=False)
else:
assert isinstance(var, list)
assert var
cache = [
WALSModel._transient_var(name="%s_shard_%d" % (name, i))
for i in xrange(len(var))
]
reset_ops = []
for i, c in enumerate(cache):
with ops.colocate_with(c):
if i == 0:
cache_init = state_ops.assign(c, var[i], validate_shape=False)
else:
with ops.control_dependencies([cache_init]):
cache_init = state_ops.assign(c, var[i], validate_shape=False)
reset_ops.append(state_ops.assign(c, 1.0, validate_shape=False))
cache_reset = control_flow_ops.group(*reset_ops)
return cache, cache_init, cache_reset
def _create_transient_vars(self):
"""Creates local cache of factors, weights and gramian for rows and columns.
Note that currently the caching strategy is as follows:
When initiating a row (resp. column) update:
- The column (resp. row) gramian is computed.
- Optionally, if use_gramian_cache is True, the column (resp. row) Gramian
is cached, while the row (resp. column) gramian is reset.
- Optionally, if use_factors_weights_cache is True, the column (resp. row)
factors and weights are cached, while the row (resp. column) factors and
weights are reset.
"""
(self._row_factors_cache, row_factors_cache_init,
row_factors_cache_reset) = self._cached_copy(
self._row_factors,
"row_factors_cache",
pass_through=not self._use_factors_weights_cache)
(self._col_factors_cache, col_factors_cache_init,
col_factors_cache_reset) = self._cached_copy(
self._col_factors,
"col_factors_cache",
pass_through=not self._use_factors_weights_cache)
(self._row_wt_cache, row_wt_cache_init, _) = self._cached_copy(
self._row_weights,
"row_wt_cache",
pass_through=not self._use_factors_weights_cache)
(self._col_wt_cache, col_wt_cache_init, _) = self._cached_copy(
self._col_weights,
"col_wt_cache",
pass_through=not self._use_factors_weights_cache)
(self._row_gramian_cache, row_gramian_cache_init,
row_gramian_cache_reset) = self._cached_copy(
self._row_gramian,
"row_gramian_cache",
pass_through=not self._use_gramian_cache)
(self._col_gramian_cache, col_gramian_cache_init,
col_gramian_cache_reset) = self._cached_copy(
self._col_gramian,
"col_gramian_cache",
pass_through=not self._use_gramian_cache)
self._row_updates_init = control_flow_ops.group(
col_factors_cache_init, row_factors_cache_reset, col_gramian_cache_init,
row_gramian_cache_reset)
self._col_updates_init = control_flow_ops.group(
row_factors_cache_init, col_factors_cache_reset, row_gramian_cache_init,
col_gramian_cache_reset)
if self._row_wt_cache is not None:
assert self._col_wt_cache is not None
self._worker_init = control_flow_ops.group(
row_wt_cache_init, col_wt_cache_init, name="worker_init")
else:
self._worker_init = control_flow_ops.no_op(name="worker_init")
@property
def worker_init(self):
"""Op to initialize worker state once before starting any updates.
Note that specifically this initializes the cache of the row and column
weights on workers when `use_factors_weights_cache` is True. In this case,
if these weights are being calculated and reset after the object is created,
it is important to ensure this op is run afterwards so the cache reflects
the correct values.
"""
return self._worker_init
@property
def row_update_prep_gramian_op(self):
"""Op to form the gramian before starting row updates.
Must be run before initialize_row_update_op and should only be run by one
trainer (usually the chief) when doing distributed training.
Returns:
Op to form the gramian.
"""
return self._row_update_prep_gramian
@property
def col_update_prep_gramian_op(self):
"""Op to form the gramian before starting col updates.
Must be run before initialize_col_update_op and should only be run by one
trainer (usually the chief) when doing distributed training.
Returns:
Op to form the gramian.
"""
return self._col_update_prep_gramian
@property
def initialize_row_update_op(self):
"""Op to initialize worker state before starting row updates."""
return self._row_updates_init
@property
def initialize_col_update_op(self):
"""Op to initialize worker state before starting column updates."""
return self._col_updates_init
@staticmethod
def _get_sharding_func(size, num_shards):
"""Create sharding function for scatter update."""
def func(ids):
if num_shards == 1:
return None, ids
else:
ids_per_shard = size // num_shards
extras = size % num_shards
assignments = math_ops.maximum(ids // (ids_per_shard + 1),
(ids - extras) // ids_per_shard)
new_ids = array_ops.where_v2(assignments < extras,
ids % (ids_per_shard + 1),
(ids - extras) % ids_per_shard)
return assignments, new_ids
return func
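# Illustrative example (assumed values): with size=10 and num_shards=3, the
# shard sizes are [4, 3, 3] (see _shard_sizes), ids_per_shard=3 and extras=1.
# For global id 4: assignments = max(4 // 4, (4 - 1) // 3) = 1, and since
# 1 < extras is False, new_id = (4 - 1) % 3 = 0, i.e. global id 4 maps to
# position 0 of shard 1 (shard 0 holds ids 0..3).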
@classmethod
def scatter_update(cls, factor, indices, values, sharding_func, name=None):
"""Helper function for doing sharded scatter update."""
assert isinstance(factor, list)
if len(factor) == 1:
with ops.colocate_with(factor[0]):
# TODO(agarwal): assign instead of scatter update for full batch update.
return state_ops.scatter_update(
factor[0], indices, values, name=name).op
else:
num_shards = len(factor)
assignments, new_ids = sharding_func(indices)
assert assignments is not None
assignments = math_ops.cast(assignments, dtypes.int32)
sharded_ids = data_flow_ops.dynamic_partition(new_ids, assignments,
num_shards)
sharded_values = data_flow_ops.dynamic_partition(values, assignments,
num_shards)
updates = []
for i in xrange(num_shards):
updates.append(
state_ops.scatter_update(factor[i], sharded_ids[i], sharded_values[
i]))
return control_flow_ops.group(*updates, name=name)
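# Illustrative note (assumed values): with 2 shards of 5 rows each and
# indices [1, 7], the sharding function above routes row 1 to shard 0 at
# position 1 and row 7 to shard 1 at position 2; dynamic_partition groups
# the indices and values per shard before the per-shard scatter_update.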
def update_row_factors(self, sp_input=None, transpose_input=False):
r"""Updates the row factors.
Args:
sp_input: A SparseTensor representing a subset of rows of the full input
in any order. Please note that this SparseTensor must retain the same
indexing as the original input.
transpose_input: If true, the input will be logically transposed and the
rows corresponding to the transposed input are updated.
Returns:
A tuple consisting of the following elements:
new_values: New values for the row factors.
update_op: An op that assigns the newly computed values to the row
factors.
unregularized_loss: A tensor (scalar) that contains the normalized
minibatch loss corresponding to sp_input, without the regularization
term. If sp_input contains the rows \\({A_{i, :}, i \in I}\\), and the
input matrix A has n total rows, then the unregularized loss is:
\\(\|\sqrt W_I \odot (A_I - U_I V^T)\|_F^2 * n / |I|\\)
The total loss is unregularized_loss + regularization.
regularization: A tensor (scalar) that contains the normalized
regularization term for the minibatch loss corresponding to sp_input.
If sp_input contains the rows \\({A_{i, :}, i \in I}\\), and the input
matrix A has n total rows, then the regularization term is:
\\((\lambda \|U_I\|_F^2) * n / |I| + \lambda \|V\|_F^2\\).
sum_weights: The sum of the weights W_I corresponding to sp_input,
normalized by a factor of \\(n / |I|\\). The root weighted squared
error is: \sqrt(unregularized_loss / sum_weights).
"""
return self._process_input_helper(
True, sp_input=sp_input, transpose_input=transpose_input)
def update_col_factors(self, sp_input=None, transpose_input=False):
r"""Updates the column factors.
Args:
sp_input: A SparseTensor representing a subset of columns of the full
input. Please refer to comments for update_row_factors for
restrictions.
transpose_input: If true, the input will be logically transposed and the
columns corresponding to the transposed input are updated.
Returns:
A tuple consisting of the following elements:
new_values: New values for the column factors.
update_op: An op that assigns the newly computed values to the column
factors.
unregularized_loss: A tensor (scalar) that contains the normalized
minibatch loss corresponding to sp_input, without the regularization
term. If sp_input contains the columns \\({A_{:, j}, j \in J}\\), and
the input matrix A has m total columns, then the unregularized loss is:
\\(\|\sqrt W_J \odot (A_J - U V_J^T)\|_F^2 * m / |J|\\)
The total loss is unregularized_loss + regularization.
regularization: A tensor (scalar) that contains the normalized
regularization term for the minibatch loss corresponding to sp_input.
If sp_input contains the columns \\({A_{:, j}, j \in J}\\), and the
input matrix A has m total columns, then the regularization term is:
\\((\lambda \|V_J\|_F^2) * m / |J| + \lambda \|U\|_F^2\\).
sum_weights: The sum of the weights W_J corresponding to sp_input,
normalized by a factor of \\(m / |J|\\). The root weighted squared
error is: \sqrt(unregularized_loss / sum_weights).
"""
return self._process_input_helper(
False, sp_input=sp_input, transpose_input=transpose_input)
def project_row_factors(self,
sp_input=None,
transpose_input=False,
projection_weights=None):
"""Projects the row factors.
This computes the row embedding \\(u_i\\) for an observed row \\(a_i\\) by
solving one iteration of the update equations.
Args:
sp_input: A SparseTensor representing a set of rows. Please note that the
column indices of this SparseTensor must match the model column feature
indexing while the row indices are ignored. The returned results will be
in the same ordering as the input rows.
transpose_input: If true, the input will be logically transposed and the
rows corresponding to the transposed input are projected.
projection_weights: The row weights to be used for the projection. If None
then 1.0 is used. This can be either a scalar or a rank-1 tensor with
the number of elements matching the number of rows to be projected.
Note that the column weights will be determined by the underlying WALS
model.
Returns:
Projected row factors.
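Example (pseudocode; `model` and `sp_rows` are illustrative names):
model = tf.contrib.factorization.WALSModel(...)
# sp_rows is a SparseTensor whose column indices match the model's column
# feature indexing.
projected_rows = model.project_row_factors(sp_input=sp_rows)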
"""
if projection_weights is None:
projection_weights = 1
return self._process_input_helper(
True,
sp_input=sp_input,
transpose_input=transpose_input,
row_weights=projection_weights)[0]
def project_col_factors(self,
sp_input=None,
transpose_input=False,
projection_weights=None):
"""Projects the column factors.
This computes the column embedding \\(v_j\\) for an observed column
\\(a_j\\) by solving one iteration of the update equations.
Args:
sp_input: A SparseTensor representing a set of columns. Please note that
the row indices of this SparseTensor must match the model row feature
indexing while the column indices are ignored. The returned results will
be in the same ordering as the input columns.
transpose_input: If true, the input will be logically transposed and the
columns corresponding to the transposed input are projected.
projection_weights: The column weights to be used for the projection. If
None then 1.0 is used. This can be either a scalar or a rank-1 tensor
with the number of elements matching the number of columns to be
projected. Note that the row weights will be determined by the
underlying WALS model.
Returns:
Projected column factors.
"""
if projection_weights is None:
projection_weights = 1
return self._process_input_helper(
False,
sp_input=sp_input,
transpose_input=transpose_input,
row_weights=projection_weights)[0]
def _process_input_helper(self,
update_row_factors,
sp_input=None,
transpose_input=False,
row_weights=None):
"""Creates the graph for processing a sparse slice of input.
Args:
update_row_factors: if True, update or project the row_factors, else
update or project the column factors.
sp_input: Please refer to comments for update_row_factors,
update_col_factors, project_row_factors, and project_col_factors for
restrictions.
transpose_input: If True, the input is logically transposed and then the
corresponding rows/columns of the transposed input are updated.
row_weights: If not None, the row/column weights to be used for the
update or projection. If None, use the corresponding weights from
the model. Note that the feature (column/row) weights will be
determined by the model. When not None, this can either be a scalar or
a rank-1 tensor with the same number of elements as the number of rows
or columns to be updated/projected.
Returns:
A tuple consisting of the following elements:
new_values: New values for the row/column factors.
update_op: An op that assigns the newly computed values to the row/column
factors.
unregularized_loss: A tensor (scalar) that contains the normalized
minibatch loss corresponding to sp_input, without the regularization
term. Add the regularization term below to yield the loss.
regularization: A tensor (scalar) that contains the normalized
regularization term for the minibatch loss corresponding to sp_input.
sum_weights: The sum of the weights corresponding to sp_input. This
can be used with unregularized loss to calculate the root weighted
squared error.
"""
assert isinstance(sp_input, sparse_tensor.SparseTensor)
if update_row_factors:
left = self._row_factors
right_factors = self._col_factors_cache
row_wt = self._row_wt_cache
col_wt = self._col_wt_cache
total_rows = self._input_rows
total_cols = self._input_cols
sharding_func = WALSModel._get_sharding_func(self._input_rows,
self._num_row_shards)
gramian = self._col_gramian_cache
else:
left = self._col_factors
right_factors = self._row_factors_cache
row_wt = self._col_wt_cache
col_wt = self._row_wt_cache
total_rows = self._input_cols
total_cols = self._input_rows
sharding_func = WALSModel._get_sharding_func(self._input_cols,
self._num_col_shards)
gramian = self._row_gramian_cache
transpose_input = not transpose_input
# Note that the row indices of sp_input are based on the original full input.
# Here we reindex the rows and give them contiguous ids starting at 0.
# We use tf.unique to achieve this reindexing. Note that this is done so
# that the downstream kernel can assume that the input is "dense" along the
# row dimension.
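# Illustrative example (assumed values): if sp_input.indices is
# [[2, 0], [7, 1], [2, 3]], then tf.unique of the row ids [2, 7, 2] gives
# update_row_indices = [2, 7] and all_row_ids = [0, 1, 0], so the slice is
# re-indexed onto 2 contiguous rows while remembering which global rows
# (2 and 7) to update.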
row_ids, col_ids = array_ops.split(
value=sp_input.indices, num_or_size_splits=2, axis=1)
update_row_indices, all_row_ids = array_ops.unique(row_ids[:, 0])
update_col_indices, all_col_ids = array_ops.unique(col_ids[:, 0])
col_ids = array_ops.expand_dims(math_ops.cast(all_col_ids, dtypes.int64), 1)
row_ids = array_ops.expand_dims(math_ops.cast(all_row_ids, dtypes.int64), 1)
if transpose_input:
update_indices = update_col_indices
row_shape = [
math_ops.cast(array_ops.shape(update_row_indices)[0], dtypes.int64)
]
gather_indices = update_row_indices
else:
update_indices = update_row_indices
row_shape = [
math_ops.cast(array_ops.shape(update_col_indices)[0], dtypes.int64)
]
gather_indices = update_col_indices
num_rows = math_ops.cast(array_ops.shape(update_indices)[0], dtypes.int64)
col_shape = [num_rows]
right = embedding_ops.embedding_lookup(
right_factors, gather_indices, partition_strategy="div")
new_sp_indices = array_ops.concat([row_ids, col_ids], 1)
new_sp_shape = (array_ops.concat([row_shape, col_shape], 0)
if transpose_input else
array_ops.concat([col_shape, row_shape], 0))
new_sp_input = sparse_tensor.SparseTensor(
indices=new_sp_indices,
values=sp_input.values,
dense_shape=new_sp_shape)
# Compute lhs and rhs of the normal equations
total_lhs = (self._unobserved_weight * gramian)
if self._regularization_matrix is not None:
total_lhs += self._regularization_matrix
if self._row_weights is None:
# Special case of ALS. Use a much simpler update rule.
total_rhs = (
self._unobserved_weight * sparse_ops.sparse_tensor_dense_matmul(
new_sp_input, right, adjoint_a=transpose_input))
# TODO(rmlarsen): handle transposing in tf.linalg.solve instead of
# transposing explicitly.
# TODO(rmlarsen): multi-thread tf.matrix_solve.
new_left_values = array_ops.transpose(
linalg_ops.matrix_solve(total_lhs, array_ops.transpose(total_rhs)))
else:
if row_weights is None:
# TODO(yifanchen): Add special handling for single shard without using
# embedding_lookup and perform benchmarks for those cases. Same for
# col_weights lookup below.
row_weights_slice = embedding_ops.embedding_lookup(
row_wt, update_indices, partition_strategy="div")
else:
num_indices = array_ops.shape(update_indices)[0]
with ops.control_dependencies(
[check_ops.assert_less_equal(array_ops.rank(row_weights), 1)]):
row_weights_slice = control_flow_ops.cond(
math_ops.equal(array_ops.rank(row_weights), 0),
lambda: (array_ops.ones([num_indices]) * row_weights),
lambda: math_ops.cast(row_weights, dtypes.float32))
col_weights = embedding_ops.embedding_lookup(
col_wt, gather_indices, partition_strategy="div")
partial_lhs, total_rhs = (
gen_factorization_ops.wals_compute_partial_lhs_and_rhs(
right,
col_weights,
self._unobserved_weight,
row_weights_slice,
new_sp_input.indices,
new_sp_input.values,
[],
num_rows,
transpose_input,
name="wals_compute_partial_lhs_rhs"))
total_lhs = array_ops.expand_dims(total_lhs, 0) + partial_lhs
total_rhs = array_ops.expand_dims(total_rhs, -1)
new_left_values = array_ops.squeeze(
linalg_ops.matrix_solve(total_lhs, total_rhs), [2])
update_op_name = "row_update" if update_row_factors else "col_update"
update_op = self.scatter_update(
left,
update_indices,
new_left_values,
sharding_func,
name=update_op_name)
# Create the loss subgraph
loss_sp_input = (sparse_ops.sparse_transpose(new_sp_input)
if transpose_input else new_sp_input)
# sp_approx is the low rank estimate of the input matrix, formed by
# computing the product <\\(u_i, v_j\\)> for (i, j) in loss_sp_input.indices.
sp_approx_vals = gen_factorization_ops.masked_matmul(
new_left_values,
right,
loss_sp_input.indices,
transpose_a=False,
transpose_b=True)
sp_approx = sparse_tensor.SparseTensor(
loss_sp_input.indices, sp_approx_vals, loss_sp_input.dense_shape)
sp_approx_sq = math_ops.square(sp_approx)
sp_residual = sparse_ops.sparse_add(loss_sp_input, sp_approx * (-1))
sp_residual_sq = math_ops.square(sp_residual)
row_wt_mat = (constant_op.constant(0.)
if self._row_weights is None else array_ops.expand_dims(
row_weights_slice, 1))
col_wt_mat = (constant_op.constant(0.)
if self._col_weights is None else array_ops.expand_dims(
col_weights, 0))
# We return the normalized loss
partial_row_gramian = math_ops.matmul(
new_left_values, new_left_values, transpose_a=True)
normalization_factor = total_rows / math_ops.cast(num_rows, dtypes.float32)
unregularized_loss = (
self._unobserved_weight * ( # pyformat line break
sparse_ops.sparse_reduce_sum(sp_residual_sq) - # pyformat break
sparse_ops.sparse_reduce_sum(sp_approx_sq) + # pyformat break
math_ops.trace(math_ops.matmul(partial_row_gramian, gramian))) +
sparse_ops.sparse_reduce_sum(row_wt_mat * (sp_residual_sq * col_wt_mat))
) * normalization_factor
if self._regularization is not None:
regularization = self._regularization * (
math_ops.trace(partial_row_gramian) * normalization_factor +
math_ops.trace(gramian))
else:
regularization = constant_op.constant(0.)
sum_weights = self._unobserved_weight * math_ops.cast(
total_rows * total_cols, dtypes.float32)
if self._row_weights is not None and self._col_weights is not None:
ones = sparse_tensor.SparseTensor(
indices=loss_sp_input.indices,
values=array_ops.ones(array_ops.shape(loss_sp_input.values)),
dense_shape=loss_sp_input.dense_shape)
sum_weights += sparse_ops.sparse_reduce_sum(row_wt_mat * (
ones * col_wt_mat)) * normalization_factor
return (new_left_values, update_op, unregularized_loss, regularization,
sum_weights)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/factorization/python/ops/factorization_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Weighted Alternating Least Squares (WALS) on the tf.learn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.factorization.python.ops import factorization_ops
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
class _SweepHook(session_run_hook.SessionRunHook):
"""Keeps track of row/col sweeps, and runs prep ops before each sweep."""
def __init__(self, is_row_sweep_var, is_sweep_done_var, init_op,
row_prep_ops, col_prep_ops, row_train_op, col_train_op,
switch_op):
"""Initializes SweepHook.
Args:
is_row_sweep_var: A Boolean tf.Variable, determines whether we are
currently doing a row or column sweep. It is updated by the hook.
is_sweep_done_var: A Boolean tf.Variable, determines whether we are
starting a new sweep (this is used to determine when to run the prep ops
below).
init_op: op to be run once before training. This is typically a local
initialization op (such as cache initialization).
row_prep_ops: A list of TensorFlow ops, to be run before the beginning of
each row sweep (and during initialization), in the given order.
col_prep_ops: A list of TensorFlow ops, to be run before the beginning of
each column sweep (and during initialization), in the given order.
row_train_op: A TensorFlow op to be run during row sweeps.
col_train_op: A TensorFlow op to be run during column sweeps.
switch_op: A TensorFlow op to be run before each sweep.
"""
self._is_row_sweep_var = is_row_sweep_var
self._is_sweep_done_var = is_sweep_done_var
self._init_op = init_op
self._row_prep_ops = row_prep_ops
self._col_prep_ops = col_prep_ops
self._row_train_op = row_train_op
self._col_train_op = col_train_op
self._switch_op = switch_op
# Boolean variable that determines whether the init_op has been run.
self._is_initialized = False
def before_run(self, run_context):
"""Runs the appropriate prep ops, and requests running update ops."""
sess = run_context.session
is_sweep_done = sess.run(self._is_sweep_done_var)
if not self._is_initialized:
logging.info("SweepHook running init op.")
sess.run(self._init_op)
if is_sweep_done:
logging.info("SweepHook starting the next sweep.")
sess.run(self._switch_op)
is_row_sweep = sess.run(self._is_row_sweep_var)
if is_sweep_done or not self._is_initialized:
logging.info("SweepHook running prep ops for the {} sweep.".format(
"row" if is_row_sweep else "col"))
prep_ops = self._row_prep_ops if is_row_sweep else self._col_prep_ops
for prep_op in prep_ops:
sess.run(prep_op)
self._is_initialized = True
logging.info("Next fit step starting.")
return session_run_hook.SessionRunArgs(
fetches=[self._row_train_op if is_row_sweep else self._col_train_op])
class _IncrementGlobalStepHook(session_run_hook.SessionRunHook):
"""Hook that increments the global step."""
def __init__(self):
global_step = training_util.get_global_step()
if global_step:
self._global_step_incr_op = state_ops.assign_add(
global_step, 1, name="global_step_incr").op
else:
self._global_step_incr_op = None
def before_run(self, run_context):
if self._global_step_incr_op:
run_context.session.run(self._global_step_incr_op)
class _StopAtSweepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a given sweep."""
def __init__(self, last_sweep):
"""Initializes a `StopAtSweepHook`.
This hook requests stop at a given sweep. Relies on the tensor named
COMPLETED_SWEEPS in the default graph.
Args:
last_sweep: Integer, number of the last sweep to run.
"""
self._last_sweep = last_sweep
def begin(self):
try:
self._completed_sweeps_var = ops.get_default_graph().get_tensor_by_name(
WALSMatrixFactorization.COMPLETED_SWEEPS + ":0")
except KeyError:
raise RuntimeError(WALSMatrixFactorization.COMPLETED_SWEEPS +
" counter should be created to use StopAtSweepHook.")
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(self._completed_sweeps_var)
def after_run(self, run_context, run_values):
completed_sweeps = run_values.results
if completed_sweeps >= self._last_sweep:
run_context.request_stop()
def _wals_factorization_model_function(features, labels, mode, params):
"""Model function for the WALSFactorization estimator.
Args:
features: Dictionary of features. See WALSMatrixFactorization.
labels: Must be None.
mode: A model_fn.ModeKeys object.
params: Dictionary of parameters containing arguments passed to the
WALSMatrixFactorization constructor.
Returns:
A ModelFnOps object.
Raises:
ValueError: If `mode` is not recognized.
"""
assert labels is None
use_factors_weights_cache = (params["use_factors_weights_cache_for_training"]
and mode == model_fn.ModeKeys.TRAIN)
use_gramian_cache = (params["use_gramian_cache_for_training"] and
mode == model_fn.ModeKeys.TRAIN)
max_sweeps = params["max_sweeps"]
model = factorization_ops.WALSModel(
params["num_rows"],
params["num_cols"],
params["embedding_dimension"],
unobserved_weight=params["unobserved_weight"],
regularization=params["regularization_coeff"],
row_init=params["row_init"],
col_init=params["col_init"],
num_row_shards=params["num_row_shards"],
num_col_shards=params["num_col_shards"],
row_weights=params["row_weights"],
col_weights=params["col_weights"],
use_factors_weights_cache=use_factors_weights_cache,
use_gramian_cache=use_gramian_cache)
# Get input rows and cols. We either update rows or columns depending on
# the value of row_sweep, which is maintained using a session hook.
input_rows = features[WALSMatrixFactorization.INPUT_ROWS]
input_cols = features[WALSMatrixFactorization.INPUT_COLS]
# TRAIN mode:
if mode == model_fn.ModeKeys.TRAIN:
# Training consists of the following ops (controlled using a SweepHook).
# Before a row sweep:
# row_update_prep_gramian_op
# initialize_row_update_op
# During a row sweep:
# update_row_factors_op
# Before a col sweep:
# col_update_prep_gramian_op
# initialize_col_update_op
# During a col sweep:
# update_col_factors_op
is_row_sweep_var = variable_scope.variable(
True,
trainable=False,
name="is_row_sweep",
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
is_sweep_done_var = variable_scope.variable(
False,
trainable=False,
name="is_sweep_done",
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
completed_sweeps_var = variable_scope.variable(
0,
trainable=False,
name=WALSMatrixFactorization.COMPLETED_SWEEPS,
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
loss_var = variable_scope.variable(
0.,
trainable=False,
name=WALSMatrixFactorization.LOSS,
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
# The root weighted squared error =
# \\(\sqrt( \sum_{i,j} w_ij * (a_ij - r_ij)^2 / \sum_{i,j} w_ij )\\)
rwse_var = variable_scope.variable(
0.,
trainable=False,
name=WALSMatrixFactorization.RWSE,
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
summary.scalar("loss", loss_var)
summary.scalar("root_weighted_squared_error", rwse_var)
summary.scalar("completed_sweeps", completed_sweeps_var)
def create_axis_ops(sp_input, num_items, update_fn, axis_name):
"""Creates book-keeping and training ops for a given axis.
Args:
sp_input: A SparseTensor corresponding to the row or column batch.
num_items: An integer, the total number of items of this axis.
update_fn: A function that takes one argument (`sp_input`), and that
returns a tuple of
* new_factors: A float Tensor of the factor values after update.
* update_op: a TensorFlow op which updates the factors.
* loss: A float Tensor, the unregularized loss.
* reg_loss: A float Tensor, the regularization loss.
* sum_weights: A float Tensor, the sum of factor weights.
axis_name: A string that specifies the name of the axis.
Returns:
A tuple consisting of:
* reset_processed_items_op: A TensorFlow op, to be run before the
beginning of any sweep. It marks all items as not-processed.
* axis_train_op: A Tensorflow op, to be run during this axis' sweeps.
"""
processed_items_init = array_ops.fill(dims=[num_items], value=False)
with ops.colocate_with(processed_items_init):
processed_items = variable_scope.variable(
processed_items_init,
collections=[ops.GraphKeys.GLOBAL_VARIABLES],
trainable=False,
name="processed_" + axis_name)
_, update_op, loss, reg, sum_weights = update_fn(sp_input)
input_indices = sp_input.indices[:, 0]
with ops.control_dependencies([
update_op,
state_ops.assign(loss_var, loss + reg),
state_ops.assign(rwse_var, math_ops.sqrt(loss / sum_weights))]):
with ops.colocate_with(processed_items):
update_processed_items = state_ops.scatter_update(
processed_items,
input_indices,
array_ops.ones_like(input_indices, dtype=dtypes.bool),
name="update_processed_{}_indices".format(axis_name))
with ops.control_dependencies([update_processed_items]):
is_sweep_done = math_ops.reduce_all(processed_items)
axis_train_op = control_flow_ops.group(
state_ops.assign(is_sweep_done_var, is_sweep_done),
state_ops.assign_add(
completed_sweeps_var,
math_ops.cast(is_sweep_done, dtypes.int32)),
name="{}_sweep_train_op".format(axis_name))
return processed_items.initializer, axis_train_op
reset_processed_rows_op, row_train_op = create_axis_ops(
input_rows,
params["num_rows"],
lambda x: model.update_row_factors(sp_input=x, transpose_input=False),
"rows")
reset_processed_cols_op, col_train_op = create_axis_ops(
input_cols,
params["num_cols"],
lambda x: model.update_col_factors(sp_input=x, transpose_input=True),
"cols")
switch_op = control_flow_ops.group(
state_ops.assign(
is_row_sweep_var, math_ops.logical_not(is_row_sweep_var)),
reset_processed_rows_op,
reset_processed_cols_op,
name="sweep_switch_op")
row_prep_ops = [
model.row_update_prep_gramian_op, model.initialize_row_update_op]
col_prep_ops = [
model.col_update_prep_gramian_op, model.initialize_col_update_op]
init_op = model.worker_init
sweep_hook = _SweepHook(
is_row_sweep_var, is_sweep_done_var, init_op,
row_prep_ops, col_prep_ops, row_train_op, col_train_op, switch_op)
global_step_hook = _IncrementGlobalStepHook()
training_hooks = [sweep_hook, global_step_hook]
if max_sweeps is not None:
training_hooks.append(_StopAtSweepHook(max_sweeps))
return model_fn.ModelFnOps(
mode=model_fn.ModeKeys.TRAIN,
predictions={},
loss=loss_var,
eval_metric_ops={},
train_op=control_flow_ops.no_op(),
training_hooks=training_hooks)
# INFER mode
elif mode == model_fn.ModeKeys.INFER:
projection_weights = features.get(
WALSMatrixFactorization.PROJECTION_WEIGHTS)
def get_row_projection():
return model.project_row_factors(
sp_input=input_rows,
projection_weights=projection_weights,
transpose_input=False)
def get_col_projection():
return model.project_col_factors(
sp_input=input_cols,
projection_weights=projection_weights,
transpose_input=True)
predictions = {
WALSMatrixFactorization.PROJECTION_RESULT: control_flow_ops.cond(
features[WALSMatrixFactorization.PROJECT_ROW],
get_row_projection,
get_col_projection)
}
return model_fn.ModelFnOps(
mode=model_fn.ModeKeys.INFER,
predictions=predictions,
loss=None,
eval_metric_ops={},
train_op=control_flow_ops.no_op(),
training_hooks=[])
# EVAL mode
elif mode == model_fn.ModeKeys.EVAL:
def get_row_loss():
_, _, loss, reg, _ = model.update_row_factors(
sp_input=input_rows, transpose_input=False)
return loss + reg
def get_col_loss():
_, _, loss, reg, _ = model.update_col_factors(
sp_input=input_cols, transpose_input=True)
return loss + reg
loss = control_flow_ops.cond(
features[WALSMatrixFactorization.PROJECT_ROW],
get_row_loss,
get_col_loss)
return model_fn.ModelFnOps(
mode=model_fn.ModeKeys.EVAL,
predictions={},
loss=loss,
eval_metric_ops={},
train_op=control_flow_ops.no_op(),
training_hooks=[])
else:
raise ValueError("mode=%s is not recognized." % str(mode))
class WALSMatrixFactorization(estimator.Estimator):
"""An Estimator for Weighted Matrix Factorization, using the WALS method.
WALS (Weighted Alternating Least Squares) is an algorithm for weighted matrix
factorization. It computes a low-rank approximation of a given sparse (n x m)
matrix `A`, by a product of two matrices, `U * V^T`, where `U` is a (n x k)
matrix and `V` is a (m x k) matrix. Here k is the rank of the approximation,
also called the embedding dimension. We refer to `U` as the row factors, and
`V` as the column factors.
See tensorflow/contrib/factorization/g3doc/wals.md for the precise problem
formulation.
The training proceeds in sweeps: during a row_sweep, we fix `V` and solve for
`U`. During a column sweep, we fix `U` and solve for `V`. Each one of these
problems is an unconstrained quadratic minimization problem and can be solved
exactly (it can also be solved in mini-batches, since the solution decouples
across rows of each matrix).
The alternating between sweeps is achieved by using a hook during training,
which is responsible for keeping track of the sweeps and running preparation
ops at the beginning of each sweep. It also updates the global_step variable,
which keeps track of the number of batches processed since the beginning of
training.
The current implementation assumes that the training is run on a single
machine, and will fail if `config.num_worker_replicas` is not equal to one.
Training is done by calling `self.fit(input_fn=input_fn)`, where `input_fn`
provides two tensors: one for rows of the input matrix, and one for rows of
the transposed input matrix (i.e. columns of the original matrix). Note that
during a row sweep, only row batches are processed (ignoring column batches)
and vice-versa.
Also note that every row (respectively every column) of the input matrix
must be processed at least once for the sweep to be considered complete. In
particular, training will not make progress if some rows are not generated by
the `input_fn`.
For prediction, given a new set of input rows `A'`, we compute a corresponding
set of row factors `U'`, such that `U' * V^T` is a good approximation of `A'`.
We call this operation a row projection. A similar operation is defined for
columns. Projection is done by calling
`self.get_projections(input_fn=input_fn)`, where `input_fn` satisfies the
constraints given below.
The input functions must satisfy the following constraints: Calling `input_fn`
must return a tuple `(features, labels)` where `labels` is None, and
`features` is a dict containing the following keys:
TRAIN:
* `WALSMatrixFactorization.INPUT_ROWS`: float32 SparseTensor (matrix).
Rows of the input matrix to process (or to project).
* `WALSMatrixFactorization.INPUT_COLS`: float32 SparseTensor (matrix).
Columns of the input matrix to process (or to project), transposed.
INFER:
* `WALSMatrixFactorization.INPUT_ROWS`: float32 SparseTensor (matrix).
Rows to project.
* `WALSMatrixFactorization.INPUT_COLS`: float32 SparseTensor (matrix).
Columns to project.
* `WALSMatrixFactorization.PROJECT_ROW`: Boolean Tensor. Whether to project
the rows or columns.
* `WALSMatrixFactorization.PROJECTION_WEIGHTS` (Optional): float32 Tensor
(vector). The weights to use in the projection.
EVAL:
* `WALSMatrixFactorization.INPUT_ROWS`: float32 SparseTensor (matrix).
Rows to project.
* `WALSMatrixFactorization.INPUT_COLS`: float32 SparseTensor (matrix).
Columns to project.
* `WALSMatrixFactorization.PROJECT_ROW`: Boolean Tensor. Whether to project
the rows or columns.
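A typical usage sketch (pseudocode; the input_fn names are illustrative):
model = tf.contrib.factorization.WALSMatrixFactorization(
    num_rows=100, num_cols=50, embedding_dimension=10)
# train_input_fn returns (features, None) with the TRAIN keys above.
model.fit(input_fn=train_input_fn)
row_factors = model.get_row_factors()
# project_input_fn returns (features, None) with the INFER keys above.
projections = model.get_projections(input_fn=project_input_fn)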
"""
# Keys to be used in model_fn
# Features keys
INPUT_ROWS = "input_rows"
INPUT_COLS = "input_cols"
PROJECT_ROW = "project_row"
PROJECTION_WEIGHTS = "projection_weights"
# Predictions key
PROJECTION_RESULT = "projection"
# Name of the completed_sweeps variable
COMPLETED_SWEEPS = "completed_sweeps"
# Name of the loss variable
LOSS = "WALS_loss"
# Name of the Root Weighted Squared Error variable
RWSE = "WALS_RWSE"
def __init__(self,
num_rows,
num_cols,
embedding_dimension,
unobserved_weight=0.1,
regularization_coeff=None,
row_init="random",
col_init="random",
num_row_shards=1,
num_col_shards=1,
row_weights=1,
col_weights=1,
use_factors_weights_cache_for_training=True,
use_gramian_cache_for_training=True,
max_sweeps=None,
model_dir=None,
config=None):
r"""Creates a model for matrix factorization using the WALS method.
Args:
num_rows: Total number of rows for input matrix.
num_cols: Total number of cols for input matrix.
embedding_dimension: Dimension to use for the factors.
unobserved_weight: Weight of the unobserved entries of matrix.
regularization_coeff: Weight of the L2 regularization term. Defaults to
None, in which case the problem is not regularized.
row_init: Initializer for row factor. Must be either:
- A tensor: The row factor matrix is initialized to this tensor,
- A numpy constant,
- "random": The rows are initialized using a normal distribution.
col_init: Initializer for column factor. See row_init.
num_row_shards: Number of shards to use for the row factors.
num_col_shards: Number of shards to use for the column factors.
row_weights: Must be in one of the following three formats:
- None: In this case, the weight of every entry is the unobserved_weight
and the problem simplifies to ALS. Note that, in this case,
col_weights must also be set to "None".
- List of lists of non-negative scalars, of the form
\\([[w_0, w_1, ...], [w_k, ... ], [...]]\\),
where the number of inner lists is equal to the number of row factor
shards and the elements in each inner list are the weights for the
rows of that shard. In this case,
\\(w_ij = unobserved_weight + row_weights[i] * col_weights[j]\\).
- A non-negative scalar: This value is used for all row weights.
Note that it is allowed to have row_weights as a list and col_weights
as a scalar, or vice-versa.
col_weights: See row_weights.
use_factors_weights_cache_for_training: Boolean, whether the factors and
weights will be cached on the workers before the updates start, during
training. Defaults to True.
Note that caching is disabled during prediction.
use_gramian_cache_for_training: Boolean, whether the Gramians will be
cached on the workers before the updates start, during training.
Defaults to True. Note that caching is disabled during prediction.
max_sweeps: integer, optional. Specifies the number of sweeps for which
to train the model, where a sweep is defined as a full update of all the
row factors (resp. column factors).
If `steps` or `max_steps` is also specified in model.fit(), training
stops when either the steps condition or the sweeps condition is met.
model_dir: The directory to save the model results and log files.
config: A Configuration object. See Estimator.
Raises:
ValueError: If config.num_worker_replicas is strictly greater than one.
The current implementation only supports running on a single worker.
"""
# TODO(walidk): Support power-law based weight computation.
# TODO(walidk): Add factor lookup by indices, with caching.
# TODO(walidk): Support caching during prediction.
# TODO(walidk): Provide input pipelines that handle missing rows.
params = {
"num_rows":
num_rows,
"num_cols":
num_cols,
"embedding_dimension":
embedding_dimension,
"unobserved_weight":
unobserved_weight,
"regularization_coeff":
regularization_coeff,
"row_init":
row_init,
"col_init":
col_init,
"num_row_shards":
num_row_shards,
"num_col_shards":
num_col_shards,
"row_weights":
row_weights,
"col_weights":
col_weights,
"max_sweeps":
max_sweeps,
"use_factors_weights_cache_for_training":
use_factors_weights_cache_for_training,
"use_gramian_cache_for_training":
use_gramian_cache_for_training
}
self._row_factors_names = [
"row_factors_shard_%d" % i for i in range(num_row_shards)
]
self._col_factors_names = [
"col_factors_shard_%d" % i for i in range(num_col_shards)
]
super(WALSMatrixFactorization, self).__init__(
model_fn=_wals_factorization_model_function,
params=params,
model_dir=model_dir,
config=config)
if self._config is not None and self._config.num_worker_replicas > 1:
raise ValueError("WALSMatrixFactorization must be run on a single worker "
"replica.")
def get_row_factors(self):
"""Returns the row factors of the model, loading them from checkpoint.
Should only be run after training.
Returns:
A list of the row factors of the model.
"""
return [self.get_variable_value(name) for name in self._row_factors_names]
def get_col_factors(self):
"""Returns the column factors of the model, loading them from checkpoint.
Should only be run after training.
Returns:
A list of the column factors of the model.
"""
return [self.get_variable_value(name) for name in self._col_factors_names]
def get_projections(self, input_fn):
"""Computes the projections of the rows or columns given in input_fn.
Runs predict() with the given input_fn, and returns the results. Should only
be run after training.
Args:
input_fn: Input function which specifies the rows or columns to project.
Returns:
A generator of the projected factors.
"""
return (result[WALSMatrixFactorization.PROJECTION_RESULT]
for result in self.predict(input_fn=input_fn))
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/factorization/python/ops/wals.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for WALSMatrixFactorization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import json
import numpy as np
from tensorflow.contrib.factorization.python.ops import factorization_ops_test_utils
from tensorflow.contrib.factorization.python.ops import wals as wals_lib
from tensorflow.contrib.learn.python.learn import run_config
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config as run_config_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
class WALSMatrixFactorizationTest(test.TestCase):
INPUT_MATRIX = factorization_ops_test_utils.INPUT_MATRIX
def np_array_to_sparse(self, np_array):
"""Transforms an np.array to a tf.SparseTensor."""
return factorization_ops_test_utils.np_matrix_to_tf_sparse(np_array)
def calculate_loss(self):
"""Calculates the loss of the current (trained) model."""
current_rows = embedding_ops.embedding_lookup(
self._model.get_row_factors(), math_ops.range(self._num_rows),
partition_strategy='div')
current_cols = embedding_ops.embedding_lookup(
self._model.get_col_factors(), math_ops.range(self._num_cols),
partition_strategy='div')
row_wts = embedding_ops.embedding_lookup(
self._row_weights, math_ops.range(self._num_rows),
partition_strategy='div')
col_wts = embedding_ops.embedding_lookup(
self._col_weights, math_ops.range(self._num_cols),
partition_strategy='div')
sp_inputs = self.np_array_to_sparse(self.INPUT_MATRIX)
return factorization_ops_test_utils.calculate_loss(
sp_inputs, current_rows, current_cols, self._regularization_coeff,
self._unobserved_weight, row_wts, col_wts)
# TODO(walidk): Replace with input_reader_utils functions once open sourced.
def remap_sparse_tensor_rows(self, sp_x, row_ids, shape):
"""Remaps the row ids of a tf.SparseTensor."""
old_row_ids, old_col_ids = array_ops.split(
value=sp_x.indices, num_or_size_splits=2, axis=1)
new_row_ids = array_ops.gather(row_ids, old_row_ids)
new_indices = array_ops.concat([new_row_ids, old_col_ids], 1)
return sparse_tensor.SparseTensor(
indices=new_indices, values=sp_x.values, dense_shape=shape)
# TODO(walidk): Add an option to shuffle inputs.
def input_fn(self, np_matrix, batch_size, mode,
project_row=None, projection_weights=None,
remove_empty_rows_columns=False):
"""Returns an input_fn that selects row and col batches from np_matrix.
This simple utility creates an input function from a numpy_array. The
following transformations are performed:
* The empty rows and columns in np_matrix are removed (if
remove_empty_rows_columns is true)
* np_matrix is converted to a SparseTensor.
* The rows of the sparse matrix (and the rows of its transpose) are batched.
* A features dictionary is created, which contains the row / column batches.
In TRAIN mode, one only needs to specify the np_matrix and the batch_size.
In INFER and EVAL modes, one must also provide project_row, a boolean which
specifies whether we are projecting rows or columns.
Args:
np_matrix: A numpy array. The input matrix to use.
batch_size: Integer.
mode: Can be one of model_fn.ModeKeys.{TRAIN, INFER, EVAL}.
project_row: A boolean. Used in INFER and EVAL modes. Specifies whether
to project rows or columns.
projection_weights: A float numpy array. Used in INFER mode. Specifies
the weights to use in the projection (the weights are optional, and
default to 1.).
remove_empty_rows_columns: A boolean. When true, this will remove empty
rows and columns in the np_matrix. Note that this will result in
modifying the indices of the input matrix. The mapping from new indices
to old indices is returned in the form of two numpy arrays.
Returns:
A tuple consisting of:
_fn: A callable. Calling _fn returns a features dict.
nz_row_ids: A numpy array of the ids of non-empty rows, such that
nz_row_ids[i] is the old row index corresponding to new index i.
nz_col_ids: A numpy array of the ids of non-empty columns, such that
nz_col_ids[j] is the old column index corresponding to new index j.
"""
if remove_empty_rows_columns:
np_matrix, nz_row_ids, nz_col_ids = (
factorization_ops_test_utils.remove_empty_rows_columns(np_matrix))
else:
nz_row_ids = np.arange(np.shape(np_matrix)[0])
nz_col_ids = np.arange(np.shape(np_matrix)[1])
def extract_features(row_batch, col_batch, num_rows, num_cols):
row_ids = row_batch[0]
col_ids = col_batch[0]
rows = self.remap_sparse_tensor_rows(
row_batch[1], row_ids, shape=[num_rows, num_cols])
cols = self.remap_sparse_tensor_rows(
col_batch[1], col_ids, shape=[num_cols, num_rows])
features = {
wals_lib.WALSMatrixFactorization.INPUT_ROWS: rows,
wals_lib.WALSMatrixFactorization.INPUT_COLS: cols,
}
return features
def _fn():
num_rows = np.shape(np_matrix)[0]
num_cols = np.shape(np_matrix)[1]
row_ids = math_ops.range(num_rows, dtype=dtypes.int64)
col_ids = math_ops.range(num_cols, dtype=dtypes.int64)
sp_mat = self.np_array_to_sparse(np_matrix)
sp_mat_t = sparse_ops.sparse_transpose(sp_mat)
row_batch = input_lib.batch(
[row_ids, sp_mat],
batch_size=min(batch_size, num_rows),
capacity=10,
enqueue_many=True)
col_batch = input_lib.batch(
[col_ids, sp_mat_t],
batch_size=min(batch_size, num_cols),
capacity=10,
enqueue_many=True)
features = extract_features(row_batch, col_batch, num_rows, num_cols)
if mode == model_fn.ModeKeys.INFER or mode == model_fn.ModeKeys.EVAL:
self.assertTrue(
project_row is not None,
msg='project_row must be specified in INFER or EVAL mode.')
features[wals_lib.WALSMatrixFactorization.PROJECT_ROW] = (
constant_op.constant(project_row))
if mode == model_fn.ModeKeys.INFER and projection_weights is not None:
weights_batch = input_lib.batch(
projection_weights,
batch_size=batch_size,
capacity=10,
enqueue_many=True)
features[wals_lib.WALSMatrixFactorization.PROJECTION_WEIGHTS] = (
weights_batch)
labels = None
return features, labels
return _fn, nz_row_ids, nz_col_ids
@property
def input_matrix(self):
return self.INPUT_MATRIX
@property
def row_steps(self):
return np.ceil(self._num_rows / self.batch_size)
@property
def col_steps(self):
return np.ceil(self._num_cols / self.batch_size)
@property
def batch_size(self):
return 5
@property
def use_cache(self):
return False
@property
def max_sweeps(self):
return None
def setUp(self):
self._num_rows = 5
self._num_cols = 7
self._embedding_dimension = 3
self._unobserved_weight = 0.1
self._num_row_shards = 2
self._num_col_shards = 3
self._regularization_coeff = 0.01
self._col_init = [
# Shard 0.
[[-0.36444709, -0.39077035, -0.32528427],
[1.19056475, 0.07231052, 2.11834812],
[0.93468881, -0.71099287, 1.91826844]],
# Shard 1.
[[1.18160152, 1.52490723, -0.50015002],
[1.82574749, -0.57515913, -1.32810032]],
# Shard 2.
[[-0.15515432, -0.84675711, 0.13097958],
[-0.9246484, 0.69117504, 1.2036494]],
]
self._row_weights = [[0.1, 0.2, 0.3], [0.4, 0.5]]
self._col_weights = [[0.1, 0.2, 0.3], [0.4, 0.5], [0.6, 0.7]]
    # Values of row and column factors after running one iteration of factor
    # updates.
self._row_factors_0 = [[0.097689, -0.219293, -0.020780],
[0.50842, 0.64626, 0.22364],
[0.401159, -0.046558, -0.192854]]
self._row_factors_1 = [[1.20597, -0.48025, 0.35582],
[1.5564, 1.2528, 1.0528]]
self._col_factors_0 = [[2.4725, -1.2950, -1.9980],
[0.44625, 1.50771, 1.27118],
[1.39801, -2.10134, 0.73572]]
self._col_factors_1 = [[3.36509, -0.66595, -3.51208],
[0.57191, 1.59407, 1.33020]]
self._col_factors_2 = [[3.3459, -1.3341, -3.3008],
[0.57366, 1.83729, 1.26798]]
self._model = wals_lib.WALSMatrixFactorization(
self._num_rows,
self._num_cols,
self._embedding_dimension,
self._unobserved_weight,
col_init=self._col_init,
regularization_coeff=self._regularization_coeff,
num_row_shards=self._num_row_shards,
num_col_shards=self._num_col_shards,
row_weights=self._row_weights,
col_weights=self._col_weights,
max_sweeps=self.max_sweeps,
use_factors_weights_cache_for_training=self.use_cache,
use_gramian_cache_for_training=self.use_cache)
def test_fit(self):
# Row sweep.
input_fn = self.input_fn(np_matrix=self.input_matrix,
batch_size=self.batch_size,
mode=model_fn.ModeKeys.TRAIN,
remove_empty_rows_columns=True)[0]
self._model.fit(input_fn=input_fn, steps=self.row_steps)
row_factors = self._model.get_row_factors()
self.assertAllClose(row_factors[0], self._row_factors_0, atol=1e-3)
self.assertAllClose(row_factors[1], self._row_factors_1, atol=1e-3)
# Col sweep.
# Running fit a second time will resume training from the checkpoint.
input_fn = self.input_fn(np_matrix=self.input_matrix,
batch_size=self.batch_size,
mode=model_fn.ModeKeys.TRAIN,
remove_empty_rows_columns=True)[0]
self._model.fit(input_fn=input_fn, steps=self.col_steps)
col_factors = self._model.get_col_factors()
self.assertAllClose(col_factors[0], self._col_factors_0, atol=1e-3)
self.assertAllClose(col_factors[1], self._col_factors_1, atol=1e-3)
self.assertAllClose(col_factors[2], self._col_factors_2, atol=1e-3)
def test_predict(self):
input_fn = self.input_fn(np_matrix=self.input_matrix,
batch_size=self.batch_size,
mode=model_fn.ModeKeys.TRAIN,
remove_empty_rows_columns=True,
)[0]
# Project rows 1 and 4 from the input matrix.
proj_input_fn = self.input_fn(
np_matrix=self.INPUT_MATRIX[[1, 4], :],
batch_size=2,
mode=model_fn.ModeKeys.INFER,
project_row=True,
projection_weights=[[0.2, 0.5]])[0]
self._model.fit(input_fn=input_fn, steps=self.row_steps)
projections = self._model.get_projections(proj_input_fn)
projected_rows = list(itertools.islice(projections, 2))
self.assertAllClose(
projected_rows,
[self._row_factors_0[1], self._row_factors_1[1]],
atol=1e-3)
# Project columns 5, 3, 1 from the input matrix.
proj_input_fn = self.input_fn(
np_matrix=self.INPUT_MATRIX[:, [5, 3, 1]],
batch_size=3,
mode=model_fn.ModeKeys.INFER,
project_row=False,
projection_weights=[[0.6, 0.4, 0.2]])[0]
self._model.fit(input_fn=input_fn, steps=self.col_steps)
projections = self._model.get_projections(proj_input_fn)
projected_cols = list(itertools.islice(projections, 3))
self.assertAllClose(
projected_cols,
[self._col_factors_2[0], self._col_factors_1[0],
self._col_factors_0[1]],
atol=1e-3)
def test_eval(self):
# Do a row sweep then evaluate the model on row inputs.
# The evaluate function returns the loss of the projected rows, but since
# projection is idempotent, the eval loss must match the model loss.
input_fn = self.input_fn(np_matrix=self.input_matrix,
batch_size=self.batch_size,
mode=model_fn.ModeKeys.TRAIN,
remove_empty_rows_columns=True,
)[0]
self._model.fit(input_fn=input_fn, steps=self.row_steps)
eval_input_fn_row = self.input_fn(np_matrix=self.input_matrix,
batch_size=1,
mode=model_fn.ModeKeys.EVAL,
project_row=True,
remove_empty_rows_columns=True)[0]
loss = self._model.evaluate(
input_fn=eval_input_fn_row, steps=self._num_rows)['loss']
with self.cached_session():
true_loss = self.calculate_loss()
self.assertNear(
loss, true_loss, err=.001,
msg="""After row update, eval loss = {}, does not match the true
loss = {}.""".format(loss, true_loss))
# Do a col sweep then evaluate the model on col inputs.
self._model.fit(input_fn=input_fn, steps=self.col_steps)
eval_input_fn_col = self.input_fn(np_matrix=self.input_matrix,
batch_size=1,
mode=model_fn.ModeKeys.EVAL,
project_row=False,
remove_empty_rows_columns=True)[0]
loss = self._model.evaluate(
input_fn=eval_input_fn_col, steps=self._num_cols)['loss']
with self.cached_session():
true_loss = self.calculate_loss()
self.assertNear(
loss, true_loss, err=.001,
msg="""After col update, eval loss = {}, does not match the true
loss = {}.""".format(loss, true_loss))
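# A minimal usage sketch (illustrative only, never run by the tests). It mirrors
# the estimator flow exercised by WALSMatrixFactorizationTest above; the two
# input_fns are assumed to be built with WALSMatrixFactorizationTest.input_fn,
# and the constructor arguments match the small 5x7 test problem in setUp.
def _example_wals_flow(train_input_fn, proj_input_fn, row_steps, col_steps):
  """Sketch of the fit / get_projections calls used in the tests above."""
  model = wals_lib.WALSMatrixFactorization(
      5,    # num_rows
      7,    # num_cols
      3,    # embedding_dimension
      0.1)  # unobserved_weight
  # fit() resumes from the checkpoint, so the first call performs the row
  # sweep and the second the column sweep (see test_fit above).
  model.fit(input_fn=train_input_fn, steps=row_steps)
  model.fit(input_fn=train_input_fn, steps=col_steps)
  # Factors come back as a list of shards; projections come back as a generator.
  row_factors = model.get_row_factors()
  projections = list(itertools.islice(model.get_projections(proj_input_fn), 2))
  return row_factors, projections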
class WALSMatrixFactorizationTestSweeps(WALSMatrixFactorizationTest):
@property
def max_sweeps(self):
return 2
# We set the column steps to None so that we rely only on max_sweeps to stop
# training.
@property
def col_steps(self):
return None
class WALSMatrixFactorizationTestCached(WALSMatrixFactorizationTest):
@property
def use_cache(self):
return True
class WALSMatrixFactorizationTestPaddedInput(WALSMatrixFactorizationTest):
PADDED_INPUT_MATRIX = np.pad(
WALSMatrixFactorizationTest.INPUT_MATRIX,
[(1, 0), (1, 0)], mode='constant')
@property
def input_matrix(self):
return self.PADDED_INPUT_MATRIX
class WALSMatrixFactorizationUnsupportedTest(test.TestCase):
def setUp(self):
pass
def testDistributedWALSUnsupported(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4']
},
'task': {
'type': run_config_lib.TaskType.WORKER,
'index': 1
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
self.assertEqual(config.num_worker_replicas, 2)
with self.assertRaises(ValueError):
self._model = wals_lib.WALSMatrixFactorization(1, 1, 1, config=config)
class SweepHookTest(test.TestCase):
def test_sweeps(self):
is_row_sweep_var = variables.VariableV1(True)
is_sweep_done_var = variables.VariableV1(False)
init_done = variables.VariableV1(False)
row_prep_done = variables.VariableV1(False)
col_prep_done = variables.VariableV1(False)
row_train_done = variables.VariableV1(False)
col_train_done = variables.VariableV1(False)
init_op = state_ops.assign(init_done, True)
row_prep_op = state_ops.assign(row_prep_done, True)
col_prep_op = state_ops.assign(col_prep_done, True)
row_train_op = state_ops.assign(row_train_done, True)
col_train_op = state_ops.assign(col_train_done, True)
train_op = control_flow_ops.no_op()
switch_op = control_flow_ops.group(
state_ops.assign(is_sweep_done_var, False),
state_ops.assign(is_row_sweep_var,
math_ops.logical_not(is_row_sweep_var)))
mark_sweep_done = state_ops.assign(is_sweep_done_var, True)
with self.cached_session() as sess:
sweep_hook = wals_lib._SweepHook(
is_row_sweep_var,
is_sweep_done_var,
init_op,
[row_prep_op],
[col_prep_op],
row_train_op,
col_train_op,
switch_op)
mon_sess = monitored_session._HookedSession(sess, [sweep_hook])
sess.run([variables.global_variables_initializer()])
# Row sweep.
mon_sess.run(train_op)
      self.assertTrue(sess.run(init_done),
                      msg='init_op not run by the SweepHook')
self.assertTrue(sess.run(row_prep_done),
msg='row_prep_op not run by the SweepHook')
self.assertTrue(sess.run(row_train_done),
msg='row_train_op not run by the SweepHook')
self.assertTrue(
sess.run(is_row_sweep_var),
msg='Row sweep is not complete but is_row_sweep_var is False.')
# Col sweep.
mon_sess.run(mark_sweep_done)
mon_sess.run(train_op)
self.assertTrue(sess.run(col_prep_done),
msg='col_prep_op not run by the SweepHook')
self.assertTrue(sess.run(col_train_done),
msg='col_train_op not run by the SweepHook')
self.assertFalse(
sess.run(is_row_sweep_var),
msg='Col sweep is not complete but is_row_sweep_var is True.')
# Row sweep.
mon_sess.run(mark_sweep_done)
mon_sess.run(train_op)
self.assertTrue(
sess.run(is_row_sweep_var),
msg='Col sweep is complete but is_row_sweep_var is False.')
class StopAtSweepHookTest(test.TestCase):
def test_stop(self):
hook = wals_lib._StopAtSweepHook(last_sweep=10)
completed_sweeps = variables.VariableV1(
8, name=wals_lib.WALSMatrixFactorization.COMPLETED_SWEEPS)
train_op = state_ops.assign_add(completed_sweeps, 1)
hook.begin()
with self.cached_session() as sess:
sess.run([variables.global_variables_initializer()])
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(train_op)
# completed_sweeps is 9 after running train_op.
self.assertFalse(mon_sess.should_stop())
mon_sess.run(train_op)
# completed_sweeps is 10 after running train_op.
self.assertTrue(mon_sess.should_stop())
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/factorization/python/ops/wals_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops.gmm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.factorization.python.ops import gmm as gmm_lib
from tensorflow.contrib.learn.python.learn.estimators import kmeans
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.training import queue_runner
class GMMTest(test.TestCase):
def input_fn(self, batch_size=None, points=None):
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return x, None
indices = random_ops.random_uniform(constant_op.constant([batch_size]),
minval=0, maxval=num_points-1,
dtype=dtypes.int32,
seed=10)
return array_ops.gather(x, indices), None
return _fn
def setUp(self):
np.random.seed(3)
random_seed_lib.set_random_seed(2)
self.num_centers = 2
self.num_dims = 2
self.num_points = 4000
self.batch_size = self.num_points
self.true_centers = self.make_random_centers(self.num_centers,
self.num_dims)
self.points, self.assignments = self.make_random_points(
self.true_centers, self.num_points)
# Use initial means from kmeans (just like scikit-learn does).
clusterer = kmeans.KMeansClustering(num_clusters=self.num_centers)
clusterer.fit(input_fn=lambda: (constant_op.constant(self.points), None),
steps=30)
self.initial_means = clusterer.clusters()
@staticmethod
def make_random_centers(num_centers, num_dims):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * 500)
@staticmethod
def make_random_points(centers, num_points):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * 20)
points = centers[assignments] + offsets
return (points, assignments)
def test_weights(self):
"""Tests the shape of the weights."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
weights = gmm.weights()
self.assertAllEqual(list(weights.shape), [self.num_centers])
def test_clusters(self):
"""Tests the shape of the clusters."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
clusters = gmm.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters='random',
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=1)
score1 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
gmm.fit(input_fn=self.input_fn(), steps=10)
score2 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
self.assertLess(score1, score2)
def test_infer(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=60)
clusters = gmm.clusters()
# Make a small test set
num_points = 40
points, true_assignments = self.make_random_points(clusters, num_points)
assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=num_points)):
assignments.append(item)
assignments = np.ravel(assignments)
self.assertAllEqual(true_assignments, assignments)
def _compare_with_sklearn(self, cov_type):
# sklearn version.
iterations = 40
np.random.seed(5)
sklearn_assignments = np.asarray([0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
sklearn_means = np.asarray([[144.83417719, 254.20130341],
[274.38754816, 353.16074346]])
sklearn_covs = np.asarray([[[395.0081194, -4.50389512],
[-4.50389512, 408.27543989]],
[[385.17484203, -31.27834935],
[-31.27834935, 391.74249925]]])
# skflow version.
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
covariance_type=cov_type,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=iterations)
points = self.points[:10, :]
skflow_assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=10)):
skflow_assignments.append(item)
self.assertAllClose(sklearn_assignments,
np.ravel(skflow_assignments).astype(int))
self.assertAllClose(sklearn_means, gmm.clusters())
if cov_type == 'full':
self.assertAllClose(sklearn_covs, gmm.covariances(), rtol=0.01)
else:
for d in [0, 1]:
self.assertAllClose(
np.diag(sklearn_covs[d]), gmm.covariances()[d, :], rtol=0.01)
def test_compare_full(self):
self._compare_with_sklearn('full')
def test_compare_diag(self):
self._compare_with_sklearn('diag')
def test_random_input_large(self):
# sklearn version.
iterations = 5 # that should be enough to know whether this diverges
np.random.seed(5)
num_classes = 20
x = np.array([[np.random.random() for _ in range(100)]
for _ in range(num_classes)], dtype=np.float32)
# skflow version.
gmm = gmm_lib.GMM(num_classes,
covariance_type='full',
config=run_config.RunConfig(tf_random_seed=2))
def get_input_fn(x):
def input_fn():
return constant_op.constant(x.astype(np.float32)), None
return input_fn
gmm.fit(input_fn=get_input_fn(x), steps=iterations)
self.assertFalse(np.isnan(gmm.clusters()).any())
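# A minimal usage sketch (illustrative only, never run by the tests). It follows
# the same GMM estimator calls exercised by GMMTest above; `input_fn` is assumed
# to return a (points, labels) pair the way GMMTest.input_fn does.
def _example_gmm_flow(input_fn, num_centers):
  """Sketch of the fit / score / predict_assignments flow used above."""
  gmm = gmm_lib.GMM(num_centers,
                    initial_clusters='random',
                    random_seed=4,
                    config=run_config.RunConfig(tf_random_seed=2))
  gmm.fit(input_fn=input_fn, steps=10)
  score = gmm.score(input_fn=input_fn, steps=1)
  assignments = list(gmm.predict_assignments(input_fn=input_fn))
  return gmm.clusters(), gmm.covariances(), score, assignments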
class GMMTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(capacity=10,
dtypes=dtypes.float32,
shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue,
[enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
# Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
gmm = gmm_lib.GMM(2, covariance_type='diag')
gmm.fit(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/factorization/python/ops/gmm_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for gmm_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.factorization.python.ops import gmm_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class GmmOpsTest(test.TestCase):
def setUp(self):
self.num_examples = 1000
self.iterations = 40
self.seed = 4
random_seed_lib.set_random_seed(self.seed)
np.random.seed(self.seed * 2)
self.data, self.true_assignments = self.make_data(self.num_examples)
# Generate more complicated data.
self.centers = [[1, 1], [-1, 0.5], [2, 1]]
self.more_data, self.more_true_assignments = self.make_data_from_centers(
self.num_examples, self.centers)
@staticmethod
def make_data(num_vectors):
"""Generates 2-dimensional data centered on (2,2), (-1,-1).
Args:
num_vectors: number of training examples.
Returns:
A tuple containing the data as a numpy array and the cluster ids.
"""
vectors = []
classes = []
for _ in xrange(num_vectors):
if np.random.random() > 0.5:
vectors.append([np.random.normal(2.0, 0.6), np.random.normal(2.0, 0.9)])
classes.append(0)
else:
vectors.append(
[np.random.normal(-1.0, 0.4), np.random.normal(-1.0, 0.5)])
classes.append(1)
return np.asarray(vectors), classes
@staticmethod
def make_data_from_centers(num_vectors, centers):
"""Generates 2-dimensional data with random centers.
Args:
num_vectors: number of training examples.
centers: a list of random 2-dimensional centers.
Returns:
A tuple containing the data as a numpy array and the cluster ids.
"""
vectors = []
classes = []
for _ in xrange(num_vectors):
current_class = np.random.random_integers(0, len(centers) - 1)
vectors.append([
np.random.normal(centers[current_class][0],
np.random.random_sample()),
np.random.normal(centers[current_class][1], np.random.random_sample())
])
classes.append(current_class)
    return np.asarray(vectors), classes
def test_covariance(self):
start_time = time.time()
data = self.data.T
np_cov = np.cov(data)
logging.info('Numpy took %f', time.time() - start_time)
start_time = time.time()
with self.cached_session() as sess:
op = gmm_ops._covariance(
constant_op.constant(
data.T, dtype=dtypes.float32), False)
op_diag = gmm_ops._covariance(
constant_op.constant(
data.T, dtype=dtypes.float32), True)
variables.global_variables_initializer().run()
tf_cov = sess.run(op)
np.testing.assert_array_almost_equal(np_cov, tf_cov)
logging.info('Tensorflow took %f', time.time() - start_time)
tf_cov = sess.run(op_diag)
np.testing.assert_array_almost_equal(
np.diag(np_cov), np.ravel(tf_cov), decimal=5)
def test_simple_cluster(self):
"""Tests that the clusters are correct."""
num_classes = 2
graph = ops.Graph()
with graph.as_default() as g:
g.seed = 5
with self.cached_session() as sess:
data = constant_op.constant(self.data, dtype=dtypes.float32)
loss_op, scores, assignments, training_op, init_op, _ = gmm_ops.gmm(
data, 'random', num_classes, random_seed=self.seed)
variables.global_variables_initializer().run()
sess.run(init_op)
first_loss = sess.run(loss_op)
for _ in xrange(self.iterations):
sess.run(training_op)
assignments = sess.run(assignments)
end_loss = sess.run(loss_op)
scores = sess.run(scores)
self.assertEqual((self.num_examples, 1), scores.shape)
accuracy = np.mean(
np.asarray(self.true_assignments) == np.squeeze(assignments))
logging.info('Accuracy: %f', accuracy)
logging.info('First loss: %f, end loss: %f', first_loss, end_loss)
self.assertGreater(end_loss, first_loss)
self.assertGreater(accuracy, 0.98)
def testParams(self):
"""Tests that the params work as intended."""
num_classes = 2
with self.cached_session() as sess:
# Experiment 1. Update weights only.
data = constant_op.constant(self.data, dtype=dtypes.float32)
gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes,
[[3.0, 3.0], [0.0, 0.0]], 'w')
training_ops = gmm_tool.training_ops()
variables.global_variables_initializer().run()
sess.run(gmm_tool.init_ops())
for _ in xrange(self.iterations):
sess.run(training_ops)
# Only the probability to each class is updated.
alphas = sess.run(gmm_tool.alphas())
self.assertGreater(alphas[1], 0.6)
means = sess.run(gmm_tool.clusters())
np.testing.assert_almost_equal(
np.expand_dims([[3.0, 3.0], [0.0, 0.0]], 1), means)
covs = sess.run(gmm_tool.covariances())
np.testing.assert_almost_equal(covs[0], covs[1])
# Experiment 2. Update means and covariances.
gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes,
[[3.0, 3.0], [0.0, 0.0]], 'mc')
training_ops = gmm_tool.training_ops()
variables.global_variables_initializer().run()
sess.run(gmm_tool.init_ops())
for _ in xrange(self.iterations):
sess.run(training_ops)
alphas = sess.run(gmm_tool.alphas())
self.assertAlmostEqual(alphas[0], alphas[1])
means = sess.run(gmm_tool.clusters())
np.testing.assert_almost_equal(
np.expand_dims([[2.0, 2.0], [-1.0, -1.0]], 1), means, decimal=1)
covs = sess.run(gmm_tool.covariances())
np.testing.assert_almost_equal(
[[0.371111, -0.0050774], [-0.0050774, 0.8651744]], covs[0], decimal=4)
np.testing.assert_almost_equal(
[[0.146976, 0.0259463], [0.0259463, 0.2543971]], covs[1], decimal=4)
# Experiment 3. Update covariances only.
gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes,
[[-1.0, -1.0], [1.0, 1.0]], 'c')
training_ops = gmm_tool.training_ops()
variables.global_variables_initializer().run()
sess.run(gmm_tool.init_ops())
for _ in xrange(self.iterations):
sess.run(training_ops)
alphas = sess.run(gmm_tool.alphas())
self.assertAlmostEqual(alphas[0], alphas[1])
means = sess.run(gmm_tool.clusters())
np.testing.assert_almost_equal(
np.expand_dims([[-1.0, -1.0], [1.0, 1.0]], 1), means)
covs = sess.run(gmm_tool.covariances())
np.testing.assert_almost_equal(
[[0.1299582, 0.0435872], [0.0435872, 0.2558578]], covs[0], decimal=5)
np.testing.assert_almost_equal(
[[3.195385, 2.6989155], [2.6989155, 3.3881593]], covs[1], decimal=5)
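# A minimal usage sketch (illustrative only, never run by the tests). It mirrors
# how testParams above drives GmmAlgorithm directly: the fourth argument selects
# which parameters to update ('w' weights, 'm' means, 'c' covariances).
def _example_gmm_algorithm(sess, data_np, num_classes, initial_means):
  """Sketch of the GmmAlgorithm training loop used in testParams above."""
  data = constant_op.constant(data_np, dtype=dtypes.float32)
  gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes, initial_means, 'mc')
  training_ops = gmm_tool.training_ops()
  sess.run(variables.global_variables_initializer())
  sess.run(gmm_tool.init_ops())
  for _ in xrange(10):
    sess.run(training_ops)
  return sess.run([gmm_tool.alphas(), gmm_tool.clusters(),
                   gmm_tool.covariances()])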
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/factorization/python/ops/gmm_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example mnist model with jointly computed k-means clustering.
This is a toy example of how clustering can be embedded into larger tensorflow
graphs. In this case, we learn a clustering on-the-fly and transform the input
into the 'distance to clusters' space. These are then fed into hidden layers to
learn the supervised objective.
To train this model on real mnist data, run this model as follows:
mnist --fake_data=False --max_steps=2000
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import math
import sys
import tempfile
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.examples.tutorials.mnist import mnist
FLAGS = None
# The MNIST dataset has 10 classes, representing the digits 0 through 9.
NUM_CLASSES = 10
# The MNIST images are always 28x28 pixels.
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
def placeholder_inputs():
"""Generate placeholder variables to represent the input tensors.
Returns:
images_placeholder: Images placeholder.
labels_placeholder: Labels placeholder.
"""
images_placeholder = tf.placeholder(tf.float32, shape=(None,
mnist.IMAGE_PIXELS))
labels_placeholder = tf.placeholder(tf.int32, shape=(None))
return images_placeholder, labels_placeholder
def fill_feed_dict(data_set, images_pl, labels_pl, batch_size):
"""Fills the feed_dict for training the given step.
Args:
data_set: The set of images and labels, from input_data.read_data_sets()
images_pl: The images placeholder, from placeholder_inputs().
labels_pl: The labels placeholder, from placeholder_inputs().
batch_size: Batch size of data to feed.
Returns:
feed_dict: The feed dictionary mapping from placeholders to values.
"""
# Create the feed_dict for the placeholders filled with the next
  # `batch_size` examples.
images_feed, labels_feed = data_set.next_batch(batch_size, FLAGS.fake_data)
feed_dict = {
images_pl: images_feed,
labels_pl: labels_feed,
}
return feed_dict
def do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_set):
"""Runs one evaluation against the full epoch of data.
Args:
sess: The session in which the model has been trained.
eval_correct: The Tensor that returns the number of correct predictions.
images_placeholder: The images placeholder.
labels_placeholder: The labels placeholder.
data_set: The set of images and labels to evaluate, from
input_data.read_data_sets().
Returns:
Precision value on the dataset.
"""
# And run one epoch of eval.
true_count = 0 # Counts the number of correct predictions.
batch_size = min(FLAGS.batch_size, data_set.num_examples)
steps_per_epoch = data_set.num_examples // batch_size
num_examples = steps_per_epoch * batch_size
for _ in xrange(steps_per_epoch):
feed_dict = fill_feed_dict(data_set,
images_placeholder,
labels_placeholder,
batch_size)
true_count += sess.run(eval_correct, feed_dict=feed_dict)
precision = true_count / num_examples
print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' %
(num_examples, true_count, precision))
return precision
def inference(inp, num_clusters, hidden1_units, hidden2_units):
"""Build the MNIST model up to where it may be used for inference.
Args:
inp: input data
num_clusters: number of clusters of input features to train.
hidden1_units: Size of the first hidden layer.
hidden2_units: Size of the second hidden layer.
Returns:
logits: Output tensor with the computed logits.
clustering_loss: Clustering loss.
kmeans_training_op: An op to train the clustering.
"""
# Clustering
kmeans = tf.contrib.factorization.KMeans(
inp,
num_clusters,
distance_metric=tf.contrib.factorization.COSINE_DISTANCE,
# TODO(agarwal): kmeans++ is currently causing crash in dbg mode.
# Enable this after fixing.
# initial_clusters=tf.contrib.factorization.KMEANS_PLUS_PLUS_INIT,
use_mini_batch=True)
(all_scores, _, clustering_scores, _, kmeans_init,
kmeans_training_op) = kmeans.training_graph()
# Some heuristics to approximately whiten this output.
all_scores = (all_scores[0] - 0.5) * 5
# Here we avoid passing the gradients from the supervised objective back to
# the clusters by creating a stop_gradient node.
all_scores = tf.stop_gradient(all_scores)
clustering_loss = tf.reduce_sum(clustering_scores[0])
# Hidden 1
with tf.name_scope('hidden1'):
weights = tf.Variable(
tf.truncated_normal([num_clusters, hidden1_units],
stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
name='weights')
biases = tf.Variable(tf.zeros([hidden1_units]),
name='biases')
hidden1 = tf.nn.relu(tf.matmul(all_scores, weights) + biases)
# Hidden 2
with tf.name_scope('hidden2'):
weights = tf.Variable(
tf.truncated_normal([hidden1_units, hidden2_units],
stddev=1.0 / math.sqrt(float(hidden1_units))),
name='weights')
biases = tf.Variable(tf.zeros([hidden2_units]),
name='biases')
hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
# Linear
with tf.name_scope('softmax_linear'):
weights = tf.Variable(
tf.truncated_normal([hidden2_units, NUM_CLASSES],
stddev=1.0 / math.sqrt(float(hidden2_units))),
name='weights')
biases = tf.Variable(tf.zeros([NUM_CLASSES]),
name='biases')
logits = tf.matmul(hidden2, weights) + biases
return logits, clustering_loss, kmeans_init, kmeans_training_op
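# A small shape sketch (illustrative only, not used by run_training). With a
# batch of B flattened images, inference() maps a [B, IMAGE_PIXELS] input to
# [B, num_clusters] whitened cosine scores, then through the two hidden layers
# to [B, NUM_CLASSES] logits; the sizes below are just the parser defaults.
def _example_inference_shapes(batch_size=100):
  images = tf.zeros([batch_size, IMAGE_PIXELS])
  logits, clustering_loss, kmeans_init, kmeans_training_op = inference(
      images, num_clusters=384, hidden1_units=256, hidden2_units=32)
  # logits: [batch_size, NUM_CLASSES]; clustering_loss: a scalar.
  return logits, clustering_loss, kmeans_init, kmeans_training_op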
def run_training():
"""Train MNIST for a number of steps."""
# Get the sets of images and labels for training, validation, and
# test on MNIST.
train_dir = tempfile.mkdtemp()
data_sets = input_data.read_data_sets(train_dir, FLAGS.fake_data)
# Tell TensorFlow that the model will be built into the default Graph.
with tf.Graph().as_default():
# Generate placeholders for the images and labels.
images_placeholder, labels_placeholder = placeholder_inputs()
# Build a Graph that computes predictions from the inference model.
logits, clustering_loss, kmeans_init, kmeans_training_op = inference(
images_placeholder,
FLAGS.num_clusters,
FLAGS.hidden1,
FLAGS.hidden2)
# Add to the Graph the Ops for loss calculation.
loss = mnist.loss(logits, labels_placeholder)
# Add to the Graph the Ops that calculate and apply gradients.
train_op = tf.group(mnist.training(loss, FLAGS.learning_rate),
kmeans_training_op)
# Add the Op to compare the logits to the labels during evaluation.
eval_correct = mnist.evaluation(logits, labels_placeholder)
# Add the variable initializer Op.
init = tf.global_variables_initializer()
# Create a session for running Ops on the Graph.
sess = tf.Session()
# Run the Op to initialize the variables.
sess.run(init)
feed_dict = fill_feed_dict(data_sets.train,
images_placeholder,
labels_placeholder,
batch_size=max(FLAGS.batch_size, 5000))
# Run the Op to initialize the clusters.
sess.run(kmeans_init, feed_dict=feed_dict)
# Start the training loop.
max_test_prec = 0
for step in xrange(FLAGS.max_steps):
start_time = time.time()
# Fill a feed dictionary with the actual set of images and labels
# for this particular training step.
feed_dict = fill_feed_dict(data_sets.train,
images_placeholder,
labels_placeholder,
FLAGS.batch_size)
# Run one step of the model.
_, loss_value, clustering_loss_value = sess.run([train_op,
loss,
clustering_loss],
feed_dict=feed_dict)
duration = time.time() - start_time
if step % 100 == 0:
# Print status to stdout.
print('Step %d: loss = %.2f, clustering_loss = %.2f (%.3f sec)' % (
step, loss_value, clustering_loss_value, duration))
# Save a checkpoint and evaluate the model periodically.
if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
# Evaluate against the training set.
print('Training Data Eval:')
do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_sets.train)
# Evaluate against the validation set.
print('Validation Data Eval:')
do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_sets.validation)
# Evaluate against the test set.
print('Test Data Eval:')
test_prec = do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_sets.test)
max_test_prec = max(max_test_prec, test_prec)
return max_test_prec
class MnistTest(tf.test.TestCase):
def test_train(self):
self.assertTrue(run_training() > 0.6)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Basic model parameters as external flags.'
)
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--learning_rate',
type=float,
default=0.3,
help='Initial learning rate.'
)
parser.add_argument(
'--max_steps',
type=int,
default=200,
help='Number of steps to run trainer.'
)
parser.add_argument(
'--num_clusters',
type=int,
default=384,
help='Number of input feature clusters'
)
parser.add_argument(
'--hidden1',
type=int,
default=256,
help='Number of units in hidden layer 1.'
)
parser.add_argument(
'--hidden2',
type=int,
default=32,
help='Number of units in hidden layer 2.'
)
parser.add_argument(
'--batch_size',
type=int,
default=100,
help='Batch size. Must divide evenly into the dataset sizes.'
)
parser.add_argument(
'--train_dir',
type=str,
default='data',
help='Directory to put the training data.'
)
parser.add_argument(
'--fake_data',
type='bool',
default=True,
help='Use fake input data.'
)
FLAGS, unparsed = parser.parse_known_args()
sys.argv = [sys.argv[0]] + unparsed
tf.test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/factorization/examples/mnist.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training and input utilities.
See
[Contrib Training](https://tensorflow.org/api_guides/python/contrib.training)
guide.
@@batch_sequences_with_states
@@NextQueuedSequenceBatch
@@SequenceQueueingStateSaver
@@rejection_sample
@@resample_at_rate
@@stratified_sample
@@weighted_resample
@@bucket
@@bucket_by_sequence_length
@@RandomStrategy
@@GreedyLoadBalancingStrategy
@@byte_size_load_fn
@@FailureTolerator
@@HParams
@@HParamDef
@@parse_values
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.training.python.training.bucket_ops import *
from tensorflow.contrib.training.python.training.device_setter import *
from tensorflow.contrib.training.python.training.evaluation import checkpoints_iterator
from tensorflow.contrib.training.python.training.evaluation import evaluate_once
from tensorflow.contrib.training.python.training.evaluation import evaluate_repeatedly
from tensorflow.contrib.training.python.training.evaluation import get_or_create_eval_step
from tensorflow.contrib.training.python.training.evaluation import StopAfterNEvalsHook
from tensorflow.contrib.training.python.training.evaluation import SummaryAtEndHook
from tensorflow.contrib.training.python.training.evaluation import wait_for_new_checkpoint
from tensorflow.contrib.training.python.training.feeding_queue_runner import FeedingQueueRunner
from tensorflow.contrib.training.python.training.hparam import *
from tensorflow.contrib.training.python.training.resample import *
from tensorflow.contrib.training.python.training.sampling_ops import *
from tensorflow.contrib.training.python.training.sequence_queueing_state_saver import *
from tensorflow.contrib.training.python.training.training import add_gradients_summaries
from tensorflow.contrib.training.python.training.training import clip_gradient_norms
from tensorflow.contrib.training.python.training.training import clip_gradient_norms_fn
from tensorflow.contrib.training.python.training.training import create_train_op
from tensorflow.contrib.training.python.training.training import multiply_gradients
from tensorflow.contrib.training.python.training.training import train
from tensorflow.contrib.training.python.training.tuner import Tuner
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
# Allow explicitly imported symbols. Symbols imported with * must also be
# whitelisted here or in the module docstring above.
_allowed_symbols = [
'checkpoints_iterator', 'evaluate_once', 'evaluate_repeatedly',
'FeedingQueueRunner', 'get_or_create_eval_step', 'StopAfterNEvalsHook',
'SummaryAtEndHook', 'wait_for_new_checkpoint', 'add_gradients_summaries',
'clip_gradient_norms', 'clip_gradient_norms_fn', 'create_train_op',
'multiply_gradients', 'train']
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/training/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/training/python/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.training.device_setter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.training.python.training import device_setter as device_setter_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import device_setter
from tensorflow.python.training import server_lib
_CLUSTER_SPEC = server_lib.ClusterSpec({
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
})
MockOperation = collections.namedtuple("MockOperation", "name")
class RandomStrategyTest(test.TestCase):
def testBasic(self):
ps_strategy = device_setter_lib.RandomStrategy(2, seed=0)
with ops.device(
device_setter.replica_device_setter(
cluster=_CLUSTER_SPEC,
ps_strategy=ps_strategy)):
u = variables.Variable(array_ops.zeros([2, 2]))
v = variables.Variable(array_ops.zeros([2, 1]))
w = variables.Variable(array_ops.zeros([2, 2]))
x = variables.Variable(array_ops.zeros([1, 3]))
a = v + w
# Randomly distributed with seed 0.
self.assertDeviceEqual("/job:ps/task:1", u.device)
self.assertDeviceEqual("/job:ps/task:1", u.initializer.device)
self.assertDeviceEqual("/job:ps/task:0", v.device)
self.assertDeviceEqual("/job:ps/task:0", v.initializer.device)
self.assertDeviceEqual("/job:ps/task:1", w.device)
self.assertDeviceEqual("/job:ps/task:1", w.initializer.device)
self.assertDeviceEqual("/job:ps/task:1", x.device)
self.assertDeviceEqual("/job:ps/task:1", x.initializer.device)
self.assertDeviceEqual("/job:worker", a.device)
def testHandlesUnicode(self):
op = MockOperation(u"A unicode \u018e string \xf1")
ps_strategy = device_setter_lib.RandomStrategy(2, seed=0)
ps_task = ps_strategy(op)
self.assertEqual(ps_task, 1)
class GreedyLoadBalancingStrategyTest(test.TestCase):
def testUniformLoadEqualsRoundRobin(self):
def _load_fn(unused_op):
return 1
with ops.device(
device_setter.replica_device_setter(
cluster=_CLUSTER_SPEC,
ps_strategy=device_setter_lib.GreedyLoadBalancingStrategy(
2, _load_fn))):
u = variables.Variable(array_ops.zeros([2, 2]))
v = variables.Variable(array_ops.zeros([2, 1]))
w = variables.Variable(array_ops.zeros([2, 2]))
x = variables.Variable(array_ops.zeros([1, 3]))
a = v + w
self.assertDeviceEqual("/job:ps/task:0", u.device)
self.assertDeviceEqual("/job:ps/task:0", u.initializer.device)
self.assertDeviceEqual("/job:ps/task:1", v.device)
self.assertDeviceEqual("/job:ps/task:1", v.initializer.device)
self.assertDeviceEqual("/job:ps/task:0", w.device)
self.assertDeviceEqual("/job:ps/task:0", w.initializer.device)
self.assertDeviceEqual("/job:ps/task:1", x.device)
self.assertDeviceEqual("/job:ps/task:1", x.initializer.device)
self.assertDeviceEqual("/job:worker", a.device)
def testByteSizeLoadFn(self):
with ops.device(
device_setter.replica_device_setter(
cluster=_CLUSTER_SPEC,
ps_strategy=device_setter_lib.GreedyLoadBalancingStrategy(
2, device_setter_lib.byte_size_load_fn))):
u = variables.VariableV1(array_ops.zeros([2, 2]))
v = variables.VariableV1(array_ops.zeros([2, 1]))
w = variables.VariableV1(array_ops.zeros([2, 2]))
x = variables.VariableV1(array_ops.zeros([1, 3]))
a = v + w
self.assertDeviceEqual("/job:ps/task:0", u.device)
self.assertDeviceEqual("/job:ps/task:0", u.initializer.device)
self.assertDeviceEqual("/job:ps/task:1", v.device)
self.assertDeviceEqual("/job:ps/task:1", v.initializer.device)
self.assertDeviceEqual("/job:ps/task:1", w.device)
self.assertDeviceEqual("/job:ps/task:1", w.initializer.device)
self.assertDeviceEqual("/job:ps/task:0", x.device)
self.assertDeviceEqual("/job:ps/task:0", x.initializer.device)
self.assertDeviceEqual("/job:worker", a.device)
def testByteSizeLoadFnWithScalar(self):
with ops.device(
device_setter.replica_device_setter(
cluster=_CLUSTER_SPEC,
ps_strategy=device_setter_lib.GreedyLoadBalancingStrategy(
2, device_setter_lib.byte_size_load_fn))):
# Note: we must test the load function as part of the device function
# instead of passing u.op to the function directly, because the only
# time that the output Tensor has unknown shape for scalars is during
# Variable construction.
u = variables.Variable(0)
self.assertDeviceEqual("/job:ps/task:0", u.device)
self.assertDeviceEqual("/job:ps/task:0", u.initializer.device)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/training/python/training/device_setter_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.training.python.training import sampling_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
class StratifiedSampleTest(test.TestCase):
def testGraphBuildAssertionFailures(self):
val = [array_ops.zeros([1, 3]), array_ops.ones([1, 5])]
label = constant_op.constant([1], shape=[1]) # must have batch dimension
probs = [.2] * 5
init_probs = [.1, .3, .1, .3, .2]
batch_size = 16
# Label must have only batch dimension if enqueue_many is True.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
val,
array_ops.zeros([]),
probs,
batch_size,
init_probs,
enqueue_many=True)
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
val,
array_ops.zeros([1, 1]),
probs,
batch_size,
init_probs,
enqueue_many=True)
# Label must not be one-hot.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(val,
constant_op.constant([0, 1, 0, 0, 0]),
probs, batch_size, init_probs)
# Data must be list, not singleton tensor.
with self.assertRaises(TypeError):
sampling_ops.stratified_sample(
array_ops.zeros([1, 3]), label, probs, batch_size, init_probs)
# Data must have batch dimension if enqueue_many is True.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
val,
constant_op.constant(1),
probs,
batch_size,
init_probs,
enqueue_many=True)
# Batch dimensions on data and labels should be equal.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
[array_ops.zeros([2, 1])],
label,
probs,
batch_size,
init_probs,
enqueue_many=True)
# Probabilities must be numpy array, python list, or tensor.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(val, label, 1, batch_size, init_probs)
# Probabilities shape must be fully defined.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
val,
label,
array_ops.placeholder(
dtypes.float32, shape=[None]),
batch_size,
init_probs)
# In the rejection sampling case, make sure that probability lengths are
# the same.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
val, label, [.1] * 10, batch_size, init_probs=[.2] * 5)
# In the rejection sampling case, make sure that zero initial probability
# classes also have zero target probability.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
val, label, [.2, .4, .4], batch_size, init_probs=[0, .5, .5])
def testRuntimeAssertionFailures(self):
valid_probs = [.2] * 5
valid_labels = [1, 2, 3]
vals = [array_ops.zeros([3, 1])]
illegal_labels = [
[0, -1, 1], # classes must be nonnegative
[5, 1, 1], # classes must be less than number of classes
[2, 3], # data and label batch size must be the same
]
illegal_probs = [
[.1] * 5, # probabilities must sum to one
[-.5, .5, .5, .4, .1], # probabilities must be non-negative
]
# Set up graph with illegal label vector.
label_ph = array_ops.placeholder(dtypes.int32, shape=[None])
probs_ph = array_ops.placeholder(
dtypes.float32, shape=[5]) # shape must be defined
val_tf, lbl_tf, prob_tf = sampling_ops._verify_input( # pylint: disable=protected-access
vals, label_ph, [probs_ph])
for illegal_label in illegal_labels:
# Run session that should fail.
with self.cached_session() as sess:
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run([val_tf, lbl_tf],
feed_dict={label_ph: illegal_label,
probs_ph: valid_probs})
for illegal_prob in illegal_probs:
# Run session that should fail.
with self.cached_session() as sess:
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run([prob_tf],
feed_dict={label_ph: valid_labels,
probs_ph: illegal_prob})
def testCanBeCalledMultipleTimes(self):
batch_size = 20
val_input_batch = [array_ops.zeros([2, 3, 4])]
lbl_input_batch = array_ops.ones([], dtype=dtypes.int32)
probs = np.array([0, 1, 0, 0, 0])
batches = sampling_ops.stratified_sample(
val_input_batch, lbl_input_batch, probs, batch_size, init_probs=probs)
batches += sampling_ops.stratified_sample(
val_input_batch, lbl_input_batch, probs, batch_size, init_probs=probs)
summary_op = logging_ops.merge_summary(
ops.get_collection(ops.GraphKeys.SUMMARIES))
with self.cached_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
sess.run(batches + (summary_op,))
coord.request_stop()
coord.join(threads)
def testRejectionBatchingBehavior(self):
batch_size = 20
input_batch_size = 11
val_input_batch = [array_ops.zeros([input_batch_size, 2, 3, 4])]
lbl_input_batch = control_flow_ops.cond(
math_ops.greater(.5, random_ops.random_uniform([])),
lambda: array_ops.ones([input_batch_size], dtype=dtypes.int32) * 1,
lambda: array_ops.ones([input_batch_size], dtype=dtypes.int32) * 3)
probs = np.array([0, .2, 0, .8, 0])
data_batch, labels = sampling_ops.stratified_sample(
val_input_batch,
lbl_input_batch,
probs,
batch_size,
init_probs=[0, .3, 0, .7, 0],
enqueue_many=True)
with self.cached_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
sess.run([data_batch, labels])
coord.request_stop()
coord.join(threads)
def testBatchDimensionNotRequired(self):
classes = 5
# Probs must be a tensor, since we pass it directly to _verify_input.
probs = constant_op.constant([1.0 / classes] * classes)
# Make sure that these vals/labels pairs don't throw any runtime exceptions.
legal_input_pairs = [
(np.zeros([2, 3]), [x % classes for x in range(2)]), # batch dim 2
(np.zeros([4, 15]), [x % classes for x in range(4)]), # batch dim 4
(np.zeros([10, 1]), [x % classes for x in range(10)]), # batch dim 10
]
# Set up graph with placeholders.
vals_ph = array_ops.placeholder(
dtypes.float32) # completely undefined shape
labels_ph = array_ops.placeholder(
dtypes.int32) # completely undefined shape
val_tf, labels_tf, _ = sampling_ops._verify_input( # pylint: disable=protected-access
[vals_ph], labels_ph, [probs])
# Run graph to make sure there are no shape-related runtime errors.
for vals, labels in legal_input_pairs:
with self.cached_session() as sess:
sess.run([val_tf, labels_tf],
feed_dict={vals_ph: vals,
labels_ph: labels})
def testRejectionDataListInput(self):
batch_size = 20
val_input_batch = [
        array_ops.zeros([2, 3, 4]),
        array_ops.ones([2, 4]),
        array_ops.ones(2) * 3,
]
lbl_input_batch = array_ops.ones([], dtype=dtypes.int32)
probs = np.array([0, 1, 0, 0, 0])
val_list, lbls = sampling_ops.stratified_sample(
val_input_batch,
lbl_input_batch,
probs,
batch_size,
init_probs=[0, 1, 0, 0, 0])
# Check output shapes.
self.assertTrue(isinstance(val_list, list))
self.assertEqual(len(val_list), len(val_input_batch))
self.assertTrue(isinstance(lbls, ops.Tensor))
with self.cached_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
out = sess.run(val_list + [lbls])
coord.request_stop()
coord.join(threads)
# Check output shapes.
self.assertEqual(len(out), len(val_input_batch) + 1)
def normalBehaviorHelper(self, sampler):
# Set up graph.
random_seed.set_random_seed(1234)
lbl1 = 0
lbl2 = 3
# This cond allows the necessary class queues to be populated.
label = control_flow_ops.cond(
math_ops.greater(.5, random_ops.random_uniform([])),
lambda: constant_op.constant(lbl1), lambda: constant_op.constant(lbl2))
val = [np.array([1, 4]) * label]
probs = np.array([.8, 0, 0, .2, 0])
batch_size = 16
data_batch, labels = sampler(val, label, probs, batch_size)
# Run session and keep track of how frequently the labels and values appear.
data_l = []
label_l = []
with self.cached_session() as sess:
# Need to initialize variables that keep running total of classes seen.
variables.global_variables_initializer().run()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
for _ in range(20):
[data], lbls = sess.run([data_batch, labels])
data_l.append(data)
label_l.append(lbls)
coord.request_stop()
coord.join(threads)
# First check that the data matches the labels.
for lbl, data in zip(label_l, data_l):
for i in range(batch_size):
self.assertListEqual(list(np.array([1, 4]) * lbl[i]), list(data[i, :]))
# Check that the labels are approximately correct.
expected_label = probs[0] * lbl1 + probs[3] * lbl2
lbl_list = range(len(probs))
lbl_std_dev = np.sqrt(np.sum((np.square(lbl_list - expected_label))))
lbl_std_dev_of_mean = lbl_std_dev / np.sqrt(len(label_l)) # CLT
actual_lbl = np.mean(label_l)
# Tolerance is 3 standard deviations of the mean. According to the central
# limit theorem, this should cover 99.7% of cases. Note that since the seed
# is fixed, for a given implementation, this test will pass or fail 100% of
# the time. This use of assertNear is to cover cases where someone changes
# an implementation detail, which would cause the random behavior to differ.
self.assertNear(actual_lbl, expected_label, 3 * lbl_std_dev_of_mean)
def testRejectionNormalBehavior(self):
initial_p = [.7, 0, 0, .3, 0]
def curried_sampler(val, lbls, probs, batch, enqueue_many=False):
return sampling_ops.stratified_sample(
val,
lbls,
probs,
batch,
init_probs=initial_p,
enqueue_many=enqueue_many)
self.normalBehaviorHelper(curried_sampler)
def testRejectionNormalBehaviorWithOnlineInitPEstimate(self):
def curried_sampler(val, lbls, probs, batch, enqueue_many=False):
return sampling_ops.stratified_sample(
val, lbls, probs, batch, init_probs=None, enqueue_many=enqueue_many)
self.normalBehaviorHelper(curried_sampler)
class RejectionSampleTest(test.TestCase):
def testGraphConstructionFailures(self):
accept_prob_fn = lambda _: constant_op.constant(1.0)
batch_size = 32
# Data must have batch dimension if `enqueue_many` is `True`.
with self.assertRaises(ValueError):
sampling_ops.rejection_sample(
[array_ops.zeros([])], accept_prob_fn, batch_size, enqueue_many=True)
# Batch dimensions should be equal if `enqueue_many` is `True`.
with self.assertRaises(ValueError):
sampling_ops.rejection_sample(
[array_ops.zeros([5, 1]), array_ops.zeros([4, 1])],
accept_prob_fn,
batch_size,
enqueue_many=True)
def testRuntimeFailures(self):
prob_ph = array_ops.placeholder(dtypes.float32, [])
accept_prob_fn = lambda _: prob_ph
batch_size = 32
# Set up graph.
random_seed.set_random_seed(1234)
sampling_ops.rejection_sample(
[array_ops.zeros([])],
accept_prob_fn,
batch_size,
runtime_checks=True,
name='rejection_sample')
prob_tensor = ops.get_default_graph().get_tensor_by_name(
'rejection_sample/prob_with_checks:0')
# Run session that should fail.
with self.cached_session() as sess:
for illegal_prob in [-0.1, 1.1]:
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(prob_tensor, feed_dict={prob_ph: illegal_prob})
def testNormalBehavior(self):
tensor_list = [
control_flow_ops.cond(
math_ops.greater(.5, random_ops.random_uniform([])),
lambda: constant_op.constant(1.0),
lambda: constant_op.constant(2.0))
]
accept_prob_fn = lambda x: x[0] - 1.0
batch_size = 10
# Set up graph.
sample = sampling_ops.rejection_sample(tensor_list, accept_prob_fn,
batch_size)
with self.cached_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
for _ in range(5):
sample_np = sess.run(sample)[0]
self.assertListEqual([2.0] * batch_size, list(sample_np))
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/training/python/training/sampling_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resampling methods for batches of tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import moving_averages
def _repeat_range(counts, name=None):
"""Repeat integers given by range(len(counts)) each the given number of times.
Example behavior:
[0, 1, 2, 3] -> [1, 2, 2, 3, 3, 3]
Args:
counts: 1D tensor with dtype=int32.
name: optional name for operation.
Returns:
1D tensor with dtype=int32 and dynamic length giving the repeated integers.
"""
with ops.name_scope(name, 'repeat_range', [counts]) as scope:
counts = ops.convert_to_tensor(counts, name='counts')
def cond(unused_output, i):
return i < size
def body(output, i):
value = array_ops.fill(counts[i:i+1], i)
return (output.write(i, value), i + 1)
size = array_ops.shape(counts)[0]
init_output_array = tensor_array_ops.TensorArray(
dtype=dtypes.int32, size=size, infer_shape=False)
output_array, num_writes = control_flow_ops.while_loop(
cond, body, loop_vars=[init_output_array, 0])
return control_flow_ops.cond(
num_writes > 0,
output_array.concat,
lambda: array_ops.zeros(shape=[0], dtype=dtypes.int32),
name=scope)
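# Illustrative trace of the loop above (assumed values, not executed here):
# with counts = [1, 0, 2] the body writes [0], [] and [2, 2] into the
# TensorArray, so concatenation yields [0, 2, 2]. The final `cond` only
# guards the empty-`counts` case, where concatenating an empty TensorArray
# would be invalid.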
def resample_at_rate(inputs, rates, scope=None, seed=None, back_prop=False):
"""Given `inputs` tensors, stochastically resamples each at a given rate.
For example, if the inputs are `[[a1, a2], [b1, b2]]` and the rates
tensor contains `[3, 1]`, then the return value may look like `[[a1,
a2, a1, a1], [b1, b2, b1, b1]]`. However, many other outputs are
possible, since this is stochastic -- averaged over many repeated
calls, each set of inputs should appear in the output `rate` times
the number of invocations.
Args:
inputs: A list of tensors, each of which has a shape of `[batch_size, ...]`
rates: A tensor of shape `[batch_size]` containing the resampling rates
for each input.
scope: Scope for the op.
seed: Random seed to use.
back_prop: Whether to allow back-propagation through this op.
Returns:
Selections from the input tensors.
"""
with ops.name_scope(scope, default_name='resample_at_rate',
values=list(inputs) + [rates]):
rates = ops.convert_to_tensor(rates, name='rates')
sample_counts = math_ops.cast(
random_ops.random_poisson(rates, (), rates.dtype, seed=seed),
dtypes.int32)
sample_indices = _repeat_range(sample_counts)
if not back_prop:
sample_indices = array_ops.stop_gradient(sample_indices)
return [array_ops.gather(x, sample_indices) for x in inputs]
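# The following is a minimal usage sketch, not part of the library API: the
# `_example_resample_at_rate` helper and its tensors are hypothetical and only
# illustrate how `resample_at_rate` is typically called.
def _example_resample_at_rate():
  """Hypothetical example: resample a 2-row batch at rates 3 and 1."""
  features = array_ops.constant([[1.0, 2.0], [3.0, 4.0]])  # [batch_size, 2]
  rates = array_ops.constant([3.0, 1.0])  # expected repeats per row
  # Row 0 is repeated Poisson(3) times and row 1 Poisson(1) times, so the
  # first dimension of the result is random.
  return resample_at_rate([features], rates, seed=42)[0]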
def weighted_resample(inputs, weights, overall_rate, scope=None,
mean_decay=0.999, seed=None):
"""Performs an approximate weighted resampling of `inputs`.
This method chooses elements from `inputs` where each item's rate of
selection is proportional to its value in `weights`, and the average
rate of selection across all inputs (and many invocations!) is
`overall_rate`.
Args:
inputs: A list of tensors whose first dimension is `batch_size`.
weights: A `[batch_size]`-shaped tensor with each batch member's weight.
overall_rate: Desired overall rate of resampling.
scope: Scope to use for the op.
mean_decay: How quickly to decay the running estimate of the mean weight.
seed: Random seed.
Returns:
A list of tensors exactly like `inputs`, but with an unknown (and
possibly zero) first dimension.
A tensor containing the effective resampling rate used for each output.
"""
# Algorithm: Just compute rates as weights/mean_weight *
# overall_rate. This way the average weight corresponds to the
# overall rate, and a weight twice the average has twice the rate,
# etc.
with ops.name_scope(scope, 'weighted_resample', inputs) as opscope:
# First: Maintain a running estimated mean weight, with zero debiasing
# enabled (by default) to avoid throwing the average off.
with variable_scope.variable_scope(scope, 'estimate_mean', inputs):
estimated_mean = variable_scope.get_local_variable(
'estimated_mean',
initializer=math_ops.cast(0, weights.dtype),
dtype=weights.dtype)
batch_mean = math_ops.reduce_mean(weights)
mean = moving_averages.assign_moving_average(
estimated_mean, batch_mean, mean_decay)
# Then, normalize the weights into rates using the mean weight and
# overall target rate:
rates = weights * overall_rate / mean
results = resample_at_rate([rates] + inputs, rates,
scope=opscope, seed=seed, back_prop=False)
return (results[1:], results[0])
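# A minimal usage sketch, not part of the library API: the helper below and
# its tensors are hypothetical and only illustrate a typical call. Note that
# `weighted_resample` keeps its running mean weight in a local variable, so
# local variables must be initialized before running the returned ops.
def _example_weighted_resample():
  """Hypothetical example: keep the middle row at roughly twice the rate."""
  weights = array_ops.constant([1.0, 2.0, 1.0])  # per-row weights
  inputs = [array_ops.constant([[10.0], [20.0], [30.0]])]  # [batch_size, 1]
  resampled, rates = weighted_resample(inputs, weights, overall_rate=0.5)
  return resampled, rates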
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/training/python/training/resample.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for sgdr learning rate decay."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from sgdr_learning_rate_decay import sgdr_decay
from tensorflow.python.platform import googletest
from tensorflow.python.framework import test_util
from tensorflow.python.framework import dtypes
from tensorflow import placeholder
class SGDRDecayTest(test_util.TensorFlowTestCase):
"""Unit tests for SGDR learning rate decay."""
def get_original_values(self, lr, t_e, mult_factor, iter_per_epoch, epochs):
"""Get an array with learning rate values from the consecutive steps using
the original implementation
(https://github.com/loshchil/SGDR/blob/master/SGDR_WRNs.py)."""
t0 = math.pi / 2.0
tt = 0
te_next = t_e
lr_values = []
sh_lr = lr
for epoch in range(epochs):
for _ in range(iter_per_epoch):
# In the original approach, the training function would be executed here.
lr_values.append(sh_lr)
dt = 2.0 * math.pi / float(2.0 * t_e)
tt = tt + float(dt) / iter_per_epoch
if tt >= math.pi:
tt = tt - math.pi
cur_t = t0 + tt
new_lr = lr * (1.0 + math.sin(cur_t)) / 2.0 # lr_min = 0, lr_max = lr
sh_lr = new_lr
if (epoch + 1) == te_next: # time to restart
sh_lr = lr
tt = 0 # by setting to 0 we set lr to lr_max, see above
t_e = t_e * mult_factor # change the period of restarts
te_next = te_next + t_e # note the next restart's epoch
return lr_values
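# Sanity check of the restart schedule above (a sketch, not part of the test):
# with t_e = 2 and mult_factor = 3, as used in testCompareToOriginal below,
# the learning rate is reset to `lr` at the end of epochs 2, 8 and 26, since
# each restart multiplies the period by 3 (2 + 6 = 8, 8 + 18 = 26). Within a
# period, sin(pi/2 + tt) = cos(tt), so the schedule is a cosine decay from
# `lr` toward 0.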
def get_sgdr_values(self, lr, initial_period_steps, t_mul, iters):
"""Get an array with learning rate values from the consecutive steps
using current tensorflow implementation."""
with self.cached_session():
step = placeholder(dtypes.int32)
decay = sgdr_decay(lr, step, initial_period_steps, t_mul)
lr_values = []
for i in range(iters):
lr_values.append(decay.eval(feed_dict={step: i}))
return lr_values
def testCompareToOriginal(self):
"""Compare values generated by tensorflow implementation to the values
generated by the original implementation
(https://github.com/loshchil/SGDR/blob/master/SGDR_WRNs.py)."""
with self.cached_session():
lr = 10.0
init_steps = 2
t_mul = 3
iters = 10
epochs = 50
org_lr = self.get_original_values(lr, init_steps, t_mul, iters, epochs)
sgdr_lr = self.get_sgdr_values(lr, init_steps*iters, t_mul, iters*epochs)
for org, sgdr in zip(org_lr, sgdr_lr):
self.assertAllClose(org, sgdr)
def testMDecay(self):
"""Test m_mul argument. Check values for learning rate at the beginning
of the first, second, third and fourth period. """
with self.cached_session():
step = placeholder(dtypes.int32)
lr = 0.1
t_e = 10
t_mul = 3
m_mul = 0.9
decay = sgdr_decay(lr, step, t_e, t_mul, m_mul)
test_step = 0
self.assertAllClose(decay.eval(feed_dict={step: test_step}),
lr)
test_step = t_e
self.assertAllClose(decay.eval(feed_dict={step: test_step}),
lr * m_mul)
test_step = t_e + t_e*t_mul
self.assertAllClose(decay.eval(feed_dict={step: test_step}),
lr * m_mul**2)
test_step = t_e + t_e*t_mul + t_e * (t_mul**2)
self.assertAllClose(decay.eval(feed_dict={step: test_step}),
lr * (m_mul**3))
def testCos(self):
"""Check learning rate values at the beginning, in the middle
and at the end of the period."""
with self.cached_session():
step = placeholder(dtypes.int32)
lr = 0.2
t_e = 1000
t_mul = 1
decay = sgdr_decay(lr, step, t_e, t_mul)
test_step = 0
self.assertAllClose(decay.eval(feed_dict={step: test_step}), lr)
test_step = t_e//2
self.assertAllClose(decay.eval(feed_dict={step: test_step}), lr/2)
test_step = t_e
self.assertAllClose(decay.eval(feed_dict={step: test_step}), lr)
test_step = t_e*3//2
self.assertAllClose(decay.eval(feed_dict={step: test_step}), lr/2)
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/training/python/training/sgdr_learning_rate_decay_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for hparam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.training.python.training import hparam
from tensorflow.python.platform import test
class HParamsTest(test.TestCase):
def testEmpty(self):
hparams = hparam.HParams()
self.assertDictEqual({}, hparams.values())
hparams.parse('')
self.assertDictEqual({}, hparams.values())
with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'):
hparams.parse('xyz=123')
def testContains(self):
hparams = hparam.HParams(foo=1)
self.assertTrue('foo' in hparams)
self.assertFalse('bar' in hparams)
def testSomeValues(self):
hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6', d='/a/b=c/d')
self.assertDictEqual({
'aaa': 1,
'b': 2.0,
'c_c': 'relu6',
'd': '/a/b=c/d'
}, hparams.values())
expected_str = ('HParams([(\'aaa\', 1), (\'b\', 2.0), (\'c_c\', \'relu6\'),'
' (\'d\', \'/a/b=c/d\')])')
self.assertEqual(expected_str, repr(hparams))
self.assertEqual(expected_str, repr(hparams))
self.assertEqual(1, hparams.aaa)
self.assertEqual(2.0, hparams.b)
self.assertEqual('relu6', hparams.c_c)
self.assertEqual('/a/b=c/d', hparams.d)
hparams.parse('aaa=12')
self.assertDictEqual({
'aaa': 12,
'b': 2.0,
'c_c': 'relu6',
'd': '/a/b=c/d'
}, hparams.values())
self.assertEqual(12, hparams.aaa)
self.assertEqual(2.0, hparams.b)
self.assertEqual('relu6', hparams.c_c)
self.assertEqual('/a/b=c/d', hparams.d)
hparams.parse('c_c=relu4, b=-2.0e10')
self.assertDictEqual({
'aaa': 12,
'b': -2.0e10,
'c_c': 'relu4',
'd': '/a/b=c/d'
}, hparams.values())
self.assertEqual(12, hparams.aaa)
self.assertEqual(-2.0e10, hparams.b)
self.assertEqual('relu4', hparams.c_c)
self.assertEqual('/a/b=c/d', hparams.d)
hparams.parse('c_c=,b=0,')
self.assertDictEqual({
'aaa': 12,
'b': 0,
'c_c': '',
'd': '/a/b=c/d'
}, hparams.values())
self.assertEqual(12, hparams.aaa)
self.assertEqual(0.0, hparams.b)
self.assertEqual('', hparams.c_c)
self.assertEqual('/a/b=c/d', hparams.d)
hparams.parse('c_c=2.3",b=+2,')
self.assertEqual(2.0, hparams.b)
self.assertEqual('2.3"', hparams.c_c)
hparams.parse('d=/a/b/c/d,aaa=11,')
self.assertEqual(11, hparams.aaa)
self.assertEqual(2.0, hparams.b)
self.assertEqual('2.3"', hparams.c_c)
self.assertEqual('/a/b/c/d', hparams.d)
hparams.parse('b=1.5,d=/a=b/c/d,aaa=10,')
self.assertEqual(10, hparams.aaa)
self.assertEqual(1.5, hparams.b)
self.assertEqual('2.3"', hparams.c_c)
self.assertEqual('/a=b/c/d', hparams.d)
with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'):
hparams.parse('x=123')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=poipoi')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=1.0')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=12x')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=relu')
with self.assertRaisesRegexp(ValueError, 'Must not pass a list'):
hparams.parse('aaa=[123]')
self.assertEqual(10, hparams.aaa)
self.assertEqual(1.5, hparams.b)
self.assertEqual('2.3"', hparams.c_c)
self.assertEqual('/a=b/c/d', hparams.d)
# Exports to proto.
hparam_def = hparams.to_proto()
# Imports from proto.
hparams2 = hparam.HParams(hparam_def=hparam_def)
# Verifies that all hparams are restored.
self.assertEqual(10, hparams2.aaa)
self.assertEqual(1.5, hparams2.b)
self.assertEqual('2.3"', hparams2.c_c)
self.assertEqual('/a=b/c/d', hparams2.d)
def testWithPeriodInVariableName(self):
hparams = hparam.HParams()
hparams.add_hparam(name='a.b', value=0.0)
hparams.parse('a.b=1.0')
self.assertEqual(1.0, getattr(hparams, 'a.b'))
hparams.add_hparam(name='c.d', value=0.0)
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('c.d=abc')
hparams.add_hparam(name='e.f', value='')
hparams.parse('e.f=abc')
self.assertEqual('abc', getattr(hparams, 'e.f'))
hparams.add_hparam(name='d..', value=0.0)
hparams.parse('d..=10.0')
self.assertEqual(10.0, getattr(hparams, 'd..'))
def testSetFromMap(self):
hparams = hparam.HParams(a=1, b=2.0, c='tanh')
hparams.override_from_dict({'a': -2, 'c': 'identity'})
self.assertDictEqual({'a': -2, 'c': 'identity', 'b': 2.0}, hparams.values())
hparams = hparam.HParams(x=1, b=2.0, d=[0.5])
hparams.override_from_dict({'d': [0.1, 0.2, 0.3]})
self.assertDictEqual({
'd': [0.1, 0.2, 0.3],
'x': 1,
'b': 2.0
}, hparams.values())
def testBoolParsing(self):
for value in 'true', 'false', 'True', 'False', '1', '0':
for initial in False, True:
hparams = hparam.HParams(use_gpu=initial)
hparams.parse('use_gpu=' + value)
self.assertEqual(hparams.use_gpu, value in ['True', 'true', '1'])
# Exports to proto.
hparam_def = hparams.to_proto()
# Imports from proto.
hparams2 = hparam.HParams(hparam_def=hparam_def)
self.assertEqual(hparams.use_gpu, hparams2.use_gpu)
# Check that hparams2.use_gpu is a bool rather than an int.
# The assertEqual() call above won't catch this, since
# (0 == False) and (1 == True) in Python.
self.assertEqual(bool, type(hparams2.use_gpu))
def testBoolParsingFail(self):
hparams = hparam.HParams(use_gpu=True)
with self.assertRaisesRegexp(ValueError, r'Could not parse.*use_gpu'):
hparams.parse('use_gpu=yep')
def testLists(self):
hparams = hparam.HParams(aaa=[1], b=[2.0, 3.0], c_c=['relu6'])
self.assertDictEqual({
'aaa': [1],
'b': [2.0, 3.0],
'c_c': ['relu6']
}, hparams.values())
self.assertEqual([1], hparams.aaa)
self.assertEqual([2.0, 3.0], hparams.b)
self.assertEqual(['relu6'], hparams.c_c)
hparams.parse('aaa=[12]')
self.assertEqual([12], hparams.aaa)
hparams.parse('aaa=[12,34,56]')
self.assertEqual([12, 34, 56], hparams.aaa)
hparams.parse('c_c=[relu4,relu12],b=[1.0]')
self.assertEqual(['relu4', 'relu12'], hparams.c_c)
self.assertEqual([1.0], hparams.b)
hparams.parse('c_c=[],aaa=[-34]')
self.assertEqual([-34], hparams.aaa)
self.assertEqual([], hparams.c_c)
hparams.parse('c_c=[_12,3\'4"],aaa=[+3]')
self.assertEqual([3], hparams.aaa)
self.assertEqual(['_12', '3\'4"'], hparams.c_c)
with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'):
hparams.parse('x=[123]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=[poipoi]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=[1.0]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=[12x]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=[relu]')
with self.assertRaisesRegexp(ValueError, 'Must pass a list'):
hparams.parse('aaa=123')
# Exports to proto.
hparam_def = hparams.to_proto()
# Imports from proto.
hparams2 = hparam.HParams(hparam_def=hparam_def)
# Verifies that all hparams are restored.
self.assertEqual([3], hparams2.aaa)
self.assertEqual([1.0], hparams2.b)
self.assertEqual(['_12', '3\'4"'], hparams2.c_c)
def testStr(self):
hparam1 = hparam.HParams(a=1, b=[2.0, 3.0], c='relu6')
hparam1_str = str(hparam1)
# Create the signature
hparam2 = hparam.HParams()
hparam2.add_hparam('a', 4)
hparam2.add_hparam('b', [5.0, 6.0])
hparam2.add_hparam('c', 'relu10')
# Load from string
hparam2.parse(hparam1_str)
# Verifies all hparams are restored
self.assertEqual(hparam2.a, hparam1.a)
self.assertEqual(hparam2.b, hparam1.b)
self.assertEqual(hparam2.c, hparam1.c)
def testParseValuesWithIndexAssigment1(self):
"""Assignment to an index position."""
parse_dict = hparam.parse_values('arr[1]=10', {'arr': int})
self.assertEqual(len(parse_dict), 1)
self.assertTrue(isinstance(parse_dict['arr'], dict))
self.assertDictEqual(parse_dict['arr'], {1: 10})
def testParseValuesWithIndexAssigment1_IgnoreUnknown(self):
"""Assignment to an index position."""
parse_dict = hparam.parse_values(
'arr[1]=10,b=5', {'arr': int}, ignore_unknown=True)
self.assertEqual(len(parse_dict), 1)
self.assertTrue(isinstance(parse_dict['arr'], dict))
self.assertDictEqual(parse_dict['arr'], {1: 10})
def testParseValuesWithIndexAssigment2(self):
"""Assignment to multiple index positions."""
parse_dict = hparam.parse_values('arr[0]=10,arr[5]=20', {'arr': int})
self.assertEqual(len(parse_dict), 1)
self.assertTrue(isinstance(parse_dict['arr'], dict))
self.assertDictEqual(parse_dict['arr'], {0: 10, 5: 20})
def testParseValuesWithIndexAssigment2_IgnoreUnknown(self):
"""Assignment to multiple index positions."""
parse_dict = hparam.parse_values(
'arr[0]=10,arr[5]=20,foo=bar', {'arr': int}, ignore_unknown=True)
self.assertEqual(len(parse_dict), 1)
self.assertTrue(isinstance(parse_dict['arr'], dict))
self.assertDictEqual(parse_dict['arr'], {0: 10, 5: 20})
def testParseValuesWithIndexAssigment3(self):
"""Assignment to index positions in multiple names."""
parse_dict = hparam.parse_values('arr[0]=10,arr[1]=20,L[5]=100,L[10]=200', {
'arr': int,
'L': int
})
self.assertEqual(len(parse_dict), 2)
self.assertTrue(isinstance(parse_dict['arr'], dict))
self.assertDictEqual(parse_dict['arr'], {0: 10, 1: 20})
self.assertTrue(isinstance(parse_dict['L'], dict))
self.assertDictEqual(parse_dict['L'], {5: 100, 10: 200})
def testParseValuesWithIndexAssigment3_IgnoreUnknown(self):
"""Assignment to index positions in multiple names."""
parse_dict = hparam.parse_values(
'arr[0]=10,C=5,arr[1]=20,B[0]=kkk,L[5]=100,L[10]=200', {
'arr': int,
'L': int
},
ignore_unknown=True)
self.assertEqual(len(parse_dict), 2)
self.assertTrue(isinstance(parse_dict['arr'], dict))
self.assertDictEqual(parse_dict['arr'], {0: 10, 1: 20})
self.assertTrue(isinstance(parse_dict['L'], dict))
self.assertDictEqual(parse_dict['L'], {5: 100, 10: 200})
def testParseValuesWithIndexAssigment4(self):
"""Assignment of index positions and scalars."""
parse_dict = hparam.parse_values('x=10,arr[1]=20,y=30', {
'x': int,
'y': int,
'arr': int
})
self.assertEqual(len(parse_dict), 3)
self.assertTrue(isinstance(parse_dict['arr'], dict))
self.assertDictEqual(parse_dict['arr'], {1: 20})
self.assertEqual(parse_dict['x'], 10)
self.assertEqual(parse_dict['y'], 30)
def testParseValuesWithIndexAssigment4_IgnoreUnknown(self):
"""Assignment of index positions and scalars."""
parse_dict = hparam.parse_values(
'x=10,foo[0]=bar,arr[1]=20,zzz=78,y=30', {
'x': int,
'y': int,
'arr': int
},
ignore_unknown=True)
self.assertEqual(len(parse_dict), 3)
self.assertTrue(isinstance(parse_dict['arr'], dict))
self.assertDictEqual(parse_dict['arr'], {1: 20})
self.assertEqual(parse_dict['x'], 10)
self.assertEqual(parse_dict['y'], 30)
def testParseValuesWithIndexAssigment5(self):
"""Different variable types."""
parse_dict = hparam.parse_values('a[0]=5,b[1]=true,c[2]=abc,d[3]=3.14', {
'a': int,
'b': bool,
'c': str,
'd': float
})
self.assertEqual(set(parse_dict.keys()), {'a', 'b', 'c', 'd'})
self.assertTrue(isinstance(parse_dict['a'], dict))
self.assertDictEqual(parse_dict['a'], {0: 5})
self.assertTrue(isinstance(parse_dict['b'], dict))
self.assertDictEqual(parse_dict['b'], {1: True})
self.assertTrue(isinstance(parse_dict['c'], dict))
self.assertDictEqual(parse_dict['c'], {2: 'abc'})
self.assertTrue(isinstance(parse_dict['d'], dict))
self.assertDictEqual(parse_dict['d'], {3: 3.14})
def testParseValuesWithIndexAssigment5_IgnoreUnknown(self):
"""Different variable types."""
parse_dict = hparam.parse_values(
'a[0]=5,cc=4,b[1]=true,c[2]=abc,mm=2,d[3]=3.14', {
'a': int,
'b': bool,
'c': str,
'd': float
},
ignore_unknown=True)
self.assertEqual(set(parse_dict.keys()), {'a', 'b', 'c', 'd'})
self.assertTrue(isinstance(parse_dict['a'], dict))
self.assertDictEqual(parse_dict['a'], {0: 5})
self.assertTrue(isinstance(parse_dict['b'], dict))
self.assertDictEqual(parse_dict['b'], {1: True})
self.assertTrue(isinstance(parse_dict['c'], dict))
self.assertDictEqual(parse_dict['c'], {2: 'abc'})
self.assertTrue(isinstance(parse_dict['d'], dict))
self.assertDictEqual(parse_dict['d'], {3: 3.14})
def testParseValuesWithBadIndexAssigment1(self):
"""Reject assignment of list to variable type."""
with self.assertRaisesRegexp(ValueError,
r'Assignment of a list to a list index.'):
hparam.parse_values('arr[1]=[1,2,3]', {'arr': int})
def testParseValuesWithBadIndexAssigment1_IgnoreUnknown(self):
"""Reject assignment of list to variable type."""
with self.assertRaisesRegexp(ValueError,
r'Assignment of a list to a list index.'):
hparam.parse_values(
'arr[1]=[1,2,3],c=8', {'arr': int}, ignore_unknown=True)
def testParseValuesWithBadIndexAssigment2(self):
"""Reject if type missing."""
with self.assertRaisesRegexp(ValueError,
r'Unknown hyperparameter type for arr'):
hparam.parse_values('arr[1]=5', {})
def testParseValuesWithBadIndexAssigment2_IgnoreUnknown(self):
"""Ignore missing type."""
hparam.parse_values('arr[1]=5', {}, ignore_unknown=True)
def testParseValuesWithBadIndexAssigment3(self):
"""Reject type of the form name[index]."""
with self.assertRaisesRegexp(ValueError,
'Unknown hyperparameter type for arr'):
hparam.parse_values('arr[1]=1', {'arr[1]': int})
def testParseValuesWithBadIndexAssigment3_IgnoreUnknown(self):
"""Ignore type of the form name[index]."""
hparam.parse_values('arr[1]=1', {'arr[1]': int}, ignore_unknown=True)
def testWithReusedVariables(self):
with self.assertRaisesRegexp(ValueError,
'Multiple assignments to variable \'x\''):
hparam.parse_values('x=1,x=1', {'x': int})
with self.assertRaisesRegexp(ValueError,
'Multiple assignments to variable \'arr\''):
hparam.parse_values('arr=[100,200],arr[0]=10', {'arr': int})
with self.assertRaisesRegexp(
ValueError, r'Multiple assignments to variable \'arr\[0\]\''):
hparam.parse_values('arr[0]=10,arr[0]=20', {'arr': int})
with self.assertRaisesRegexp(ValueError,
'Multiple assignments to variable \'arr\''):
hparam.parse_values('arr[0]=10,arr=[100]', {'arr': int})
def testJson(self):
hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6', d=True)
self.assertDictEqual({
'aaa': 1,
'b': 2.0,
'c_c': 'relu6',
'd': True
}, hparams.values())
self.assertEqual(1, hparams.aaa)
self.assertEqual(2.0, hparams.b)
self.assertEqual('relu6', hparams.c_c)
hparams.parse_json('{"aaa": 12, "b": 3.0, "c_c": "relu4", "d": false}')
self.assertDictEqual({
'aaa': 12,
'b': 3.0,
'c_c': 'relu4',
'd': False
}, hparams.values())
self.assertEqual(12, hparams.aaa)
self.assertEqual(3.0, hparams.b)
self.assertEqual('relu4', hparams.c_c)
json_str = hparams.to_json()
hparams2 = hparam.HParams(aaa=10, b=20.0, c_c='hello', d=False)
hparams2.parse_json(json_str)
self.assertEqual(12, hparams2.aaa)
self.assertEqual(3.0, hparams2.b)
self.assertEqual('relu4', hparams2.c_c)
self.assertEqual(False, hparams2.d)
hparams3 = hparam.HParams(aaa=123)
self.assertEqual('{"aaa": 123}', hparams3.to_json())
self.assertEqual('{\n "aaa": 123\n}', hparams3.to_json(indent=2))
self.assertEqual('{"aaa"=123}', hparams3.to_json(separators=(';', '=')))
hparams4 = hparam.HParams(aaa=123, b='hello', c_c=False)
self.assertEqual('{"aaa": 123, "b": "hello", "c_c": false}',
hparams4.to_json(sort_keys=True))
def testSetHParam(self):
hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6', d=True)
self.assertDictEqual({
'aaa': 1,
'b': 2.0,
'c_c': 'relu6',
'd': True
}, hparams.values())
self.assertEqual(1, hparams.aaa)
self.assertEqual(2.0, hparams.b)
self.assertEqual('relu6', hparams.c_c)
hparams.set_hparam('aaa', 12)
hparams.set_hparam('b', 3.0)
hparams.set_hparam('c_c', 'relu4')
hparams.set_hparam('d', False)
self.assertDictEqual({
'aaa': 12,
'b': 3.0,
'c_c': 'relu4',
'd': False
}, hparams.values())
self.assertEqual(12, hparams.aaa)
self.assertEqual(3.0, hparams.b)
self.assertEqual('relu4', hparams.c_c)
def testSetHParamListNonListMismatch(self):
hparams = hparam.HParams(a=1, b=[2.0, 3.0])
with self.assertRaisesRegexp(ValueError, r'Must not pass a list'):
hparams.set_hparam('a', [1.0])
with self.assertRaisesRegexp(ValueError, r'Must pass a list'):
hparams.set_hparam('b', 1.0)
def testSetHParamTypeMismatch(self):
hparams = hparam.HParams(
int_=1, str_='str', bool_=True, float_=1.1, list_int=[1, 2], none=None)
with self.assertRaises(ValueError):
hparams.set_hparam('str_', 2.2)
with self.assertRaises(ValueError):
hparams.set_hparam('int_', False)
with self.assertRaises(ValueError):
hparams.set_hparam('bool_', 1)
# Unfortunately there is no automagic conversion of bool-like strings to
# bool.
with self.assertRaises(ValueError):
hparams.set_hparam('bool_', 'true')
with self.assertRaises(ValueError):
hparams.set_hparam('bool_', 'True')
with self.assertRaises(ValueError):
hparams.set_hparam('bool_', 'false')
with self.assertRaises(ValueError):
hparams.set_hparam('bool_', 'False')
with self.assertRaises(ValueError):
hparams.set_hparam('bool_', '0')
with self.assertRaises(ValueError):
hparams.set_hparam('bool_', '1')
with self.assertRaises(ValueError):
hparams.set_hparam('int_', 2.2)
with self.assertRaises(ValueError):
hparams.set_hparam('list_int', [2, 3.3])
with self.assertRaises(ValueError):
hparams.set_hparam('int_', '2')
# Casting int to float is OK
hparams.set_hparam('float_', 1)
# Getting stuck with NoneType :(
hparams.set_hparam('none', '1')
self.assertEqual('1', hparams.none)
def testSetHParamExactTypeMatch(self):
class DummyContext(object):
def __init__(self, a, b=0):
self.a = a
self.b = b
hparams = hparam.HParams(x=DummyContext(a=100, b=100))
# Verify x is assigned directly, without casting.
hparams.set_hparam('x', DummyContext(a=100, b=100))
self.assertEqual(hparams.x.a, 100)
self.assertEqual(hparams.x.b, 100)
def testNonProtoFails(self):
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def=1)
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def=1.0)
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def='hello')
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def=[1, 2, 3])
def testGet(self):
hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6', d=True, e=[5.0, 6.0])
# Existing parameters with default=None.
self.assertEqual(1, hparams.get('aaa'))
self.assertEqual(2.0, hparams.get('b'))
self.assertEqual('relu6', hparams.get('c_c'))
self.assertEqual(True, hparams.get('d'))
self.assertEqual([5.0, 6.0], hparams.get('e', None))
# Existing parameters with compatible defaults.
self.assertEqual(1, hparams.get('aaa', 2))
self.assertEqual(2.0, hparams.get('b', 3.0))
self.assertEqual(2.0, hparams.get('b', 3))
self.assertEqual('relu6', hparams.get('c_c', 'default'))
self.assertEqual(True, hparams.get('d', True))
self.assertEqual([5.0, 6.0], hparams.get('e', [1.0, 2.0, 3.0]))
self.assertEqual([5.0, 6.0], hparams.get('e', [1, 2, 3]))
# Existing parameters with incompatible defaults.
with self.assertRaises(ValueError):
hparams.get('aaa', 2.0)
with self.assertRaises(ValueError):
hparams.get('b', False)
with self.assertRaises(ValueError):
hparams.get('c_c', [1, 2, 3])
with self.assertRaises(ValueError):
hparams.get('d', 'relu')
with self.assertRaises(ValueError):
hparams.get('e', 123.0)
with self.assertRaises(ValueError):
hparams.get('e', ['a', 'b', 'c'])
# Nonexistent parameters.
self.assertEqual(None, hparams.get('unknown'))
self.assertEqual(123, hparams.get('unknown', 123))
self.assertEqual([1, 2, 3], hparams.get('unknown', [1, 2, 3]))
def testDel(self):
hparams = hparam.HParams(aaa=1, b=2.0)
with self.assertRaises(ValueError):
hparams.set_hparam('aaa', 'will fail')
with self.assertRaises(ValueError):
hparams.add_hparam('aaa', 'will fail')
hparams.del_hparam('aaa')
hparams.add_hparam('aaa', 'will work')
self.assertEqual('will work', hparams.get('aaa'))
hparams.set_hparam('aaa', 'still works')
self.assertEqual('still works', hparams.get('aaa'))
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/training/python/training/hparam_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.batch_sequences_with_states."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.contrib.training.python.training import sequence_queueing_state_saver as sqss
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import saver
class BatchSequencesWithStatesTest(test.TestCase):
def setUp(self):
super(BatchSequencesWithStatesTest, self).setUp()
self.value_length = 4
ind1 = np.array([
[0, 0],
[1, 0], [1, 3], [1, 4],
[3, 2], [3, 3]])
val1 = np.array([0, 10, 13, 14, 32, 33])
shape1 = np.array([self.value_length, 6])
sp_tensor1 = sparse_tensor.SparseTensor(
array_ops.constant(ind1, dtypes.int64),
array_ops.constant(val1, dtypes.int64),
array_ops.placeholder_with_default(shape1, shape=[2]))
ind2 = np.array([
[0, 0, 1],
[0, 1, 0],
[0, 1, 2],
[1, 0, 3],
[1, 1, 0],
[1, 1, 1],
[1, 1, 2],
[1, 2, 2]])
val2 = np.array([1, 10, 12, 103, 150, 149, 150, 122])
shape2 = np.array([self.value_length, 3, 4])
sp_tensor2 = sparse_tensor.SparseTensor(
array_ops.constant(ind2, dtypes.int64),
array_ops.constant(val2, dtypes.int64),
array_ops.placeholder_with_default(shape2, shape=[3]))
sp_tensor3 = sparse_tensor.SparseTensor(
array_ops.constant([[1, 9], [2, 2], [2, 10]], dtypes.int64),
array_ops.constant([7, 15, 2], dtypes.int64),
array_ops.constant([5, 12], dtypes.int64)
)
self.sp_tensor3_expected = sparse_tensor.SparseTensorValue(
[[0, 1, 9], [0, 2, 2], [0, 2, 10], [1, 1, 9], [1, 2, 2], [1, 2, 10]],
[7, 15, 2, 7, 15, 2],
[2, 5, 12]
)
self.batch_size = 2
self.key = string_ops.string_join([
"key_", string_ops.as_string(
math_ops.cast(10000 * random_ops.random_uniform(()), dtypes.int32))
])
self.sequences = {
"seq1": np.random.rand(self.value_length, 5),
"seq2": np.random.rand(self.value_length, 4, 2),
"seq3": sp_tensor1,
"seq4": sp_tensor2}
self.context = {
"context1": [3, 4],
"sp_context": sp_tensor3}
self.initial_states = {
"state1": np.random.rand(6, 7),
"state2": np.random.rand(8)
}
def _prefix(self, key_value):
return set(
[s.decode("ascii").split(":")[0].encode("ascii") for s in key_value])
def _testBasics(self, num_unroll, length, pad,
expected_seq1_batch1, expected_seq2_batch1,
expected_seq1_batch2, expected_seq2_batch2,
expected_seq3_batch1, expected_seq3_batch2,
expected_seq4_batch1, expected_seq4_batch2,
key=None, make_keys_unique=False):
with self.cached_session() as sess:
next_batch = sqss.batch_sequences_with_states(
input_key=key if key is not None else self.key,
input_sequences=self.sequences,
input_context=self.context,
input_length=length,
initial_states=self.initial_states,
num_unroll=num_unroll,
batch_size=self.batch_size,
num_threads=3,
# to enforce that we only move on to the next examples after finishing
# all segments of the first ones.
capacity=2,
pad=pad,
make_keys_unique=make_keys_unique,
make_keys_unique_seed=9)
state1 = next_batch.state("state1")
state2 = next_batch.state("state2")
state1_update = next_batch.save_state("state1", state1 + 1)
state2_update = next_batch.save_state("state2", state2 - 1)
# Make sure queue runner with SQSS is added properly to meta graph def.
# Saver requires at least one variable.
v0 = variables.Variable(10.0, name="v0")
ops.add_to_collection("variable_collection", v0)
variables.global_variables_initializer()
save = saver.Saver([v0])
test_dir = os.path.join(test.get_temp_dir(), "sqss_test")
filename = os.path.join(test_dir, "metafile")
meta_graph_def = save.export_meta_graph(filename)
qr_saved = meta_graph_def.collection_def[ops.GraphKeys.QUEUE_RUNNERS]
self.assertTrue(qr_saved.bytes_list.value is not None)
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
# Step 1
(key_value, next_key_value, seq1_value, seq2_value, seq3_value,
seq4_value, context1_value, context2_value, state1_value, state2_value,
length_value, _, _) = sess.run(
(next_batch.key, next_batch.next_key, next_batch.sequences["seq1"],
next_batch.sequences["seq2"], next_batch.sequences["seq3"],
next_batch.sequences["seq4"], next_batch.context["context1"],
next_batch.context["sp_context"], state1, state2, next_batch.length,
state1_update, state2_update))
expected_first_keys = set([b"00000_of_00002"])
expected_second_keys = set([b"00001_of_00002"])
expected_final_keys = set([b"STOP"])
self.assertEqual(expected_first_keys, self._prefix(key_value))
self.assertEqual(expected_second_keys, self._prefix(next_key_value))
self.assertAllEqual(
np.tile(self.context["context1"], (self.batch_size, 1)),
context1_value)
self.assertAllEqual(self.sp_tensor3_expected.indices,
context2_value.indices)
self.assertAllEqual(self.sp_tensor3_expected.values,
context2_value.values)
self.assertAllEqual(self.sp_tensor3_expected.dense_shape,
context2_value.dense_shape)
self.assertAllEqual(expected_seq1_batch1, seq1_value)
self.assertAllEqual(expected_seq2_batch1, seq2_value)
self.assertAllEqual(expected_seq3_batch1.indices, seq3_value.indices)
self.assertAllEqual(expected_seq3_batch1.values, seq3_value.values)
self.assertAllEqual(expected_seq3_batch1.dense_shape,
seq3_value.dense_shape)
self.assertAllEqual(expected_seq4_batch1.indices, seq4_value.indices)
self.assertAllEqual(expected_seq4_batch1.values, seq4_value.values)
self.assertAllEqual(expected_seq4_batch1.dense_shape,
seq4_value.dense_shape)
self.assertAllEqual(
np.tile(self.initial_states["state1"], (self.batch_size, 1, 1)),
state1_value)
self.assertAllEqual(
np.tile(self.initial_states["state2"], (self.batch_size, 1)),
state2_value)
self.assertAllEqual(length_value, [num_unroll, num_unroll])
# Step 2
(key_value, next_key_value, seq1_value, seq2_value, seq3_value,
seq4_value, context1_value, context2_value, state1_value, state2_value,
length_value, _, _) = sess.run(
(next_batch.key, next_batch.next_key, next_batch.sequences["seq1"],
next_batch.sequences["seq2"], next_batch.sequences["seq3"],
next_batch.sequences["seq4"], next_batch.context["context1"],
next_batch.context["sp_context"], state1, state2, next_batch.length,
state1_update, state2_update))
self.assertEqual(expected_second_keys, self._prefix(key_value))
self.assertEqual(expected_final_keys, self._prefix(next_key_value))
self.assertAllEqual(
np.tile(self.context["context1"], (self.batch_size, 1)),
context1_value)
self.assertAllEqual(self.sp_tensor3_expected.indices,
context2_value.indices)
self.assertAllEqual(self.sp_tensor3_expected.values,
context2_value.values)
self.assertAllEqual(self.sp_tensor3_expected.dense_shape,
context2_value.dense_shape)
self.assertAllEqual(expected_seq1_batch2, seq1_value)
self.assertAllEqual(expected_seq2_batch2, seq2_value)
self.assertAllEqual(expected_seq3_batch2.indices, seq3_value.indices)
self.assertAllEqual(expected_seq3_batch2.values, seq3_value.values)
self.assertAllEqual(expected_seq3_batch2.dense_shape,
seq3_value.dense_shape)
self.assertAllEqual(expected_seq4_batch2.indices, seq4_value.indices)
self.assertAllEqual(expected_seq4_batch2.values, seq4_value.values)
self.assertAllEqual(expected_seq4_batch2.dense_shape,
seq4_value.dense_shape)
self.assertAllEqual(1 + np.tile(self.initial_states["state1"],
(self.batch_size, 1, 1)), state1_value)
self.assertAllEqual(-1 + np.tile(self.initial_states["state2"],
(self.batch_size, 1)), state2_value)
self.assertAllEqual([1, 1], length_value)
coord.request_stop()
coord.join(threads, stop_grace_period_secs=2)
def _testBasicPadding(self, pad, key=None, make_keys_unique=False):
num_unroll = 2 # Divisor of value_length - so no padding necessary.
expected_seq1_batch1 = np.tile(
self.sequences["seq1"][np.newaxis, 0:num_unroll, :],
(self.batch_size, 1, 1))
expected_seq2_batch1 = np.tile(
self.sequences["seq2"][np.newaxis, 0:num_unroll, :, :],
(self.batch_size, 1, 1, 1))
expected_seq1_batch2 = np.tile(
self.sequences["seq1"][np.newaxis, num_unroll:self.value_length, :],
(self.batch_size, 1, 1))
expected_seq2_batch2 = np.tile(
self.sequences["seq2"][np.newaxis, num_unroll:self.value_length, :, :],
(self.batch_size, 1, 1, 1))
ind1_1 = np.array([
# batch entry 1
[0, 0, 0],
[0, 1, 0], [0, 1, 3], [0, 1, 4],
# batch entry 2
[1, 0, 0],
[1, 1, 0], [1, 1, 3], [1, 1, 4]])
ind1_2 = np.array([
# batch entry 1
[0, 1, 2], [0, 1, 3],
# batch entry 2
[1, 1, 2], [1, 1, 3]])
val1_1 = np.array([0, 10, 13, 14,
0, 10, 13, 14])
val1_2 = np.array([32, 33,
32, 33])
shape1 = np.array([self.batch_size, num_unroll, 6])
# For sp_tensor2 all values fall into the first segment.
ind2_1 = np.array([
# batch entry 1
[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 0, 1, 2],
[0, 1, 0, 3],
[0, 1, 1, 0],
[0, 1, 1, 1],
[0, 1, 1, 2],
[0, 1, 2, 2],
# batch entry 2
[1, 0, 0, 1],
[1, 0, 1, 0],
[1, 0, 1, 2],
[1, 1, 0, 3],
[1, 1, 1, 0],
[1, 1, 1, 1],
[1, 1, 1, 2],
[1, 1, 2, 2],
])
val2_1 = np.array([1, 10, 12, 103, 150, 149, 150, 122,
1, 10, 12, 103, 150, 149, 150, 122])
shape2 = np.array([self.batch_size, num_unroll, 3, 4])
expected_seq3_batch1 = sparse_tensor.SparseTensorValue(
ind1_1, val1_1, shape1)
expected_seq3_batch2 = sparse_tensor.SparseTensorValue(
ind1_2, val1_2, shape1)
expected_seq4_batch1 = sparse_tensor.SparseTensorValue(
ind2_1, val2_1, shape2)
expected_seq4_batch2 = sparse_tensor.SparseTensorValue(
np.empty(shape=[0, 4], dtype=np.int64), np.array([]), shape2)
self._testBasics(
num_unroll=num_unroll,
length=3,
pad=pad,
expected_seq1_batch1=expected_seq1_batch1,
expected_seq1_batch2=expected_seq1_batch2,
expected_seq2_batch1=expected_seq2_batch1,
expected_seq2_batch2=expected_seq2_batch2,
expected_seq3_batch1=expected_seq3_batch1,
expected_seq3_batch2=expected_seq3_batch2,
expected_seq4_batch1=expected_seq4_batch1,
expected_seq4_batch2=expected_seq4_batch2,
key=key,
make_keys_unique=make_keys_unique)
def testBasicPadding(self):
self._testBasicPadding(pad=True)
def testBasicNoPadding(self):
self._testBasicPadding(pad=False)
def testRandomKeyGen(self):
self._testBasicPadding(pad=False,
key=constant_op.constant("fixed_key"),
make_keys_unique=True)
def testNotAMultiple(self):
num_unroll = 3 # Not a divisor of value_length -
# so padding would have been necessary.
# Use placeholder_with_default in sequences to make sure we get runtime
# error instead of shape inference error
sequences = {
"seq1": array_ops.placeholder_with_default(self.sequences["seq1"],
shape=(None, 5)),
"seq2": array_ops.placeholder_with_default(self.sequences["seq2"],
shape=(None, 4, 2)),
"seq3": self.sequences["seq3"],
"seq4": self.sequences["seq4"],
}
with self.cached_session() as sess:
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
".*should be a multiple of: 3, but saw "
"value: 4. Consider setting pad=True."):
coord = coordinator.Coordinator()
threads = None
try:
with coord.stop_on_exception():
next_batch = sqss.batch_sequences_with_states(
input_key=self.key,
input_sequences=sequences,
input_context=self.context,
input_length=3,
initial_states=self.initial_states,
num_unroll=num_unroll,
batch_size=self.batch_size,
num_threads=3,
# to enforce that we only move on to the next examples after
# finishing all segments of the first ones.
capacity=2,
pad=False)
threads = queue_runner_impl.start_queue_runners(coord=coord)
sess.run([next_batch.key])
except errors_impl.OutOfRangeError:
pass
finally:
coord.request_stop()
if threads is not None:
coord.join(threads, stop_grace_period_secs=2)
def testAdvancedPadding(self):
num_unroll = 3 # Not a divisor of value_length - so padding to 6 necessary.
expected_seq1_batch1 = np.tile(
self.sequences["seq1"][np.newaxis, 0:num_unroll, :],
(self.batch_size, 1, 1))
expected_seq2_batch1 = np.tile(
self.sequences["seq2"][np.newaxis, 0:num_unroll, :, :],
(self.batch_size, 1, 1, 1))
padded_seq1 = np.concatenate(
[
self.sequences["seq1"][np.newaxis, num_unroll:self.value_length, :],
np.zeros((1, 1, 5)), np.zeros((1, 1, 5))
],
axis=1)
expected_seq1_batch2 = np.concatenate(
[padded_seq1] * self.batch_size, axis=0)
padded_seq2 = np.concatenate(
[
self.sequences["seq2"][np.newaxis, num_unroll:self.value_length, :],
np.zeros((1, 1, 4, 2)), np.zeros((1, 1, 4, 2))
],
axis=1)
expected_seq2_batch2 = np.concatenate(
[padded_seq2] * self.batch_size, axis=0)
ind1_1 = np.array([
# batch entry 1
[0, 0, 0],
[0, 1, 0], [0, 1, 3], [0, 1, 4],
# batch entry 2
[1, 0, 0],
[1, 1, 0], [1, 1, 3], [1, 1, 4]])
ind1_2 = np.array([
# batch entry 1
[0, 0, 2], [0, 0, 3],
# batch entry 2
[1, 0, 2], [1, 0, 3]])
val1_1 = np.array([0, 10, 13, 14,
0, 10, 13, 14])
val1_2 = np.array([32, 33,
32, 33])
shape1 = np.array([self.batch_size, num_unroll, 6])
# For sp_tensor2 all values fall into the first segment.
ind2_1 = np.array([
# batch entry 1
[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 0, 1, 2],
[0, 1, 0, 3],
[0, 1, 1, 0],
[0, 1, 1, 1],
[0, 1, 1, 2],
[0, 1, 2, 2],
# batch entry 2
[1, 0, 0, 1],
[1, 0, 1, 0],
[1, 0, 1, 2],
[1, 1, 0, 3],
[1, 1, 1, 0],
[1, 1, 1, 1],
[1, 1, 1, 2],
[1, 1, 2, 2],
])
val2_1 = np.array([1, 10, 12, 103, 150, 149, 150, 122,
1, 10, 12, 103, 150, 149, 150, 122])
shape2 = np.array([self.batch_size, num_unroll, 3, 4])
expected_seq3_batch1 = sparse_tensor.SparseTensorValue(
ind1_1, val1_1, shape1)
expected_seq3_batch2 = sparse_tensor.SparseTensorValue(
ind1_2, val1_2, shape1)
expected_seq4_batch1 = sparse_tensor.SparseTensorValue(
ind2_1, val2_1, shape2)
expected_seq4_batch2 = sparse_tensor.SparseTensorValue(
np.empty(shape=[0, 4], dtype=np.int64), np.array([]), shape2)
self._testBasics(
num_unroll=num_unroll,
length=None,
pad=True,
expected_seq1_batch1=expected_seq1_batch1,
expected_seq1_batch2=expected_seq1_batch2,
expected_seq2_batch1=expected_seq2_batch1,
expected_seq2_batch2=expected_seq2_batch2,
expected_seq3_batch1=expected_seq3_batch1,
expected_seq3_batch2=expected_seq3_batch2,
expected_seq4_batch1=expected_seq4_batch1,
expected_seq4_batch2=expected_seq4_batch2)
class PaddingTest(test.TestCase):
def testPaddingInvalidLengths(self):
with ops.Graph().as_default() as g, self.session(graph=g):
sequences = {
"key_1": constant_op.constant([1, 2, 3]), # length 3
"key_2": constant_op.constant([1.5, 2.5]) # length 2
}
_, padded_seq = sqss._padding(sequences, 2)
with self.assertRaisesOpError(
".*All sequence lengths must match, but received lengths.*"):
padded_seq["key_1"].eval()
def testPadding(self):
with ops.Graph().as_default() as g, self.session(graph=g):
sequences = {
"key_1": constant_op.constant([1, 2]),
"key_2": constant_op.constant([0.5, -1.0]),
"key_3": constant_op.constant(["a", "b"]), # padding strings
"key_4": constant_op.constant([[1, 2, 3], [4, 5, 6]])
}
_, padded_seq = sqss._padding(sequences, 5)
expected_padded_seq = {
"key_1": [1, 2, 0, 0, 0],
"key_2": [0.5, -1.0, 0.0, 0.0, 0.0],
"key_3": ["a", "b", "", "", ""],
"key_4": [[1, 2, 3], [4, 5, 6], [0, 0, 0], [0, 0, 0], [0, 0, 0]]
}
for key, val in expected_padded_seq.items():
self.assertTrue(
math_ops.reduce_all(math_ops.equal(val, padded_seq[key])).eval())
def testPaddingOnlySparse(self):
ind1 = np.array([[0], [2]])
val1 = np.array([3, 4])
shape1 = np.array([4])
ind2 = np.array([[1], [2]])
val2 = np.array([9, 12])
shape2 = np.array([5])
with ops.Graph().as_default() as g, self.session(graph=g):
sp_tensor1 = sparse_tensor.SparseTensor(
indices=array_ops.constant(ind1, dtypes.int64),
values=array_ops.constant(val1, dtypes.int64),
dense_shape=array_ops.constant(shape1, dtypes.int64))
sp_tensor2 = sparse_tensor.SparseTensor(
indices=array_ops.constant(ind2, dtypes.int64),
values=array_ops.constant(val2, dtypes.int64),
dense_shape=array_ops.constant(shape2, dtypes.int64))
sp_tensor1_expected = sparse_tensor.SparseTensor(
indices=sp_tensor1.indices,
values=sp_tensor1.values,
dense_shape=[8])
sp_tensor2_expected = sparse_tensor.SparseTensor(
indices=sp_tensor2.indices,
values=sp_tensor2.values,
dense_shape=[8])
sequences = {
"key_1": sp_tensor1,
"key_2": sp_tensor2,
}
_, padded_seq = sqss._padding(sequences, 4)
expected_padded_seq = {
"key_1": sp_tensor1_expected,
"key_2": sp_tensor2_expected,
}
for key, val in expected_padded_seq.items():
self.assertAllEqual(
sparse_ops.sparse_tensor_to_dense(val).eval(),
sparse_ops.sparse_tensor_to_dense(padded_seq[key]).eval())
class SparseTensorReConstructionTest(test.TestCase):
def testAddManyTakeManyRoundTripBatched(self):
with self.test_session(use_gpu=False) as sess:
# N == 4 because shape_value == [4, 5]
indices_value_1 = np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64)
values_value_1 = np.array([b"a", b"b", b"c"])
shape_value_1 = np.array([4, 5], dtype=np.int64)
sparse_tensor_1 = sparse_tensor.SparseTensor(
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtypes.string),
array_ops.placeholder(dtypes.int64))
dict1 = {"key": sparse_tensor_1}
indices_value_2 = np.array([[1, 4], [2, 3]], dtype=np.int64)
values_value_2 = np.array([b"d", b"e"])
shape_value_2 = np.array([4, 5], dtype=np.int64)
sparse_tensor_2 = sparse_tensor.SparseTensor(
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtypes.string),
array_ops.placeholder(dtypes.int64))
dict2 = {"key": sparse_tensor_2}
input_seq1, keys1, tensor_list1 = sqss._deconstruct_sparse_tensor_seq(
dict1, shared_name="a")
handles_1 = input_seq1["key"]
input_seq2, _, _ = sqss._deconstruct_sparse_tensor_seq(
dict2, shared_name="a")
handles_2 = input_seq2["key"]
combined_handles = array_ops.stack(
[handles_1[1], handles_1[2], handles_1[3],
handles_2[1], handles_2[2], handles_2[3]])
batched_dict = {"key": combined_handles}
sqss._reconstruct_sparse_tensor_seq(
batched_dict,
keys1,
tensor_list1,
batch_size=2,
num_unroll=3)
roundtrip_value, = sess.run(
[batched_dict["key"]],
feed_dict={sparse_tensor_1.indices: indices_value_1,
sparse_tensor_1.values: values_value_1,
sparse_tensor_1.dense_shape: shape_value_1,
sparse_tensor_2.indices: indices_value_2,
sparse_tensor_2.values: values_value_2,
sparse_tensor_2.dense_shape: shape_value_2})
self.assertAllEqual(roundtrip_value.indices,
np.array([[0, 1, 0], [1, 0, 4], [1, 1, 3]],
dtype=np.int64))
self.assertAllEqual(roundtrip_value.values,
np.array([b"c", b"d", b"e"]))
self.assertAllEqual(roundtrip_value.dense_shape,
np.array([2, 3, 5], dtype=np.int64))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/training/python/training/batch_sequences_with_states_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains functions for evaluation and summarization of metrics.
The evaluation.py module contains helper functions for evaluating TensorFlow
models using a variety of metrics and summarizing the results.
****************************************
* Evaluating a Checkpointed Model Once *
****************************************
Once we've trained a model, we'll want to evaluate it. The simplest way to do
this is to evaluate the performance of a saved model a single time. To do
this, we specify the metrics we want to evaluate and the summaries we want to
save to disk, and we can also print the metric values to stdout:
# Specify where the checkpoint is stored:
checkpoint_path = ...
# Create model and obtain the predictions:
images, labels = LoadData(...)
predictions = MyModel(images)
# Choose the metrics to compute:
names_to_values, names_to_updates = tf.contrib.metrics.aggregate_metric_map({
"accuracy": tf.compat.v1.metrics.accuracy(labels, predictions),
"mse": tf.compat.v1.metrics.mean_squared_error(labels, predictions),
})
# Define the summaries to write:
for metric_name, metric_value in names_to_values.items():
tf.compat.v1.summary.scalar(metric_name, metric_value)
checkpoint_dir = '/tmp/my_model_dir/'
log_dir = '/tmp/my_model_eval/'
# We'll evaluate 1000 batches:
num_evals = 1000
names_to_values = evaluate_once(
checkpoint_path=checkpoint_path,
eval_ops=names_to_updates.values(),
final_ops=names_to_values,
hooks=[
tf.contrib.training.StopAfterNEvalsHook(num_evals),
tf.contrib.training.SummaryAtEndHook(log_dir),
],
config=None)
for name in names_to_values:
print('Metric %s has value %f.' % (name, names_to_values[name]))
************************************************
* Evaluating a Checkpointed Model with Metrics *
************************************************
Often, one wants to evaluate a model checkpoint saved on disk. This can be
performed once or repeatedly on a set schedule.
To evaluate a particular model, users define zero or more metrics and zero or
more summaries and call the evaluate_repeatedly method:
# Create model and obtain the predictions:
images, labels = LoadData(...)
predictions = MyModel(images)
# Choose the metrics to compute:
names_to_values, names_to_updates = tf.contrib.metrics.aggregate_metric_map({
"accuracy": tf.compat.v1.metrics.accuracy(labels, predictions),
"mse": tf.compat.v1.metrics.mean_squared_error(labels, predictions),
})
# Define the summaries to write:
for metric_name, metric_value in names_to_values.items():
tf.compat.v1.summary.scalar(metric_name, metric_value)
checkpoint_dir = '/tmp/my_model_dir/'
log_dir = '/tmp/my_model_eval/'
# We'll evaluate 1000 batches:
num_evals = 1000
# Evaluate every 10 minutes:
tf.contrib.training.evaluate_repeatedly(
checkpoint_dir,
eval_ops=names_to_updates.values(),
hooks=[
tf.contrib.training.StopAfterNEvalsHook(num_evals),
tf.contrib.training.SummaryAtEndHook(log_dir),
],
eval_interval_secs=600)
*******************************************************
* Evaluating a Checkpointed Model with Summaries Only *
*******************************************************
At times, an evaluation can be performed without metrics at all but rather
with only summaries. The user need only leave out the 'eval_ops' argument:
# Create model and obtain the predictions:
images, labels = LoadData(...)
predictions = MyModel(images)
# Define the summaries to write:
tf.compat.v1.summary.scalar(...)
tf.compat.v1.summary.histogram(...)
checkpoint_dir = '/tmp/my_model_dir/'
log_dir = '/tmp/my_model_eval/'
# Evaluate once every 10 minutes.
tf.contrib.training.evaluate_repeatedly(
checkpoint_dir,
hooks=[
          tf.contrib.training.SummaryAtEndHook(log_dir),
],
eval_interval_secs=600)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import evaluation
from tensorflow.python.training import monitored_session
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
__all__ = [
'StopAfterNEvalsHook',
'SummaryAtEndHook',
'checkpoints_iterator',
'evaluate_once',
'evaluate_repeatedly',
'get_or_create_eval_step',
'wait_for_new_checkpoint',
]
# pylint: disable=protected-access
# pylint: disable=invalid-name
StopAfterNEvalsHook = evaluation._StopAfterNEvalsHook
evaluate_once = evaluation._evaluate_once
get_or_create_eval_step = evaluation._get_or_create_eval_step
# pylint: enable=invalid-name
# pylint: enable=protected-access
def wait_for_new_checkpoint(checkpoint_dir,
last_checkpoint=None,
seconds_to_sleep=1,
timeout=None):
"""Waits until a new checkpoint file is found.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
last_checkpoint: The last checkpoint path used or `None` if we're expecting
a checkpoint for the first time.
seconds_to_sleep: The number of seconds to sleep for before looking for a
new checkpoint.
timeout: The maximum number of seconds to wait. If left as `None`, then the
process will wait indefinitely.
Returns:
a new checkpoint path, or None if the timeout was reached.
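  Example (a hedged sketch; `run_eval` is a hypothetical callable supplied by
  the caller):
    last = None
    while True:
      last = wait_for_new_checkpoint('/tmp/my_model_dir', last, timeout=600)
      if last is None:
        break  # Timed out waiting for a checkpoint.
      run_eval(last)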
"""
logging.info('Waiting for new checkpoint at %s', checkpoint_dir)
stop_time = time.time() + timeout if timeout is not None else None
while True:
checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None or checkpoint_path == last_checkpoint:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
time.sleep(seconds_to_sleep)
else:
logging.info('Found new checkpoint at %s', checkpoint_path)
return checkpoint_path
def checkpoints_iterator(checkpoint_dir,
min_interval_secs=0,
timeout=None,
timeout_fn=None):
"""Continuously yield new checkpoint files as they appear.
The iterator only checks for new checkpoints when control flow has been
reverted to it. This means it can miss checkpoints if your code takes longer
to run between iterations than `min_interval_secs` or the interval at which
new checkpoints are written.
The `timeout` argument is the maximum number of seconds to block waiting for
a new checkpoint. It is used in combination with the `timeout_fn` as
follows:
* If the timeout expires and no `timeout_fn` was specified, the iterator
stops yielding.
* If a `timeout_fn` was specified, that function is called and if it returns
a true boolean value the iterator stops yielding.
* If the function returns a false boolean value then the iterator resumes the
wait for new checkpoints. At this point the timeout logic applies again.
This behavior gives control to callers on what to do if checkpoints do not
come fast enough or stop being generated. For example, if callers have a way
to detect that the training has stopped and know that no new checkpoints
will be generated, they can provide a `timeout_fn` that returns `True` when
the training has stopped. If they know that the training is still going on
they return `False` instead.
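  For example (a minimal sketch; `train_is_done` and `run_eval` are
  hypothetical callables supplied by the caller):
    for ckpt_path in checkpoints_iterator('/tmp/my_model_dir',
                                          min_interval_secs=60,
                                          timeout=600,
                                          timeout_fn=train_is_done):
      run_eval(ckpt_path)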
Args:
checkpoint_dir: The directory in which checkpoints are saved.
min_interval_secs: The minimum number of seconds between yielding
checkpoints.
timeout: The maximum number of seconds to wait between checkpoints. If left
as `None`, then the process will wait indefinitely.
timeout_fn: Optional function to call after a timeout. If the function
returns True, then it means that no new checkpoints will be generated and
the iterator will exit. The function is called with no arguments.
Yields:
String paths to latest checkpoint files as they arrive.
"""
checkpoint_path = None
while True:
new_checkpoint_path = wait_for_new_checkpoint(
checkpoint_dir, checkpoint_path, timeout=timeout)
if new_checkpoint_path is None:
if not timeout_fn:
# timed out
logging.info('Timed-out waiting for a checkpoint.')
return
if timeout_fn():
# The timeout_fn indicated that we are truly done.
return
else:
# The timeout_fn indicated that more checkpoints may come.
continue
start = time.time()
checkpoint_path = new_checkpoint_path
yield checkpoint_path
time_to_next_eval = start + min_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
class SummaryAtEndHook(session_run_hook.SessionRunHook):
"""A run hook that saves a summary with the results of evaluation."""
def __init__(self,
log_dir=None,
summary_writer=None,
summary_op=None,
feed_dict=None):
"""Constructs the Summary Hook.
Args:
log_dir: The directory where the summary events are saved to. Used only
when `summary_writer` is not specified.
summary_writer: A `tf.compat.v1.summary.FileWriter` to write summary
events with.
summary_op: The summary op to run. If left as `None`, then all summaries
in the tf.GraphKeys.SUMMARIES collection are used.
feed_dict: An optional feed dictionary to use when evaluating the
summaries.
Raises:
ValueError: If both `log_dir` and `summary_writer` are `None`.
"""
self._summary_op = summary_op
self._replace_summary_op = summary_op is None
self._feed_dict = feed_dict
self._summary_writer = summary_writer
self._log_dir = log_dir
if self._log_dir is None and self._summary_writer is None:
raise ValueError('One of log_dir or summary_writer should be used.')
def begin(self):
if self._replace_summary_op:
# This can still remain None if there are no summaries.
self._summary_op = summary.merge_all()
self._global_step = training_util.get_or_create_global_step()
def after_create_session(self, session, coord):
if self._summary_writer is None and self._log_dir:
self._summary_writer = summary.FileWriterCache.get(self._log_dir)
def end(self, session):
if self._summary_op is not None:
global_step = training_util.global_step(session, self._global_step)
summary_str = session.run(self._summary_op, self._feed_dict)
if self._summary_writer:
self._summary_writer.add_summary(summary_str, global_step)
if self._summary_writer:
self._summary_writer.flush()
def _scaffold_with_init(scaffold, saver, checkpoint_path):
"""Creates a scaffold that loads the given checkpoint using an init_fn.
Args:
scaffold: The scaffold to copy.
saver: The saver to use when restoring the checkpoint.
checkpoint_path: An absolute path to a checkpoint.
Returns:
A scaffold with an init_fn that loads the given checkpoint. If the scaffold
provided already has an init_fn, the scaffold is returned unchanged.
"""
def restore_checkpoint(_, session):
saver.restore(session, checkpoint_path)
if not scaffold.init_fn:
scaffold = monitored_session.Scaffold(
init_op=scaffold.init_op,
init_feed_dict=scaffold.init_feed_dict,
init_fn=restore_checkpoint,
ready_op=scaffold.ready_op,
local_init_op=scaffold.local_init_op,
summary_op=scaffold.summary_op,
saver=scaffold.saver)
return scaffold
def evaluate_repeatedly(checkpoint_dir,
master='',
scaffold=None,
eval_ops=None,
feed_dict=None,
final_ops=None,
final_ops_feed_dict=None,
eval_interval_secs=60,
hooks=None,
config=None,
max_number_of_evaluations=None,
timeout=None,
timeout_fn=None):
"""Repeatedly searches for a checkpoint in `checkpoint_dir` and evaluates it.
During a single evaluation, the `eval_ops` is run until the session is
interrupted or requested to finish. This is typically requested via a
`tf.contrib.training.StopAfterNEvalsHook` which results in `eval_ops` running
the requested number of times.
Optionally, a user can pass in `final_ops`, a single `Tensor`, a list of
`Tensors` or a dictionary from names to `Tensors`. The `final_ops` is
evaluated a single time after `eval_ops` has finished running and the fetched
values of `final_ops` are returned. If `final_ops` is left as `None`, then
`None` is returned.
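  For example (an illustrative sketch reusing the `names_to_values` and
  `names_to_updates` dictionaries from the module docstring above):
    metric_values = tf.contrib.training.evaluate_repeatedly(
        '/tmp/my_model_dir',
        eval_ops=list(names_to_updates.values()),
        final_ops=names_to_values,
        hooks=[tf.contrib.training.StopAfterNEvalsHook(100)],
        max_number_of_evaluations=1)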
One may also consider using a `tf.contrib.training.SummaryAtEndHook` to record
summaries after the `eval_ops` have run. If `eval_ops` is `None`, the
summaries run immediately after the model checkpoint has been restored.
Note that `evaluate_once` creates a local variable used to track the number of
evaluations run via `tf.contrib.training.get_or_create_eval_step`.
Consequently, if a custom local init op is provided via a `scaffold`, the
caller should ensure that the local init op also initializes the eval step.
Args:
checkpoint_dir: The directory where checkpoints are stored.
master: The address of the TensorFlow master.
    scaffold: A tf.compat.v1.train.Scaffold instance for initializing variables
and restoring variables. Note that `scaffold.init_fn` is used by the
function to restore the checkpoint. If you supply a custom init_fn, then
it must also take care of restoring the model from its checkpoint.
eval_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names to
`Tensors`, which is run until the session is requested to stop, commonly
done by a `tf.contrib.training.StopAfterNEvalsHook`.
feed_dict: The feed dictionary to use when executing the `eval_ops`.
final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names
to `Tensors`.
final_ops_feed_dict: A feed dictionary to use when evaluating `final_ops`.
eval_interval_secs: The minimum number of seconds between evaluations.
hooks: List of `tf.estimator.SessionRunHook` callbacks which are run inside
the evaluation loop.
config: An instance of `tf.compat.v1.ConfigProto` that will be used to
configure the `Session`. If left as `None`, the default will be used.
max_number_of_evaluations: The maximum times to run the evaluation. If left
as `None`, then evaluation runs indefinitely.
timeout: The maximum number of seconds to wait between checkpoints. If left
as `None`, then the process will wait indefinitely.
timeout_fn: Optional function to call after a timeout. If the function
returns True, then it means that no new checkpoints will be generated and
the iterator will exit. The function is called with no arguments.
Returns:
The fetched values of `final_ops` or `None` if `final_ops` is `None`.
"""
eval_step = get_or_create_eval_step()
# Prepare the run hooks.
hooks = hooks or []
if eval_ops is not None:
update_eval_step = state_ops.assign_add(eval_step, 1)
for h in hooks:
if isinstance(h, StopAfterNEvalsHook):
h._set_evals_completed_tensor(update_eval_step) # pylint: disable=protected-access
if isinstance(eval_ops, dict):
eval_ops['update_eval_step'] = update_eval_step
elif isinstance(eval_ops, (tuple, list)):
eval_ops = list(eval_ops) + [update_eval_step]
else:
eval_ops = [eval_ops, update_eval_step]
final_ops_hook = basic_session_run_hooks.FinalOpsHook(final_ops,
final_ops_feed_dict)
hooks.append(final_ops_hook)
num_evaluations = 0
for checkpoint_path in checkpoints_iterator(
checkpoint_dir,
min_interval_secs=eval_interval_secs,
timeout=timeout,
timeout_fn=timeout_fn):
session_creator = monitored_session.ChiefSessionCreator(
scaffold=scaffold,
checkpoint_filename_with_path=checkpoint_path,
master=master,
config=config)
with monitored_session.MonitoredSession(
session_creator=session_creator, hooks=hooks) as session:
logging.info('Starting evaluation at ' +
time.strftime('%Y-%m-%d-%H:%M:%S', time.gmtime()))
if eval_ops is not None:
while not session.should_stop():
session.run(eval_ops, feed_dict)
logging.info('Finished evaluation at ' +
time.strftime('%Y-%m-%d-%H:%M:%S', time.gmtime()))
num_evaluations += 1
if (max_number_of_evaluations is not None and
num_evaluations >= max_number_of_evaluations):
return final_ops_hook.final_ops_values
return final_ops_hook.final_ops_values
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/training/python/training/evaluation.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SequenceQueueingStateSaver and wrappers.
Please see the reading data how-to for context.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numbers
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
# pylint: disable=protected-access
_restore_sparse = sparse_ops._take_many_sparse_from_tensors_map
_store_sparse = sparse_ops._add_many_sparse_to_tensors_map
# pylint: enable=protected-access
class _SequenceInputWrapper(object):
"""A wrapper object for storing sequence-related input.
  The `_SequenceInputWrapper` accepts four objects:
length: A scalar int containing the length of the input sequence.
key: A scalar string containing the unique key of the input sequence.
sequences: A dict mapping labels, like `input`, to tensors
whose initial index dimension is at least size `length`.
context: A dict mapping labels, like `global_target`, to tensors
that represent data across the entire example.
"""
def __init__(self, length, key, sequences, context):
length = ops.convert_to_tensor(length, name="length")
key = ops.convert_to_tensor(key, name="key")
if not isinstance(sequences, dict):
raise TypeError("sequences must be a dict")
if not isinstance(context, dict):
raise TypeError("context must be a dict")
if not sequences:
raise ValueError("must have at least one sequence tensor")
for k in sequences.keys():
if not isinstance(k, six.string_types):
raise TypeError("sequence key must be string: %s" % k)
if ":" in k:
raise ValueError("sequence key may not have a colon: '%s'" % k)
for k in context.keys():
if not isinstance(k, six.string_types):
raise TypeError("context key must be string: %s" % k)
if ":" in k:
raise ValueError("context key may not have a colon: '%s'" % k)
sequences = dict((k, ops.convert_to_tensor(
v, name="sequence_%s" % k)) for k, v in sequences.items())
context = dict((k, ops.convert_to_tensor(
v, name="context_%s" % k)) for k, v in context.items())
self._length = length
self._key = key
self._sequences = sequences
self._context = context
@property
def length(self):
return self._length
@property
def key(self):
return self._key
@property
def sequences(self):
return self._sequences
@property
def context(self):
return self._context
def _check_multiple_of(value, multiple_of):
"""Checks that value `value` is a non-zero multiple of `multiple_of`.
Args:
value: an int32 scalar Tensor.
multiple_of: an int or int32 scalar Tensor.
Returns:
new_value: an int32 scalar Tensor matching `value`, but which includes an
assertion that `value` is a multiple of `multiple_of`.
"""
assert isinstance(value, ops.Tensor)
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.logical_and(
math_ops.equal(math_ops.mod(value, multiple_of), 0),
math_ops.not_equal(value, 0)), [
string_ops.string_join([
"Tensor %s should be a multiple of: " % value.name,
string_ops.as_string(multiple_of), ", but saw value: ",
string_ops.as_string(value),
". Consider setting pad=True."
])
])
]):
new_value = array_ops.identity(value, name="multiple_of_checked")
return new_value
def _check_rank(value, expected_rank):
"""Check the rank of Tensor `value`, via shape inference and assertions.
Args:
    value: A Tensor, possibly with associated shape information.
expected_rank: int32 scalar (optionally a `Tensor`).
Returns:
new_value: A Tensor matching `value`. Accessing this tensor tests
assertions on its rank. If expected_rank is not a `Tensor`, then
new_value's shape's rank has been set.
Raises:
ValueError: if `expected_rank` is not a `Tensor` and the rank of `value`
is known and is not equal to `expected_rank`.
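  For example, mirroring the use in `_prepare_sequence_inputs` below:
    length = _check_rank(input_length, 0)  # Asserts that input_length is scalar.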
"""
assert isinstance(value, ops.Tensor)
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.equal(expected_rank, array_ops.rank(value)), [
string_ops.string_join([
"Rank of tensor %s should be: " % value.name,
string_ops.as_string(expected_rank), ", shape received:"
]), array_ops.shape(value)
])
]):
new_value = array_ops.identity(value, name="rank_checked")
if isinstance(expected_rank, ops.Tensor):
expected_rank_value = tensor_util.constant_value(expected_rank)
if expected_rank_value is not None:
expected_rank = int(expected_rank_value)
if not isinstance(expected_rank, ops.Tensor):
try:
new_value.set_shape(new_value.get_shape().with_rank(expected_rank))
except ValueError as e:
raise ValueError("Rank check failed for %s: %s" % (value.name, str(e)))
return new_value
def _check_shape(value, expected_shape):
"""Check the shape of Tensor `value`, via shape inference and assertions.
Args:
    value: A Tensor, possibly with associated shape information.
expected_shape: a `TensorShape`, list of `int32`, or a vector `Tensor`.
Returns:
new_value: A Tensor matching `value`. Accessing this tensor tests
assertions on its shape. If expected_shape is not a `Tensor`, then
new_value's shape has been set.
Raises:
ValueError: if `expected_shape` is not a `Tensor` and the shape of `value`
is known and is not equal to `expected_shape`.
"""
assert isinstance(value, ops.Tensor)
if isinstance(expected_shape, tensor_shape.TensorShape):
expected_shape = expected_shape.as_list()
if isinstance(expected_shape, ops.Tensor):
expected_shape_value = tensor_util.constant_value(expected_shape)
if expected_shape_value is not None:
expected_shape = [int(d) for d in expected_shape_value]
if isinstance(expected_shape, ops.Tensor):
value = _check_rank(value, array_ops.size(expected_shape))
else:
value = _check_rank(value, len(expected_shape))
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.reduce_all(
math_ops.equal(expected_shape, array_ops.shape(value))), [
string_ops.string_join([
"Shape of tensor %s should be: " % value.name,
string_ops.as_string(expected_shape),
", shape received: ",
string_ops.as_string(array_ops.shape(value))
])
])
]):
new_value = array_ops.identity(value, name="shape_checked")
if not isinstance(expected_shape, ops.Tensor):
try:
new_value.set_shape(new_value.get_shape().merge_with(expected_shape))
except ValueError as e:
raise ValueError("Shape check failed for %s: %s" % (value.name, str(e)))
return new_value
def _check_dimensions(value, dimensions, expected_sizes, debug_prefix):
"""Check the dimensions of Tensor `value`, via shape inference and assertions.
Args:
    value: A Tensor, with optional / partial associated shape information.
dimensions: An int list, the dimensions to check.
expected_sizes: list of mixed ints and int32 scalar tensors.
Optionally also a vector `Tensor`.
debug_prefix: A string, used for naming ops and printing debugging messages.
Returns:
new_value: A Tensor matching `value`. Accessing this tensor tests
assertions on its shape. If expected_sizes is not a `Tensor`, then
new_value's shape has been set for all `dimensions[i]` where
`expected_sizes[i]` is not a `Tensor`.
Raises:
TypeError: if any of the input contains invalid types:
if `value` is not a `Tensor`.
if `dimensions` is not a `list` or `tuple`.
ValueError: if input has incorrect sizes or inferred shapes do not match:
if `dimensions` contains repeated dimensions.
      if `expected_sizes` is not a `Tensor` and its length does not match that
        of `dimensions`.
if `value`'s shape has a well-defined rank, and one of the values in
`dimensions` is equal to or above this rank.
if `value`'s shape is well defined for some `dimensions[i]`, and
`expected_sizes[i]` is not a `Tensor`, and these two values do
not match.
"""
if not isinstance(dimensions, (list, tuple)):
raise TypeError("dimensions must be a list or tuple")
if len(set(dimensions)) != len(dimensions):
raise ValueError("dimensions are not unique: %s" % dimensions)
if not isinstance(value, ops.Tensor):
raise TypeError("value is not a Tensor: %s" % value)
value_shape = value.get_shape()
if not isinstance(expected_sizes, ops.Tensor):
if len(dimensions) != len(expected_sizes):
raise ValueError("len(dimensions) != len(expected_sizes): %d vs. %d" %
(len(dimensions), len(expected_sizes)))
if value_shape.ndims is not None:
if value_shape.ndims <= max(dimensions):
raise ValueError(
"%s: rank of input is not greater than max(dimensions): "
"%d vs. %d" % (debug_prefix, value.get_shape().ndims,
max(dimensions)))
value_dims = value_shape.as_list()
for d, s in zip(dimensions, expected_sizes):
if not isinstance(s, ops.Tensor):
value_dims[d] = s
try:
value.set_shape(value.get_shape().merge_with(value_dims))
except ValueError as e:
raise ValueError("Dimensions check failed for %s: %s" %
(debug_prefix, str(e)))
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.equal(expected_size, array_ops.shape(value)[dimension]), [
string_ops.string_join([
"Dimension %d of tensor labeled %s should be: " %
(dimension, debug_prefix),
string_ops.as_string(expected_size), ", shape received: ",
string_ops.as_string(array_ops.shape(value))
])
]) for (dimension, expected_size) in zip(dimensions, expected_sizes)
]):
new_value = array_ops.identity(value, name="dims_checked_%s" % debug_prefix)
return new_value
def _prepare_sequence_inputs(inputs, states):
"""Convert input to tensors and validate shape information.
Args:
inputs: A `_SequenceInputWrapper` instance.
states: A dictionary mapping state names to input constants or tensors.
Returns:
The tuple (length, key, sorted_states, sorted_sequences, sorted_context),
where each value has been checked for valid shape, and the sorted_* dicts
are instances of OrderedDict; with key-value pairs sorted by key.
Raises:
ValueError: if the shapes of inputs.context.values(), states.values(),
or inputs.sequences.values() are not fully defined (with the exception
      of the first dimension of any `Tensor` in inputs.sequences.values()).
TypeError: if the dtype of length is not int32.
"""
# Convert state initial values to tensors
states = dict((k, ops.convert_to_tensor(
v, name="state_%s" % k)) for k, v in states.items())
def _assert_fully_defined(label, dict_, ignore_first_dimension=False):
start_dimension = 1 if ignore_first_dimension else 0
for k, v in dict_.items():
if not v.get_shape()[start_dimension:].is_fully_defined():
raise ValueError("Shape for %s %s is not fully defined %s: %s" %
(label, k, "(ignoring first dimension)" if
ignore_first_dimension else "", v.get_shape()))
_assert_fully_defined("state", states)
_assert_fully_defined("context", inputs.context)
# Sequences' first dimension (time) may be variable
_assert_fully_defined(
"sequence", inputs.sequences, ignore_first_dimension=True)
# Get dictionaries' dtypes ordered by name - ordering is important
# when switching between dicts and tuples for passing to Barrier.
def _sort_by_name(d):
return collections.OrderedDict(sorted(d.items(), key=lambda k_v: k_v[0]))
sorted_sequences = _sort_by_name(inputs.sequences)
sorted_context = _sort_by_name(inputs.context)
sorted_states = _sort_by_name(states)
length = _check_rank(inputs.length, 0)
key = _check_rank(inputs.key, 0)
if length.dtype != dtypes.int32:
raise TypeError("length dtype must be int32, but received: %s" %
length.dtype)
if key.dtype != dtypes.string:
raise TypeError("key dtype must be string, but received: %s" % key.dtype)
return (length, key, sorted_states, sorted_sequences, sorted_context)
# NextQueuedSequenceBatch works closely with
# SequenceQueueingStateSaver and requires access to its private properties
# pylint: disable=protected-access
class NextQueuedSequenceBatch(object):
"""NextQueuedSequenceBatch stores deferred SequenceQueueingStateSaver data.
This class is instantiated by `SequenceQueueingStateSaver` and is accessible
via its `next_batch` property.
"""
def __init__(self, state_saver):
self._state_saver = state_saver
@property
def total_length(self):
"""The lengths of the original (non-truncated) unrolled examples.
Returns:
An integer vector of length `batch_size`, the total lengths.
"""
return self._state_saver._received_total_length
@property
def length(self):
"""The lengths of the given truncated unrolled examples.
For initial iterations, for which `sequence * num_unroll < length`,
this number is `num_unroll`. For the remainder,
this number is between `0` and `num_unroll`.
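    For example (illustrative values), with `length == 45` and
    `num_unroll == 20`, the example is split into three segments whose `length`
    values are `20`, `20` and `5`, respectively.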
Returns:
An integer vector of length `batch_size`, the lengths.
"""
return self._state_saver._received_length
@property
def batch_size(self):
"""The batch_size of the given batch.
Usually, this is the batch_size requested when initializing the SQSS, but
if allow_small_batch=True this will become smaller when inputs are
exhausted.
Returns:
A scalar integer tensor, the batch_size
"""
return self._state_saver._received_batch_size
@property
def insertion_index(self):
"""The insertion indices of the examples (when they were first added).
These indices start with the value -2**63 and increase with every
call to the prefetch op. Each whole example gets its own insertion
index, and this is used to prioritize the example so that its truncated
segments appear in adjacent iterations, even if new examples are inserted
by the prefetch op between iterations.
Returns:
An int64 vector of length `batch_size`, the insertion indices.
"""
return self._state_saver._received_indices
@property
def key(self):
"""The key names of the given truncated unrolled examples.
The format of the key is:
```python
"%05d_of_%05d:%s" % (sequence, sequence_count, original_key)
```
where `original_key` is the unique key read in by the prefetcher.
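    For example (an illustrative case), an input with `original_key == "ex7"`
    that was split into `sequence_count == 3` segments is given the keys
    `"00000_of_00003:ex7"`, `"00001_of_00003:ex7"` and `"00002_of_00003:ex7"`.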
Returns:
A string vector of length `batch_size`, the keys.
"""
return self._state_saver._received_keys
@property
def next_key(self):
"""The key names of the next (in iteration) truncated unrolled examples.
The format of the key is:
```python
"%05d_of_%05d:%s" % (sequence + 1, sequence_count, original_key)
```
if `sequence + 1 < sequence_count`, otherwise:
```python
"STOP:%s" % original_key
```
where `original_key` is the unique key read in by the prefetcher.
Returns:
A string vector of length `batch_size`, the keys.
"""
return self._state_saver._received_next_key
@property
def sequence(self):
"""An int32 vector, length `batch_size`: the sequence index of each entry.
When an input is split up, the sequence values
```
0, 1, ..., sequence_count - 1
```
are assigned to each split.
Returns:
An int32 vector `Tensor`.
"""
return self._state_saver._received_sequence
@property
def sequence_count(self):
"""An int32 vector, length `batch_size`: the sequence count of each entry.
When an input is split up, the number of splits is equal to:
`padded_length / num_unroll`. This is the sequence_count.
Returns:
An int32 vector `Tensor`.
"""
return self._state_saver._received_sequence_count
@property
def context(self):
"""A dict mapping keys of `input_context` to batched context.
Returns:
A dict mapping keys of `input_context` to tensors.
If we had at input:
```python
context["name"].get_shape() == [d1, d2, ...]
```
then for this property:
```python
context["name"].get_shape() == [batch_size, d1, d2, ...]
```
"""
return self._state_saver._received_context
@property
def sequences(self):
"""A dict mapping keys of `input_sequences` to split and rebatched data.
Returns:
A dict mapping keys of `input_sequences` to tensors.
If we had at input:
```python
sequences["name"].get_shape() == [None, d1, d2, ...]
```
where `None` meant the sequence time was dynamic, then for this property:
```python
sequences["name"].get_shape() == [batch_size, num_unroll, d1, d2, ...].
```
"""
return self._state_saver._received_sequences
def state(self, state_name):
"""Returns batched state tensors.
Args:
state_name: string, matches a key provided in `initial_states`.
Returns:
A `Tensor`: a batched set of states, either initial states (if this is
the first run of the given example), or a value as stored during
a previous iteration via `save_state` control flow.
Its type is the same as `initial_states["state_name"].dtype`.
If we had at input:
```python
initial_states[state_name].get_shape() == [d1, d2, ...],
```
then
```python
state(state_name).get_shape() == [batch_size, d1, d2, ...]
```
Raises:
KeyError: if `state_name` does not match any of the initial states
declared in `initial_states`.
"""
return self._state_saver._received_states[state_name]
def save_state(self, state_name, value, name=None):
"""Returns an op to save the current batch of state `state_name`.
Args:
state_name: string, matches a key provided in `initial_states`.
value: A `Tensor`.
Its type must match that of `initial_states[state_name].dtype`.
If we had at input:
```python
initial_states[state_name].get_shape() == [d1, d2, ...]
```
then the shape of `value` must match:
```python
tf.shape(value) == [batch_size, d1, d2, ...]
```
name: string (optional). The name scope for newly created ops.
Returns:
A control flow op that stores the new state of each entry into
the state saver. This op must be run for every iteration that
accesses data from the state saver (otherwise the state saver
will never progress through its states and run out of capacity).
Raises:
KeyError: if `state_name` does not match any of the initial states
declared in `initial_states`.
"""
if state_name not in self._state_saver._received_states.keys():
raise KeyError("state was not declared: %s" % state_name)
default_name = "InputQueueingStateSaver_SaveState"
with ops.name_scope(name, default_name, values=[value]):
# Place all operations on the CPU. Barriers and queues are only
# implemented for CPU, but all the other book-keeping operations
# (reshape, shape, range, ...) would be placed on GPUs if available,
# unless we explicitly tie them to CPU.
with ops.colocate_with(self._state_saver._capacity_queue.queue_ref):
indices_where_not_done = array_ops.reshape(
array_ops.where_v2(
math_ops.logical_not(self._state_saver._sequence_is_done)),
[-1])
keeping_next_key = array_ops.gather(
self._state_saver._received_next_key, indices_where_not_done)
value = _check_shape(
array_ops.identity(
value, name="convert_%s" % state_name),
array_ops.shape(self._state_saver._received_states[state_name]))
keeping_state = array_ops.gather(value, indices_where_not_done)
return self._state_saver._barrier.insert_many(
self._state_saver._get_barrier_index("state", state_name),
keeping_next_key,
keeping_state,
name="BarrierInsertState_%s" % state_name)
# pylint: enable=protected-access
class SequenceQueueingStateSaver(object):
"""SequenceQueueingStateSaver provides access to stateful values from input.
This class is meant to be used instead of, e.g., a `Queue`, for splitting
variable-length sequence inputs into segments of sequences with fixed length
and batching them into mini-batches. It maintains contexts and state for a
sequence across the segments. It can be used in conjunction with a
`QueueRunner` (see the example below).
The `SequenceQueueingStateSaver` (SQSS) accepts one example at a time via the
inputs `input_length`, `input_key`, `input_sequences` (a dict),
`input_context` (a dict), and `initial_states` (a dict).
The sequences, values in `input_sequences`, may have variable first dimension
(the `padded_length`), though this dimension must always be a multiple of
`num_unroll`. All other dimensions must be fixed and accessible via
`get_shape` calls. The length prior to padding can be recorded in
`input_length`. The context values in `input_context` must all have fixed and
well defined dimensions. The initial state values must all have fixed and
well defined dimensions.
The SQSS splits the sequences of an input example into segments of length
`num_unroll`. Across examples minibatches of size `batch_size` are formed.
These minibatches contain a segment of the sequences, copy the context values,
and maintain state, length, and key information of the original input
examples. In the first segment of an example the state is still the initial
state. It can then be updated; and updated state values are accessible in
subsequent segments of the same example. After each segment
`batch.save_state()` must be called which is done by the state_saving_rnn.
Without this call, the dequeue op associated with the SQSS will not run.
Internally, SQSS has a queue for the input examples. Its `capacity` is
configurable. If set smaller than `batch_size` then the dequeue op will block
indefinitely. A small multiple of `batch_size` is a good rule of thumb to
prevent that queue from becoming a bottleneck and slowing down training.
If set too large (and note that it defaults to unbounded) memory consumption
goes up. Moreover, when iterating over the same input examples multiple times
reusing the same `key` the `capacity` must be smaller than the number of
examples.
The prefetcher, which reads one unrolled, variable-length input sequence at
a time, is accessible via `prefetch_op`. The underlying `Barrier` object
is accessible via `barrier`. Processed minibatches, as well as
state read and write capabilities are accessible via `next_batch`.
Specifically, `next_batch` provides access to all of the minibatched
data, including the following, see `NextQueuedSequenceBatch` for details:
* `total_length`, `length`, `insertion_index`, `key`, `next_key`,
  * `sequence` (the time segment index of each minibatch entry),
* `sequence_count` (the total time segment count for each minibatch entry),
* `context` (a dict of the copied minibatched context values),
* `sequences` (a dict of the split minibatched variable-length sequences),
* `state` (to access the states of the current segments of these entries)
* `save_state` (to save the states for the next segments of these entries)
Example usage:
```python
batch_size = 32
num_unroll = 20
lstm_size = 8
cell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell(num_units=lstm_size)
initial_state_values = tf.zeros(cell.state_size, dtype=tf.float32)
raw_data = get_single_input_from_input_reader()
length, key, sequences, context = my_parser(raw_data)
assert "input" in sequences.keys()
assert "label" in context.keys()
initial_states = {"lstm_state": initial_state_value}
stateful_reader = tf.SequenceQueueingStateSaver(
batch_size, num_unroll,
length=length, input_key=key, input_sequences=sequences,
input_context=context, initial_states=initial_states,
capacity=batch_size*100)
batch = stateful_reader.next_batch
inputs = batch.sequences["input"]
context_label = batch.context["label"]
inputs_by_time = tf.split(value=inputs, num_or_size_splits=num_unroll, axis=1)
assert len(inputs_by_time) == num_unroll
lstm_output, _ = tf.contrib.rnn.static_state_saving_rnn(
cell,
inputs_by_time,
state_saver=batch,
state_name="lstm_state")
# Start a prefetcher in the background
  session = tf.compat.v1.Session()
num_threads = 3
queue_runner = tf.compat.v1.train.QueueRunner(
stateful_reader, [stateful_reader.prefetch_op] * num_threads)
tf.compat.v1.train.add_queue_runner(queue_runner)
tf.compat.v1.train.start_queue_runners(sess=session)
while True:
# Step through batches, perform training or inference...
session.run([lstm_output])
```
**Note**: Usually the barrier is given to a QueueRunner as in the
examples above. The QueueRunner will close the barrier if the prefetch_op
receives an OutOfRange Error from upstream input queues (i.e., reaches
the end of the input). If the barrier is closed no further new examples
are added to the SQSS. The underlying barrier might, however, still
contain further unroll-steps of examples that have not undergone all
iterations. To gracefully finish all examples, the flag
`allow_small_batch` must be set to true, which causes the SQSS to issue
progressively smaller mini-batches with the remaining examples.
"""
def __init__(self,
batch_size,
num_unroll,
input_length,
input_key,
input_sequences,
input_context,
initial_states,
capacity=None,
allow_small_batch=False,
name=None):
"""Creates the SequenceQueueingStateSaver.
Args:
batch_size: int or int32 scalar `Tensor`, how large minibatches should
be when accessing the `state()` method and `context`, `sequences`, etc,
properties.
num_unroll: Python integer, how many time steps to unroll at a time.
The input sequences of length `k` are then split into `k / num_unroll`
many segments.
input_length: An int32 scalar `Tensor`, the length of the sequence prior
to padding. This value may be at most `padded_length` for any given
input (see below for the definition of `padded_length`).
Batched and total lengths of the current iteration are made accessible
via the `length` and `total_length` properties. The shape of
input_length (scalar) must be fully specified.
input_key: A string scalar `Tensor`, the **unique** key for the given
input. This is used to keep track of the split minibatch elements
of this input. Batched keys of the current iteration are made
accessible via the `key` property. The shape of `input_key` (scalar)
must be fully specified.
input_sequences: A dict mapping string names to `Tensor` values. The
values must all have matching first dimension, called `padded_length`.
The `SequenceQueueingStateSaver` will split these tensors along
this first dimension into minibatch elements of dimension
`num_unroll`. Batched and segmented sequences of the current iteration
are made accessible via the `sequences` property.
**Note**: `padded_length` may be dynamic, and may vary from input
to input, but must always be a multiple of `num_unroll`. The remainder
of the shape (other than the first dimension) must be fully specified.
input_context: A dict mapping string names to `Tensor` values. The values
are treated as "global" across all time splits of the given input,
and will be copied across for all minibatch elements accordingly.
Batched and copied context of the current iteration are made
accessible via the `context` property.
**Note**: All input_context values must have fully defined shapes.
initial_states: A dict mapping string state names to multi-dimensional
values (e.g. constants or tensors). This input defines the set of
states that will be kept track of during computing iterations, and
which can be accessed via the `state` and `save_state` methods.
**Note**: All initial_state values must have fully defined shapes.
capacity: The max capacity of the SQSS in number of examples. Needs to be
at least `batch_size`. Defaults to unbounded.
allow_small_batch: If true, the SQSS will return smaller batches when
there aren't enough input examples to fill a whole batch and the end of
the input has been reached (i.e., the underlying barrier has been
closed).
name: An op name string (optional).
Raises:
TypeError: if any of the inputs is not an expected type.
ValueError: if any of the input values is inconsistent, e.g. if
not enough shape information is available from inputs to build
the state saver.
"""
if capacity is not None and isinstance(batch_size, ops.Tensor):
with ops.control_dependencies([check_ops.assert_greater_equal(
math_ops.cast(capacity, dtype=dtypes.int64),
math_ops.cast(batch_size, dtype=dtypes.int64),
message="capacity needs to be >= batch_size.")]):
input_key = array_ops.identity(input_key)
elif capacity is not None and capacity < batch_size:
raise ValueError("capacity %d needs to be >= batch_size %d" % (
capacity, batch_size))
# The barrier is ignorant of the number of actual examples, since a long
# example that requires many iterations produces more elements in the
# barrier than a short example. Furthermore, we don't have an upper bound
# on the length of examples, and hence have to keep the capacity of the
# barrier at infinite to avoid dead-lock. Instead we have to keep track of
# the number of active examples in this class, and block the prefetch_op
# when capacity is reached. To this end, we employ a FIFOQueue in which we
# store one token (its value doesn't matter) for each input example, and
# dequeue a token for each completed example. Since the capacity of this
# queue is limited the enqueue operation will block if capacity is reached.
self._capacity_queue = data_flow_ops.FIFOQueue(
capacity=capacity, dtypes=[dtypes.int32], shapes=[[]])
# Place all operations on the CPU. Barriers and queues are only implemented
# for CPU, but all the other book-keeping operations
# (reshape, shape, range, ...) would be placed on GPUs if available,
# unless we explicitly tie them to CPU.
with ops.colocate_with(self._capacity_queue.queue_ref):
if not isinstance(initial_states, dict):
raise TypeError("initial_states must be a dictionary")
if not initial_states:
raise ValueError(
"initial_states may not be empty: at least one state variable is "
"required to properly enqueue split sequences to run in separate "
"iterations")
for k in initial_states:
if not isinstance(k, six.string_types):
raise TypeError("state name must be a string: %s" % k)
if ":" in k:
raise ValueError("state name may not have a colon: '%s'" % k)
op_vars = ([input_length, input_key] + list(input_sequences.values()) +
list(input_context.values()))
with ops.name_scope(name, "InputQueueingStateSaver", op_vars) as scope:
inputs = _SequenceInputWrapper(input_length, input_key, input_sequences,
input_context)
self._batch_size = batch_size
self._num_unroll = num_unroll
self._name = scope
# This step makes sure all shapes are well defined. We can now
# use get_shape() on any tensor in the output of this function
# and get a fully-defined shape.
(self._length, self._key, self._sorted_states, self._sorted_sequences,
self._sorted_context) = _prepare_sequence_inputs(inputs,
initial_states)
self._padded_length = array_ops.identity(
array_ops.shape(six.next(six.itervalues(self._sorted_sequences)))[
0],
name="padded_length") # The name is useful for debugging
self._padded_length = _check_multiple_of(self._padded_length,
self._num_unroll)
        # All sequences should have first dimension equal to self._padded_length.
self._sorted_sequences = collections.OrderedDict(
(k, _check_dimensions(
v, [0], [self._padded_length],
debug_prefix="sorted_sequences_%s" % k))
for k, v in self._sorted_sequences.items())
self._uninitialized_states = self._sorted_states
# Once this is set, self._get_barrier_*_index are available for use.
self._store_index_maps(self._sorted_sequences, self._sorted_context,
self._sorted_states)
# Make sure that the length is <= the padded_length
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.less_equal(self._length, self._padded_length), [
"Input length should be <= than length from sequences:",
self._length, " vs. ", self._padded_length
])
]):
self._length = array_ops.identity(self._length)
# Only create barrier; enqueue and dequeue operations happen when you
# access prefetch_op and next_batch.
self._create_barrier()
self._scope = scope
self._allow_small_batch = allow_small_batch
self._prefetch_op = None
self._next_batch = None
@property
def name(self):
return self._name
@property
def barrier(self):
return self._barrier
@property
def batch_size(self):
return self._batch_size
@property
def num_unroll(self):
return self._num_unroll
@property
def prefetch_op(self):
"""The op used to prefetch new data into the state saver.
Running it once enqueues one new input example into the state saver.
The first time this gets called, it additionally creates the prefetch_op.
Subsequent calls simply return the previously created `prefetch_op`.
It should be run in a separate thread via e.g. a `QueueRunner`.
Returns:
An `Operation` that performs prefetching.
"""
if not self._prefetch_op:
with ops.name_scope(None), ops.name_scope(
self._scope, values=[self._barrier.barrier_ref]):
self._create_prefetch_op()
return self._prefetch_op
@property
def next_batch(self):
"""The `NextQueuedSequenceBatch` providing access to batched output data.
Also provides access to the `state` and `save_state` methods.
The first time this gets called, it additionally prepares barrier reads
and creates `NextQueuedSequenceBatch` / next_batch objects. Subsequent
calls simply return the previously created `next_batch`.
In order to access data in `next_batch` without blocking, the `prefetch_op`
must have been run at least `batch_size` times (ideally in a separate
thread, or launched via a `QueueRunner`). After processing a segment in
`next_batch()`, `batch.save_state()` must be called which is done by the
state_saving_rnn. Without this call, the dequeue op associated with the SQSS
will not run.
Returns:
A cached `NextQueuedSequenceBatch` instance.
"""
# This is needed to prevent errors if next_batch is called before
# prefetch_op is created.
if not self._prefetch_op:
with ops.name_scope(None), ops.name_scope(
self._scope, values=[self._barrier.barrier_ref]):
self._create_prefetch_op()
if not self._next_batch:
with ops.name_scope(None), ops.name_scope(
self._scope, values=[self._barrier.barrier_ref]):
self._prepare_barrier_reads()
return self._next_batch
def close(self, cancel_pending_enqueues=False, name=None):
"""Closes the barrier and the FIFOQueue.
This operation signals that no more segments of new sequences will be
enqueued. New segments of already inserted sequences may still be enqueued
and dequeued if there is a sufficient number filling a batch or
allow_small_batch is true. Otherwise dequeue operations will fail
immediately.
Args:
cancel_pending_enqueues: (Optional.) A boolean, defaulting to
`False`. If `True`, all pending enqueues to the underlying queues will
be cancelled, and completing already started sequences is not possible.
name: Optional name for the op.
Returns:
The operation that closes the barrier and the FIFOQueue.
"""
with ops.name_scope(name, "SQSSClose", [self._prefetch_op]) as name:
barrier_close = self.barrier.close(cancel_pending_enqueues,
"BarrierClose")
fifo_queue_close = self._capacity_queue.close(cancel_pending_enqueues,
"FIFOClose")
return control_flow_ops.group(barrier_close, fifo_queue_close, name=name)
def _store_index_maps(self, sequences, context, states):
"""Prepares the internal dictionaries _name_to_index and _index_to_name.
These dictionaries are used to keep track of indices into the barrier.
Args:
sequences: `OrderedDict` of string, `Tensor` pairs.
context: `OrderedDict` of string, `Tensor` pairs.
states: `OrderedDict` of string, `Tensor` pairs.
"""
assert isinstance(sequences, dict)
assert isinstance(context, dict)
assert isinstance(states, dict)
self._name_to_index = {
name: ix
for (ix, name) in enumerate([
"__length", "__total_length", "__next_key", "__sequence",
"__sequence_count"
] + ["__sequence__%s" % k for k in sequences.keys()] + [
"__context__%s" % k for k in context.keys()
] + ["__state__%s" % k for k in states.keys()])}
self._index_to_name = [
name
for (name, _) in sorted(
self._name_to_index.items(), key=lambda n_ix: n_ix[1])
]
def _get_barrier_length_index(self):
return self._name_to_index["__length"]
def _get_barrier_total_length_index(self):
return self._name_to_index["__total_length"]
def _get_barrier_next_key_index(self):
return self._name_to_index["__next_key"]
def _get_barrier_sequence_index(self):
return self._name_to_index["__sequence"]
def _get_barrier_sequence_count_index(self):
return self._name_to_index["__sequence_count"]
def _get_barrier_index(self, index_type, name):
assert index_type in ("sequence", "context", "state")
key = "__%s__%s" % (index_type, name)
assert key in self._name_to_index, (
"Requested a name not in the value type %s: %s" % (index_type, name))
return self._name_to_index[key]
def _create_barrier(self):
"""Create the barrier.
This method initializes the Barrier object with the right types and shapes.
"""
# Create the barrier
sequence_dtypes = [v.dtype for k, v in self._sorted_sequences.items()]
context_dtypes = [v.dtype for k, v in self._sorted_context.items()]
state_dtypes = [v.dtype for k, v in self._sorted_states.items()]
types = ([
dtypes.int32, # length
dtypes.int32, # total_length
dtypes.string, # next_keys
dtypes.int32, # sequence
dtypes.int32
] # expanded_sequence_count
+ sequence_dtypes + context_dtypes + state_dtypes)
sequence_shapes = [
[self._num_unroll] + self._sorted_sequences[k].get_shape().as_list()[1:]
for k in self._sorted_sequences.keys()
]
context_shapes = [
self._sorted_context[k].get_shape().as_list()
for k in self._sorted_context.keys()
]
state_shapes = [
self._sorted_states[k].get_shape().as_list()
for k in self._sorted_states.keys()
]
shapes = ([
(), # length
(), # total_length
(), # next_keys
(), # sequence
()
] # expanded_sequence_count
+ sequence_shapes + context_shapes + state_shapes)
self._barrier = data_flow_ops.Barrier(types=types, shapes=shapes)
def _create_prefetch_op(self):
"""Group insert_many ops and create prefetch_op.
This method implements the "meat" of the logic underlying the
`SequenceQueueingStateSaver`. It performs dynamic reshaping of
sequences, copying of context, and initial insertion of these values,
as well as the key, next_key, sequence, sequence_count, and initial
states into the barrier.
"""
# Step 1: identify how many barrier entries to split this input
# into, store the result as a scalar
sequence_count = math_ops.div(self._padded_length, self._num_unroll)
sequence_count_vec = array_ops.expand_dims(sequence_count, 0)
# The final unrolled sequence's length is num_unroll only in
# the case that num_unroll divides it evenly.
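    # For example (illustrative values): with length == 45, num_unroll == 20
    # and padded_length == 60, sequence_count is 3 and expanded_length below
    # works out to [20, 20, 5].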
ones = array_ops.ones(sequence_count_vec, dtype=dtypes.int32)
sequence = math_ops.range(sequence_count)
expanded_length = math_ops.maximum(
0, self._length - self._num_unroll * sequence)
expanded_length = math_ops.minimum(self._num_unroll, expanded_length)
expanded_total_length = self._length * ones
expanded_sequence_count = sequence_count * ones
current_keys = string_ops.string_join(
[
string_ops.as_string(
sequence, width=5, fill="0"), "_of_", string_ops.as_string(
sequence_count, width=5, fill="0"), ":", self._key
],
name="StringJoinCurrentKeys")
next_keys = array_ops.concat(
[
array_ops.slice(current_keys, [1], [-1]), array_ops.expand_dims(
string_ops.string_join(
["STOP:", self._key], name="StringJoinStop"),
0)
],
0,
name="concat_next_keys")
reshaped_sequences = collections.OrderedDict((
k,
_check_dimensions(
# Reshape sequences to sequence_count rows
array_ops.reshape(
v,
array_ops.concat(
[
array_ops.expand_dims(sequence_count, 0),
array_ops.expand_dims(self._num_unroll, 0),
v.get_shape().as_list()[1:]
],
0,
name="concat_sequences_%s" % k),
name="reshape_sequences_%s" % k),
[0, 1] + list(range(2, v.get_shape().ndims + 1)),
[sequence_count, self._num_unroll] + v.get_shape().as_list()[1:],
debug_prefix="reshaped_sequences_%s" %
k)) for k, v in self._sorted_sequences.items())
expanded_context = collections.OrderedDict(
(
k,
_check_dimensions(
# Copy context to be sequence_count rows
array_ops.tile(
array_ops.expand_dims(v, 0),
array_ops.concat(
[
array_ops.expand_dims(sequence_count, 0),
[1] * v.get_shape().ndims
],
0,
name="concat_context_%s" % k),
name="tile_context_%s" % k),
[0] + list(range(1, v.get_shape().ndims + 1)),
[sequence_count] + v.get_shape().as_list(),
debug_prefix="expanded_context_%s" % k))
for k, v in self._sorted_context.items())
# Storing into the barrier, for each current_key:
# sequence_ix, sequence_count, next_key, length,
# context... (copied), sequences... (truncated)
# Also storing into the barrier for the first key
# states (using initial_states).
insert_sequence_op = self._barrier.insert_many(
self._get_barrier_sequence_index(),
current_keys,
sequence,
name="BarrierInsertSequence")
insert_sequence_count_op = self._barrier.insert_many(
self._get_barrier_sequence_count_index(),
current_keys,
expanded_sequence_count,
name="BarrierInsertSequenceCount")
insert_next_key_op = self._barrier.insert_many(
self._get_barrier_next_key_index(),
current_keys,
next_keys,
name="BarrierInsertNextKey")
insert_length_op = self._barrier.insert_many(
self._get_barrier_length_index(),
current_keys,
expanded_length,
name="BarrierInsertLength")
insert_total_length_op = self._barrier.insert_many(
self._get_barrier_total_length_index(),
current_keys,
expanded_total_length,
name="BarrierInsertTotalLength")
insert_context_ops = dict((name, self._barrier.insert_many(
self._get_barrier_index("context", name),
current_keys,
value,
name="BarrierInsertContext_%s" % name))
for (name, value) in expanded_context.items())
insert_sequences_ops = dict((name, self._barrier.insert_many(
self._get_barrier_index("sequence", name),
current_keys,
value,
name="BarrierInsertSequences_%s" % name))
for (name, value) in reshaped_sequences.items())
# An op that blocks if we reached capacity in number of active examples.
TOKEN_WITH_IGNORED_VALUE = 21051976 # pylint: disable=invalid-name
insert_capacity_token_op = self._capacity_queue.enqueue(
(TOKEN_WITH_IGNORED_VALUE,))
# Insert just the initial state. Specifically force this to run
# the insert sequence op *first* so that the Barrier receives
# an insert with *all* the segments and the segments all get the same index.
with ops.control_dependencies(
[insert_sequence_op, insert_capacity_token_op]):
insert_initial_state_ops = dict(
(name, self._barrier.insert_many(
self._get_barrier_index("state", name),
array_ops.stack([current_keys[0]]),
array_ops.stack([value]),
name="BarrierInitialInsertState_%s" % name))
for (name, value) in self._uninitialized_states.items())
all_inserts = ([
insert_capacity_token_op, insert_sequence_op, insert_sequence_count_op,
insert_next_key_op, insert_length_op, insert_total_length_op
] + list(insert_initial_state_ops.values()) +
list(insert_context_ops.values()) +
list(insert_sequences_ops.values()))
self._prefetch_op = control_flow_ops.group(
*all_inserts, name="StateSaverPrefetchGroup")
def _prepare_barrier_reads(self):
"""Creates ops for reading the barrier, as used by properties like `length`.
"""
# Ops for reading from the barrier. These ops must be run in a
# different thread than the prefetcher op to avoid blocking.
received = self._barrier.take_many(
self._batch_size, self._allow_small_batch, name="BarrierTakeMany")
self._received_indices = received[0]
self._received_keys = received[1]
received_values = received[2]
self._received_sequence = received_values[self._get_barrier_sequence_index(
)]
self._received_sequence_count = received_values[
self._get_barrier_sequence_count_index()]
self._received_next_key = received_values[self._get_barrier_next_key_index(
)]
self._received_length = received_values[self._get_barrier_length_index()]
self._received_total_length = received_values[
self._get_barrier_total_length_index()]
self._received_context = collections.OrderedDict(
(name, received_values[self._get_barrier_index("context", name)])
for name in self._sorted_context.keys())
self._received_sequences = collections.OrderedDict(
(name, received_values[self._get_barrier_index("sequence", name)])
for name in self._sorted_sequences.keys())
self._received_batch_size = array_ops.squeeze(
array_ops.shape(self._received_length))
# Which examples are we done with?
self._sequence_is_done = (
self._received_sequence + 1 >= self._received_sequence_count)
# Compute the number of finished sequences and dequeue as many tokens from
# the capacity queue.
finished_sequences = (math_ops.reduce_sum(
math_ops.cast(self._sequence_is_done, dtypes.int32)))
# TODO(ebrevdo): convert to dequeue_up_to when FIFOQueue supports it.
dequeue_op = self._capacity_queue.dequeue_many(finished_sequences)
# Tie the dequeue_op to the received_state, such that it is definitely
# carried out.
with ops.control_dependencies([dequeue_op]):
self._received_states = collections.OrderedDict(
(name, array_ops.identity(received_values[self._get_barrier_index(
"state", name)])) for name in self._sorted_states.keys())
self._next_batch = NextQueuedSequenceBatch(self)
def batch_sequences_with_states(input_key,
input_sequences,
input_context,
input_length,
initial_states,
num_unroll,
batch_size,
num_threads=3,
capacity=1000,
allow_small_batch=True,
pad=True,
make_keys_unique=False,
make_keys_unique_seed=None,
name=None):
"""Creates batches of segments of sequential input.
This method creates a `SequenceQueueingStateSaver` (SQSS) and adds it to
  the queue runners. It returns a `NextQueuedSequenceBatch`.
  It accepts one example at a time identified by a unique `input_key`.
  `input_sequences` is a dict with values that are tensors with time as the
  first dimension. This time dimension must be the same across those tensors of
  an example. It can vary across examples, although it always has to be a
  multiple of `num_unroll`. Hence, padding may be necessary, and it is turned on
  by default via `pad=True`.
`input_length` is a Tensor scalar or an int recording the time dimension prior
to padding. It should be between 0 and the time dimension. One reason we want
to keep track of it is so that we can take it into consideration when
computing the loss. If `pad=True` then `input_length` can be `None` and will
be inferred.
  This method segments `input_sequences` into segments of length `num_unroll`.
It batches input sequences from `batch_size` many examples. These mini-batches
are available through the `sequence` property of the output. Moreover, for
each entry in the batch we can access its original `input_key` in `key` and
its input length in `total_length`. `length` records within this segment how
many non-padded time steps there are.
Static features of an example that do not vary across time can be part of the
`input_context`, a dict with Tensor values. This method copies the context for
each segment and makes it available in the `context` of the output.
This method can maintain and update a state for each example. It accepts some
  `initial_states` as a dict with Tensor values. The first mini-batch in which
  an example appears uses its `initial_states` entry as the `state`. If
  `save_state` is called, the next segment of that example sees the updated
  `state`.
See `NextQueuedSequenceBatch` for a complete list of properties and methods.
Example usage:
```python
batch_size = 32
num_unroll = 20
num_enqueue_threads = 3
lstm_size = 8
cell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell(num_units=lstm_size)
key, sequences, context = my_parser(raw_data)
  initial_state_values = tf.zeros((lstm_size,), dtype=tf.float32)
initial_states = {"lstm_state": initial_state_values}
  batch = tf.contrib.training.batch_sequences_with_states(
input_key=key,
input_sequences=sequences,
input_context=context,
input_length=tf.shape(sequences["input"])[0],
initial_states=initial_states,
num_unroll=num_unroll,
batch_size=batch_size,
num_threads=num_enqueue_threads,
capacity=batch_size * num_enqueue_threads * 2)
inputs = batch.sequences["input"]
context_label = batch.context["label"]
inputs_by_time = tf.split(value=inputs, num_or_size_splits=num_unroll, axis=1)
assert len(inputs_by_time) == num_unroll
lstm_output, _ = tf.contrib.rnn.static_state_saving_rnn(
cell,
inputs_by_time,
state_saver=batch,
state_name="lstm_state")
# Start a prefetcher in the background
  session = tf.compat.v1.Session()
tf.compat.v1.train.start_queue_runners(sess=session)
while True:
# Step through batches, perform training or inference...
session.run([lstm_output])
```
Args:
input_key: A string scalar `Tensor`, the **unique** key for the given
input example. This is used to keep track of the split minibatch elements
of this input. Batched keys of the current iteration are made
accessible via the `key` property. The shape of `input_key` (scalar) must
be fully specified. Consider setting `make_keys_unique` to True when
iterating over the same input multiple times.
**Note**: if `make_keys_unique=False` then `input_key`s must be unique.
input_sequences: A dict mapping string names to `Tensor` values. The values
must all have matching first dimension, called `value_length`. They may
vary from input to input. The remainder of the shape (other than the first
dimension) must be fully specified.
The `SequenceQueueingStateSaver` will split these tensors along
      this first dimension into minibatch elements of dimension `num_unroll`.
Batched and segmented sequences of the current iteration are made
accessible via the `sequences` property.
**Note**: if `pad=False`, then `value_length` must always be a multiple
of `num_unroll`.
input_context: A dict mapping string names to `Tensor` values. The values
are treated as "global" across all time splits of the given input example,
and will be copied across for all minibatch elements accordingly.
Batched and copied context of the current iteration are made
accessible via the `context` property.
**Note**: All input_context values must have fully defined shapes.
input_length: None or an int32 scalar `Tensor`, the length of the sequence
prior to padding. If `input_length=None` and `pad=True` then the length
will be inferred and will be equal to `value_length`. If `pad=False` then
      `input_length` cannot be `None`: `input_length` must be specified. Its
      shape (a scalar) must be fully specified. Its value may be
at most `value_length` for any given input (see above for the definition
of `value_length`). Batched and total lengths of the current iteration are
made accessible via the `length` and `total_length` properties.
initial_states: A dict mapping string state names to multi-dimensional
values (e.g. constants or tensors). This input defines the set of
states that will be kept track of during computing iterations, and
which can be accessed via the `state` and `save_state` methods.
**Note**: All initial_state values must have fully defined shapes.
num_unroll: Python integer, how many time steps to unroll at a time.
The input sequences of length k are then split into k / num_unroll many
segments.
batch_size: int or int32 scalar `Tensor`, how large minibatches should
be when accessing the `state()` method and `context`, `sequences`, etc,
properties.
num_threads: The int number of threads enqueuing input examples into a
queue.
capacity: The max capacity of the queue in number of examples. Needs to be
at least `batch_size`. Defaults to 1000. When iterating over the same
input example multiple times reusing their keys the `capacity` must be
smaller than the number of examples.
allow_small_batch: If true, the queue will return smaller batches when
there aren't enough input examples to fill a whole batch and the end of
the input has been reached.
pad: If `True`, `input_sequences` will be padded to multiple of
`num_unroll`. In that case `input_length` may be `None` and is assumed to
be the length of first dimension of values in `input_sequences`
(i.e. `value_length`).
make_keys_unique: Whether to append a random integer to the `input_key` in
an effort to make it unique. The seed can be set via
`make_keys_unique_seed`.
make_keys_unique_seed: If `make_keys_unique=True` this fixes the seed with
which a random postfix is generated.
name: An op name string (optional).
Returns:
A NextQueuedSequenceBatch with segmented and batched inputs and their
states.
Raises:
TypeError: if any of the inputs is not an expected type.
ValueError: if any of the input values is inconsistent, e.g. if
not enough shape information is available from inputs to build
the state saver.
"""
tensor_list = (list(input_sequences.values()) + list(input_context.values()) +
list(initial_states.values()))
with ops.name_scope(name, "batch_sequences_with_states", tensor_list) as name:
if pad:
length, input_sequences = _padding(input_sequences, num_unroll)
input_length = input_length if input_length is not None else length
elif input_sequences:
# Assert that value_length is a multiple of num_unroll.
checked_input_sequences = {}
for key, value in input_sequences.items():
if (isinstance(value, sparse_tensor.SparseTensor) or
isinstance(value, sparse_tensor.SparseTensorValue)):
value_length = value.dense_shape[0]
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.logical_and(
math_ops.equal(value_length % num_unroll, 0),
math_ops.not_equal(value_length, 0)),
[
string_ops.string_join([
"SparseTensor %s first dimension should be a "
"multiple of: " % key,
string_ops.as_string(num_unroll),
", but saw value: ",
string_ops.as_string(value_length),
". Consider setting pad=True."])])]):
checked_input_sequences[key] = sparse_tensor.SparseTensor(
indices=array_ops.identity(
value.indices, name="multiple_of_checked"),
values=array_ops.identity(
value.values, name="multiple_of_checked"),
dense_shape=array_ops.identity(
value.dense_shape, name="multiple_of_checked"))
else:
if not isinstance(value, ops.Tensor):
try:
value = ops.convert_to_tensor(value)
except TypeError:
raise TypeError(
"Unsupported input_sequences expected Tensor or SparseTensor "
"values, got: %s for key %s" % (str(type(value)), key))
value_length = array_ops.shape(value)[0]
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.logical_and(
math_ops.equal(value_length % num_unroll, 0),
math_ops.not_equal(value_length, 0)),
[
string_ops.string_join([
"Tensor %s first dimension should be a multiple "
"of: " % key,
string_ops.as_string(num_unroll),
", but saw value: ",
string_ops.as_string(value_length),
". Consider setting pad=True."
])
])
]):
checked_input_sequences[key] = array_ops.identity(
value, name="multiple_of_checked")
input_sequences = checked_input_sequences
# Move SparseTensors in context into input_sequences.
_move_sparse_tensor_out_context(input_context, input_sequences, num_unroll)
# Deconstruct SparseTensors in sequence into a dense Tensor before inputting
# to SQSS.
(transformed_input_seq,
sparse_tensor_keys,
tensor_list) = _deconstruct_sparse_tensor_seq(input_sequences)
if make_keys_unique:
input_key = string_ops.string_join([
input_key,
string_ops.as_string(
random_ops.random_uniform(
(), minval=0, maxval=100000000, dtype=dtypes.int32,
seed=make_keys_unique_seed))])
# setup stateful queue reader
stateful_reader = SequenceQueueingStateSaver(
batch_size,
num_unroll,
input_length=input_length,
input_key=input_key,
input_sequences=transformed_input_seq,
input_context=input_context,
initial_states=initial_states,
capacity=capacity,
allow_small_batch=allow_small_batch)
barrier = stateful_reader.barrier
summary.scalar("queue/%s/ready_segment_batches_" % barrier.name,
math_ops.cast(barrier.ready_size(), dtypes.float32))
q_runner = queue_runner.QueueRunner(
stateful_reader, [stateful_reader.prefetch_op] * num_threads,
queue_closed_exception_types=(errors.OutOfRangeError,
errors.CancelledError))
queue_runner.add_queue_runner(q_runner)
batch = stateful_reader.next_batch
# Reconstruct SparseTensors in sequence.
_reconstruct_sparse_tensor_seq(
batch.sequences,
sparse_tensor_keys,
tensor_list,
batch_size,
num_unroll)
# Move select SparseTensors back to context.
_move_sparse_tensor_in_context(batch.context, batch.sequences)
return batch
def _padding(sequences, num_unroll):
"""For a dictionary of sequences, pads tensors to a multiple of `num_unroll`.
Args:
sequences: dictionary with `Tensor` values.
    num_unroll: int, the multiple to which the sequences are padded.
Returns:
    length: Scalar `Tensor`, the size of dimension 0 of the values in
      `sequences` before padding.
padded_sequence: Dictionary of sequences that are padded to a multiple of
`num_unroll`.
Raises:
    ValueError: If `num_unroll` is not an int.
    TypeError: If `sequences` is not a dictionary from string to `Tensor`.
"""
if not isinstance(num_unroll, numbers.Integral):
raise ValueError("Unsupported num_unroll expected int, got: %s" %
str(num_unroll))
if not isinstance(sequences, dict):
raise TypeError("Unsupported sequences expected dict, got: %s" %
str(sequences))
for key, value in sequences.items():
if not isinstance(key, six.string_types):
raise TypeError("Unsupported sequences key expected string, got: %s" %
str(key))
if not sequences:
return 0, {}
# Sort 'sequences_dict' so 'length' will have a predictable value below.
sequences_dict = collections.OrderedDict()
for key, value in sorted(sequences.items()):
if not (isinstance(value, sparse_tensor.SparseTensor) or
isinstance(value, sparse_tensor.SparseTensorValue)):
sequences_dict[key] = ops.convert_to_tensor(value)
else:
sequences_dict[key] = value
lengths = [array_ops.shape(value)[0] for value in sequences_dict.values()
if isinstance(value, ops.Tensor)]
if lengths:
length = lengths[0]
all_lengths_equal = [
control_flow_ops.Assert(
math_ops.equal(l, length), [string_ops.string_join(
["All sequence lengths must match, but received lengths: ",
string_ops.as_string(lengths)])])
for l in lengths]
length = control_flow_ops.with_dependencies(all_lengths_equal, length)
else: # Only have SparseTensors
sparse_lengths = [value.dense_shape[0] for value in sequences_dict.values()
if isinstance(value, sparse_tensor.SparseTensor)]
length = math_ops.reduce_max(math_ops.cast(sparse_lengths, dtypes.int32))
unroll = array_ops.constant(num_unroll)
padded_length = length + ((unroll - (length % unroll)) % unroll)
padded_sequences = {}
for key, value in sequences_dict.items():
if isinstance(value, ops.Tensor):
# 1. create shape of paddings
# first dimension of value will be increased by num_paddings to
# padded_length
num_paddings = [padded_length - array_ops.shape(value)[0]]
# the shape of the paddings that we concat with the original value will be
# [num_paddings, tf.shape(value)[1], tf.shape(value)[2], ...,
# tf.shape(value)[tf.rank(value) - 1])]
padding_shape = array_ops.concat(
(num_paddings, array_ops.shape(value)[1:]), 0)
# 2. fill padding shape with dummies
dummy = array_ops.constant(
"" if value.dtype == dtypes.string else 0, dtype=value.dtype)
paddings = array_ops.fill(dims=padding_shape, value=dummy)
# 3. concat values with paddings
padded_sequences[key] = array_ops.concat([value, paddings], 0)
else:
padded_shape = array_ops.concat(
[[math_ops.cast(padded_length, dtypes.int64)], value.dense_shape[1:]],
0)
padded_sequences[key] = sparse_tensor.SparseTensor(
indices=value.indices,
values=value.values,
dense_shape=padded_shape)
return length, padded_sequences
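# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical example of what `_padding` produces: a [5, 3] float
# sequence padded with zeros along its first dimension to the next multiple of
# `num_unroll=4`, i.e. to shape [8, 3], while the returned `length` stays 5.
# It only uses ops already imported above and is never called by library code.
def _padding_example_sketch():
  example_sequences = {"seq": array_ops.ones([5, 3], dtype=dtypes.float32)}
  length, padded = _padding(example_sequences, num_unroll=4)
  # `length` is a scalar int32 Tensor equal to 5; padded["seq"] has shape
  # [8, 3], with the last three rows filled with zeros.
  return length, padded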
_SPARSE_CONTEXT_PREFIX_KEY = "_context_in_seq_"
def _move_sparse_tensor_out_context(input_context, input_sequences, num_unroll):
"""Moves `SparseTensor`s from `input_context` into `input_sequences` as seq.
  For each `key, value` pair in `input_context` where `value` is a
  `SparseTensor`, removes it from `input_context`, transforms the `value` into a
  sequence, and adds the `key` with the transformed `value` to
  `input_sequences`.
  The transformation adds a new first dimension of size `value_length`, equal to
  that of the other values in `input_sequences`, and tiles the `value` every
  `num_unroll` steps.
Args:
input_context: dictionary with `Tensor` or `SparseTensor` values. To be
modified to take out `SparseTensor` values.
input_sequences: dictionary with `Tensor` or `SparseTensor` values. To be
modified to add transformed `SparseTensor` values from `input_context`.
    num_unroll: int, the number of time steps per segment; the transformed
      values are tiled once every `num_unroll` steps.
"""
value_length = array_ops.constant(1)
if input_sequences:
seq = list(input_sequences.values())[0]
if isinstance(seq, ops.Tensor):
with ops.control_dependencies([seq]):
value_length = array_ops.shape(seq)[0]
else:
value_length = seq.dense_shape[0]
value_length = math_ops.cast(value_length, dtype=dtypes.int64)
def _copy_sparse_tensor(sp_tensor):
"""Operation to tile a sparse tensor along a newly added 0 dimension.
    It adds a new first dimension of size `value_length` and tiles the
    `sp_tensor` every `num_unroll` steps.
Args:
sp_tensor: `SparseTensor`.
Returns:
`SparseTensor` sequence with `sp_tensor` tiled.
"""
n = value_length // num_unroll
n = math_ops.cast(n, dtype=dtypes.int32)
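    # `n` is the number of `num_unroll`-sized segments in the sequence; the
    # context value is repeated once per segment below.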
values = array_ops.tile(sp_tensor.values, array_ops.expand_dims(n, 0))
shape = array_ops.concat(
[array_ops.expand_dims(value_length, 0), sp_tensor.dense_shape], 0)
# Construct new indices by multiplying old ones and prepending [0, n).
# First multiply indices n times along a newly created 0-dimension.
multiplied_indices = array_ops.tile(
array_ops.expand_dims(sp_tensor.indices, 0),
array_ops.stack([n, 1, 1]))
# Construct indicator for [0, n).
# [ [ [0] [0] ... [0] ]
# [ [num_unroll] [num_unroll] ... [num_unroll] ]
# ...
# [ [num_unroll*(n-1)] [num_unroll*(n-1)] ... [num_unroll*(n-1)] ] ]
# of shape [n, shape(sp_tensor.indices)[0], 1]
# Get current dimensions of indices.
dim0 = array_ops.shape(sp_tensor.indices)[0]
dim1 = array_ops.shape(sp_tensor.indices)[1]
ind = math_ops.range(start=0, limit=value_length, delta=num_unroll)
# ind.set_shape([n])
ind = array_ops.expand_dims(ind, 1)
ind = array_ops.expand_dims(ind, 2)
ind = array_ops.tile(ind, [1, dim0, 1])
# Concatenate both and reshape.
indices = array_ops.concat([ind, multiplied_indices], 2)
indices = array_ops.reshape(indices, [dim0 * n, dim1 + 1])
return sparse_tensor.SparseTensor(indices=indices,
values=values,
dense_shape=shape)
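  # For illustration: a context SparseTensor with dense_shape [3], tiled with
  # value_length=6 and num_unroll=2, becomes a sequence SparseTensor with
  # dense_shape [6, 3] whose entries appear at time steps 0, 2 and 4.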
sparse_tensor_keys = [
k for k in sorted(input_context.keys())
if (isinstance(input_context[k], sparse_tensor.SparseTensor) or
isinstance(input_context[k], sparse_tensor.SparseTensorValue))]
for key in sparse_tensor_keys:
input_sequences[_SPARSE_CONTEXT_PREFIX_KEY + key] = _copy_sparse_tensor(
input_context[key])
del input_context[key]
def _move_sparse_tensor_in_context(context, sequences):
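  """Inverse of `_move_sparse_tensor_out_context`.
  Moves the previously prefixed `SparseTensor` entries from `sequences` back
  into `context`, dropping the time dimension that was added when they were
  turned into sequences.
  """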
sparse_tensor_keys = [
k for k in sorted(sequences) if k.startswith(_SPARSE_CONTEXT_PREFIX_KEY)]
for key in sparse_tensor_keys:
new_key = key[len(_SPARSE_CONTEXT_PREFIX_KEY):]
sp_tensor = sequences[key]
# Take out time dimension.
sp_tensor = sparse_tensor.SparseTensor(
sp_tensor.indices, # with only 0s at column 1 representing time.
sp_tensor.values,
array_ops.concat(
[[sp_tensor.dense_shape[0]], # batch
[1], # time
sp_tensor.dense_shape[2:]], # SparseTensor shape prior to batching
0))
new_shape = array_ops.concat(
[[sp_tensor.dense_shape[0]], sp_tensor.dense_shape[2:]], 0)
context[new_key] = sparse_ops.sparse_reshape(sp_tensor, new_shape)
del sequences[key]
def _deconstruct_sparse_tensor_seq(input_sequence, shared_name=None):
"""Converts `SparseTensor` values into `Tensors` of IDs and meta data.
Given a dict of keys -> `Tensor` or `SparseTensor` transforms the
`SparseTensor` values into `Tensor` values of IDs by calling `_store_sparse`.
  The IDs are pointers into an underlying `SparseTensorsMap` that is being
  constructed. Additional metadata is returned in order to be able to
  reconstruct `SparseTensor` values after batching and segmenting the `Tensor`
  of IDs.
Args:
input_sequence: dictionary with `Tensor` or `SparseTensor` values.
shared_name: The shared name for the underlying `SparseTensorsMap`
(optional, defaults to the name of the newly created op).
Returns:
    A tuple `(sequence, sparse_tensor_keys, tensor_op_list)` where `sequence` is
    a dictionary with the same keys as `input_sequence` but only `Tensor`
    values, `sparse_tensor_keys` is a list of the keys of the `SparseTensor`
    values that were converted, and `tensor_op_list` is a list of the same
    length containing the `Operation` of each stored handle `Tensor`.
"""
sparse_tensor_keys = [
k for k in sorted(input_sequence.keys())
if (isinstance(input_sequence[k], sparse_tensor.SparseTensor) or
isinstance(input_sequence[k], sparse_tensor.SparseTensorValue))]
if not sparse_tensor_keys:
    return input_sequence, sparse_tensor_keys, None
sparse_tensor_list = [input_sequence[k] for k in sparse_tensor_keys]
tensor_list = [_store_sparse(sp_tensor, shared_name=shared_name)
for sp_tensor in sparse_tensor_list]
transformed_input_seq = dict(input_sequence)
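  # Keep the `.op` of each handle `Tensor`: `_restore_sparse` later uses it as
  # its `sparse_map_op` to locate the same underlying `SparseTensorsMap`.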
tensor_op_list = []
for i, k in enumerate(sparse_tensor_keys):
transformed_input_seq[k] = tensor_list[i]
tensor_op_list += [tensor_list[i].op]
return transformed_input_seq, sparse_tensor_keys, tensor_op_list
def _reconstruct_sparse_tensor_seq(sequence,
sparse_tensor_keys,
tensor_op_list,
batch_size,
num_unroll):
"""Inverse of _deconstruct_sparse_tensor_seq.
Given a dict of keys -> `Tensor` reconstructs `SparseTensor` values for keys
in `sparse_tensor_keys`. Their `Tensor` values are assumed to be IDs into the
underlying `SparseTensorsMap`. The `dense_shape` of the `SparseTensor`s is
`[batch_size, num_unroll, d_0, d_1, ..., d_n]` when the original
`SparseTensor` that got deconstructed with `_deconstruct_sparse_tensor_seq`
has a `dense_shape` of `[None, d_0, d_1, ..., d_n]`.
Args:
sequence: dictionary with only `Tensor` values that is being updated.
sparse_tensor_keys: list of the keys present in `sequence` identifying
`SparseTensor` values that should be reconstructed.
tensor_op_list: list of the same length as `sparse_tensor_keys` with
`Tensor` objects.
batch_size: int or int32 scalar `Tensor`, how large minibatches should
be.
num_unroll: Python integer, how many time steps were unrolled at a time.
"""
def _flatten_tensor(tensor):
"""Flattens `Tensor` of `shape [batch_size, num_unroll]` into 1D `Tensor`.
The main use of this function is to work around the limitation of
`_restore_sparse` to only accept 1D handles.
Args:
tensor: 2D `Tensor` of `shape [batch_size, num_unroll]`
Returns:
1D `Tensor`.
"""
return array_ops.reshape(tensor, [-1])
def _unflatten_sparse_tensor(sp_tensor):
"""Recreates `[batch_size, num_unroll]` dimensions in the `SparseTensor`.
Counter-part of `_flatten_tensor` which is called on the input of
`_restore_sparse` while this method is called on the output of it.
Together they work around the limitation of `_restore_sparse` to only
accept 1D handles.
The `indices` in `sp_tensor` is a 2D `Tensor` of `shape [N, ndims]`, where
    `N` is the number of `values` and `ndims` is the number of dimensions in
    its dense counterpart. The first of these `ndims` entries corresponds to
    the combined batch dimension `[0, num_unroll * batch_size)`, from which we
    need to recreate the two dimensions `batch_size` and `num_unroll`.
The reason this reconstruction works is because the output of
`_restore_sparse` despite being a `SparseTensor` is actually dense w.r.t.
that first entry.
Args:
sp_tensor: A SparseTensor.
Returns:
A SparseTensor with a +1 higher rank than the input.
"""
idx_batch = math_ops.cast(
math_ops.floor(sp_tensor.indices[:, 0] / num_unroll), dtypes.int64)
idx_time = math_ops.mod(sp_tensor.indices[:, 0], num_unroll)
indices = array_ops.concat(
[
array_ops.expand_dims(idx_batch, 1),
array_ops.expand_dims(idx_time, 1), sp_tensor.indices[:, 1:]
],
axis=1)
dense_shape = array_ops.concat(
[[math_ops.cast(batch_size, dtype=dtypes.int64)],
[math_ops.cast(num_unroll, dtype=dtypes.int64)],
sp_tensor.dense_shape[1:]], axis=0)
return sparse_tensor.SparseTensor(
indices=indices,
values=sp_tensor.values,
dense_shape=dense_shape)
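  # Worked example of the index math above: with num_unroll=3, a flattened
  # batch index of 4 maps to batch entry 4 // 3 = 1 and time step 4 % 3 = 1.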
if not sparse_tensor_keys:
return
tensor_list = [sequence[k] for k in sparse_tensor_keys]
sp_tensors = [
_restore_sparse(sparse_map_op=i,
# Flatten the 2D Tensor [batch_size, num_unroll] of
# handles to a 1D Tensor.
# Reconstruct the dimensions later.
# TODO(b/34247140): Remove this workaround.
sparse_handles=_flatten_tensor(s), rank=None)
for i, s in zip(tensor_op_list, tensor_list)]
num_unroll = ops.convert_to_tensor(num_unroll, dtype=dtypes.int64,
name="num_unroll_int64")
# Recreate the [batch_size, num_unroll] dimensions in the SparseTensors.
# The dense_shape will have a +1 higher rank.
# TODO(b/34247140): Remove this workaround.
sp_tensors_higher_dim = [_unflatten_sparse_tensor(s) for s in sp_tensors]
# Set values to SparseTensors for sparse_tensor_keys.
for i, key in enumerate(sparse_tensor_keys):
sequence[key] = sp_tensors_higher_dim[i]
return
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/training/python/training/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.SequenceQueueingStateSaver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib.training.python.training import sequence_queueing_state_saver as sqss
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class SequenceQueueingStateSaverTest(test.TestCase):
def testSequenceInputWrapper(self):
with self.cached_session():
length = 3
key = "key"
padded_length = 4
sequences = {
"seq1": np.random.rand(padded_length, 5),
"seq2": np.random.rand(padded_length, 4, 2)
}
context = {"context1": [3, 4]}
input_wrapper = sqss._SequenceInputWrapper(length, key, sequences,
context)
self.assertTrue(isinstance(input_wrapper.length, ops.Tensor))
self.assertTrue(isinstance(input_wrapper.key, ops.Tensor))
self.assertTrue(isinstance(input_wrapper.sequences["seq1"], ops.Tensor))
self.assertTrue(isinstance(input_wrapper.sequences["seq2"], ops.Tensor))
self.assertTrue(isinstance(input_wrapper.context["context1"], ops.Tensor))
def testStateSaverWithTwoSimpleSteps(self):
with self.cached_session() as sess:
batch_size_value = 2
batch_size = constant_op.constant(batch_size_value)
num_unroll = 2
length = 3
key = string_ops.string_join([
"key_", string_ops.as_string(
math_ops.cast(10000 * random_ops.random_uniform(()),
dtypes.int32))
])
padded_length = 4
sequences = {
"seq1": np.random.rand(padded_length, 5),
"seq2": np.random.rand(padded_length, 4, 2)
}
context = {"context1": [3, 4]}
initial_states = {
"state1": np.random.rand(6, 7),
"state2": np.random.rand(8)
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states,
capacity=100)
initial_key_value_0, _ = sess.run((key, state_saver.prefetch_op))
initial_key_value_1, _ = sess.run((key, state_saver.prefetch_op))
initial_key_value_0 = initial_key_value_0.decode("ascii")
initial_key_value_1 = initial_key_value_1.decode("ascii")
# Step 1
next_batch = state_saver.next_batch
(key_value, next_key_value, seq1_value, seq2_value, context1_value,
state1_value, state2_value, length_value, _, _) = sess.run(
(next_batch.key, next_batch.next_key, next_batch.sequences["seq1"],
next_batch.sequences["seq2"], next_batch.context["context1"],
next_batch.state("state1"), next_batch.state("state2"),
next_batch.length,
next_batch.save_state("state1", next_batch.state("state1") + 1),
next_batch.save_state("state2", next_batch.state("state2") - 1)))
expected_first_keys = set(
("00000_of_00002:%s" % x).encode("ascii")
for x in (initial_key_value_0, initial_key_value_1))
expected_second_keys = set(
("00001_of_00002:%s" % x).encode("ascii")
for x in (initial_key_value_0, initial_key_value_1))
expected_final_keys = set(
("STOP:%s" % x).encode("ascii")
for x in (initial_key_value_0, initial_key_value_1))
self.assertEqual(set(key_value), expected_first_keys)
self.assertEqual(set(next_key_value), expected_second_keys)
self.assertAllEqual(context1_value,
np.tile(context["context1"], (batch_size_value, 1)))
self.assertAllEqual(seq1_value,
np.tile(sequences["seq1"][np.newaxis, 0:2, :],
(batch_size_value, 1, 1)))
self.assertAllEqual(seq2_value,
np.tile(sequences["seq2"][np.newaxis, 0:2, :, :],
(batch_size_value, 1, 1, 1)))
self.assertAllEqual(state1_value,
np.tile(initial_states["state1"],
(batch_size_value, 1, 1)))
self.assertAllEqual(state2_value,
np.tile(initial_states["state2"],
(batch_size_value, 1)))
self.assertAllEqual(length_value, [2, 2])
# Step 2
(key_value, next_key_value, seq1_value, seq2_value, context1_value,
state1_value, state2_value, length_value, _, _) = sess.run(
(next_batch.key, next_batch.next_key, next_batch.sequences["seq1"],
next_batch.sequences["seq2"], next_batch.context["context1"],
next_batch.state("state1"), next_batch.state("state2"),
next_batch.length,
next_batch.save_state("state1", next_batch.state("state1") + 1),
next_batch.save_state("state2", next_batch.state("state2") - 1)))
self.assertEqual(set(key_value), expected_second_keys)
self.assertEqual(set(next_key_value), expected_final_keys)
self.assertAllEqual(context1_value,
np.tile(context["context1"], (batch_size_value, 1)))
self.assertAllEqual(seq1_value,
np.tile(sequences["seq1"][np.newaxis, 2:4, :],
(batch_size_value, 1, 1)))
self.assertAllEqual(seq2_value,
np.tile(sequences["seq2"][np.newaxis, 2:4, :, :],
(batch_size_value, 1, 1, 1)))
self.assertAllEqual(state1_value, 1 + np.tile(initial_states["state1"],
(batch_size_value, 1, 1)))
self.assertAllEqual(state2_value, -1 + np.tile(initial_states["state2"],
(batch_size_value, 1)))
self.assertAllEqual(length_value, [1, 1])
# Finished. Let's make sure there's nothing left in the barrier.
self.assertEqual(0, state_saver.barrier.ready_size().eval())
def testStateSaverFailsIfPaddedLengthIsNotMultipleOfNumUnroll(self):
with self.cached_session() as sess:
batch_size = constant_op.constant(32)
num_unroll = 17
bad_padded_length = 3
length = array_ops.placeholder(dtypes.int32)
key = array_ops.placeholder(dtypes.string)
sequences = {
"seq1": array_ops.placeholder(
dtypes.float32, shape=(None, 5))
}
context = {}
initial_states = {
"state1": array_ops.placeholder(
dtypes.float32, shape=())
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states)
with self.assertRaisesOpError(
"should be a multiple of: 17, but saw value: %d" % bad_padded_length):
sess.run([state_saver.prefetch_op],
feed_dict={
length: 1,
key: "key",
sequences["seq1"]: np.random.rand(bad_padded_length, 5),
initial_states["state1"]: 1.0
})
def _testStateSaverFailsIfCapacityTooSmall(self, batch_size):
with self.cached_session() as sess:
num_unroll = 2
length = array_ops.placeholder(dtypes.int32)
key = array_ops.placeholder(dtypes.string)
sequences = {
"seq1": array_ops.placeholder(
dtypes.float32, shape=(None, 5)),
"seq2": array_ops.placeholder(
dtypes.float32, shape=(None,))
}
context = {}
initial_states = {
"state1": array_ops.placeholder(
dtypes.float32, shape=())
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states,
capacity=10)
sess.run([state_saver.prefetch_op],
feed_dict={
length: 1,
key: "key",
sequences["seq1"]: np.random.rand(num_unroll, 5),
sequences["seq2"]: np.random.rand(num_unroll),
initial_states["state1"]: 1.0
})
def testStateSaverFailsIfCapacityTooSmallTensor(self):
batch_size_value = 32
batch_size = constant_op.constant(batch_size_value)
with self.assertRaisesOpError(
".*capacity needs to be >= batch_size.*"):
self._testStateSaverFailsIfCapacityTooSmall(batch_size)
def testStateSaverFailsIfCapacityTooSmallInt(self):
batch_size = 32
with self.assertRaisesRegexp(
ValueError,
"capacity %d needs to be >= batch_size %d" % (10, batch_size)):
self._testStateSaverFailsIfCapacityTooSmall(batch_size)
def testStateSaverFailsIfInconsistentPaddedLength(self):
with self.cached_session() as sess:
batch_size = constant_op.constant(32)
num_unroll = 17
length = array_ops.placeholder(dtypes.int32)
key = array_ops.placeholder(dtypes.string)
sequences = {
"seq1": array_ops.placeholder(
dtypes.float32, shape=(None, 5)),
"seq2": array_ops.placeholder(
dtypes.float32, shape=(None,))
}
context = {}
initial_states = {
"state1": array_ops.placeholder(
dtypes.float32, shape=())
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states)
with self.assertRaisesOpError(
"Dimension 0 of tensor labeled sorted_sequences_seq2 "
"should be: %d, shape received: %d" % (num_unroll, 2 * num_unroll)):
sess.run([state_saver.prefetch_op],
feed_dict={
length: 1,
key: "key",
sequences["seq1"]: np.random.rand(num_unroll, 5),
sequences["seq2"]: np.random.rand(2 * num_unroll),
initial_states["state1"]: 1.0
})
def testStateSaverFailsIfInconsistentWriteState(self):
# TODO(b/26910386): Identify why this infrequently causes timeouts.
with self.cached_session() as sess:
batch_size = constant_op.constant(1)
num_unroll = 17
length = array_ops.placeholder(dtypes.int32)
key = array_ops.placeholder(dtypes.string)
sequences = {
"seq1": array_ops.placeholder(
dtypes.float32, shape=(None, 5))
}
context = {}
initial_states = {
"state1": array_ops.placeholder(
dtypes.float32, shape=())
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states)
next_batch = state_saver.next_batch
with self.assertRaisesRegexp(KeyError, "state was not declared: state2"):
save_op = next_batch.save_state("state2", None)
with self.assertRaisesRegexp(ValueError, "Rank check failed for.*state1"):
save_op = next_batch.save_state("state1", np.random.rand(1, 1))
with self.assertRaisesOpError(
r"convert_state1:0 should be: 1, shape received:\] \[1 1\]"):
state_input = array_ops.placeholder(dtypes.float32)
with ops.control_dependencies([state_saver.prefetch_op]):
save_op = next_batch.save_state("state1", state_input)
sess.run([save_op],
feed_dict={
length: 1,
key: "key",
sequences["seq1"]: np.random.rand(num_unroll, 5),
initial_states["state1"]: 1.0,
state_input: np.random.rand(1, 1)
})
def testStateSaverWithManyInputsReadWriteThread(self):
batch_size_value = 32
num_proc_threads = 100
with self.cached_session() as sess:
batch_size = constant_op.constant(batch_size_value)
num_unroll = 17
length = array_ops.placeholder(dtypes.int32)
key = array_ops.placeholder(dtypes.string)
sequences = {
"seq1": array_ops.placeholder(
dtypes.float32, shape=(None, 5)),
"seq2": array_ops.placeholder(
dtypes.float32, shape=(None, 4, 2)),
"seq3": array_ops.placeholder(
dtypes.float64, shape=(None,))
}
context = {
"context1": array_ops.placeholder(
dtypes.string, shape=(3, 4)),
"context2": array_ops.placeholder(
dtypes.int64, shape=())
}
initial_states = {
"state1": array_ops.placeholder(
dtypes.float32, shape=(6, 7)),
"state2": array_ops.placeholder(
dtypes.int32, shape=())
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states)
next_batch = state_saver.next_batch
cancel_op = state_saver.close(cancel_pending_enqueues=True)
update_1 = next_batch.save_state("state1", 1 + next_batch.state("state1"))
update_2 = next_batch.save_state("state2",
-1 + next_batch.state("state2"))
original_values = {}
def insert(which):
for i in range(20):
# Insert varying length inputs
pad_i = num_unroll * (1 + (i % 10))
length_i = int(np.random.rand() * pad_i)
key_value = "key_%02d_%04d" % (which, i)
stored_state = {
"length": length_i,
"seq1": np.random.rand(pad_i, 5),
"seq2": np.random.rand(pad_i, 4, 2),
"seq3": np.random.rand(pad_i),
"context1": np.random.rand(3, 4).astype(np.str),
"context2": np.asarray(
100 * np.random.rand(), dtype=np.int32),
"state1": np.random.rand(6, 7),
"state2": np.asarray(
100 * np.random.rand(), dtype=np.int32)
}
original_values[key_value] = stored_state
sess.run([state_saver.prefetch_op],
feed_dict={
length: stored_state["length"],
key: key_value,
sequences["seq1"]: stored_state["seq1"],
sequences["seq2"]: stored_state["seq2"],
sequences["seq3"]: stored_state["seq3"],
context["context1"]: stored_state["context1"],
context["context2"]: stored_state["context2"],
initial_states["state1"]: stored_state["state1"],
initial_states["state2"]: stored_state["state2"]
})
processed_count = [0]
def process_and_check_state():
next_batch = state_saver.next_batch
while True:
try:
(got_key, next_key, length, total_length, sequence, sequence_count,
context1, context2, seq1, seq2, seq3, state1, state2, _,
_) = (sess.run([
next_batch.key, next_batch.next_key, next_batch.length,
next_batch.total_length, next_batch.sequence,
next_batch.sequence_count, next_batch.context["context1"],
next_batch.context["context2"], next_batch.sequences["seq1"],
next_batch.sequences["seq2"], next_batch.sequences["seq3"],
next_batch.state("state1"), next_batch.state("state2"),
update_1, update_2
]))
except errors_impl.OutOfRangeError:
# SQSS has been closed
break
self.assertEqual(len(got_key), batch_size_value)
processed_count[0] += len(got_key)
for i in range(batch_size_value):
key_name = got_key[i].decode("ascii").split(":")[1]
# We really saved this unique key
self.assertTrue(key_name in original_values)
# The unique key matches next_key
self.assertEqual(key_name,
next_key[i].decode("ascii").split(":")[1])
# Pull out the random values we used to create this example
stored_state = original_values[key_name]
self.assertEqual(total_length[i], stored_state["length"])
self.assertEqual("%05d_of_%05d:%s" %
(sequence[i], sequence_count[i], key_name),
got_key[i].decode("ascii"))
expected_length = max(
0,
min(num_unroll,
stored_state["length"] - sequence[i] * num_unroll))
self.assertEqual(length[i], expected_length)
expected_state1 = stored_state["state1"] + sequence[i]
expected_state2 = stored_state["state2"] - sequence[i]
expected_sequence1 = stored_state["seq1"][sequence[i] * num_unroll:(
sequence[i] + 1) * num_unroll]
expected_sequence2 = stored_state["seq2"][sequence[i] * num_unroll:(
sequence[i] + 1) * num_unroll]
expected_sequence3 = stored_state["seq3"][sequence[i] * num_unroll:(
sequence[i] + 1) * num_unroll]
self.assertAllClose(state1[i], expected_state1)
self.assertAllEqual(state2[i], expected_state2)
# context1 is strings, which come back as bytes
self.assertAllEqual(context1[i].astype(np.str),
stored_state["context1"])
self.assertAllEqual(context2[i], stored_state["context2"])
self.assertAllClose(seq1[i], expected_sequence1)
self.assertAllClose(seq2[i], expected_sequence2)
self.assertAllClose(seq3[i], expected_sequence3)
# Total number of inserts will be a multiple of batch_size
insert_threads = [
self.checkedThread(
insert, args=(which,)) for which in range(batch_size_value)
]
process_threads = [
self.checkedThread(process_and_check_state)
for _ in range(num_proc_threads)
]
for t in insert_threads:
t.start()
for t in process_threads:
t.start()
for t in insert_threads:
t.join()
time.sleep(3) # Allow the threads to run and process for a while
cancel_op.run()
for t in process_threads:
t.join()
# Each thread processed at least 2 sequence segments
self.assertGreater(processed_count[0], 2 * 20 * batch_size_value)
def testStateSaverProcessesExamplesInOrder(self):
with self.cached_session() as sess:
batch_size_value = 32
batch_size = constant_op.constant(batch_size_value)
num_unroll = 17
length = array_ops.placeholder(dtypes.int32)
key = array_ops.placeholder(dtypes.string)
sequences = {
"seq1": array_ops.placeholder(
dtypes.float32, shape=(None, 5))
}
context = {"context1": array_ops.placeholder(dtypes.string, shape=(3, 4))}
initial_states = {
"state1": array_ops.placeholder(
dtypes.float32, shape=())
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states)
next_batch = state_saver.next_batch
update = next_batch.save_state("state1", 1 + next_batch.state("state1"))
get_ready_size = state_saver.barrier.ready_size()
get_incomplete_size = state_saver.barrier.incomplete_size()
global_insert_key = [0]
def insert(insert_key):
# Insert varying length inputs
sess.run([state_saver.prefetch_op],
feed_dict={
length: np.random.randint(2 * num_unroll),
key: "%05d" % insert_key[0],
sequences["seq1"]: np.random.rand(2 * num_unroll, 5),
context["context1"]: np.random.rand(3, 4).astype(np.str),
initial_states["state1"]: 0.0
})
insert_key[0] += 1
for _ in range(batch_size_value * 100):
insert(global_insert_key)
def process_and_validate(check_key):
true_step = int(check_key[0] / 2) # Each entry has two slices
check_key[0] += 1
got_keys, input_index, _ = sess.run(
[next_batch.key, next_batch.insertion_index, update])
decoded_keys = [int(x.decode("ascii").split(":")[-1]) for x in got_keys]
min_key = min(decoded_keys)
min_index = int(min(input_index)) # numpy scalar
max_key = max(decoded_keys)
max_index = int(max(input_index)) # numpy scalar
# The current min key should be above the previous min
self.assertEqual(min_key, true_step * batch_size_value)
self.assertEqual(max_key, (true_step + 1) * batch_size_value - 1)
self.assertEqual(2**63 + min_index, true_step * batch_size_value)
self.assertEqual(2**63 + max_index,
(true_step + 1) * batch_size_value - 1)
# There are now (batch_size * 100 * 2) / batch_size = 200 full steps
global_step_key = [0]
for _ in range(200):
process_and_validate(global_step_key)
# Processed everything in the queue
self.assertEqual(get_incomplete_size.eval(), 0)
self.assertEqual(get_ready_size.eval(), 0)
def testStateSaverCanHandleVariableBatchsize(self):
with self.cached_session() as sess:
batch_size = array_ops.placeholder(dtypes.int32)
num_unroll = 17
length = array_ops.placeholder(dtypes.int32)
key = array_ops.placeholder(dtypes.string)
sequences = {
"seq1": array_ops.placeholder(
dtypes.float32, shape=(None, 5))
}
context = {"context1": array_ops.placeholder(dtypes.string, shape=(3, 4))}
initial_states = {
"state1": array_ops.placeholder(
dtypes.float32, shape=())
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states)
next_batch = state_saver.next_batch
update = next_batch.save_state("state1", 1 + next_batch.state("state1"))
for insert_key in range(128):
# Insert varying length inputs
sess.run([state_saver.prefetch_op],
feed_dict={
length: np.random.randint(2 * num_unroll),
key: "%05d" % insert_key,
sequences["seq1"]: np.random.rand(2 * num_unroll, 5),
context["context1"]: np.random.rand(3, 4).astype(np.str),
initial_states["state1"]: 0.0
})
all_received_indices = []
# Pull out and validate batch sizes 0, 1, ..., 7
for batch_size_value in range(8):
got_keys, input_index, context1, seq1, state1, _ = sess.run(
[
next_batch.key, next_batch.insertion_index,
next_batch.context["context1"], next_batch.sequences["seq1"],
next_batch.state("state1"), update
],
feed_dict={batch_size: batch_size_value})
# Indices may have come in out of order within the batch
all_received_indices.append(input_index.tolist())
self.assertEqual(got_keys.size, batch_size_value)
self.assertEqual(input_index.size, batch_size_value)
self.assertEqual(context1.shape, (batch_size_value, 3, 4))
self.assertEqual(seq1.shape, (batch_size_value, num_unroll, 5))
self.assertEqual(state1.shape, (batch_size_value,))
# Each input was split into 2 iterations (sequences size == 2*num_unroll)
expected_indices = [[], [0], [0, 1], [1, 2, 3], [2, 3, 4, 5],
[4, 5, 6, 7, 8], [6, 7, 8, 9, 10, 11],
[9, 10, 11, 12, 13, 14, 15]]
self.assertEqual(len(all_received_indices), len(expected_indices))
for received, expected in zip(all_received_indices, expected_indices):
self.assertAllEqual([x + 2**63 for x in received], expected)
def testStateSaverScopeNames(self):
batch_size = constant_op.constant(2)
sqss_scope_name = "unique_scope_name_for_sqss"
num_unroll = 2
length = 3
key = string_ops.string_join([
"key_", string_ops.as_string(
math_ops.cast(10000 * random_ops.random_uniform(()), dtypes.int32))
])
padded_length = 4
sequences = {
"seq1": np.random.rand(padded_length, 5),
"seq2": np.random.rand(padded_length, 4, 2)
}
context = {"context1": [3, 4]}
initial_states = {
"state1": np.random.rand(6, 7),
"state2": np.random.rand(8)
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states,
name=sqss_scope_name)
prefetch_op = state_saver.prefetch_op
next_batch = state_saver.next_batch
self.assertTrue(
state_saver.barrier.barrier_ref.name.startswith("%s/" %
sqss_scope_name))
self.assertTrue(prefetch_op.name.startswith("%s/" % sqss_scope_name))
self.assertTrue(next_batch.key.name.startswith("%s/" % sqss_scope_name))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/training/python/training/sequence_queueing_state_saver_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.training.bucket."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.training.python.training import bucket_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
def _which_bucket(bucket_edges, v):
"""Identify which bucket v falls into.
Args:
bucket_edges: int array, bucket edges
v: int scalar, index
Returns:
int scalar, the bucket.
If v < bucket_edges[0], return 0.
If bucket_edges[0] <= v < bucket_edges[1], return 1.
...
    If bucket_edges[-2] <= v < bucket_edges[-1], return len(bucket_edges) - 1.
    If v >= bucket_edges[-1], return len(bucket_edges) + 1.
"""
v = np.asarray(v)
full = [0] + bucket_edges
found = np.where(np.logical_and(v >= full[:-1], v < full[1:]))[0]
if not found.size:
return len(full)
return found[0]
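# Illustrative note (not part of the original test): with
# bucket_edges=[3, 4, 5, 10], `_which_bucket` returns 0 for v in [0, 3),
# 1 for [3, 4), 2 for [4, 5), 3 for [5, 10), and len(full) == 5 for v >= 10.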
class BucketTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
self.scalar_int_feed = array_ops.placeholder(dtypes_lib.int32, ())
self.unk_int64_feed = array_ops.placeholder(dtypes_lib.int64, (None,))
self.vec3_str_feed = array_ops.placeholder(dtypes_lib.string, (3,))
self.sparse_c = sparse_tensor.SparseTensor(
indices=[[0]],
values=[1.0],
dense_shape=[1])
self._coord = coordinator.Coordinator()
# Make capacity very large so we can feed all the inputs in the
# main thread without blocking
input_queue = data_flow_ops.PaddingFIFOQueue(
5000,
dtypes=[dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.string],
shapes=[(), (None,), (3,)])
self._input_enqueue_op = input_queue.enqueue(
(self.scalar_int_feed, self.unk_int64_feed, self.vec3_str_feed))
self.scalar_int, self.unk_int64, self.vec3_str = input_queue.dequeue()
self._threads = None
self._close_op = input_queue.close()
self._sess = None
def enqueue_inputs(self, sess, feed_dict):
sess.run(self._input_enqueue_op, feed_dict=feed_dict)
def start_queue_runners(self, sess):
# Store session to be able to close inputs later
if self._sess is None:
self._sess = sess
self._threads = queue_runner_impl.start_queue_runners(coord=self._coord)
def tearDown(self):
if self._sess is not None:
self._sess.run(self._close_op)
self._coord.request_stop()
self._coord.join(self._threads)
def testSingleBucket(self):
bucketed_dynamic = bucket_ops.bucket(
tensors=[self.scalar_int, self.unk_int64, self.vec3_str, self.sparse_c],
which_bucket=constant_op.constant(0),
num_buckets=2,
batch_size=32,
num_threads=10,
dynamic_pad=True)
# Check shape inference on bucketing outputs
self.assertAllEqual(
[[32], [32, None], [32, 3], [None, None]],
[out.get_shape().as_list() for out in bucketed_dynamic[1]])
with self.cached_session() as sess:
for v in range(32):
self.enqueue_inputs(sess, {
self.scalar_int_feed: v,
self.unk_int64_feed: v * [v],
self.vec3_str_feed: 3 * [str(v)]
})
self.start_queue_runners(sess)
# Get a single minibatch
bucketed_values = sess.run(bucketed_dynamic)
# (which_bucket, bucket_tensors).
self.assertEqual(2, len(bucketed_values))
# Count number of bucket_tensors.
self.assertEqual(4, len(bucketed_values[1]))
# Ensure bucket 0 was used for all minibatch entries.
self.assertAllEqual(0, bucketed_values[0])
expected_scalar_int = np.arange(32)
expected_unk_int64 = np.zeros((32, 31)).astype(np.int64)
for i in range(32):
expected_unk_int64[i, :i] = i
expected_vec3_str = np.vstack(3 * [np.arange(32).astype(bytes)]).T
      # Must re-sort the output because num_threads > 1 leads to
# sometimes-inconsistent insertion order.
resort = np.argsort(bucketed_values[1][0])
self.assertAllEqual(expected_scalar_int, bucketed_values[1][0][resort])
self.assertAllEqual(expected_unk_int64, bucketed_values[1][1][resort])
self.assertAllEqual(expected_vec3_str, bucketed_values[1][2][resort])
def testBatchSizePerBucket(self):
which_bucket = control_flow_ops.cond(self.scalar_int < 5,
lambda: constant_op.constant(0),
lambda: constant_op.constant(1))
batch_sizes = [5, 10]
bucketed_dynamic = bucket_ops.bucket(
tensors=[self.scalar_int, self.unk_int64, self.vec3_str, self.sparse_c],
which_bucket=which_bucket,
num_buckets=2,
batch_size=batch_sizes,
num_threads=1,
dynamic_pad=True)
# Check shape inference on bucketing outputs
self.assertAllEqual(
[[None], [None, None], [None, 3], [None, None]],
[out.get_shape().as_list() for out in bucketed_dynamic[1]])
with self.cached_session() as sess:
for v in range(15):
self.enqueue_inputs(sess, {
self.scalar_int_feed: v,
self.unk_int64_feed: v * [v],
self.vec3_str_feed: 3 * [str(v)]
})
self.start_queue_runners(sess)
# Get two minibatches (one with small values, one with large).
bucketed_values_0 = sess.run(bucketed_dynamic)
bucketed_values_1 = sess.run(bucketed_dynamic)
# Figure out which output has the small values
if bucketed_values_0[0] < 5:
bucketed_values_large, bucketed_values_small = (bucketed_values_1,
bucketed_values_0)
else:
bucketed_values_small, bucketed_values_large = (bucketed_values_0,
bucketed_values_1)
# Ensure bucket 0 was used for all minibatch entries.
self.assertAllEqual(0, bucketed_values_small[0])
self.assertAllEqual(1, bucketed_values_large[0])
# Check that the batch sizes differ per bucket
self.assertEqual(5, len(bucketed_values_small[1][0]))
self.assertEqual(10, len(bucketed_values_large[1][0]))
def testEvenOddBuckets(self):
which_bucket = (self.scalar_int % 2)
bucketed_dynamic = bucket_ops.bucket(
tensors=[self.scalar_int, self.unk_int64, self.vec3_str, self.sparse_c],
which_bucket=which_bucket,
num_buckets=2,
batch_size=32,
num_threads=10,
dynamic_pad=True)
# Check shape inference on bucketing outputs
self.assertAllEqual(
[[32], [32, None], [32, 3], [None, None]],
[out.get_shape().as_list() for out in bucketed_dynamic[1]])
with self.cached_session() as sess:
for v in range(64):
self.enqueue_inputs(sess, {
self.scalar_int_feed: v,
self.unk_int64_feed: v * [v],
self.vec3_str_feed: 3 * [str(v)]
})
self.start_queue_runners(sess)
# Get two minibatches (one containing even values, one containing odds)
bucketed_values_0 = sess.run(bucketed_dynamic)
bucketed_values_1 = sess.run(bucketed_dynamic)
# (which_bucket, bucket_tensors).
self.assertEqual(2, len(bucketed_values_0))
self.assertEqual(2, len(bucketed_values_1))
# Count number of bucket_tensors.
self.assertEqual(4, len(bucketed_values_0[1]))
self.assertEqual(4, len(bucketed_values_1[1]))
# Figure out which output has the even values (there's
# randomness due to the multithreaded nature of bucketing)
if bucketed_values_0[0] % 2 == 1:
bucketed_values_even, bucketed_values_odd = (bucketed_values_1,
bucketed_values_0)
else:
bucketed_values_even, bucketed_values_odd = (bucketed_values_0,
bucketed_values_1)
# Ensure bucket 0 was used for all minibatch entries.
self.assertAllEqual(0, bucketed_values_even[0])
self.assertAllEqual(1, bucketed_values_odd[0])
      # Test the first bucket outputted, the evens starting at 0
expected_scalar_int = np.arange(0, 32 * 2, 2)
expected_unk_int64 = np.zeros((32, 31 * 2)).astype(np.int64)
for i in range(0, 32):
expected_unk_int64[i, :2 * i] = 2 * i
expected_vec3_str = np.vstack(3 *
[np.arange(0, 32 * 2, 2).astype(bytes)]).T
      # Must re-sort the output because num_threads > 1 leads to
# sometimes-inconsistent insertion order.
resort = np.argsort(bucketed_values_even[1][0])
self.assertAllEqual(expected_scalar_int,
bucketed_values_even[1][0][resort])
self.assertAllEqual(expected_unk_int64,
bucketed_values_even[1][1][resort])
self.assertAllEqual(expected_vec3_str, bucketed_values_even[1][2][resort])
# Test the second bucket outputted, the odds starting at 1
expected_scalar_int = np.arange(1, 32 * 2 + 1, 2)
expected_unk_int64 = np.zeros((32, 31 * 2 + 1)).astype(np.int64)
for i in range(0, 32):
expected_unk_int64[i, :2 * i + 1] = 2 * i + 1
expected_vec3_str = np.vstack(
3 * [np.arange(1, 32 * 2 + 1, 2).astype(bytes)]).T
      # Must re-sort the output because num_threads > 1 leads to
# sometimes-inconsistent insertion order.
resort = np.argsort(bucketed_values_odd[1][0])
self.assertAllEqual(expected_scalar_int,
bucketed_values_odd[1][0][resort])
self.assertAllEqual(expected_unk_int64, bucketed_values_odd[1][1][resort])
self.assertAllEqual(expected_vec3_str, bucketed_values_odd[1][2][resort])
def testEvenOddBucketsFilterOutAllOdd(self):
which_bucket = (self.scalar_int % 2)
keep_input = math_ops.equal(which_bucket, 0)
bucketed_dynamic = bucket_ops.bucket(
tensors=[self.scalar_int, self.unk_int64, self.vec3_str],
which_bucket=which_bucket,
num_buckets=2,
batch_size=32,
num_threads=10,
keep_input=keep_input,
dynamic_pad=True)
# Check shape inference on bucketing outputs
self.assertAllEqual(
[[32], [32, None], [32, 3]],
[out.get_shape().as_list() for out in bucketed_dynamic[1]])
with self.cached_session() as sess:
for v in range(128):
self.enqueue_inputs(sess, {
self.scalar_int_feed: v,
self.unk_int64_feed: v * [v],
self.vec3_str_feed: 3 * [str(v)]
})
self.start_queue_runners(sess)
# Get two minibatches ([0, 2, ...] and [64, 66, ...])
bucketed_values_even0 = sess.run(bucketed_dynamic)
bucketed_values_even1 = sess.run(bucketed_dynamic)
# Ensure that bucket 1 was completely filtered out
self.assertAllEqual(0, bucketed_values_even0[0])
self.assertAllEqual(0, bucketed_values_even1[0])
# Merge their output for sorting and comparison
bucketed_values_all_elem0 = np.concatenate((bucketed_values_even0[1][0],
bucketed_values_even1[1][0]))
self.assertAllEqual(
np.arange(0, 128, 2), sorted(bucketed_values_all_elem0))
def testFailOnWrongBucketCapacities(self):
with self.assertRaisesRegexp(ValueError, r"must have exactly num_buckets"):
bucket_ops.bucket( # 2 buckets and 3 capacities raises ValueError.
tensors=[self.scalar_int, self.unk_int64, self.vec3_str],
which_bucket=constant_op.constant(0), num_buckets=2,
batch_size=32, bucket_capacities=[3, 4, 5])
class BucketBySequenceLengthTest(test.TestCase):
def _testBucketBySequenceLength(self,
allow_small_batch,
bucket_capacities=None,
drain_entire_queue=True):
ops.reset_default_graph()
# All inputs must be identical lengths across tuple index.
# The input reader will get input_length from the first tuple
# entry.
data_len = 4
labels_len = 3
input_pairs = [(length, ([np.int64(length)] * data_len,
[str(length).encode("ascii")] * labels_len))
for length in (1, 3, 4, 5, 6, 10)]
lengths = array_ops.placeholder(dtypes_lib.int32, ())
data = array_ops.placeholder(dtypes_lib.int64, (data_len,))
labels = array_ops.placeholder(dtypes_lib.string, (labels_len,))
batch_size = 8
bucket_boundaries = [3, 4, 5, 10]
num_pairs_to_enqueue = 50 * batch_size + 100
# Make capacity very large so we can feed all the inputs in the
# main thread without blocking
input_queue = data_flow_ops.FIFOQueue(
5000, (dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.string), (
(), (data_len,), (labels_len,)))
input_enqueue_op = input_queue.enqueue((lengths, data, labels))
lengths_t, data_t, labels_t = input_queue.dequeue()
close_input_op = input_queue.close()
(out_lengths_t, data_and_labels_t) = (bucket_ops.bucket_by_sequence_length(
input_length=lengths_t,
tensors=[data_t, labels_t],
batch_size=batch_size,
bucket_boundaries=bucket_boundaries,
bucket_capacities=bucket_capacities,
allow_smaller_final_batch=allow_small_batch,
num_threads=10))
expected_batch_size = None if allow_small_batch else batch_size
self.assertEqual(out_lengths_t.get_shape().as_list(), [expected_batch_size])
self.assertEqual(data_and_labels_t[0].get_shape().as_list(),
[expected_batch_size, data_len])
self.assertEqual(data_and_labels_t[1].get_shape().as_list(),
[expected_batch_size, labels_len])
def _read_test(sess):
num_pairs_dequeued = 0
try:
while drain_entire_queue or num_pairs_dequeued < 40 * batch_size:
(out_lengths, (data, labels)) = sess.run(
(out_lengths_t, data_and_labels_t))
num_pairs_dequeued += out_lengths.shape[0]
if allow_small_batch:
self.assertEqual(data_len, data.shape[1])
self.assertEqual(labels_len, labels.shape[1])
self.assertGreaterEqual(batch_size, out_lengths.shape[0])
self.assertGreaterEqual(batch_size, data.shape[0])
self.assertGreaterEqual(batch_size, labels.shape[0])
else:
self.assertEqual((batch_size, data_len), data.shape)
self.assertEqual((batch_size, labels_len), labels.shape)
self.assertEqual((batch_size,), out_lengths.shape)
for (lr, dr, tr) in zip(out_lengths, data, labels):
# Make sure length matches data (here it's the same value).
self.assertEqual(dr[0], lr)
# Make sure data & labels match.
self.assertEqual(dr[0], int(tr[0].decode("ascii")))
# Make sure for each row, data came from the same bucket.
self.assertEqual(
_which_bucket(bucket_boundaries, dr[0]),
_which_bucket(bucket_boundaries, dr[1]))
except errors.OutOfRangeError:
if allow_small_batch:
self.assertEqual(num_pairs_to_enqueue, num_pairs_dequeued)
else:
# Maximum left over in the queues should be at most one less than the
# batch_size, for every bucket.
num_buckets = len(bucket_boundaries) + 2
self.assertLessEqual(
num_pairs_to_enqueue - (batch_size - 1) * num_buckets,
num_pairs_dequeued)
with self.cached_session() as sess:
coord = coordinator.Coordinator()
# Feed the inputs, then close the input thread.
for _ in range(num_pairs_to_enqueue):
which = random.randint(0, len(input_pairs) - 1)
length, pair = input_pairs[which]
sess.run(input_enqueue_op,
feed_dict={lengths: length,
data: pair[0],
labels: pair[1]})
sess.run(close_input_op)
# Start the queue runners
threads = queue_runner_impl.start_queue_runners(coord=coord)
# Read off the top of the bucket and ensure correctness of output
_read_test(sess)
coord.request_stop()
coord.join(threads)
def testBucketBySequenceLength(self):
self._testBucketBySequenceLength(allow_small_batch=False)
def testBucketBySequenceLengthAllow(self):
self._testBucketBySequenceLength(allow_small_batch=True)
def testBucketBySequenceLengthBucketCapacities(self):
# Above bucket_boundaries = [3, 4, 5, 10] so we need 5 capacities.
with self.assertRaisesRegexp(ValueError, r"must have exactly num_buckets"):
self._testBucketBySequenceLength(allow_small_batch=False,
bucket_capacities=[32, 32, 32, 32])
# Test with different capacities.
capacities = [48, 40, 32, 24, 16]
self._testBucketBySequenceLength(allow_small_batch=True,
bucket_capacities=capacities)
def testBucketBySequenceLengthShutdown(self):
self._testBucketBySequenceLength(allow_small_batch=True,
drain_entire_queue=False)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/training/python/training/bucket_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Strategies for placing variables on parameter servers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import numpy as np
from tensorflow.python.framework import tensor_shape
class RandomStrategy(object):
"""Returns a random PS task for op placement.
This may perform better than the default round-robin placement if you
have a large number of variables. Depending on your architecture and
number of parameter servers, round-robin can lead to situations where
all of one type of variable is placed on a single PS task, which may
lead to contention issues.
This strategy uses a hash function on the name of each op for deterministic
placement.
"""
def __init__(self, num_ps_tasks, seed=0):
"""Creates a new `RandomStrategy`."""
self._num_tasks = num_ps_tasks
self._seed = seed
def __call__(self, op):
"""Chooses a ps task index for the given `Operation`."""
key = "%s_%d" % (op.name, self._seed)
key = key.encode("utf-8")
# Use MD5 instead of Python's built-in hash() to get consistent outputs
# between runs.
n = int(hashlib.md5(key).hexdigest(), 16)
return int(n % self._num_tasks)
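# --- Illustrative usage sketch (added commentary; not part of the original
# module). It assumes the public tf.compat.v1 API, and the variable names are
# made up:
#
#   strategy = RandomStrategy(num_ps_tasks=4, seed=1)
#   with tf.device(tf.compat.v1.train.replica_device_setter(
#       ps_tasks=4, ps_strategy=strategy)):
#     weights = tf.compat.v1.get_variable("weights", shape=[1024, 1024])
#
# Because placement hashes the op name, re-running the same graph-construction
# code yields the same variable-to-task assignment across runs.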
class GreedyLoadBalancingStrategy(object):
"""Returns the least-loaded ps task for op placement.
The load is calculated by a user-specified load function passed in at
construction. There are no units for load, and the load function is
responsible for providing an internally consistent measure.
Note that this strategy is very sensitive to the exact order in which
ps ops (typically variables) are created, as it greedily places ops
on the least-loaded ps at the point each op is processed.
One reasonable heuristic is the `byte_size_load_fn`, which
estimates load as the number of bytes that would be used to store and
transmit the entire variable. More advanced load functions
  could consider the difference in access patterns across ops, or trade
  off CPU-intensive ops against RAM-intensive ops and network bandwidth.
This class is intended to be used as a `ps_strategy` in
`tf.compat.v1.train.replica_device_setter`.
"""
def __init__(self, num_tasks, load_fn):
"""Create a new `LoadBalancingStrategy`.
Args:
num_tasks: Number of ps tasks to cycle among.
load_fn: A callable that takes an `Operation` and returns a
numeric load value for that op.
"""
self._num_tasks = num_tasks
self._load_fn = load_fn
self._ps_loads = np.zeros(num_tasks)
def __call__(self, op):
"""Choose a ps task index for the given `Operation`.
Args:
      op: An `Operation` to be placed on ps.
Returns:
The next ps task index to use for the `Operation`. Greedily
places the op on the least-loaded ps task so far, as determined
by the load function.
"""
task = np.argmin(self._ps_loads)
self._ps_loads[task] += self._load_fn(op)
return task
def byte_size_load_fn(op):
"""Load function that computes the byte size of a single-output `Operation`.
This is intended to be used with `"Variable"` ops, which have a single
`Tensor` output with the contents of the variable. However, it can also be
used for calculating the size of any op that has a single output.
Intended to be used with `GreedyLoadBalancingStrategy`.
Args:
op: An `Operation` with a single output, typically a "Variable" op.
Returns:
The number of bytes in the output `Tensor`.
Raises:
ValueError: if `op` does not have a single output, or if the shape of the
single output is not fully-defined.
"""
if len(op.outputs) != 1:
raise ValueError("Op %s must have a single output" % op)
output = op.outputs[0]
elem_size = output.dtype.size
shape = output.get_shape()
if not shape.is_fully_defined():
# Due to legacy behavior, scalar "Variable" ops have output Tensors that
# have unknown shape when the op is created (and hence passed to this
# load function for placement), even though the scalar shape is set
# explicitly immediately afterward.
shape = tensor_shape.TensorShape(op.get_attr("shape"))
shape.assert_is_fully_defined()
return shape.num_elements() * elem_size
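# --- Illustrative usage sketch (added commentary; not part of the original
# module). It pairs the greedy strategy with `byte_size_load_fn`, as the class
# docstring suggests; everything else assumes the public tf.compat.v1 API:
#
#   greedy = GreedyLoadBalancingStrategy(num_tasks=4, load_fn=byte_size_load_fn)
#   with tf.device(tf.compat.v1.train.replica_device_setter(
#       ps_tasks=4, ps_strategy=greedy)):
#     # Each variable below lands on the ps task with the smallest cumulative
#     # byte load at the moment the variable is created.
#     embedding = tf.compat.v1.get_variable("embedding", shape=[10000, 128])
#     bias = tf.compat.v1.get_variable("bias", shape=[128])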
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/training/python/training/device_setter.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hyperparameter values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import numbers
import re
import six
from tensorflow.contrib.training.python.training import hparam_pb2
from tensorflow.python.framework import ops
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
# Define the regular expression for parsing a single clause of the input
# (delimited by commas). A legal clause looks like:
# <variable name>[<index>]? = <rhs>
# where <rhs> is either a single token or [] enclosed list of tokens.
# For example: "var[1] = a" or "x = [1,2,3]"
PARAM_RE = re.compile(r"""
(?P<name>[a-zA-Z][\w\.]*) # variable name: "var" or "x"
(\[\s*(?P<index>\d+)\s*\])? # (optional) index: "1" or None
\s*=\s*
((?P<val>[^,\[]*) # single value: "a" or None
|
\[(?P<vals>[^\]]*)\]) # list of values: None or "1,2,3"
($|,\s*)""", re.VERBOSE)
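# Illustrative examples (added commentary; not part of the original module) of
# the groups PARAM_RE yields for the two clause forms described above:
#   PARAM_RE.match("arr[1]=5").groupdict()
#     => {'name': 'arr', 'index': '1', 'val': '5', 'vals': None}
#   PARAM_RE.match("x=[1,2,3]").groupdict()
#     => {'name': 'x', 'index': None, 'val': None, 'vals': '1,2,3'}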
def _parse_fail(name, var_type, value, values):
"""Helper function for raising a value error for bad assignment."""
raise ValueError(
'Could not parse hparam \'%s\' of type \'%s\' with value \'%s\' in %s' %
(name, var_type.__name__, value, values))
def _reuse_fail(name, values):
"""Helper function for raising a value error for reuse of name."""
raise ValueError('Multiple assignments to variable \'%s\' in %s' % (name,
values))
def _process_scalar_value(name, parse_fn, var_type, m_dict, values,
results_dictionary):
"""Update results_dictionary with a scalar value.
Used to update the results_dictionary to be returned by parse_values when
encountering a clause with a scalar RHS (e.g. "s=5" or "arr[0]=5".)
Mutates results_dictionary.
Args:
name: Name of variable in assignment ("s" or "arr").
parse_fn: Function for parsing the actual value.
var_type: Type of named variable.
m_dict: Dictionary constructed from regex parsing.
m_dict['val']: RHS value (scalar)
m_dict['index']: List index value (or None)
values: Full expression being parsed
results_dictionary: The dictionary being updated for return by the parsing
function.
Raises:
ValueError: If the name has already been used.
"""
try:
parsed_value = parse_fn(m_dict['val'])
except ValueError:
_parse_fail(name, var_type, m_dict['val'], values)
# If no index is provided
if not m_dict['index']:
if name in results_dictionary:
_reuse_fail(name, values)
results_dictionary[name] = parsed_value
else:
if name in results_dictionary:
        # If the name has already been used as a scalar, it will be in this
        # dictionary and map to a non-dictionary; reusing it here is an error.
if not isinstance(results_dictionary.get(name), dict):
_reuse_fail(name, values)
else:
results_dictionary[name] = {}
index = int(m_dict['index'])
# Make sure the index position hasn't already been assigned a value.
if index in results_dictionary[name]:
_reuse_fail('{}[{}]'.format(name, index), values)
results_dictionary[name][index] = parsed_value
def _process_list_value(name, parse_fn, var_type, m_dict, values,
results_dictionary):
"""Update results_dictionary from a list of values.
Used to update results_dictionary to be returned by parse_values when
encountering a clause with a list RHS (e.g. "arr=[1,2,3]".)
Mutates results_dictionary.
Args:
name: Name of variable in assignment ("arr").
parse_fn: Function for parsing individual values.
var_type: Type of named variable.
m_dict: Dictionary constructed from regex parsing.
m_dict['val']: RHS value (scalar)
values: Full expression being parsed
results_dictionary: The dictionary being updated for return by the parsing
function.
Raises:
ValueError: If the name has an index or the values cannot be parsed.
"""
if m_dict['index'] is not None:
raise ValueError('Assignment of a list to a list index.')
elements = filter(None, re.split('[ ,]', m_dict['vals']))
# Make sure the name hasn't already been assigned a value
if name in results_dictionary:
    _reuse_fail(name, values)
try:
results_dictionary[name] = [parse_fn(e) for e in elements]
except ValueError:
_parse_fail(name, var_type, m_dict['vals'], values)
def _cast_to_type_if_compatible(name, param_type, value):
"""Cast hparam to the provided type, if compatible.
Args:
name: Name of the hparam to be cast.
param_type: The type of the hparam.
value: The value to be cast, if compatible.
Returns:
The result of casting `value` to `param_type`.
Raises:
ValueError: If the type of `value` is not compatible with param_type.
* If `param_type` is a string type, but `value` is not.
* If `param_type` is a boolean, but `value` is not, or vice versa.
* If `param_type` is an integer type, but `value` is not.
* If `param_type` is a float type, but `value` is not a numeric type.
"""
fail_msg = (
"Could not cast hparam '%s' of type '%s' from value %r" %
(name, param_type, value))
# If `value` is already of type `param_type`, return it directly.
# `isinstance` is too weak (e.g. isinstance(True, int) == True).
if type(value) == param_type: # pylint: disable=unidiomatic-typecheck
return value
# Some callers use None, for which we can't do any casting/checking. :(
if issubclass(param_type, type(None)):
return value
# Avoid converting a non-string type to a string.
if (issubclass(param_type, (six.string_types, six.binary_type)) and
not isinstance(value, (six.string_types, six.binary_type))):
raise ValueError(fail_msg)
# Avoid converting a number or string type to a boolean or vice versa.
if issubclass(param_type, bool) != isinstance(value, bool):
raise ValueError(fail_msg)
# Avoid converting float to an integer (the reverse is fine).
if (issubclass(param_type, numbers.Integral) and
not isinstance(value, numbers.Integral)):
raise ValueError(fail_msg)
# Avoid converting a non-numeric type to a numeric type.
if (issubclass(param_type, numbers.Number) and
not isinstance(value, numbers.Number)):
raise ValueError(fail_msg)
return param_type(value)
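# Illustrative behavior (added commentary; not part of the original module):
#   _cast_to_type_if_compatible('learning_rate', float, 3)   => 3.0
#   _cast_to_type_if_compatible('num_units', int, 3.5)       => ValueError
#   _cast_to_type_if_compatible('use_bias', bool, 1)         => ValueError
#   _cast_to_type_if_compatible('dropout', float, True)      => ValueError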
def parse_values(values, type_map, ignore_unknown=False):
"""Parses hyperparameter values from a string into a python map.
`values` is a string containing comma-separated `name=value` pairs.
For each pair, the value of the hyperparameter named `name` is set to
`value`.
If a hyperparameter name appears multiple times in `values`, a ValueError
is raised (e.g. 'a=1,a=2', 'a[1]=1,a[1]=2').
  If a hyperparameter name appears in both an index assignment and a scalar
  assignment, a ValueError is raised (e.g. 'a=[1,2,3],a[0] = 1').
The hyperparameter name may contain '.' symbols, which will result in an
attribute name that is only accessible through the getattr and setattr
  functions. (And must first be explicitly added through add_hparam.)
WARNING: Use of '.' in your variable names is allowed, but is not well
supported and not recommended.
  The `value` in `name=value` must follow the syntax according to the
type of the parameter:
  * Scalar integer: A Python-parsable integer value. E.g.: 1,
100, -12.
* Scalar float: A Python-parsable floating point value. E.g.: 1.0,
-.54e89.
* Boolean: Either true or false.
* Scalar string: A non-empty sequence of characters, excluding comma,
spaces, and square brackets. E.g.: foo, bar_1.
* List: A comma separated list of scalar values of the parameter type
enclosed in square brackets. E.g.: [1,2,3], [1.0,1e-12], [high,low].
When index assignment is used, the corresponding type_map key should be the
list name. E.g. for "arr[1]=0" the type_map must have the key "arr" (not
"arr[1]").
Args:
values: String. Comma separated list of `name=value` pairs where
'value' must follow the syntax described above.
type_map: A dictionary mapping hyperparameter names to types. Note every
parameter name in values must be a key in type_map. The values must
conform to the types indicated, where a value V is said to conform to a
type T if either V has type T, or V is a list of elements of type T.
Hence, for a multidimensional parameter 'x' taking float values,
'x=[0.1,0.2]' will parse successfully if type_map['x'] = float.
ignore_unknown: Bool. Whether values that are missing a type in type_map
should be ignored. If set to True, a ValueError will not be raised for
unknown hyperparameter type.
Returns:
A python map mapping each name to either:
* A scalar value.
* A list of scalar values.
* A dictionary mapping index numbers to scalar values.
(e.g. "x=5,L=[1,2],arr[1]=3" results in {'x':5,'L':[1,2],'arr':{1:3}}")
Raises:
ValueError: If there is a problem with input.
* If `values` cannot be parsed.
* If a list is assigned to a list index (e.g. 'a[1] = [1,2,3]').
    * If the same name is assigned two different values (e.g. 'a=1,a=2',
'a[1]=1,a[1]=2', or 'a=1,a=[1]')
"""
results_dictionary = {}
pos = 0
while pos < len(values):
m = PARAM_RE.match(values, pos)
if not m:
raise ValueError('Malformed hyperparameter value: %s' % values[pos:])
# Check that there is a comma between parameters and move past it.
pos = m.end()
# Parse the values.
m_dict = m.groupdict()
name = m_dict['name']
if name not in type_map:
if ignore_unknown:
continue
raise ValueError('Unknown hyperparameter type for %s' % name)
type_ = type_map[name]
# Set up correct parsing function (depending on whether type_ is a bool)
if type_ == bool:
def parse_bool(value):
if value in ['true', 'True']:
return True
elif value in ['false', 'False']:
return False
else:
try:
return bool(int(value))
except ValueError:
_parse_fail(name, type_, value, values)
parse = parse_bool
else:
parse = type_
    # If a single value is provided
if m_dict['val'] is not None:
_process_scalar_value(name, parse, type_, m_dict, values,
results_dictionary)
# If the assigned value is a list:
elif m_dict['vals'] is not None:
_process_list_value(name, parse, type_, m_dict, values,
results_dictionary)
else: # Not assigned a list or value
_parse_fail(name, type_, '', values)
return results_dictionary
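# Illustrative example (added commentary; not part of the original module),
# mirroring the docstring above:
#   parse_values('x=5,L=[1,2],arr[1]=3', {'x': int, 'L': int, 'arr': int})
#     => {'x': 5, 'L': [1, 2], 'arr': {1: 3}}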
class HParams(object):
"""Class to hold a set of hyperparameters as name-value pairs.
A `HParams` object holds hyperparameters used to build and train a model,
such as the number of hidden units in a neural net layer or the learning rate
to use when training.
You first create a `HParams` object by specifying the names and values of the
hyperparameters.
To make them easily accessible the parameter names are added as direct
attributes of the class. A typical usage is as follows:
```python
# Create a HParams object specifying names and values of the model
# hyperparameters:
hparams = HParams(learning_rate=0.1, num_hidden_units=100)
  # The hyperparameters are available as attributes of the HParams object:
hparams.learning_rate ==> 0.1
hparams.num_hidden_units ==> 100
```
  Hyperparameters have a type, which is inferred from the type of the value
  passed at construction time. The currently supported types are: integer,
float, boolean, string, and list of integer, float, boolean, or string.
You can override hyperparameter values by calling the
[`parse()`](#HParams.parse) method, passing a string of comma separated
`name=value` pairs. This is intended to make it possible to override
any hyperparameter values from a single command-line flag to which
the user passes 'hyper-param=value' pairs. It avoids having to define
one flag for each hyperparameter.
The syntax expected for each value depends on the type of the parameter.
See `parse()` for a description of the syntax.
Example:
```python
# Define a command line flag to pass name=value pairs.
# For example using argparse:
import argparse
parser = argparse.ArgumentParser(description='Train my model.')
parser.add_argument('--hparams', type=str,
help='Comma separated list of "name=value" pairs.')
args = parser.parse_args()
...
def my_program():
# Create a HParams object specifying the names and values of the
# model hyperparameters:
hparams = tf.contrib.training.HParams(
learning_rate=0.1,
num_hidden_units=100,
activations=['relu', 'tanh'])
# Override hyperparameters values by parsing the command line
hparams.parse(args.hparams)
# If the user passed `--hparams=learning_rate=0.3` on the command line
# then 'hparams' has the following attributes:
hparams.learning_rate ==> 0.3
hparams.num_hidden_units ==> 100
hparams.activations ==> ['relu', 'tanh']
# If the hyperparameters are in json format use parse_json:
hparams.parse_json('{"learning_rate": 0.3, "activations": "relu"}')
```
"""
_HAS_DYNAMIC_ATTRIBUTES = True # Required for pytype checks.
def __init__(self, hparam_def=None, model_structure=None, **kwargs):
"""Create an instance of `HParams` from keyword arguments.
The keyword arguments specify name-values pairs for the hyperparameters.
The parameter types are inferred from the type of the values passed.
The parameter names are added as attributes of `HParams` object, so they
can be accessed directly with the dot notation `hparams._name_`.
Example:
```python
# Define 3 hyperparameters: 'learning_rate' is a float parameter,
# 'num_hidden_units' an integer parameter, and 'activation' a string
# parameter.
hparams = tf.contrib.training.HParams(
learning_rate=0.1, num_hidden_units=100, activation='relu')
hparams.activation ==> 'relu'
```
Note that a few names are reserved and cannot be used as hyperparameter
    names. If you use one of the reserved names, the constructor raises a
`ValueError`.
Args:
hparam_def: Serialized hyperparameters, encoded as a hparam_pb2.HParamDef
protocol buffer. If provided, this object is initialized by
deserializing hparam_def. Otherwise **kwargs is used.
model_structure: An instance of ModelStructure, defining the feature
crosses to be used in the Trial.
**kwargs: Key-value pairs where the key is the hyperparameter name and
the value is the value for the parameter.
Raises:
ValueError: If both `hparam_def` and initialization values are provided,
or if one of the arguments is invalid.
"""
# Register the hyperparameters and their type in _hparam_types.
# This simplifies the implementation of parse().
# _hparam_types maps the parameter name to a tuple (type, bool).
# The type value is the type of the parameter for scalar hyperparameters,
# or the type of the list elements for multidimensional hyperparameters.
# The bool value is True if the value is a list, False otherwise.
self._hparam_types = {}
self._model_structure = model_structure
if hparam_def:
self._init_from_proto(hparam_def)
if kwargs:
raise ValueError('hparam_def and initialization values are '
'mutually exclusive')
else:
for name, value in six.iteritems(kwargs):
self.add_hparam(name, value)
def _init_from_proto(self, hparam_def):
"""Creates a new HParams from `HParamDef` protocol buffer.
Args:
hparam_def: `HParamDef` protocol buffer.
"""
assert isinstance(hparam_def, hparam_pb2.HParamDef)
for name, value in hparam_def.hparam.items():
kind = value.WhichOneof('kind')
if kind.endswith('_value'):
# Single value.
if kind.startswith('int64'):
# Setting attribute value to be 'int' to ensure the type is compatible
# with both Python2 and Python3.
self.add_hparam(name, int(getattr(value, kind)))
elif kind.startswith('bytes'):
# Setting attribute value to be 'str' to ensure the type is compatible
# with both Python2 and Python3. UTF-8 encoding is assumed.
self.add_hparam(name, compat.as_str(getattr(value, kind)))
else:
self.add_hparam(name, getattr(value, kind))
else:
# List of values.
if kind.startswith('int64'):
# Setting attribute value to be 'int' to ensure the type is compatible
# with both Python2 and Python3.
self.add_hparam(name, [int(v) for v in getattr(value, kind).value])
elif kind.startswith('bytes'):
# Setting attribute value to be 'str' to ensure the type is compatible
# with both Python2 and Python3. UTF-8 encoding is assumed.
self.add_hparam(
name, [compat.as_str(v) for v in getattr(value, kind).value])
else:
self.add_hparam(name, [v for v in getattr(value, kind).value])
def add_hparam(self, name, value):
"""Adds {name, value} pair to hyperparameters.
Args:
name: Name of the hyperparameter.
value: Value of the hyperparameter. Can be one of the following types:
int, float, string, int list, float list, or string list.
Raises:
ValueError: if one of the arguments is invalid.
"""
    # Keys in kwargs are unique, but 'name' could be the name of a pre-existing
# attribute of this object. In that case we refuse to use it as a
# hyperparameter name.
if getattr(self, name, None) is not None:
raise ValueError('Hyperparameter name is reserved: %s' % name)
if isinstance(value, (list, tuple)):
if not value:
raise ValueError(
'Multi-valued hyperparameters cannot be empty: %s' % name)
self._hparam_types[name] = (type(value[0]), True)
else:
self._hparam_types[name] = (type(value), False)
setattr(self, name, value)
def set_hparam(self, name, value):
"""Set the value of an existing hyperparameter.
This function verifies that the type of the value matches the type of the
existing hyperparameter.
Args:
name: Name of the hyperparameter.
value: New value of the hyperparameter.
Raises:
KeyError: If the hyperparameter doesn't exist.
ValueError: If there is a type mismatch.
"""
param_type, is_list = self._hparam_types[name]
if isinstance(value, list):
if not is_list:
raise ValueError(
'Must not pass a list for single-valued parameter: %s' % name)
setattr(self, name, [
_cast_to_type_if_compatible(name, param_type, v) for v in value])
else:
if is_list:
raise ValueError(
'Must pass a list for multi-valued parameter: %s.' % name)
setattr(self, name, _cast_to_type_if_compatible(name, param_type, value))
def del_hparam(self, name):
"""Removes the hyperparameter with key 'name'.
Does nothing if it isn't present.
Args:
name: Name of the hyperparameter.
"""
if hasattr(self, name):
delattr(self, name)
del self._hparam_types[name]
def parse(self, values):
"""Override existing hyperparameter values, parsing new values from a string.
See parse_values for more detail on the allowed format for values.
Args:
values: String. Comma separated list of `name=value` pairs where 'value'
must follow the syntax described above.
Returns:
The `HParams` instance.
Raises:
ValueError: If `values` cannot be parsed or a hyperparameter in `values`
doesn't exist.
"""
type_map = {}
for name, t in self._hparam_types.items():
param_type, _ = t
type_map[name] = param_type
values_map = parse_values(values, type_map)
return self.override_from_dict(values_map)
def override_from_dict(self, values_dict):
"""Override existing hyperparameter values, parsing new values from a dictionary.
Args:
values_dict: Dictionary of name:value pairs.
Returns:
The `HParams` instance.
Raises:
KeyError: If a hyperparameter in `values_dict` doesn't exist.
ValueError: If `values_dict` cannot be parsed.
"""
for name, value in values_dict.items():
self.set_hparam(name, value)
return self
@deprecation.deprecated(None, 'Use `override_from_dict`.')
def set_from_map(self, values_map):
"""DEPRECATED. Use override_from_dict."""
return self.override_from_dict(values_dict=values_map)
def set_model_structure(self, model_structure):
self._model_structure = model_structure
def get_model_structure(self):
return self._model_structure
def to_json(self, indent=None, separators=None, sort_keys=False):
"""Serializes the hyperparameters into JSON.
Args:
indent: If a non-negative integer, JSON array elements and object members
will be pretty-printed with that indent level. An indent level of 0, or
negative, will only insert newlines. `None` (the default) selects the
most compact representation.
separators: Optional `(item_separator, key_separator)` tuple. Default is
`(', ', ': ')`.
sort_keys: If `True`, the output dictionaries will be sorted by key.
Returns:
A JSON string.
"""
return json.dumps(
self.values(),
indent=indent,
separators=separators,
sort_keys=sort_keys)
def parse_json(self, values_json):
"""Override existing hyperparameter values, parsing new values from a json object.
Args:
values_json: String containing a json object of name:value pairs.
Returns:
The `HParams` instance.
Raises:
KeyError: If a hyperparameter in `values_json` doesn't exist.
ValueError: If `values_json` cannot be parsed.
"""
values_map = json.loads(values_json)
return self.override_from_dict(values_map)
def values(self):
"""Return the hyperparameter values as a Python dictionary.
Returns:
A dictionary with hyperparameter names as keys. The values are the
hyperparameter values.
"""
return {n: getattr(self, n) for n in self._hparam_types.keys()}
def get(self, key, default=None):
"""Returns the value of `key` if it exists, else `default`."""
if key in self._hparam_types:
# Ensure that default is compatible with the parameter type.
if default is not None:
param_type, is_param_list = self._hparam_types[key]
type_str = 'list<%s>' % param_type if is_param_list else str(param_type)
fail_msg = ("Hparam '%s' of type '%s' is incompatible with "
'default=%s' % (key, type_str, default))
is_default_list = isinstance(default, list)
if is_param_list != is_default_list:
raise ValueError(fail_msg)
try:
if is_default_list:
for value in default:
_cast_to_type_if_compatible(key, param_type, value)
else:
_cast_to_type_if_compatible(key, param_type, default)
except ValueError as e:
raise ValueError('%s. %s' % (fail_msg, e))
return getattr(self, key)
return default
def __contains__(self, key):
return key in self._hparam_types
def __str__(self):
hpdict = self.values()
output_list = ['{}={}'.format(key, hpdict[key]) for key in hpdict]
return ','.join(output_list)
def __repr__(self):
strval = str(sorted(self.values().items()))
return '%s(%s)' % (type(self).__name__, strval)
@staticmethod
def _get_kind_name(param_type, is_list):
"""Returns the field name given parameter type and is_list.
Args:
param_type: Data type of the hparam.
is_list: Whether this is a list.
Returns:
A string representation of the field name.
Raises:
ValueError: If parameter type is not recognized.
"""
if issubclass(param_type, bool):
# This check must happen before issubclass(param_type, six.integer_types),
# since Python considers bool to be a subclass of int.
typename = 'bool'
elif issubclass(param_type, six.integer_types):
# Setting 'int' and 'long' types to be 'int64' to ensure the type is
# compatible with both Python2 and Python3.
typename = 'int64'
elif issubclass(param_type, (six.string_types, six.binary_type)):
# Setting 'string' and 'bytes' types to be 'bytes' to ensure the type is
# compatible with both Python2 and Python3.
typename = 'bytes'
elif issubclass(param_type, float):
typename = 'float'
else:
raise ValueError('Unsupported parameter type: %s' % str(param_type))
suffix = 'list' if is_list else 'value'
return '_'.join([typename, suffix])
def to_proto(self, export_scope=None): # pylint: disable=unused-argument
"""Converts a `HParams` object to a `HParamDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `HParamDef` protocol buffer.
"""
hparam_proto = hparam_pb2.HParamDef()
for name in self._hparam_types:
# Parse the values.
param_type, is_list = self._hparam_types.get(name, (None, None))
kind = HParams._get_kind_name(param_type, is_list)
if is_list:
if kind.startswith('bytes'):
v_list = [compat.as_bytes(v) for v in getattr(self, name)]
else:
v_list = [v for v in getattr(self, name)]
getattr(hparam_proto.hparam[name], kind).value.extend(v_list)
else:
v = getattr(self, name)
if kind.startswith('bytes'):
v = compat.as_bytes(getattr(self, name))
setattr(hparam_proto.hparam[name], kind, v)
return hparam_proto
@staticmethod
def from_proto(hparam_def, import_scope=None): # pylint: disable=unused-argument
return HParams(hparam_def=hparam_def)
ops.register_proto_function(
'hparams',
proto_type=hparam_pb2.HParamDef,
to_proto=HParams.to_proto,
from_proto=HParams.from_proto)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/training/python/training/hparam.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.estimator.inputs.queues.feeding_functions import _enqueue_data as enqueue_data
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/training/python/training/feeding_queue_runner_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sampling functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import input as input_ops
__all__ = [
'rejection_sample',
'stratified_sample',
]
def rejection_sample(tensors,
accept_prob_fn,
batch_size,
queue_threads=1,
enqueue_many=False,
prebatch_capacity=16,
prebatch_threads=1,
runtime_checks=False,
name=None):
"""Stochastically creates batches by rejection sampling.
Each list of non-batched tensors is evaluated by `accept_prob_fn`, to produce
a scalar tensor between 0 and 1. This tensor corresponds to the probability of
being accepted. When `batch_size` tensor groups have been accepted, the batch
queue will return a mini-batch.
Args:
tensors: List of tensors for data. All tensors are either one item or a
batch, according to enqueue_many.
accept_prob_fn: A python lambda that takes a non-batch tensor from each
item in `tensors`, and produces a scalar tensor.
batch_size: Size of batch to be returned.
queue_threads: The number of threads for the queue that will hold the final
batch.
enqueue_many: Bool. If true, interpret input tensors as having a batch
dimension.
prebatch_capacity: Capacity for the large queue that is used to convert
batched tensors to single examples.
prebatch_threads: Number of threads for the large queue that is used to
convert batched tensors to single examples.
runtime_checks: Bool. If true, insert runtime checks on the output of
`accept_prob_fn`. Using `True` might have a performance impact.
name: Optional prefix for ops created by this function.
Raises:
ValueError: enqueue_many is True and labels doesn't have a batch
dimension, or if enqueue_many is False and labels isn't a scalar.
ValueError: enqueue_many is True, and batch dimension on data and labels
don't match.
ValueError: if a zero initial probability class has a nonzero target
probability.
Returns:
A list of tensors of the same length as `tensors`, with batch dimension
`batch_size`.
Example:
# Get tensor for a single data and label example.
data, label = data_provider.Get(['data', 'label'])
# Get stratified batch according to data tensor.
accept_prob_fn = lambda x: (tf.tanh(x[0]) + 1) / 2
data_batch = tf.contrib.training.rejection_sample(
[data, label], accept_prob_fn, 16)
# Run batch through network.
...
"""
with variable_scope.variable_scope(name, 'rejection_sample', tensors):
tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensors)
# Reduce the case of a batched example to that of a batch of a single
# example by taking a batch of size one.
if enqueue_many:
# Validate that batch dimension of the input is consistent.
tensor_list = _verify_data_inputs(tensor_list)
# Make a single queue to hold input examples. Reshape output so examples
# don't have singleton batch dimension.
batched = input_ops.batch(
tensor_list,
batch_size=1,
num_threads=prebatch_threads,
capacity=prebatch_capacity,
enqueue_many=True)
tensor_list = [array_ops.squeeze(x, [0]) for x in batched]
# Set up a queue containing batches that have the distribution.
cur_prob = accept_prob_fn(tensor_list)
if runtime_checks:
cur_prob = array_ops.identity(
control_flow_ops.with_dependencies([
check_ops.assert_less_equal(0.0, cur_prob),
check_ops.assert_less_equal(cur_prob, 1.0)
], cur_prob),
name='prob_with_checks')
minibatch = input_ops.maybe_batch(
tensor_list,
keep_input=random_ops.random_uniform([]) < cur_prob,
batch_size=batch_size,
num_threads=queue_threads)
# Queues return a single tensor if the list of enqueued tensors is one. Since
# we want the type to always be the same, always return a list.
if isinstance(minibatch, ops.Tensor):
minibatch = [minibatch]
return minibatch
def stratified_sample(tensors,
labels,
target_probs,
batch_size,
init_probs=None,
enqueue_many=False,
queue_capacity=16,
threads_per_queue=1,
name=None):
"""Stochastically creates batches based on per-class probabilities.
This method discards examples. Internally, it creates one queue to amortize
the cost of disk reads, and one queue to hold the properly-proportioned
batch.
Args:
tensors: List of tensors for data. All tensors are either one item or a
batch, according to enqueue_many.
labels: Tensor for label of data. Label is a single integer or a batch,
depending on `enqueue_many`. It is not a one-hot vector.
target_probs: Target class proportions in batch. An object whose type has a
registered Tensor conversion function.
batch_size: Size of batch to be returned.
init_probs: Class proportions in the data. An object whose type has a
registered Tensor conversion function, or `None` for estimating the
initial distribution.
enqueue_many: Bool. If true, interpret input tensors as having a batch
dimension.
queue_capacity: Capacity of the large queue that holds input examples.
threads_per_queue: Number of threads for the large queue that holds input
examples and for the final queue with the proper class proportions.
name: Optional prefix for ops created by this function.
Raises:
ValueError: If `tensors` isn't iterable.
ValueError: `enqueue_many` is True and labels doesn't have a batch
dimension, or if `enqueue_many` is False and labels isn't a scalar.
ValueError: `enqueue_many` is True, and batch dimension on data and labels
don't match.
ValueError: if probs don't sum to one.
ValueError: if a zero initial probability class has a nonzero target
probability.
TFAssertion: if labels aren't integers in [0, num classes).
Returns:
(data_batch, label_batch), where data_batch is a list of tensors of the same
length as `tensors`
Example:
# Get tensor for a single data and label example.
data, label = data_provider.Get(['data', 'label'])
# Get stratified batch according to per-class probabilities.
target_probs = [...distribution you want...]
[data_batch], labels = tf.contrib.training.stratified_sample(
[data], label, target_probs)
# Run batch through network.
...
"""
with ops.name_scope(name, 'stratified_sample', list(tensors) + [labels]):
tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensors)
labels = ops.convert_to_tensor(labels)
target_probs = ops.convert_to_tensor(target_probs, dtype=dtypes.float32)
# Reduce the case of a single example to that of a batch of size 1.
if not enqueue_many:
tensor_list = [array_ops.expand_dims(tensor, 0) for tensor in tensor_list]
labels = array_ops.expand_dims(labels, 0)
# If `init_probs` is `None`, set up online estimation of data distribution.
if init_probs is None:
# We use `target_probs` to get the number of classes, so its shape must be
# fully defined at graph construction time.
target_probs.get_shape().assert_is_fully_defined()
init_probs = _estimate_data_distribution(
labels, target_probs.get_shape().num_elements())
else:
init_probs = ops.convert_to_tensor(init_probs, dtype=dtypes.float32)
# Validate that input is consistent.
tensor_list, labels, [init_probs, target_probs] = _verify_input(
tensor_list, labels, [init_probs, target_probs])
# Check that all zero initial probabilities also have zero target
# probabilities.
assert_op = control_flow_ops.Assert(
math_ops.reduce_all(
math_ops.logical_or(
math_ops.not_equal(init_probs, 0),
math_ops.equal(target_probs, 0))),
['All classes with zero initial probability must also have zero target '
'probability: ', init_probs, target_probs
])
init_probs = control_flow_ops.with_dependencies([assert_op], init_probs)
# Calculate acceptance sampling probabilities.
accept_probs = _calculate_acceptance_probabilities(init_probs, target_probs)
proportion_rejected = math_ops.reduce_sum((1 - accept_probs) * init_probs)
accept_probs = control_flow_ops.cond(
math_ops.less(proportion_rejected, .5),
lambda: accept_probs,
lambda: logging_ops.Print( # pylint: disable=g-long-lambda
accept_probs, [accept_probs],
message='Proportion of examples rejected by sampler is high.',
first_n=10))
# Make a single queue to hold input examples. Reshape output so examples
# don't have singleton batch dimension.
batched = input_ops.batch(
tensor_list + [labels],
batch_size=1,
num_threads=threads_per_queue,
capacity=queue_capacity,
enqueue_many=True)
val_list = [array_ops.squeeze(x, [0]) for x in batched[:-1]]
label = array_ops.squeeze(batched[-1], [0])
# Set up second queue containing batches that have the desired class
# proportions.
cur_prob = array_ops.gather(accept_probs, label)
batched = input_ops.maybe_batch(
val_list + [label],
keep_input=random_ops.random_uniform([]) < cur_prob,
batch_size=batch_size,
num_threads=threads_per_queue)
return batched[:-1], batched[-1]
def _estimate_data_distribution(labels, num_classes, smoothing_constant=10):
"""Estimate data distribution as labels are seen."""
# Variable to track running count of classes. Smooth by a nonzero value to
# avoid division-by-zero. Higher values provide more stability at the cost of
# slower convergence.
if smoothing_constant <= 0:
    raise ValueError('smoothing_constant must be positive.')
num_examples_per_class_seen = variable_scope.variable(
initial_value=[smoothing_constant] * num_classes,
trainable=False,
name='class_count',
dtype=dtypes.int64)
# Update the class-count based on what labels are seen in batch.
num_examples_per_class_seen = num_examples_per_class_seen.assign_add(
math_ops.reduce_sum(
array_ops.one_hot(
labels, num_classes, dtype=dtypes.int64), 0))
# Normalize count into a probability.
# NOTE: Without the `+= 0` line below, the test
# `testMultiThreadedEstimateDataDistribution` fails. The reason is that
# before this line, `num_examples_per_class_seen` is a Tensor that shares a
# buffer with an underlying `ref` object. When the `ref` is changed by another
# thread, `num_examples_per_class_seen` changes as well. Since this can happen
# in the middle of the normalization computation, we get probabilities that
# are very far from summing to one. Adding `+= 0` copies the contents of the
# tensor to a new buffer, which will be consistent from the start to the end
# of the normalization computation.
num_examples_per_class_seen += 0
init_prob_estimate = math_ops.truediv(
num_examples_per_class_seen,
math_ops.reduce_sum(num_examples_per_class_seen))
# Must return float32 (not float64) to agree with downstream `_verify_input`
# checks.
return math_ops.cast(init_prob_estimate, dtypes.float32)
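# Worked example (added commentary; not part of the original module): with
# num_classes=2 and smoothing_constant=10 the running counts start at [10, 10].
# After a batch with labels [0, 0, 1] they become [12, 11], so the estimate is
# [12/23, 11/23] ~= [0.52, 0.48]; the smoothing keeps early estimates close to
# uniform until enough labels have been observed.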
def _verify_data_inputs(tensor_list):
"""Verify that batched data inputs are well-formed."""
for tensor in tensor_list:
# Data tensor should have a batch dimension.
shape = tensor.get_shape().with_rank_at_least(1)
# Data batch dimensions must be compatible.
tensor_shape.dimension_at_index(shape, 0).assert_is_compatible_with(
tensor_list[0].get_shape()[0])
return tensor_list
def _verify_input(tensor_list, labels, probs_list):
"""Verify that batched inputs are well-formed."""
checked_probs_list = []
for probs in probs_list:
# Since number of classes shouldn't change at runtime, probabilities shape
# should be fully defined.
probs.get_shape().assert_is_fully_defined()
# Probabilities must be 1D.
probs.get_shape().assert_has_rank(1)
# Probabilities must be nonnegative and sum to one.
tol = 1e-6
prob_sum = math_ops.reduce_sum(probs)
checked_probs = control_flow_ops.with_dependencies([
check_ops.assert_non_negative(probs),
check_ops.assert_less(prob_sum, 1.0 + tol),
check_ops.assert_less(1.0 - tol, prob_sum)
], probs)
checked_probs_list.append(checked_probs)
# All probabilities should be the same length.
prob_length = checked_probs_list[0].get_shape().num_elements()
for checked_prob in checked_probs_list:
if checked_prob.get_shape().num_elements() != prob_length:
raise ValueError('Probability parameters must have the same length.')
# Labels tensor should only have batch dimension.
labels.get_shape().assert_has_rank(1)
for tensor in tensor_list:
# Data tensor should have a batch dimension.
shape = tensor.get_shape().with_rank_at_least(1)
# Data and label batch dimensions must be compatible.
tensor_shape.dimension_at_index(shape, 0).assert_is_compatible_with(
labels.get_shape()[0])
# Data and labels must have the same, strictly positive batch size. Since we
# can't assume we know the batch size at graph creation, add runtime checks.
labels_batch_size = array_ops.shape(labels)[0]
lbl_assert = check_ops.assert_positive(labels_batch_size)
# Make each tensor depend on its own checks.
labels = control_flow_ops.with_dependencies([lbl_assert], labels)
tensor_list = [
control_flow_ops.with_dependencies([
lbl_assert,
check_ops.assert_equal(array_ops.shape(x)[0], labels_batch_size)
], x) for x in tensor_list
]
# Label's classes must be integers 0 <= x < num_classes.
labels = control_flow_ops.with_dependencies([
check_ops.assert_integer(labels), check_ops.assert_non_negative(labels),
check_ops.assert_less(labels, math_ops.cast(prob_length, labels.dtype))
], labels)
return tensor_list, labels, checked_probs_list
def _calculate_acceptance_probabilities(init_probs, target_probs):
"""Calculate the per-class acceptance rates.
Args:
init_probs: The class probabilities of the data.
target_probs: The desired class proportion in minibatches.
Returns:
A list of the per-class acceptance probabilities.
This method is based on solving the following analysis:
Let F be the probability of a rejection (on any example).
Let p_i be the proportion of examples in the data in class i (init_probs)
  Let a_i be the rate the rejection sampler should *accept* class i
  Let t_i be the target proportion in the minibatches for class i (target_probs)
```
F = sum_i(p_i * (1-a_i))
= 1 - sum_i(p_i * a_i) using sum_i(p_i) = 1
```
An example with class `i` will be accepted if `k` rejections occur, then an
example with class `i` is seen by the rejector, and it is accepted. This can
be written as follows:
```
t_i = sum_k=0^inf(F^k * p_i * a_i)
      = p_i * a_i / (1 - F) using geometric series identity, since 0 <= F < 1
= p_i * a_i / sum_j(p_j * a_j) using F from above
```
Note that the following constraints hold:
```
0 <= p_i <= 1, sum_i(p_i) = 1
0 <= a_i <= 1
0 <= t_i <= 1, sum_i(t_i) = 1
```
A solution for a_i in terms of the other variables is the following:
```a_i = (t_i / p_i) / max_i[t_i / p_i]```
"""
# Make list of t_i / p_i.
ratio_l = target_probs / init_probs
# Replace NaNs with 0s.
ratio_l = array_ops.where_v2(
math_ops.is_nan(ratio_l), array_ops.zeros_like(ratio_l), ratio_l)
# Calculate list of acceptance probabilities.
max_ratio = math_ops.reduce_max(ratio_l)
return ratio_l / max_ratio
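# Worked example (added commentary; not part of the original module): for
# init_probs=[0.5, 0.5] and target_probs=[0.9, 0.1], the ratios t_i/p_i are
# [1.8, 0.2], so the acceptance probabilities are [1.8, 0.2] / 1.8 =
# [1.0, 0.111]. Plugging back in, p_i * a_i = [0.5, 0.0556]; normalizing by the
# sum (0.5556) recovers the target proportions [0.9, 0.1].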
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/training/python/training/sampling_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `QueueRunner` that takes a feed function as an argument."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.estimator.inputs.queues.feeding_queue_runner import _FeedingQueueRunner as FeedingQueueRunner
# pylint: enable=unused-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/training/python/training/feeding_queue_runner.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SGDR learning rate decay function."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops, control_flow_ops
def sgdr_decay(learning_rate, global_step, initial_period_steps,
t_mul=2.0, m_mul=1.0, name=None):
"""Implements Stochastic Gradient Descent with Warm Restarts (SGDR).
As described in "SGDR: Stochastic Gradient Descent
with Warm Restarts" by Ilya Loshchilov & Frank Hutter, Proceedings of
ICLR'2017, available at https://arxiv.org/pdf/1608.03983.pdf
The learning rate decreases according to cosine annealing:
```python
learning_rate * 0.5 * (1 + cos(x_val * pi)) # for x_val defined in [0, 1]
```
Thus, at the beginning (when the restart index i = 0),
the learning rate decreases for `initial_period_steps` steps from the initial
learning rate `learning_rate` (when `x_val=0`, we get `cos(0)=1`) to
0 (when `x_val=1`, we get `cos(pi)=-1`).
The decrease within the i-th period takes `t_i` steps,
where `t_0` = `initial_period_steps` is the user-defined number of batch
iterations (not epochs as in the paper) to be performed before the first
restart is launched.
Then, we perform the first restart (i=1) by setting the learning rate to
`learning_rate*(m_mul^i)`, where `m_mul in [0,1]` (set to 1 by default).
The i-th restart runs for `t_i=t_0*(t_mul^i)` steps, i.e., every new
restart runs `t_mul` times longer than the previous one.
  Importantly, when one has no access to a validation set, SGDR suggests
  reporting the best expected / recommended solution in the following way:
  while we are within the initial run (i=0), every new solution represents
  SGDR's recommended solution; once i>0, the recommended solution is
  the one obtained at the end of each restart.
  Note that the minimum learning rate is set to 0 for simplicity;
  you can adjust the code to deal with any positive minimum learning rate
  as defined in the paper.
  `initial_period_steps` is the duration of the first period measured in terms
  of the number of minibatch updates. If one wants to use epochs, one should
  compute the number of updates required for an epoch.
For example, assume the following parameters and intention:
Minibatch size: 100
Training dataset size: 10000
If the user wants the first decay period to span across 5 epochs, then
`initial_period_steps` = 5 * 10000/100 = 500
  Now suppose instead that we want to train for 10000 batch iterations with the
  initial learning rate set to 0.1, then restart to run 2 times longer, i.e.,
  for 20000 batch iterations with the initial learning rate 0.05, then restart
  again and again, doubling the runtime of each new period and halving the
  initial learning rate each time.
To accomplish the above, one would write:
```python
...
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.1
learning_rate = sgdr_decay(starter_learning_rate, global_step,
initial_period_steps=10000, t_mul=2, m_mul=0.5)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
# Step | 0 | 1000 | 5000 | 9000 | 9999 | 10000 | 11000 |
# LR | 0.1 | 0.097 | 0.05 | 0.002 | 0.00 | 0.05 | 0.0496 |
# Step | 20000 | 29000 | 29999 | 30000 |
# LR | 0.025 | 0.0003 | 0.00 | 0.025 |
```
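  As a quick sanity check (a hand-worked derivation of one table entry, not
  additional functionality), the rate at step 20000 can be reproduced directly:
  that step lies halfway through the first restart period, which spans steps
  10000..30000 and starts from learning rate 0.1 * 0.5 = 0.05, so
  ```python
  x_val = (20000 - 10000) / 20000        # 0.5
  lr = 0.5 * 0.05 * (1 + cos(0.5 * pi))  # 0.025, matching the table above
  ```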
Args:
learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
Global step to use for the decay computation. Must not be negative.
    initial_period_steps: Duration of the first period measured as the number
      of minibatch updates. If one wants to use epochs, one should compute
      the number of updates required for an epoch.
t_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
Must be positive.
Used to derive the number of iterations in the i-th period:
`initial_period_steps * (t_mul^i)`. Defaults to 2.0.
m_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
Must be positive.
Used to derive the initial learning rate of the i-th period:
      `learning_rate * (m_mul^i)`. Defaults to 1.0.
Returns:
A scalar `Tensor` of the same type as `learning_rate`.
The learning rate for a provided global_step.
Raises:
ValueError: if `global_step` is not supplied.
"""
if global_step is None:
raise ValueError("global_step is required for sgdr_decay.")
with ops.name_scope(name, "SGDRDecay",
[learning_rate, global_step,
initial_period_steps, t_mul, m_mul]) as name:
learning_rate = ops.convert_to_tensor(learning_rate,
name="initial_learning_rate")
dtype = learning_rate.dtype
global_step = math_ops.cast(global_step, dtype)
t_0 = math_ops.cast(initial_period_steps, dtype)
t_mul = math_ops.cast(t_mul, dtype)
m_mul = math_ops.cast(m_mul, dtype)
c_one = math_ops.cast(constant_op.constant(1.0), dtype)
c_half = math_ops.cast(constant_op.constant(0.5), dtype)
c_pi = math_ops.cast(constant_op.constant(math.pi), dtype)
# Find normalized value of the current step
x_val = math_ops.div(global_step, t_0)
def compute_step(x_val, geometric=False):
if geometric:
# Consider geometric series where t_mul != 1
# 1 + t_mul + t_mul^2 ... = (1 - t_mul^i_restart) / (1 - t_mul)
# First find how many restarts were performed for a given x_val
# Find maximal integer i_restart value for which this equation holds
# x_val >= (1 - t_mul^i_restart) / (1 - t_mul)
# x_val * (1 - t_mul) <= (1 - t_mul^i_restart)
# t_mul^i_restart <= (1 - x_val * (1 - t_mul))
# tensorflow allows only log with base e
        # i_restart <= log(1 - x_val * (1 - t_mul)) / log(t_mul)
# Find how many restarts were performed
i_restart = math_ops.floor(
math_ops.log(c_one - x_val * (c_one - t_mul)) / math_ops.log(t_mul))
# Compute the sum of all restarts before the current one
sum_r = (c_one - t_mul ** i_restart) / (c_one - t_mul)
# Compute our position within the current restart
x_val = (x_val - sum_r) / t_mul ** i_restart
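        # Hand-worked check (illustrative only, not used by the code): with
        # t_mul = 2 and x_val = 3.0 (i.e. global_step == 3 * t_0),
        # i_restart = floor(log(1 + 3) / log(2)) = 2,
        # sum_r = (1 - 2**2) / (1 - 2) = 3, and the rescaled
        # x_val = (3 - 3) / 2**2 = 0, i.e. we sit exactly at the start of the
        # third period, whose length is 4 * t_0.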
else:
# Find how many restarts were performed
i_restart = math_ops.floor(x_val)
# Compute our position within the current restart
x_val = x_val - i_restart
return i_restart, x_val
i_restart, x_val = control_flow_ops.cond(
math_ops.equal(t_mul, c_one),
lambda: compute_step(x_val, geometric=False),
lambda: compute_step(x_val, geometric=True))
    # If m_mul < 1, then the initial learning rate of every new restart will be
    # smaller, i.e., reduced by a factor of m_mul ** i_restart at the
    # i_restart-th restart.
m_fac = learning_rate * (m_mul ** i_restart)
return math_ops.multiply(c_half * m_fac,
(math_ops.cos(x_val * c_pi) + c_one), name=name)
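# The commented-out block below is a minimal usage sketch, not part of the
# original module: it assumes a TF 1.x graph/session environment and simply
# re-derives a few entries of the docstring table by feeding explicit steps.
#
#   from tensorflow.python.client import session as session_lib
#   from tensorflow.python.framework import dtypes
#   from tensorflow.python.ops import array_ops
#
#   step = array_ops.placeholder(dtypes.int64, shape=[])
#   lr = sgdr_decay(0.1, step, initial_period_steps=10000, t_mul=2.0, m_mul=0.5)
#   with session_lib.Session() as sess:
#     for s in (0, 10000, 20000, 30000):
#       # Expected: ~0.1, 0.05, 0.025, 0.025 (cf. the docstring table).
#       print(s, sess.run(lr, feed_dict={step: s}))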
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/training/python/training/sgdr_learning_rate_decay.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.training.training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.training.python.training import training
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as variables_lib2
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
# pylint: enable=g-import-not-at-top
def logistic_classifier(inputs):
return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)
def batchnorm_classifier(inputs):
inputs = layers.batch_norm(inputs, decay=0.1, fused=False)
return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)
class ClipGradsTest(test.TestCase):
def testClipGrads(self):
xs = variables_lib2.Variable(0.0)
ys = xs * 4.0
grads = gradients_impl.gradients([ys], [xs])
gradients_to_variables = list(zip(grads, [xs]))
clipped_gradients_to_variables = training.clip_gradient_norms(
gradients_to_variables, 3.0)
with self.cached_session() as session:
session.run(variables_lib2.global_variables_initializer())
self.assertAlmostEqual(4.0, gradients_to_variables[0][0].eval())
self.assertAlmostEqual(3.0, clipped_gradients_to_variables[0][0].eval())
def testClipGradsFn(self):
xs = variables_lib2.Variable(0.0)
ys = xs * 4.0
grads = gradients_impl.gradients([ys], [xs])
gradients_to_variables = list(zip(grads, [xs]))
clipped_gradients_to_variables = training.clip_gradient_norms_fn(3.0)(
gradients_to_variables)
with self.cached_session() as session:
session.run(variables_lib2.global_variables_initializer())
self.assertAlmostEqual(4.0, gradients_to_variables[0][0].eval())
self.assertAlmostEqual(3.0, clipped_gradients_to_variables[0][0].eval())
class CreateTrainOpTest(test.TestCase):
def setUp(self):
np.random.seed(0)
# Create an easy training set:
self._inputs = np.random.rand(16, 4).astype(np.float32)
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
def testTrainOpInCollection(self):
with ops.Graph().as_default():
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
loss = losses.log_loss(tf_labels, tf_predictions)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(loss, optimizer)
# Make sure the training op was recorded in the proper collection
self.assertTrue(train_op in ops.get_collection(ops.GraphKeys.TRAIN_OP))
def testUseUpdateOps(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
expected_mean = np.mean(self._inputs, axis=(0))
expected_var = np.var(self._inputs, axis=(0))
tf_predictions = batchnorm_classifier(tf_inputs)
loss = losses.log_loss(tf_labels, tf_predictions)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(loss, optimizer)
moving_mean = variables_lib.get_variables_by_name('moving_mean')[0]
moving_variance = variables_lib.get_variables_by_name('moving_variance')[
0]
with self.cached_session() as session:
# Initialize all variables
session.run(variables_lib2.global_variables_initializer())
mean, variance = session.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
for _ in range(10):
session.run(train_op)
mean = moving_mean.eval()
variance = moving_variance.eval()
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
def testEmptyUpdateOps(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
loss = losses.log_loss(tf_labels, tf_predictions)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(loss, optimizer, update_ops=[])
moving_mean = variables_lib.get_variables_by_name('moving_mean')[0]
moving_variance = variables_lib.get_variables_by_name('moving_variance')[
0]
with self.cached_session() as session:
# Initialize all variables
session.run(variables_lib2.global_variables_initializer())
mean, variance = session.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
for _ in range(10):
session.run(train_op)
mean = moving_mean.eval()
variance = moving_variance.eval()
# Since we skip update_ops the moving_vars are not updated.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
def testGlobalStepIsIncrementedByDefault(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
loss = losses.log_loss(tf_labels, tf_predictions)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(loss, optimizer)
global_step = variables_lib.get_or_create_global_step()
with self.cached_session() as session:
# Initialize all variables
session.run(variables_lib2.global_variables_initializer())
for _ in range(10):
session.run(train_op)
# After 10 updates global_step should be 10.
self.assertAllClose(global_step.eval(), 10)
def testGlobalStepNotIncrementedWhenSetToNone(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
loss = losses.log_loss(tf_labels, tf_predictions)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(loss, optimizer, global_step=None)
global_step = variables_lib.get_or_create_global_step()
with self.cached_session() as session:
# Initialize all variables
session.run(variables_lib2.global_variables_initializer())
for _ in range(10):
session.run(train_op)
        # Since train_op doesn't use global_step, it shouldn't change.
self.assertAllClose(global_step.eval(), 0)
class TrainBatchNormClassifierTest(test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
losses.log_loss(tf_labels, tf_predictions)
total_loss = losses.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
loss = training.train(
train_op,
None,
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
save_summaries_steps=None,
save_checkpoint_secs=None)
self.assertLess(loss, .1)
class TrainTest(test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testCanAchieveZeroLoss(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
losses.log_loss(tf_labels, tf_predictions)
total_loss = losses.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
loss = training.train(
train_op,
None,
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
save_summaries_steps=None,
save_checkpoint_secs=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainWithLocalVariable(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
local_multiplier = variables_lib.local_variable(1.0)
tf_predictions = logistic_classifier(tf_inputs) * local_multiplier
losses.log_loss(tf_labels, tf_predictions)
total_loss = losses.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
loss = training.train(
train_op,
None,
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
save_summaries_steps=None,
save_checkpoint_secs=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testResumeTrainAchievesRoughlyTheSameLoss(self):
number_of_steps = [300, 1, 5]
logdir = os.path.join(self.get_temp_dir(), 'resume_train_same_loss')
for i in range(len(number_of_steps)):
with ops.Graph().as_default():
random_seed.set_random_seed(i)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
losses.log_loss(tf_labels, tf_predictions)
total_loss = losses.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir,
hooks=[
basic_session_run_hooks.StopAtStepHook(
num_steps=number_of_steps[i]),
basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=50, saver=saver),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
losses.log_loss(tf_labels, tf_predictions)
total_loss = losses.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(
learning_rate=learning_rate)
def transform_grads_fn(grads):
if gradient_multiplier != 1.0:
variables = variables_lib2.trainable_variables()
gradient_multipliers = {var: gradient_multiplier for var in variables}
with ops.name_scope('multiply_grads'):
return training.multiply_gradients(grads, gradient_multipliers)
else:
return grads
return training.create_train_op(
total_loss, optimizer, transform_grads_fn=transform_grads_fn)
def testTrainWithInitFromCheckpoint(self):
logdir1 = os.path.join(self.get_temp_dir(), 'tmp_logs1/')
logdir2 = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
if gfile.Exists(logdir1): # For running on jenkins.
gfile.DeleteRecursively(logdir1)
if gfile.Exists(logdir2): # For running on jenkins.
gfile.DeleteRecursively(logdir2)
# First, train the model one step (make sure the error is high).
with ops.Graph().as_default():
random_seed.set_random_seed(0)
train_op = self.create_train_op()
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir1,
hooks=[
basic_session_run_hooks.CheckpointSaverHook(
logdir1, save_steps=1, saver=saver),
basic_session_run_hooks.StopAtStepHook(num_steps=1),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertGreater(loss, .5)
# Next, train the model to convergence.
with ops.Graph().as_default():
random_seed.set_random_seed(1)
train_op = self.create_train_op()
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir1,
hooks=[
basic_session_run_hooks.CheckpointSaverHook(
logdir1, save_steps=300, saver=saver),
basic_session_run_hooks.StopAtStepHook(num_steps=300),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .02)
# Finally, advance the model a single step and validate that the loss is
# still low.
with ops.Graph().as_default():
random_seed.set_random_seed(2)
train_op = self.create_train_op()
model_variables = variables_lib2.global_variables()
model_path = checkpoint_management.latest_checkpoint(logdir1)
assign_fn = variables_lib.assign_from_checkpoint_fn(
model_path, model_variables)
def init_fn(_, session):
assign_fn(session)
loss = training.train(
train_op,
None,
scaffold=monitored_session.Scaffold(init_fn=init_fn),
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=1)],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .02)
def ModelLoss(self):
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
losses.log_loss(tf_labels, tf_predictions)
return losses.get_total_loss()
def testTrainAllVarsHasLowerLossThanTrainSubsetOfVars(self):
logdir = os.path.join(self.get_temp_dir(), 'tmp_logs3/')
if gfile.Exists(logdir): # For running on jenkins.
gfile.DeleteRecursively(logdir)
# First, train only the weights of the model.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
weights = variables_lib.get_variables_by_name('weights')
train_op = training.create_train_op(
total_loss, optimizer, variables_to_train=weights)
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir,
hooks=[
basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=200, saver=saver),
basic_session_run_hooks.StopAtStepHook(num_steps=200),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertGreater(loss, .015)
self.assertLess(loss, .05)
# Next, train the biases of the model.
with ops.Graph().as_default():
random_seed.set_random_seed(1)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
biases = variables_lib.get_variables_by_name('biases')
train_op = training.create_train_op(
total_loss, optimizer, variables_to_train=biases)
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir,
hooks=[
basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=300, saver=saver),
basic_session_run_hooks.StopAtStepHook(num_steps=300),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertGreater(loss, .015)
self.assertLess(loss, .05)
# Finally, train both weights and bias to get lower loss.
with ops.Graph().as_default():
random_seed.set_random_seed(2)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir,
hooks=[
basic_session_run_hooks.StopAtStepHook(num_steps=400),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainingSubsetsOfVariablesOnlyUpdatesThoseVariables(self):
# First, train only the weights of the model.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
weights, biases = variables_lib.get_variables()
train_op = training.create_train_op(total_loss, optimizer)
train_weights = training.create_train_op(
total_loss, optimizer, variables_to_train=[weights])
train_biases = training.create_train_op(
total_loss, optimizer, variables_to_train=[biases])
with self.cached_session() as session:
# Initialize the variables.
session.run(variables_lib2.global_variables_initializer())
# Get the initial weights and biases values.
weights_values, biases_values = session.run([weights, biases])
self.assertGreater(np.linalg.norm(weights_values), 0)
self.assertAlmostEqual(np.linalg.norm(biases_values), 0)
# Update weights and biases.
loss = session.run(train_op)
self.assertGreater(loss, .5)
new_weights, new_biases = session.run([weights, biases])
# Check that the weights and biases have been updated.
self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)
weights_values, biases_values = new_weights, new_biases
# Update only weights.
loss = session.run(train_weights)
self.assertGreater(loss, .5)
new_weights, new_biases = session.run([weights, biases])
# Check that the weights have been updated, but biases have not.
self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
self.assertAlmostEqual(np.linalg.norm(biases_values - new_biases), 0)
weights_values = new_weights
# Update only biases.
loss = session.run(train_biases)
self.assertGreater(loss, .5)
new_weights, new_biases = session.run([weights, biases])
# Check that the biases have been updated, but weights have not.
self.assertAlmostEqual(np.linalg.norm(weights_values - new_weights), 0)
self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)
def testTrainWithAlteredGradients(self):
# Use the same learning rate but different gradient multipliers
# to train two models. Model with equivalently larger learning
# rate (i.e., learning_rate * gradient_multiplier) has smaller
# training loss.
multipliers = [1., 1000.]
number_of_steps = 10
learning_rate = 0.001
# First, train the model with equivalently smaller learning rate.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
train_op = self.create_train_op(
learning_rate=learning_rate, gradient_multiplier=multipliers[0])
loss0 = training.train(
train_op,
None,
hooks=[
basic_session_run_hooks.StopAtStepHook(num_steps=number_of_steps),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertIsNotNone(loss0)
self.assertGreater(loss0, .5)
# Second, train the model with equivalently larger learning rate.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
train_op = self.create_train_op(
learning_rate=learning_rate, gradient_multiplier=multipliers[1])
loss1 = training.train(
train_op,
None,
hooks=[
basic_session_run_hooks.StopAtStepHook(num_steps=number_of_steps),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertIsNotNone(loss1)
self.assertLess(loss1, .5)
# The loss of the model trained with larger learning rate should
# be smaller.
self.assertGreater(loss0, loss1)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/training/python/training/training_test.py
|