# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layers.feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import itertools
import os
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column as fc
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.python.feature_column import feature_column_lib as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
def _sparse_id_tensor(shape, vocab_size, seed=112123):
# Returns an arbitrary `SparseTensor` with the given shape and vocab size.
np.random.seed(seed)
indices = np.array(list(itertools.product(*[range(s) for s in shape])))
# In order to create some sparsity, we include a value outside the vocab.
values = np.random.randint(0, vocab_size + 1, size=np.prod(shape))
# Remove entries outside the vocabulary.
keep = values < vocab_size
indices = indices[keep]
values = values[keep]
return sparse_tensor_lib.SparseTensor(
indices=indices, values=values, dense_shape=shape)
def _sparse_id_tensor_with_weights(shape, vocab_size, seed=112123):
# Returns an arbitrary `SparseTensor` with the given shape and vocab size.
assert vocab_size >= shape[-1]
np.random.seed(seed)
indices = np.array(list(itertools.product(*[range(s) for s in shape])))
# Values within each row are distinct, sampled from the vocab without
# replacement.
values = np.ndarray.flatten(np.array([
np.random.choice(vocab_size, size=shape[-1], replace=False)
for _ in range(np.prod(shape[:-1]))]))
weights = np.sort(np.random.rand(*shape), axis=len(shape)-1)
# Keep only entries with weight < 0.5 to introduce sparsity.
keep = np.ndarray.flatten(weights < 0.5) # Remove half of them
indices = indices[keep]
values = values[keep]
weights = np.ndarray.flatten(weights)[keep]
return (sparse_tensor_lib.SparseTensor(
indices=indices, values=values, dense_shape=shape),
sparse_tensor_lib.SparseTensor(
indices=indices, values=weights, dense_shape=shape))
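# A minimal illustrative sketch (not part of the original test file; the
# function name below is made up): the helper above returns a pair of
# SparseTensors that share indices and dense_shape, which is the (ids, weights)
# format consumed by the weighted-sparse-column tests below.
def _example_sparse_id_tensor_with_weights_usage():
  ids, weights = _sparse_id_tensor_with_weights(shape=[4, 3], vocab_size=5)
  # Both tensors describe the same [4, 3] grid and keep the same positions.
  return ids.dense_shape, weights.dense_shape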
class FeatureColumnTest(test.TestCase):
def testImmutability(self):
a = fc.sparse_column_with_hash_bucket("aaa", hash_bucket_size=100)
with self.assertRaises(AttributeError):
a.column_name = "bbb"
def testSparseColumnWithHashBucket(self):
a = fc.sparse_column_with_hash_bucket("aaa", hash_bucket_size=100)
self.assertEqual(a.name, "aaa")
self.assertEqual(a.dtype, dtypes.string)
a = fc.sparse_column_with_hash_bucket(
"aaa", hash_bucket_size=100, dtype=dtypes.int64)
self.assertEqual(a.name, "aaa")
self.assertEqual(a.dtype, dtypes.int64)
with self.assertRaisesRegexp(ValueError, "dtype must be string or integer"):
a = fc.sparse_column_with_hash_bucket(
"aaa", hash_bucket_size=100, dtype=dtypes.float32)
def testSparseColumnWithVocabularyFile(self):
b = fc.sparse_column_with_vocabulary_file(
"bbb", vocabulary_file="a_file", vocab_size=454)
self.assertEqual(b.dtype, dtypes.string)
self.assertEqual(b.lookup_config.vocab_size, 454)
self.assertEqual(b.lookup_config.vocabulary_file, "a_file")
with self.assertRaises(ValueError):
# Vocabulary size should be defined if vocabulary_file is used.
fc.sparse_column_with_vocabulary_file("bbb", vocabulary_file="somefile")
b = fc.sparse_column_with_vocabulary_file(
"bbb", vocabulary_file="a_file", vocab_size=454, dtype=dtypes.int64)
self.assertEqual(b.dtype, dtypes.int64)
with self.assertRaisesRegexp(ValueError, "dtype must be string or integer"):
b = fc.sparse_column_with_vocabulary_file(
"bbb", vocabulary_file="a_file", vocab_size=454, dtype=dtypes.float32)
def testWeightedSparseColumn(self):
ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
weighted_ids = fc.weighted_sparse_column(ids, "weights")
self.assertEqual(weighted_ids.name, "ids_weighted_by_weights")
def testWeightedSparseColumnWithVocabularyFile(self):
ids = fc.sparse_column_with_vocabulary_file(
"ids", "a_file", num_oov_buckets=7, vocab_size=3)
weighted_ids = fc.weighted_sparse_column(ids, "weights")
self.assertEqual(weighted_ids.name, "ids_weighted_by_weights")
self.assertEqual(weighted_ids.lookup_config, ids.lookup_config)
self.assertEqual(weighted_ids.lookup_config.vocab_size, 3)
self.assertEqual(weighted_ids.lookup_config.num_oov_buckets, 7)
self.assertEqual(weighted_ids.lookup_config.vocabulary_file, "a_file")
def testWeightedSparseColumnDeepCopy(self):
ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
weighted = fc.weighted_sparse_column(ids, "weights")
weighted_copy = copy.deepcopy(weighted)
self.assertEqual(weighted_copy.sparse_id_column.name, "ids")
self.assertEqual(weighted_copy.weight_column_name, "weights")
self.assertEqual(weighted_copy.name, "ids_weighted_by_weights")
def testEmbeddingColumn(self):
a = fc.sparse_column_with_hash_bucket(
"aaa", hash_bucket_size=100, combiner="sum")
b = fc.embedding_column(a, dimension=4, combiner="mean")
self.assertEqual(b.sparse_id_column.name, "aaa")
self.assertEqual(b.dimension, 4)
self.assertEqual(b.combiner, "mean")
def testEmbeddingColumnDeepCopy(self):
a = fc.sparse_column_with_hash_bucket(
"aaa", hash_bucket_size=100, combiner="sum")
column = fc.embedding_column(a, dimension=4, combiner="mean")
column_copy = copy.deepcopy(column)
self.assertEqual(column_copy.name, "aaa_embedding")
self.assertEqual(column_copy.sparse_id_column.name, "aaa")
self.assertEqual(column_copy.dimension, 4)
self.assertEqual(column_copy.combiner, "mean")
def testScatteredEmbeddingColumn(self):
column = fc.scattered_embedding_column(
"aaa", size=100, dimension=10, hash_key=1)
self.assertEqual(column.column_name, "aaa")
self.assertEqual(column.size, 100)
self.assertEqual(column.dimension, 10)
self.assertEqual(column.hash_key, 1)
self.assertEqual(column.name, "aaa_scattered_embedding")
def testScatteredEmbeddingColumnDeepCopy(self):
column = fc.scattered_embedding_column(
"aaa", size=100, dimension=10, hash_key=1)
column_copy = copy.deepcopy(column)
self.assertEqual(column_copy.column_name, "aaa")
self.assertEqual(column_copy.size, 100)
self.assertEqual(column_copy.dimension, 10)
self.assertEqual(column_copy.hash_key, 1)
self.assertEqual(column_copy.name, "aaa_scattered_embedding")
def testSharedEmbeddingColumn(self):
a1 = fc.sparse_column_with_keys("a1", ["marlo", "omar", "stringer"])
a2 = fc.sparse_column_with_keys("a2", ["marlo", "omar", "stringer"])
b = fc.shared_embedding_columns([a1, a2], dimension=4, combiner="mean")
self.assertEqual(len(b), 2)
self.assertEqual(b[0].shared_embedding_name, "a1_a2_shared_embedding")
self.assertEqual(b[1].shared_embedding_name, "a1_a2_shared_embedding")
# Create a sparse id tensor for a1.
input_tensor_c1 = sparse_tensor_lib.SparseTensor(
indices=[[0, 0], [1, 1], [2, 2]], values=[0, 1, 2], dense_shape=[3, 3])
# Create a sparse id tensor for a2.
input_tensor_c2 = sparse_tensor_lib.SparseTensor(
indices=[[0, 0], [1, 1], [2, 2]], values=[0, 1, 2], dense_shape=[3, 3])
with variable_scope.variable_scope("run_1"):
b1 = feature_column_ops.input_from_feature_columns({
b[0]: input_tensor_c1
}, [b[0]])
b2 = feature_column_ops.input_from_feature_columns({
b[1]: input_tensor_c2
}, [b[1]])
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
b1_value = b1.eval()
b2_value = b2.eval()
for i in range(len(b1_value)):
self.assertAllClose(b1_value[i], b2_value[i])
# Test the case when a shared_embedding_name is explicitly specified.
d = fc.shared_embedding_columns(
[a1, a2],
dimension=4,
combiner="mean",
shared_embedding_name="my_shared_embedding")
# a3 is a completely different sparse column from a1 and a2, but since the
# same shared_embedding_name is passed in, a3 will share the same embedding
# as a1 and a2.
a3 = fc.sparse_column_with_keys("a3", [42, 1, -1000], dtype=dtypes.int32)
e = fc.shared_embedding_columns(
[a3],
dimension=4,
combiner="mean",
shared_embedding_name="my_shared_embedding")
with variable_scope.variable_scope("run_2"):
d1 = feature_column_ops.input_from_feature_columns({
d[0]: input_tensor_c1
}, [d[0]])
e1 = feature_column_ops.input_from_feature_columns({
e[0]: input_tensor_c1
}, [e[0]])
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
d1_value = d1.eval()
e1_value = e1.eval()
for i in range(len(d1_value)):
self.assertAllClose(d1_value[i], e1_value[i])
def testSharedEmbeddingColumnWithWeightedSparseColumn(self):
# Tests creation of shared embeddings containing weighted sparse columns.
sparse_col = fc.sparse_column_with_keys("a1", ["marlo", "omar", "stringer"])
ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
weighted_sparse_col = fc.weighted_sparse_column(ids, "weights")
self.assertEqual(weighted_sparse_col.name, "ids_weighted_by_weights")
b = fc.shared_embedding_columns(
[sparse_col, weighted_sparse_col], dimension=4, combiner="mean")
self.assertEqual(len(b), 2)
self.assertEqual(b[0].shared_embedding_name,
"a1_ids_weighted_by_weights_shared_embedding")
self.assertEqual(b[1].shared_embedding_name,
"a1_ids_weighted_by_weights_shared_embedding")
# Tries reversing order to check compatibility condition.
b = fc.shared_embedding_columns(
[weighted_sparse_col, sparse_col], dimension=4, combiner="mean")
self.assertEqual(len(b), 2)
self.assertEqual(b[0].shared_embedding_name,
"a1_ids_weighted_by_weights_shared_embedding")
self.assertEqual(b[1].shared_embedding_name,
"a1_ids_weighted_by_weights_shared_embedding")
# Tries adding two weighted columns to check compatibility between them.
weighted_sparse_col_2 = fc.weighted_sparse_column(ids, "weights_2")
b = fc.shared_embedding_columns(
[weighted_sparse_col, weighted_sparse_col_2],
dimension=4,
combiner="mean")
self.assertEqual(len(b), 2)
self.assertEqual(
b[0].shared_embedding_name,
"ids_weighted_by_weights_ids_weighted_by_weights_2_shared_embedding")
self.assertEqual(
b[1].shared_embedding_name,
"ids_weighted_by_weights_ids_weighted_by_weights_2_shared_embedding")
def testSharedEmbeddingColumnDeterminism(self):
# Tests determinism in auto-generated shared_embedding_name.
sparse_id_columns = tuple([
fc.sparse_column_with_keys(k, ["foo", "bar"])
for k in ["07", "02", "00", "03", "05", "01", "09", "06", "04", "08"]
])
output = fc.shared_embedding_columns(
sparse_id_columns, dimension=2, combiner="mean")
self.assertEqual(len(output), 10)
for x in output:
self.assertEqual(x.shared_embedding_name,
"00_01_02_plus_7_others_shared_embedding")
def testSharedEmbeddingColumnErrors(self):
# Tries passing in a string.
with self.assertRaises(TypeError):
invalid_string = "Invalid string."
fc.shared_embedding_columns(invalid_string, dimension=2, combiner="mean")
# Tries passing in a set of sparse columns.
with self.assertRaises(TypeError):
invalid_set = set([
fc.sparse_column_with_keys("a", ["foo", "bar"]),
fc.sparse_column_with_keys("b", ["foo", "bar"]),
])
fc.shared_embedding_columns(invalid_set, dimension=2, combiner="mean")
def testSharedEmbeddingColumnDeepCopy(self):
a1 = fc.sparse_column_with_keys("a1", ["marlo", "omar", "stringer"])
a2 = fc.sparse_column_with_keys("a2", ["marlo", "omar", "stringer"])
columns = fc.shared_embedding_columns(
[a1, a2], dimension=4, combiner="mean")
columns_copy = copy.deepcopy(columns)
self.assertEqual(columns_copy[0].shared_embedding_name,
"a1_a2_shared_embedding")
self.assertEqual(columns_copy[1].shared_embedding_name,
"a1_a2_shared_embedding")
def testOneHotColumn(self):
a = fc.sparse_column_with_keys("a", ["a", "b", "c", "d"])
onehot_a = fc.one_hot_column(a)
self.assertEqual(onehot_a.sparse_id_column.name, "a")
self.assertEqual(onehot_a.length, 4)
b = fc.sparse_column_with_hash_bucket(
"b", hash_bucket_size=100, combiner="sum")
onehot_b = fc.one_hot_column(b)
self.assertEqual(onehot_b.sparse_id_column.name, "b")
self.assertEqual(onehot_b.length, 100)
def testOneHotReshaping(self):
"""Tests reshaping behavior of `OneHotColumn`."""
id_tensor_shape = [3, 2, 4, 5]
sparse_column = fc.sparse_column_with_keys(
"animals", ["squirrel", "moose", "dragon", "octopus"])
one_hot = fc.one_hot_column(sparse_column)
vocab_size = len(sparse_column.lookup_config.keys)
id_tensor = _sparse_id_tensor(id_tensor_shape, vocab_size)
for output_rank in range(1, len(id_tensor_shape) + 1):
with variable_scope.variable_scope("output_rank_{}".format(output_rank)):
one_hot_output = one_hot._to_dnn_input_layer(
id_tensor, output_rank=output_rank)
with self.cached_session() as sess:
one_hot_value = sess.run(one_hot_output)
expected_shape = (id_tensor_shape[:output_rank - 1] + [vocab_size])
self.assertEquals(expected_shape, list(one_hot_value.shape))
def testOneHotColumnForWeightedSparseColumn(self):
ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
weighted_ids = fc.weighted_sparse_column(ids, "weights")
one_hot = fc.one_hot_column(weighted_ids)
self.assertEqual(one_hot.sparse_id_column.name, "ids_weighted_by_weights")
self.assertEqual(one_hot.length, 3)
def testIntegerizedOneHotColumnForWeightedSparseColumn(self):
vocab_size = 5
ids = fc.sparse_column_with_integerized_feature("ids", vocab_size)
weighted_ids = fc.weighted_sparse_column(ids, "weights")
one_hot = fc.one_hot_column(weighted_ids)
self.assertEqual(one_hot.sparse_id_column.name, "ids_weighted_by_weights")
self.assertEqual(one_hot.length, vocab_size)
def testIntegerizedOneHotWeightedSparseColumnShape(self):
vocab_size = 5
for id_tensor_shape in [[4, 3], [2, 4], [3, 3, 3]]:
output_rank = len(id_tensor_shape)
a = fc.sparse_column_with_integerized_feature("a", vocab_size)
weighted = fc.weighted_sparse_column(a, "weights")
one_hot = fc.one_hot_column(weighted)
id_tensor, weight_tensor = _sparse_id_tensor_with_weights(
id_tensor_shape, vocab_size)
one_hot_output = one_hot._to_dnn_input_layer(
(id_tensor, weight_tensor),
output_rank=output_rank)
one_hot_output_shape = one_hot_output.get_shape().as_list()
expected_shape = id_tensor_shape[:-1] + [vocab_size]
self.assertEquals(expected_shape, one_hot_output_shape)
with self.cached_session() as sess:
one_hot_value = sess.run(one_hot_output)
self.assertEquals(expected_shape, list(one_hot_value.shape))
def testOneHotColumnWithSparseColumnWithHashKeys(self):
input_values = ["marlo", "unknown", "omar"]
inputs = constant_op.constant(input_values)
hash_keys = [[10, 20], [20, 30]]
hash_column = fc.sparse_column_with_hash_bucket(
column_name="ids", hash_bucket_size=10, hash_keys=hash_keys)
columns_to_tensors = {}
columns_to_tensors["ids"] = inputs
hash_column.insert_transformed_feature(columns_to_tensors)
self.assertEqual(len(columns_to_tensors), 2)
self.assertTrue(hash_column in columns_to_tensors)
one_hot_column = fc.one_hot_column(hash_column)
one_hot_output = one_hot_column._to_dnn_input_layer(
columns_to_tensors[hash_column])
expected = np.array([[0., 1., 0., 0., 0., 0., 0., 1., 0.,
0.], [0., 1., 0., 0., 0., 0., 0., 0., 0., 1.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 1.]])
with self.cached_session() as sess:
one_hot_value = sess.run(one_hot_output)
self.assertTrue(np.array_equal(one_hot_value, expected))
def testSparseColumnWithHashKeysWithUnexpectedHashKeys(self):
with self.assertRaisesRegexp(ValueError,
"hash_keys must be a non-empty list."):
fc.sparse_column_with_hash_bucket(
column_name="ids", hash_bucket_size=100, hash_keys=[])
with self.assertRaisesRegexp(ValueError,
"hash_keys must be a non-empty list."):
fc.sparse_column_with_hash_bucket(
column_name="ids", hash_bucket_size=100, hash_keys=1)
with self.assertRaisesRegexp(
ValueError, "Each element of hash_keys must be a pair of integers."):
fc.sparse_column_with_hash_bucket(
column_name="ids", hash_bucket_size=100, hash_keys=[1, 2])
with self.assertRaisesRegexp(
ValueError, "Each element of hash_keys must be a pair of integers."):
fc.sparse_column_with_hash_bucket(
column_name="ids", hash_bucket_size=100, hash_keys=["key"])
with self.assertRaisesRegexp(
ValueError, "Each element of hash_keys must be a pair of integers."):
fc.sparse_column_with_hash_bucket(
column_name="ids", hash_bucket_size=100, hash_keys=[[1, 2.0]])
def testMissingValueInOneHotColumnForWeightedSparseColumn(self):
# GitHub issue 12583
ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
weighted_ids = fc.weighted_sparse_column(ids, "weights")
one_hot = fc.one_hot_column(weighted_ids)
features = {
"ids": constant_op.constant([["marlo", "unknown", "omar"]]),
"weights": constant_op.constant([[2., 4., 6.]])
}
one_hot_tensor = feature_column_ops.input_from_feature_columns(
features, [one_hot])
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
sess.run(lookup_ops.tables_initializer())
self.assertAllEqual([[2., 6., 0.]], one_hot_tensor.eval())
def testMissingValueInOneHotColumnForSparseColumnWithKeys(self):
ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
one_hot = fc.one_hot_column(ids)
features = {"ids": constant_op.constant([["marlo", "unknown", "omar"]])}
one_hot_tensor = feature_column_ops.input_from_feature_columns(
features, [one_hot])
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
sess.run(lookup_ops.tables_initializer())
self.assertAllEqual([[1., 1., 0.]], one_hot_tensor.eval())
def testOneHotColumnDeepCopy(self):
a = fc.sparse_column_with_keys("a", ["a", "b", "c", "d"])
column = fc.one_hot_column(a)
column_copy = copy.deepcopy(column)
self.assertEqual(column_copy.sparse_id_column.name, "a")
self.assertEqual(column.name, "a_one_hot")
self.assertEqual(column.length, 4)
def testRealValuedVarLenColumn(self):
c = fc._real_valued_var_len_column("ccc", is_sparse=True)
self.assertTrue(c.is_sparse)
self.assertTrue(c.default_value is None)
# default_value is an integer.
c5 = fc._real_valued_var_len_column("c5", default_value=2)
self.assertEqual(c5.default_value, 2)
# No default_value; is_sparse is True.
d4 = fc._real_valued_var_len_column("d4", is_sparse=True)
self.assertEqual(d4.default_value, None)
self.assertEqual(d4.is_sparse, True)
# A list default_value is not allowed for a variable-length column.
with self.assertRaisesRegexp(ValueError, "Only scalar default value.*"):
fc._real_valued_var_len_column("g5", default_value=[2., 3.])
def testRealValuedVarLenColumnDtypes(self):
rvc = fc._real_valued_var_len_column("rvc", is_sparse=True)
self.assertDictEqual(
{
"rvc": parsing_ops.VarLenFeature(dtype=dtypes.float32)
}, rvc.config)
rvc = fc._real_valued_var_len_column(
"rvc", default_value=0, is_sparse=False)
self.assertDictEqual({
"rvc":
parsing_ops.FixedLenSequenceFeature(
shape=[],
dtype=dtypes.float32,
allow_missing=True,
default_value=0.0)
}, rvc.config)
rvc = fc._real_valued_var_len_column(
"rvc", dtype=dtypes.int32, default_value=0, is_sparse=True)
self.assertDictEqual(
{
"rvc": parsing_ops.VarLenFeature(dtype=dtypes.int32)
}, rvc.config)
with self.assertRaisesRegexp(TypeError,
"dtype must be convertible to float"):
fc._real_valued_var_len_column(
"rvc", dtype=dtypes.string, default_value="", is_sparse=True)
def testRealValuedColumn(self):
a = fc.real_valued_column("aaa")
self.assertEqual(a.name, "aaa")
self.assertEqual(a.dimension, 1)
b = fc.real_valued_column("bbb", 10)
self.assertEqual(b.dimension, 10)
self.assertTrue(b.default_value is None)
with self.assertRaisesRegexp(TypeError, "dimension must be an integer"):
fc.real_valued_column("d3", dimension=1.0)
with self.assertRaisesRegexp(ValueError,
"dimension must be greater than 0"):
fc.real_valued_column("d3", dimension=0)
with self.assertRaisesRegexp(ValueError,
"dtype must be convertible to float"):
fc.real_valued_column("d3", dtype=dtypes.string)
# default_value is an integer.
c1 = fc.real_valued_column("c1", default_value=2)
self.assertListEqual(list(c1.default_value), [2.])
c2 = fc.real_valued_column("c2", default_value=2, dtype=dtypes.int32)
self.assertListEqual(list(c2.default_value), [2])
c3 = fc.real_valued_column("c3", dimension=4, default_value=2)
self.assertListEqual(list(c3.default_value), [2, 2, 2, 2])
c4 = fc.real_valued_column(
"c4", dimension=4, default_value=2, dtype=dtypes.int32)
self.assertListEqual(list(c4.default_value), [2, 2, 2, 2])
# default_value is a float.
d1 = fc.real_valued_column("d1", default_value=2.)
self.assertListEqual(list(d1.default_value), [2.])
d2 = fc.real_valued_column("d2", dimension=4, default_value=2.)
self.assertListEqual(list(d2.default_value), [2., 2., 2., 2.])
with self.assertRaisesRegexp(TypeError,
"default_value must be compatible with dtype"):
fc.real_valued_column("d3", default_value=2., dtype=dtypes.int32)
# default_value is neither integer nor float.
with self.assertRaisesRegexp(TypeError,
"default_value must be compatible with dtype"):
fc.real_valued_column("e1", default_value="string")
with self.assertRaisesRegexp(TypeError,
"default_value must be compatible with dtype"):
fc.real_valued_column("e1", dimension=3, default_value=[1, 3., "string"])
# default_value is a list of integers.
f1 = fc.real_valued_column("f1", default_value=[2])
self.assertListEqual(list(f1.default_value), [2])
f2 = fc.real_valued_column("f2", dimension=3, default_value=[2, 2, 2])
self.assertListEqual(list(f2.default_value), [2., 2., 2.])
f3 = fc.real_valued_column(
"f3", dimension=3, default_value=[2, 2, 2], dtype=dtypes.int32)
self.assertListEqual(list(f3.default_value), [2, 2, 2])
# default_value is a list of floats.
g1 = fc.real_valued_column("g1", default_value=[2.])
self.assertListEqual(list(g1.default_value), [2.])
g2 = fc.real_valued_column("g2", dimension=3, default_value=[2., 2, 2])
self.assertListEqual(list(g2.default_value), [2., 2., 2.])
with self.assertRaisesRegexp(TypeError,
"default_value must be compatible with dtype"):
fc.real_valued_column("g3", default_value=[2.], dtype=dtypes.int32)
with self.assertRaisesRegexp(
ValueError, "The length of default_value must be equal to dimension"):
fc.real_valued_column("g4", dimension=3, default_value=[2.])
# Test that the normalizer_fn gets stored for a real_valued_column
normalizer = lambda x: x - 1
h1 = fc.real_valued_column("h1", normalizer=normalizer)
self.assertEqual(normalizer(10), h1.normalizer_fn(10))
# Test that normalizer is not stored within key
self.assertFalse("normalizer" in g1.key)
self.assertFalse("normalizer" in g2.key)
self.assertFalse("normalizer" in h1.key)
def testRealValuedColumnReshaping(self):
"""Tests reshaping behavior of `RealValuedColumn`."""
batch_size = 4
sequence_length = 8
dimensions = [3, 4, 5]
np.random.seed(2222)
input_shape = [batch_size, sequence_length] + dimensions
real_valued_input = np.random.rand(*input_shape)
real_valued_column = fc.real_valued_column("values")
for output_rank in range(1, 3 + len(dimensions)):
with variable_scope.variable_scope("output_rank_{}".format(output_rank)):
real_valued_output = real_valued_column._to_dnn_input_layer(
constant_op.constant(real_valued_input, dtype=dtypes.float32),
output_rank=output_rank)
with self.cached_session() as sess:
real_valued_eval = sess.run(real_valued_output)
expected_shape = (
input_shape[:output_rank - 1] +
[np.prod(input_shape[output_rank - 1:])])
self.assertEquals(expected_shape, list(real_valued_eval.shape))
def testRealValuedColumnDensification(self):
"""Tests densification behavior of `RealValuedColumn`."""
# No default value, dimension 1 float.
real_valued_column = fc._real_valued_var_len_column(
"sparse_real_valued1", is_sparse=True)
sparse_tensor = sparse_tensor_lib.SparseTensor(
values=[2.0, 5.0], indices=[[0, 0], [2, 0]], dense_shape=[3, 1])
with self.assertRaisesRegexp(ValueError, "Set is_sparse to False"):
real_valued_column._to_dnn_input_layer(sparse_tensor)
def testRealValuedColumnDeepCopy(self):
column = fc.real_valued_column(
"aaa", dimension=3, default_value=[1, 2, 3], dtype=dtypes.int32)
column_copy = copy.deepcopy(column)
self.assertEqual(column_copy.name, "aaa")
self.assertEqual(column_copy.dimension, 3)
self.assertEqual(column_copy.default_value, (1, 2, 3))
def testBucketizedColumnNameEndsWithUnderscoreBucketized(self):
a = fc.bucketized_column(fc.real_valued_column("aaa"), [0, 4])
self.assertEqual(a.name, "aaa_bucketized")
def testBucketizedColumnRequiresRealValuedColumn(self):
with self.assertRaisesRegexp(
TypeError, "source_column must be an instance of _RealValuedColumn"):
fc.bucketized_column("bbb", [0])
with self.assertRaisesRegexp(
TypeError, "source_column must be an instance of _RealValuedColumn"):
fc.bucketized_column(
fc.sparse_column_with_integerized_feature(
column_name="bbb", bucket_size=10), [0])
def testBucketizedColumnRequiresRealValuedColumnDimension(self):
with self.assertRaisesRegexp(
TypeError, "source_column must be an instance of _RealValuedColumn.*"):
fc.bucketized_column(
fc._real_valued_var_len_column("bbb", is_sparse=True), [0])
def testBucketizedColumnRequiresSortedBuckets(self):
with self.assertRaisesRegexp(ValueError,
"boundaries must be a sorted list"):
fc.bucketized_column(fc.real_valued_column("ccc"), [5, 0, 4])
def testBucketizedColumnWithSameBucketBoundaries(self):
a_bucketized = fc.bucketized_column(
fc.real_valued_column("a"), [1., 2., 2., 3., 3.])
self.assertEqual(a_bucketized.name, "a_bucketized")
self.assertTupleEqual(a_bucketized.boundaries, (1., 2., 3.))
def testBucketizedColumnDeepCopy(self):
"""Tests that we can do a deepcopy of a bucketized column.
This test requires that the bucketized column also accept boundaries
as tuples.
"""
bucketized = fc.bucketized_column(
fc.real_valued_column("a"), [1., 2., 2., 3., 3.])
self.assertEqual(bucketized.name, "a_bucketized")
self.assertTupleEqual(bucketized.boundaries, (1., 2., 3.))
bucketized_copy = copy.deepcopy(bucketized)
self.assertEqual(bucketized_copy.name, "a_bucketized")
self.assertTupleEqual(bucketized_copy.boundaries, (1., 2., 3.))
def testCrossedColumnNameCreatesSortedNames(self):
a = fc.sparse_column_with_hash_bucket("aaa", hash_bucket_size=100)
b = fc.sparse_column_with_hash_bucket("bbb", hash_bucket_size=100)
bucket = fc.bucketized_column(fc.real_valued_column("cost"), [0, 4])
crossed = fc.crossed_column(set([b, bucket, a]), hash_bucket_size=10000)
self.assertEqual("aaa_X_bbb_X_cost_bucketized", crossed.name,
"name should be generated by sorted column names")
self.assertEqual("aaa", crossed.columns[0].name)
self.assertEqual("bbb", crossed.columns[1].name)
self.assertEqual("cost_bucketized", crossed.columns[2].name)
def testCrossedColumnNotSupportRealValuedColumn(self):
b = fc.sparse_column_with_hash_bucket("bbb", hash_bucket_size=100)
with self.assertRaisesRegexp(
TypeError, "columns must be a set of _SparseColumn, _CrossedColumn, "
"or _BucketizedColumn instances"):
fc.crossed_column(
set([b, fc.real_valued_column("real")]), hash_bucket_size=10000)
def testCrossedColumnDeepCopy(self):
a = fc.sparse_column_with_hash_bucket("aaa", hash_bucket_size=100)
b = fc.sparse_column_with_hash_bucket("bbb", hash_bucket_size=100)
bucket = fc.bucketized_column(fc.real_valued_column("cost"), [0, 4])
crossed = fc.crossed_column(set([b, bucket, a]), hash_bucket_size=10000)
crossed_copy = copy.deepcopy(crossed)
self.assertEqual("aaa_X_bbb_X_cost_bucketized", crossed_copy.name,
"name should be generated by sorted column names")
self.assertEqual("aaa", crossed_copy.columns[0].name)
self.assertEqual("bbb", crossed_copy.columns[1].name)
self.assertEqual("cost_bucketized", crossed_copy.columns[2].name)
def testFloat32WeightedSparseInt32ColumnDtypes(self):
ids = fc.sparse_column_with_keys("ids", [42, 1, -1000], dtype=dtypes.int32)
weighted_ids = fc.weighted_sparse_column(ids, "weights")
self.assertDictEqual({
"ids": parsing_ops.VarLenFeature(dtypes.int32),
"weights": parsing_ops.VarLenFeature(dtypes.float32)
}, weighted_ids.config)
def testFloat32WeightedSparseStringColumnDtypes(self):
ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
weighted_ids = fc.weighted_sparse_column(ids, "weights")
self.assertDictEqual({
"ids": parsing_ops.VarLenFeature(dtypes.string),
"weights": parsing_ops.VarLenFeature(dtypes.float32)
}, weighted_ids.config)
def testInt32WeightedSparseStringColumnDtypes(self):
ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
weighted_ids = fc.weighted_sparse_column(ids, "weights", dtype=dtypes.int32)
self.assertDictEqual({
"ids": parsing_ops.VarLenFeature(dtypes.string),
"weights": parsing_ops.VarLenFeature(dtypes.int32)
}, weighted_ids.config)
with self.assertRaisesRegexp(ValueError,
"dtype is not convertible to float"):
weighted_ids = fc.weighted_sparse_column(
ids, "weights", dtype=dtypes.string)
def testInt32WeightedSparseInt64ColumnDtypes(self):
ids = fc.sparse_column_with_keys("ids", [42, 1, -1000], dtype=dtypes.int64)
weighted_ids = fc.weighted_sparse_column(ids, "weights", dtype=dtypes.int32)
self.assertDictEqual({
"ids": parsing_ops.VarLenFeature(dtypes.int64),
"weights": parsing_ops.VarLenFeature(dtypes.int32)
}, weighted_ids.config)
with self.assertRaisesRegexp(ValueError,
"dtype is not convertible to float"):
weighted_ids = fc.weighted_sparse_column(
ids, "weights", dtype=dtypes.string)
def testRealValuedColumnDtypes(self):
rvc = fc.real_valued_column("rvc")
self.assertDictEqual({
"rvc": parsing_ops.FixedLenFeature([1], dtype=dtypes.float32)
}, rvc.config)
rvc = fc.real_valued_column("rvc", dtype=dtypes.int32)
self.assertDictEqual({
"rvc": parsing_ops.FixedLenFeature([1], dtype=dtypes.int32)
}, rvc.config)
with self.assertRaisesRegexp(ValueError,
"dtype must be convertible to float"):
fc.real_valued_column("rvc", dtype=dtypes.string)
def testSparseColumnDtypes(self):
sc = fc.sparse_column_with_integerized_feature("sc", 10)
self.assertDictEqual(
{
"sc": parsing_ops.VarLenFeature(dtype=dtypes.int64)
}, sc.config)
sc = fc.sparse_column_with_integerized_feature("sc", 10, dtype=dtypes.int32)
self.assertDictEqual(
{
"sc": parsing_ops.VarLenFeature(dtype=dtypes.int32)
}, sc.config)
with self.assertRaisesRegexp(ValueError, "dtype must be an integer"):
fc.sparse_column_with_integerized_feature("sc", 10, dtype=dtypes.float32)
def testSparseColumnSingleBucket(self):
sc = fc.sparse_column_with_integerized_feature("sc", 1)
self.assertDictEqual(
{
"sc": parsing_ops.VarLenFeature(dtype=dtypes.int64)
}, sc.config)
self.assertEqual(1, sc._wide_embedding_lookup_arguments(None).vocab_size)
def testSparseColumnAcceptsDenseScalar(self):
"""Tests that `SparseColumn`s accept dense scalar inputs."""
batch_size = 4
dense_scalar_input = [1, 2, 3, 4]
sparse_column = fc.sparse_column_with_integerized_feature("values", 10)
features = {
"values": constant_op.constant(dense_scalar_input, dtype=dtypes.int64)
}
sparse_column.insert_transformed_feature(features)
sparse_output = features[sparse_column]
expected_shape = [batch_size, 1]
with self.cached_session() as sess:
sparse_result = sess.run(sparse_output)
self.assertEquals(expected_shape, list(sparse_result.dense_shape))
def testSparseColumnIntegerizedDeepCopy(self):
"""Tests deepcopy of sparse_column_with_integerized_feature."""
column = fc.sparse_column_with_integerized_feature("a", 10)
self.assertEqual("a", column.name)
column_copy = copy.deepcopy(column)
self.assertEqual("a", column_copy.name)
self.assertEqual(10, column_copy.bucket_size)
self.assertTrue(column_copy.is_integerized)
def testSparseColumnHashBucketDeepCopy(self):
"""Tests deepcopy of sparse_column_with_hash_bucket."""
column = fc.sparse_column_with_hash_bucket("a", 10)
self.assertEqual("a", column.name)
column_copy = copy.deepcopy(column)
self.assertEqual("a", column_copy.name)
self.assertEqual(10, column_copy.bucket_size)
self.assertFalse(column_copy.is_integerized)
def testSparseColumnKeysDeepCopy(self):
"""Tests deepcopy of sparse_column_with_keys."""
column = fc.sparse_column_with_keys("a", keys=["key0", "key1", "key2"])
self.assertEqual("a", column.name)
column_copy = copy.deepcopy(column)
self.assertEqual("a", column_copy.name)
self.assertEqual(
fc._SparseIdLookupConfig( # pylint: disable=protected-access
keys=("key0", "key1", "key2"),
vocab_size=3,
default_value=-1),
column_copy.lookup_config)
self.assertFalse(column_copy.is_integerized)
def testSparseColumnVocabularyDeepCopy(self):
"""Tests deepcopy of sparse_column_with_vocabulary_file."""
column = fc.sparse_column_with_vocabulary_file(
"a", vocabulary_file="path_to_file", vocab_size=3)
self.assertEqual("a", column.name)
column_copy = copy.deepcopy(column)
self.assertEqual("a", column_copy.name)
self.assertEqual(
fc._SparseIdLookupConfig( # pylint: disable=protected-access
vocabulary_file="path_to_file",
num_oov_buckets=0,
vocab_size=3,
default_value=-1),
column_copy.lookup_config)
self.assertFalse(column_copy.is_integerized)
def testCreateFeatureSpec(self):
sparse_col = fc.sparse_column_with_hash_bucket(
"sparse_column", hash_bucket_size=100)
embedding_col = fc.embedding_column(
fc.sparse_column_with_hash_bucket(
"sparse_column_for_embedding", hash_bucket_size=10),
dimension=4)
str_sparse_id_col = fc.sparse_column_with_keys(
"str_id_column", ["marlo", "omar", "stringer"])
int32_sparse_id_col = fc.sparse_column_with_keys(
"int32_id_column", [42, 1, -1000], dtype=dtypes.int32)
int64_sparse_id_col = fc.sparse_column_with_keys(
"int64_id_column", [42, 1, -1000], dtype=dtypes.int64)
weighted_id_col = fc.weighted_sparse_column(str_sparse_id_col,
"str_id_weights_column")
real_valued_col1 = fc.real_valued_column("real_valued_column1")
real_valued_col2 = fc.real_valued_column("real_valued_column2", 5)
bucketized_col1 = fc.bucketized_column(
fc.real_valued_column("real_valued_column_for_bucketization1"), [0, 4])
bucketized_col2 = fc.bucketized_column(
fc.real_valued_column("real_valued_column_for_bucketization2", 4),
[0, 4])
a = fc.sparse_column_with_hash_bucket("cross_aaa", hash_bucket_size=100)
b = fc.sparse_column_with_hash_bucket("cross_bbb", hash_bucket_size=100)
cross_col = fc.crossed_column(set([a, b]), hash_bucket_size=10000)
one_hot_col = fc.one_hot_column(
fc.sparse_column_with_hash_bucket(
"sparse_column_for_one_hot", hash_bucket_size=100))
scattered_embedding_col = fc.scattered_embedding_column(
"scattered_embedding_column", size=100, dimension=10, hash_key=1)
feature_columns = set([
sparse_col, embedding_col, weighted_id_col, int32_sparse_id_col,
int64_sparse_id_col, real_valued_col1, real_valued_col2,
bucketized_col1, bucketized_col2, cross_col, one_hot_col,
scattered_embedding_col
])
expected_config = {
"sparse_column":
parsing_ops.VarLenFeature(dtypes.string),
"sparse_column_for_embedding":
parsing_ops.VarLenFeature(dtypes.string),
"str_id_column":
parsing_ops.VarLenFeature(dtypes.string),
"int32_id_column":
parsing_ops.VarLenFeature(dtypes.int32),
"int64_id_column":
parsing_ops.VarLenFeature(dtypes.int64),
"str_id_weights_column":
parsing_ops.VarLenFeature(dtypes.float32),
"real_valued_column1":
parsing_ops.FixedLenFeature([1], dtype=dtypes.float32),
"real_valued_column2":
parsing_ops.FixedLenFeature([5], dtype=dtypes.float32),
"real_valued_column_for_bucketization1":
parsing_ops.FixedLenFeature([1], dtype=dtypes.float32),
"real_valued_column_for_bucketization2":
parsing_ops.FixedLenFeature([4], dtype=dtypes.float32),
"cross_aaa":
parsing_ops.VarLenFeature(dtypes.string),
"cross_bbb":
parsing_ops.VarLenFeature(dtypes.string),
"sparse_column_for_one_hot":
parsing_ops.VarLenFeature(dtypes.string),
"scattered_embedding_column":
parsing_ops.VarLenFeature(dtypes.string),
}
config = fc.create_feature_spec_for_parsing(feature_columns)
self.assertDictEqual(expected_config, config)
# Tests that contrib feature columns work with core library:
config_core = fc_core.make_parse_example_spec(feature_columns)
self.assertDictEqual(expected_config, config_core)
# Test that the same config is parsed out if we pass a dictionary.
feature_columns_dict = {
str(i): val
for i, val in enumerate(feature_columns)
}
config = fc.create_feature_spec_for_parsing(feature_columns_dict)
self.assertDictEqual(expected_config, config)
def testCreateFeatureSpec_ExperimentalColumns(self):
real_valued_col0 = fc._real_valued_var_len_column(
"real_valued_column0", is_sparse=True)
real_valued_col1 = fc._real_valued_var_len_column(
"real_valued_column1",
dtype=dtypes.int64,
default_value=0,
is_sparse=False)
feature_columns = set([real_valued_col0, real_valued_col1])
expected_config = {
"real_valued_column0":
parsing_ops.VarLenFeature(dtype=dtypes.float32),
"real_valued_column1":
parsing_ops.FixedLenSequenceFeature(
[], dtype=dtypes.int64, allow_missing=True, default_value=0),
}
config = fc.create_feature_spec_for_parsing(feature_columns)
self.assertDictEqual(expected_config, config)
def testCreateFeatureSpec_RealValuedColumnWithDefaultValue(self):
real_valued_col1 = fc.real_valued_column(
"real_valued_column1", default_value=2)
real_valued_col2 = fc.real_valued_column(
"real_valued_column2", 5, default_value=4)
real_valued_col3 = fc.real_valued_column(
"real_valued_column3", default_value=[8])
real_valued_col4 = fc.real_valued_column(
"real_valued_column4", 3, default_value=[1, 0, 6])
real_valued_col5 = fc._real_valued_var_len_column(
"real_valued_column5", default_value=2, is_sparse=True)
real_valued_col6 = fc._real_valued_var_len_column(
"real_valued_column6",
dtype=dtypes.int64,
default_value=1,
is_sparse=False)
feature_columns = [
real_valued_col1, real_valued_col2, real_valued_col3, real_valued_col4,
real_valued_col5, real_valued_col6
]
config = fc.create_feature_spec_for_parsing(feature_columns)
self.assertEqual(6, len(config))
self.assertDictEqual(
{
"real_valued_column1":
parsing_ops.FixedLenFeature(
[1], dtype=dtypes.float32, default_value=[2.]),
"real_valued_column2":
parsing_ops.FixedLenFeature(
[5],
dtype=dtypes.float32,
default_value=[4., 4., 4., 4., 4.]),
"real_valued_column3":
parsing_ops.FixedLenFeature(
[1], dtype=dtypes.float32, default_value=[8.]),
"real_valued_column4":
parsing_ops.FixedLenFeature(
[3], dtype=dtypes.float32, default_value=[1., 0., 6.]),
"real_valued_column5":
parsing_ops.VarLenFeature(dtype=dtypes.float32),
"real_valued_column6":
parsing_ops.FixedLenSequenceFeature(
[], dtype=dtypes.int64, allow_missing=True, default_value=1)
},
config)
def testCreateSequenceFeatureSpec(self):
sparse_col = fc.sparse_column_with_hash_bucket(
"sparse_column", hash_bucket_size=100)
embedding_col = fc.embedding_column(
fc.sparse_column_with_hash_bucket(
"sparse_column_for_embedding", hash_bucket_size=10),
dimension=4)
sparse_id_col = fc.sparse_column_with_keys("id_column",
["marlo", "omar", "stringer"])
weighted_id_col = fc.weighted_sparse_column(sparse_id_col,
"id_weights_column")
real_valued_col1 = fc.real_valued_column("real_valued_column", dimension=2)
real_valued_col2 = fc.real_valued_column(
"real_valued_default_column", dimension=5, default_value=3.0)
real_valued_col3 = fc._real_valued_var_len_column(
"real_valued_var_len_column", default_value=3.0, is_sparse=True)
real_valued_col4 = fc._real_valued_var_len_column(
"real_valued_var_len_dense_column", default_value=4.0, is_sparse=False)
feature_columns = set([
sparse_col, embedding_col, weighted_id_col, real_valued_col1,
real_valued_col2, real_valued_col3, real_valued_col4
])
feature_spec = fc._create_sequence_feature_spec_for_parsing(feature_columns)
expected_feature_spec = {
"sparse_column":
parsing_ops.VarLenFeature(dtypes.string),
"sparse_column_for_embedding":
parsing_ops.VarLenFeature(dtypes.string),
"id_column":
parsing_ops.VarLenFeature(dtypes.string),
"id_weights_column":
parsing_ops.VarLenFeature(dtypes.float32),
"real_valued_column":
parsing_ops.FixedLenSequenceFeature(
shape=[2], dtype=dtypes.float32, allow_missing=False),
"real_valued_default_column":
parsing_ops.FixedLenSequenceFeature(
shape=[5], dtype=dtypes.float32, allow_missing=True),
"real_valued_var_len_column":
parsing_ops.VarLenFeature(dtype=dtypes.float32),
"real_valued_var_len_dense_column":
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.float32, allow_missing=True),
}
self.assertDictEqual(expected_feature_spec, feature_spec)
def testMakePlaceHolderTensorsForBaseFeatures(self):
sparse_col = fc.sparse_column_with_hash_bucket(
"sparse_column", hash_bucket_size=100)
real_valued_col = fc.real_valued_column("real_valued_column", 5)
vlen_real_valued_col = fc._real_valued_var_len_column(
"vlen_real_valued_column", is_sparse=True)
bucketized_col = fc.bucketized_column(
fc.real_valued_column("real_valued_column_for_bucketization"), [0, 4])
feature_columns = set(
[sparse_col, real_valued_col, vlen_real_valued_col, bucketized_col])
placeholders = (
fc.make_place_holder_tensors_for_base_features(feature_columns))
self.assertEqual(4, len(placeholders))
self.assertTrue(
isinstance(placeholders["sparse_column"],
sparse_tensor_lib.SparseTensor))
self.assertTrue(
isinstance(placeholders["vlen_real_valued_column"],
sparse_tensor_lib.SparseTensor))
placeholder = placeholders["real_valued_column"]
self.assertGreaterEqual(
placeholder.name.find(u"Placeholder_real_valued_column"), 0)
self.assertEqual(dtypes.float32, placeholder.dtype)
self.assertEqual([None, 5], placeholder.get_shape().as_list())
placeholder = placeholders["real_valued_column_for_bucketization"]
self.assertGreaterEqual(
placeholder.name.find(
u"Placeholder_real_valued_column_for_bucketization"), 0)
self.assertEqual(dtypes.float32, placeholder.dtype)
self.assertEqual([None, 1], placeholder.get_shape().as_list())
def testInitEmbeddingColumnWeightsFromCkpt(self):
sparse_col = fc.sparse_column_with_hash_bucket(
column_name="object_in_image", hash_bucket_size=4)
# Create _EmbeddingColumn which randomly initializes embedding of size
# [4, 16].
embedding_col = fc.embedding_column(sparse_col, dimension=16)
# Creating a SparseTensor which has all the ids possible for the given
# vocab.
input_tensor = sparse_tensor_lib.SparseTensor(
indices=[[0, 0], [1, 1], [2, 2], [3, 3]],
values=[0, 1, 2, 3],
dense_shape=[4, 4])
# Invoking 'layers.input_from_feature_columns' will create the embedding
# variable. Creating it under scope 'run_1' prevents name conflicts with the
# embedding variable created later for 'embedding_col_initialized'.
with variable_scope.variable_scope("run_1"):
with variable_scope.variable_scope(embedding_col.name):
# This will return a [4, 16] tensor which is the same as the embedding
# variable.
embeddings = feature_column_ops.input_from_feature_columns({
embedding_col: input_tensor
}, [embedding_col])
save = saver.Saver()
ckpt_dir_prefix = os.path.join(self.get_temp_dir(),
"init_embedding_col_w_from_ckpt")
ckpt_dir = tempfile.mkdtemp(prefix=ckpt_dir_prefix)
checkpoint_path = os.path.join(ckpt_dir, "model.ckpt")
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
saved_embedding = embeddings.eval()
save.save(sess, checkpoint_path)
embedding_col_initialized = fc.embedding_column(
sparse_id_column=sparse_col,
dimension=16,
ckpt_to_load_from=checkpoint_path,
tensor_name_in_ckpt=("run_1/object_in_image_embedding/"
"input_from_feature_columns/object"
"_in_image_embedding/weights"))
with variable_scope.variable_scope("run_2"):
# This will initialize the embedding from the provided checkpoint and return a
# [4, 16] tensor which is the same as the embedding variable. Since we didn't
# modify the embeddings, this should be the same as 'saved_embedding'.
pretrained_embeddings = feature_column_ops.input_from_feature_columns({
embedding_col_initialized: input_tensor
}, [embedding_col_initialized])
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
loaded_embedding = pretrained_embeddings.eval()
self.assertAllClose(saved_embedding, loaded_embedding)
def testInitCrossedColumnWeightsFromCkpt(self):
sparse_col_1 = fc.sparse_column_with_hash_bucket(
column_name="col_1", hash_bucket_size=4)
sparse_col_2 = fc.sparse_column_with_keys(
column_name="col_2", keys=("foo", "bar", "baz"))
sparse_col_3 = fc.sparse_column_with_keys(
column_name="col_3", keys=(42, 1, -1000), dtype=dtypes.int64)
crossed_col = fc.crossed_column(
columns=[sparse_col_1, sparse_col_2, sparse_col_3], hash_bucket_size=4)
input_tensor = sparse_tensor_lib.SparseTensor(
indices=[[0, 0], [1, 1], [2, 2], [3, 3]],
values=[0, 1, 2, 3],
dense_shape=[4, 4])
# Invoking 'weighted_sum_from_feature_columns' will create the crossed
# column weights variable.
with variable_scope.variable_scope("run_1"):
with variable_scope.variable_scope(crossed_col.name):
# Returns the looked-up column weights, which are the same as the crossed
# column weights, as well as actual references to the weights variables.
_, col_weights, _ = (
feature_column_ops.weighted_sum_from_feature_columns({
sparse_col_1.name: input_tensor,
sparse_col_2.name: input_tensor,
sparse_col_3.name: input_tensor
}, [crossed_col], 1))
# Update the weights since default initializer initializes all weights
# to 0.0.
for weight in col_weights.values():
assign_op = state_ops.assign(weight[0], weight[0] + 0.5)
save = saver.Saver()
ckpt_dir_prefix = os.path.join(self.get_temp_dir(),
"init_crossed_col_w_from_ckpt")
ckpt_dir = tempfile.mkdtemp(prefix=ckpt_dir_prefix)
checkpoint_path = os.path.join(ckpt_dir, "model.ckpt")
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
sess.run(assign_op)
saved_col_weights = col_weights[crossed_col][0].eval()
save.save(sess, checkpoint_path)
crossed_col_initialized = fc.crossed_column(
columns=[sparse_col_1, sparse_col_2],
hash_bucket_size=4,
ckpt_to_load_from=checkpoint_path,
tensor_name_in_ckpt=("run_1/col_1_X_col_2_X_col_3/"
"weighted_sum_from_feature_columns/"
"col_1_X_col_2_X_col_3/weights"))
with variable_scope.variable_scope("run_2"):
# This will initialize the crossed column weights from the provided checkpoint
# and return a [4, 1] tensor which is the same as the weights variable. Since
# we won't modify the weights, this should be the same as 'saved_col_weights'.
_, col_weights, _ = (
feature_column_ops.weighted_sum_from_feature_columns({
sparse_col_1.name: input_tensor,
sparse_col_2.name: input_tensor
}, [crossed_col_initialized], 1))
col_weights_from_ckpt = col_weights[crossed_col_initialized][0]
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
loaded_col_weights = col_weights_from_ckpt.eval()
self.assertAllClose(saved_col_weights, loaded_col_weights)
if __name__ == "__main__":
test.main()
# ==============================================================================
# End of tensorflow/contrib/layers/python/layers/feature_column_test.py
# (repo: tensorflow-master)
# ==============================================================================
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Embedding functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
__all__ = [
"safe_embedding_lookup_sparse", "scattered_embedding_lookup",
"scattered_embedding_lookup_sparse", "embedding_lookup_unique",
"embedding_lookup_sparse_with_distributed_aggregation"
]
def safe_embedding_lookup_sparse(embedding_weights,
sparse_ids,
sparse_weights=None,
combiner=None,
default_id=None,
name=None,
partition_strategy="div",
max_norm=None):
"""Lookup embedding results, accounting for invalid IDs and empty features.
The partitioned embedding tensors in `embedding_weights` must all have the same
shape except for the first dimension, which may vary because the vocabulary
size is not necessarily a multiple of `P`. `embedding_weights`
may be a `PartitionedVariable` as returned by using
`tf.compat.v1.get_variable()` with a
partitioner.
Invalid IDs (< 0) are pruned from the input IDs and weights, as are any IDs
with a non-positive weight. For an entry with no features, the embedding vector
for `default_id` is returned, or the 0-vector if `default_id` is not supplied.
The ids and weights may be multi-dimensional. Embeddings are always aggregated
along the last dimension.
Args:
embedding_weights: A list of `P` float tensors or values representing
partitioned embedding tensors. Alternatively, a `PartitionedVariable`,
created by partitioning along dimension 0. The total unpartitioned shape
should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the vocab size
and `e_1, ..., e_m` are the embedding dimensions.
sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
ids. `d_0` is typically batch size.
sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
float weights corresponding to `sparse_ids`, or `None` if all weights are
assumed to be 1.0.
combiner: A string specifying how to combine embedding results for each
entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean" the
default.
default_id: The id to use for an entry with no features.
name: A name for this operation (optional).
partition_strategy: A string specifying the partitioning strategy. Currently
`"div"` and `"mod"` are supported. Default is `"div"`.
max_norm: If not None, all embeddings are l2-normalized to max_norm before
combining.
Returns:
Dense tensor of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`.
Raises:
ValueError: if `embedding_weights` is empty.
"""
if combiner is None:
logging.warn("The default value of combiner will change from \"mean\" "
"to \"sqrtn\" after 2016/11/01.")
combiner = "mean"
if embedding_weights is None:
raise ValueError("Missing embedding_weights %s." % embedding_weights)
if isinstance(embedding_weights, variables.PartitionedVariable):
embedding_weights = list(embedding_weights) # get underlying Variables.
if not isinstance(embedding_weights, list):
embedding_weights = [embedding_weights]
if len(embedding_weights) < 1:
raise ValueError("Missing embedding_weights %s." % embedding_weights)
dtype = sparse_weights.dtype if sparse_weights is not None else None
if isinstance(embedding_weights, variables.PartitionedVariable):
embedding_weights = list(embedding_weights)
embedding_weights = [
ops.convert_to_tensor(w, dtype=dtype) for w in embedding_weights
]
contrib_tensor_util.assert_same_float_dtype(embedding_weights +
[sparse_weights])
with ops.name_scope(name, "embedding_lookup", embedding_weights +
[sparse_ids, sparse_weights]) as scope:
# Reshape higher-rank sparse ids and weights to linear segment ids.
original_shape = sparse_ids.dense_shape
original_rank_dim = tensor_shape.Dimension(
tensor_shape.dimension_value(sparse_ids.dense_shape.get_shape()[0]))
original_rank = (
array_ops.size(original_shape)
if original_rank_dim.value is None else original_rank_dim.value)
sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [
math_ops.reduce_prod(
array_ops.slice(original_shape, [0], [original_rank - 1])),
array_ops.gather(original_shape, original_rank - 1)
])
if sparse_weights is not None:
sparse_weights = sparse_tensor.SparseTensor(sparse_ids.indices,
sparse_weights.values,
sparse_ids.dense_shape)
# Prune invalid ids and weights.
sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights)
if combiner != "sum":
sparse_ids, sparse_weights = _prune_invalid_weights(
sparse_ids, sparse_weights)
# Fill in dummy values for empty features, if necessary.
sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(
sparse_ids, default_id or 0)
if sparse_weights is not None:
sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0)
result = embedding_ops.embedding_lookup_sparse(
embedding_weights,
sparse_ids,
sparse_weights,
combiner=combiner,
partition_strategy=partition_strategy,
name=None if default_id is None else scope,
max_norm=max_norm)
if default_id is None:
# Broadcast is_row_empty to the same shape as embedding_lookup_result,
# for use in Select.
is_row_empty = array_ops.tile(
array_ops.reshape(is_row_empty, [-1, 1]),
array_ops.stack([1, array_ops.shape(result)[1]]))
result = array_ops.where(
is_row_empty, array_ops.zeros_like(result), result, name=scope)
# Reshape from linear ids back into the higher-dimensional dense result.
final_result = array_ops.reshape(
result,
array_ops.concat([
array_ops.slice(
math_ops.cast(original_shape, dtypes.int32), [0],
[original_rank - 1]),
array_ops.slice(array_ops.shape(result), [1], [-1])
], 0))
final_result.set_shape(
tensor_shape.unknown_shape(
(original_rank_dim - 1).value).concatenate(result.get_shape()[1:]))
return final_result
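# A minimal usage sketch (an assumption, not part of the original module; the
# function name is illustrative only): embeds a batch of two rows with a single
# unpartitioned [3, 2] weight matrix. Row 0 averages the embeddings of ids 0
# and 2; row 1 contains no ids, so with no default_id it falls back to the
# zero vector.
def _example_safe_embedding_lookup_sparse():
  embedding_weights = [
      constant_op.constant([[1., 2.], [3., 4.], [5., 6.]], dtype=dtypes.float32)
  ]
  sparse_ids = sparse_tensor.SparseTensor(
      indices=[[0, 0], [0, 1]],
      values=constant_op.constant([0, 2], dtype=dtypes.int64),
      dense_shape=[2, 2])
  # Evaluating the result yields [[3., 4.], [0., 0.]].
  return safe_embedding_lookup_sparse(
      embedding_weights, sparse_ids, combiner="mean")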
def _prune_invalid_ids(sparse_ids, sparse_weights):
"""Prune invalid IDs (< 0) from the input ids and weights."""
is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)
if sparse_weights is not None:
is_id_valid = math_ops.logical_and(
is_id_valid,
array_ops.ones_like(sparse_weights.values, dtype=dtypes.bool))
sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)
if sparse_weights is not None:
sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)
return sparse_ids, sparse_weights
def _prune_invalid_weights(sparse_ids, sparse_weights):
"""Prune invalid weights (< 0) from the input ids and weights."""
if sparse_weights is not None:
is_weights_valid = math_ops.greater(sparse_weights.values, 0)
sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_weights_valid)
sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_weights_valid)
return sparse_ids, sparse_weights
def scattered_embedding_lookup(params,
values,
dimension,
name=None,
hash_key=None):
"""Looks up embeddings using parameter hashing for each value in `values`.
The i-th embedding component of a value v in `values` is found by retrieving
the weight whose index is a fingerprint of the pair (v,i).
The concept is explored as "feature hashing" for model compression in this
paper: http://arxiv.org/pdf/1504.04788.pdf
Feature hashing has the pleasant effect of allowing us to compute an embedding
without needing a pre-determined vocabulary, relieving some amount of process
complexity. It also allows us to maintain embeddings for possibly
trillions of features with a fixed amount of memory.
Note that this is superior to out-of-vocabulary shared "hash buckets" in that
the embedding is extremely likely to be unique for each token as opposed to
being shared across probably-colliding tokens. The price is that we must
compute a hash once for each scalar in the token's embedding as opposed to
once per token.
If `params` is a list, it represents a partition of the embedding parameters.
Each tensor in the list should have the same length, except for the first ones
which may have an additional element. For instance, 10 parameters can be
partitioned into 4 tensors with lengths `[3, 3, 2, 2]`.
Args:
params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`. Each
tensor must be of rank 1 with fully-defined shape.
values: `Tensor` of values to be embedded with shape `[d0, ..., dn]`.
dimension: Embedding dimension.
name: An optional name for this op.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the fingerprints of the crossed features in
SparseFeatureCrossOp
(optional).
Returns:
A `Tensor` with shape `[d0, ..., dn, dimension]`.
Raises:
ValueError: if dimension is not positive or the partition size is invalid.
"""
if dimension is None:
raise ValueError("You must specify dimension.")
return _sampled_scattered_embedding_lookup(
params,
values,
dimension=dimension,
sampled_candidates=None,
hash_key=hash_key,
name=name)
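# Illustrative usage sketch for `scattered_embedding_lookup` (a minimal
# example; the variable names and sizes below are hypothetical, not part of
# this module):
#
#   hash_params = tf.get_variable("hash_params", shape=[100000])  # rank 1
#   tokens = tf.constant(["apple", "banana", "cherry"])
#   embedded = scattered_embedding_lookup(hash_params, tokens, dimension=8)
#   # `embedded` has shape [3, 8]; component i of token v is taken from
#   # hash_params at a fingerprint of the pair (v, i), so no vocabulary or
#   # hash-bucket table is needed.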
def _sampled_scattered_embedding_lookup(params,
values,
dimension=None,
sampled_candidates=None,
hash_key=None,
name=None):
"""Looks up embeddings using parameter hashing for each value in `values`.
This method looks up selected embedding dimensions if `sampled_candidates` is
given, otherwise looks up all dimensions.
The i-th embedding component of a value v in `values` is found by retrieving
the weight whose index is a fingerprint of the pair (v,i).
The concept is explored as "feature hashing" for model compression in this
paper: http://arxiv.org/pdf/1504.04788.pdf
  Feature hashing has the pleasant property of letting us compute an embedding
  without needing a pre-determined vocabulary, which removes some preprocessing
  complexity. It also allows us to maintain embeddings for possibly trillions
  of features with a fixed amount of memory.
Note that this is superior to out-of-vocabulary shared "hash buckets" in that
the embedding is extremely likely to be unique for each token as opposed to
being shared across probably-colliding tokens. The price is that we must
compute a hash once for each scalar in the token's embedding as opposed to
once per token.
If `params` is a list, it represents a partition of the embedding parameters.
  Each tensor in the list should have the same length, except for the first
  ones, which may have one additional element. For instance, 10 parameters can
  be partitioned into 4 tensors with lengths `[3, 3, 2, 2]`.
Args:
params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`. Each
tensor must be of rank 1 with fully-defined shape.
values: `Tensor` of values to be embedded with shape `[d0, ..., dn]`.
dimension: Embedding dimension. The user must specify either `dimension` or
`sampled_candidates`.
sampled_candidates: An optional `Tensor` of slice indices to keep along the
final dimension with shape `[d0, ..., dn, N]`. If given, `dimension` is
ignored. If `None`, looks up all candidates.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp
(optional).
name: An optional name for this op.
Returns:
A `Tensor` with shape `[d0, ..., dn, dimension]`.
    If `sampled_candidates` is given, the output shape is `[d0, ..., dn, N]`
    instead.
Raises:
ValueError: if dimension is not positive or the partition size is invalid.
"""
if isinstance(params, variables.PartitionedVariable):
params = list(params)
if not isinstance(params, list):
params = [params]
with ops.name_scope(name, "scattered_embedding_lookup",
params + [dimension, values]):
# Flatten the values
values_shape = array_ops.shape(values)
values = array_ops.reshape(values, [-1, 1])
if sampled_candidates is None:
if dimension is None:
raise ValueError(
"You must specify either dimension or sampled_candidates.")
if dimension <= 0:
raise ValueError("Dimension must be >0. Given is %d" % dimension)
sampled_candidates = array_ops.tile(
array_ops.expand_dims(math_ops.range(0, dimension), 0),
array_ops.shape(values))
else:
dimension = array_ops.shape(sampled_candidates)[math_ops.subtract(
array_ops.rank(sampled_candidates), 1)]
sampled_candidates_shape = array_ops.shape(sampled_candidates)
dimension_tensor = array_ops.reshape(
dimension, shape=[
1,
])
expected_shape = array_ops.concat([values_shape, dimension_tensor], 0)
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.reduce_all(
math_ops.equal(sampled_candidates_shape, expected_shape)),
[
"The shape of sampled_candidates: ", sampled_candidates_shape,
" does not match the shape of values: ", values_shape
])
]):
# Flatten sampled_candidates, same way as values are flattened.
sampled_candidates = array_ops.reshape(sampled_candidates,
[-1, dimension])
num_partitions = len(params)
partition_sizes = []
for p in range(num_partitions):
shape = params[p].get_shape()
shape.assert_has_rank(1)
shape.assert_is_fully_defined()
partition_sizes.append(tensor_shape.dimension_value(shape[0]))
num_params = sum(partition_sizes) # Total number of parameters.
# Assert the size of each partition.
for p in range(num_partitions):
expected_size = (num_params - p - 1) // num_partitions + 1
if partition_sizes[p] != expected_size:
raise ValueError("Tensor %d in params has size %d, expected %d." %
(p, partition_sizes[p], expected_size))
# With two values v1 and v2 and 3 dimensions, we will cross
# [[0, 1, 2], [0, 1, 2]] with [[v1], [v2]].
tensors_to_cross = [sampled_candidates, values]
ids = sparse_feature_cross_op.sparse_feature_cross(
tensors_to_cross,
hashed_output=True,
num_buckets=num_params,
hash_key=hash_key)
ids = sparse_ops.sparse_tensor_to_dense(ids)
# No need to validate the indices since we have checked the params
# dimensions and we know the largest id.
result = embedding_ops.embedding_lookup(
params, ids, partition_strategy="div")
return array_ops.reshape(result,
array_ops.concat([values_shape, [dimension]], 0))
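# Worked example of the partition-size check above (illustrative): with
# num_params = 10 and num_partitions = 4, the expected per-partition sizes are
#   p=0: (10 - 0 - 1) // 4 + 1 = 3
#   p=1: (10 - 1 - 1) // 4 + 1 = 3
#   p=2: (10 - 2 - 1) // 4 + 1 = 2
#   p=3: (10 - 3 - 1) // 4 + 1 = 2
# which matches the `[3, 3, 2, 2]` partitioning described in the docstring.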
def scattered_embedding_lookup_sparse(params,
sparse_values,
dimension,
combiner=None,
default_value=None,
name=None,
hash_key=None):
"""Looks up embeddings of a sparse feature using parameter hashing.
See `tf.contrib.layers.scattered_embedding_lookup` for embedding with hashing.
Args:
params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`. Each
tensor must be of rank 1 with fully-defined shape.
sparse_values: A 2-D `SparseTensor` containing the values to be embedded.
Some rows may be empty.
    dimension: Embedding dimension.
combiner: A string specifying how to combine embedding results for each
entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean" the
default.
default_value: The value to use for an entry with no features.
name: An optional name for this op.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp
(optional).
Returns:
    A dense tensor with shape `[N, dimension]`, where `N` is the number of rows
    in `sparse_values`.
Raises:
TypeError: If sparse_values is not a SparseTensor.
ValueError: If combiner is not one of {"mean", "sqrtn", "sum"}.
"""
if combiner is None:
logging.warn("The default value of combiner will change from \"mean\" "
"to \"sqrtn\" after 2016/11/01.")
combiner = "mean"
if isinstance(params, variables.PartitionedVariable):
params = list(params)
if not isinstance(params, list):
params = [params]
if not isinstance(sparse_values, sparse_tensor.SparseTensor):
raise TypeError("sparse_values must be SparseTensor")
with ops.name_scope(name, "scattered_embedding_lookup_sparse",
params + [sparse_values]) as scope:
# Fill in the empty rows.
if default_value is None:
# Random default values to reduce the risk of collision.
if sparse_values.dtype == dtypes.string:
default_value = "6ZxWzWOHxZ"
else:
default_value = 1288896567
sparse_values, _ = sparse_ops.sparse_fill_empty_rows(
sparse_values, default_value)
segment_ids = sparse_values.indices[:, 0]
if segment_ids.dtype != dtypes.int32:
segment_ids = math_ops.cast(segment_ids, dtypes.int32)
values = sparse_values.values
values, idx = array_ops.unique(values)
embeddings = scattered_embedding_lookup(
params, values, dimension, hash_key=hash_key)
if combiner == "sum":
embeddings = math_ops.sparse_segment_sum(
embeddings, idx, segment_ids, name=scope)
elif combiner == "mean":
embeddings = math_ops.sparse_segment_mean(
embeddings, idx, segment_ids, name=scope)
elif combiner == "sqrtn":
embeddings = math_ops.sparse_segment_sqrt_n(
embeddings, idx, segment_ids, name=scope)
else:
raise ValueError("Combiner must be one of 'mean', 'sqrtn' or 'sum'.")
return embeddings
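# Illustrative usage sketch for `scattered_embedding_lookup_sparse`
# (hypothetical names and shapes):
#
#   hash_params = tf.get_variable("hash_params", shape=[100000])
#   sp_tokens = tf.SparseTensor(indices=[[0, 0], [1, 0], [1, 1]],
#                               values=["a", "b", "c"], dense_shape=[2, 2])
#   row_embeddings = scattered_embedding_lookup_sparse(
#       hash_params, sp_tokens, dimension=8, combiner="mean")
#   # `row_embeddings` has shape [2, 8]: one combined (mean) embedding per
#   # row of `sp_tokens`; empty rows would be filled with a default value.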
def embedding_lookup_unique(params, ids, partition_strategy="mod", name=None):
"""Version of embedding_lookup that avoids duplicate lookups.
This can save communication in the case of repeated ids.
  Same interface as `embedding_lookup`, except that it supports
  multi-dimensional `ids`, which avoids having to reshape the input/output to
  fit `gather`.
Args:
params: A list of tensors with the same shape and type, or a
`PartitionedVariable`. Shape `[index, d1, d2, ...]`.
ids: A one-dimensional `Tensor` with type `int32` or `int64` containing the
ids to be looked up in `params`. Shape `[ids1, ids2, ...]`.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
is `"mod"`.
name: A name for this operation (optional).
Returns:
A `Tensor` with the same type as the tensors in `params` and dimension of
`[ids1, ids2, d1, d2, ...]`.
Raises:
ValueError: If `params` is empty.
"""
with ops.name_scope(name, "EmbeddingLookupUnique", [params, ids]):
ids = ops.convert_to_tensor(ids)
shape = array_ops.shape(ids)
ids_flat = array_ops.reshape(ids,
math_ops.reduce_prod(shape, keepdims=True))
unique_ids, idx = array_ops.unique(ids_flat)
unique_embeddings = embedding_ops.embedding_lookup(params, unique_ids,
partition_strategy)
embeds_flat = array_ops.gather(unique_embeddings, idx)
embed_shape = array_ops.concat(
[shape, array_ops.shape(unique_embeddings)[1:]], 0)
embeds = array_ops.reshape(embeds_flat, embed_shape)
embeds.set_shape(ids.get_shape().concatenate(
unique_embeddings.get_shape()[1:]))
return embeds
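# Illustrative usage sketch for `embedding_lookup_unique` (hypothetical names):
#
#   table = tf.get_variable("table", shape=[1000, 16])
#   ids = tf.constant([[3, 7], [7, 3]])   # repeated ids, rank-2 input
#   embedded = embedding_lookup_unique(table, ids)
#   # `embedded` has shape [2, 2, 16]. Rows 3 and 7 of `table` are fetched
#   # only once each and then gathered back into the original id positions,
#   # saving communication when ids repeat.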
def _sampled_scattered_embedding_lookup_sparse(params,
sp_values,
dimension=None,
sampled_candidates=None,
hash_key=None,
with_sign_hash=False,
name=None):
"""Looks up embeddings using parameter hashing for sparse values.
This method looks up selected embedding dimensions if `sampled_candidates` is
given, otherwise looks up all dimensions.
The i-th embedding component of a value v in `values` is found by retrieving
the weight whose index is a fingerprint of the pair (v,i).
The concept is explored as "feature hashing" for model compression in this
paper: http://arxiv.org/pdf/1504.04788.pdf
This is logically equivalent to:
* Transforming `sp_values` (which has shape `[d0, d1]`) into a one-hot
`Tensor` of shape `[d0, N]`.
* Multiplying with a `Tensor` `h` of shape `[N, dimension]`, where
`h(i, j) = params[hash(i, j)]`.
Args:
params: A float `Tensor` with rank 1 and fully-defined shape.
sp_values: A 2D `SparseTensor` to be embedded with shape `[d0, d1]`.
dimension: An int `Tensor` of the final dimension. The user needs to provide
either `dimension` or `sampled_candidates`.
sampled_candidates: An optional `Tensor` of column indices to keep along the
final dimension with shape `[d0, N]`. If given, `dimension` is ignored. If
`None`, looks up all candidates.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp
(optional).
with_sign_hash: A `bool` indicating whether `h(i, j)` should be multiplied
by `+1` or `-1`, where the value selected is determined by hashing `(i,
j)`. This is often necessary to remove bias resulting from hash
collisions.
name: An optional name for this op.
Returns:
A `Tensor` of shape `[d0, dimension]`.
If `sampled_candidates` is given, the output shape is `[d0, N]`.
Raises:
TypeError: If sp_values is not `SparseTensor`.
ValueError: If both `dimension` and `sampled_candidates` are `None`.
"""
if not isinstance(sp_values, sparse_tensor.SparseTensor):
raise TypeError("sp_values must be SparseTensor")
with ops.name_scope(
name=name,
default_name="sampled_scattered_embedding_lookup_sparse",
values=[sp_values, params, dimension, sampled_candidates]) as name_scope:
segment_ids = sp_values.indices[:, 0]
if sampled_candidates is not None:
# Tile sampled_candidates so there is one line corresponding to each
# element in sp_values.values
sampled_candidates = array_ops.gather(sampled_candidates, segment_ids)
embeddings = _sampled_scattered_embedding_lookup(
params,
sp_values.values,
dimension=dimension,
sampled_candidates=sampled_candidates,
hash_key=hash_key,
name="values_lookup")
if with_sign_hash:
signs = _sampled_scattered_embedding_lookup(
array_ops.constant([-1., 1.]),
sp_values.values,
dimension=dimension,
sampled_candidates=sampled_candidates,
hash_key=hash_key,
name="signs_lookup")
embeddings = math_ops.multiply(signs, embeddings, name="signs_hash")
if segment_ids.dtype != dtypes.int32:
segment_ids = math_ops.cast(segment_ids, dtypes.int32)
num_segments = array_ops.shape(sp_values)[0]
return math_ops.unsorted_segment_sum(
embeddings, segment_ids, num_segments=num_segments, name=name_scope)
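# Illustrative sketch of the logical equivalence stated in the docstring above
# (hypothetical names; none of these tensors are materialized by the code):
#
#   multi_hot = ...   # dense [d0, N] indicator built from `sp_values`
#   h = ...           # [N, dimension] with h[i, j] = params[hash(i, j)]
#   result = tf.matmul(multi_hot, h)   # [d0, dimension]
#
# The implementation computes the same result row by row via
# `unsorted_segment_sum`, and `with_sign_hash=True` additionally multiplies
# each h[i, j] by a hashed +/-1 sign to reduce bias from hash collisions.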
def embedding_lookup_sparse_with_distributed_aggregation(
params,
sp_ids,
sp_weights,
partition_strategy="mod",
name=None,
combiner=None,
max_norm=None):
"""Computes embeddings for the given ids and weights.
Embeddings belonging to same param are aggregated on that device first. This
op is intended to decrease data transmission and improve parallelism. See
`tf.nn.embedding_lookup_sparse` for the functionality and example of this op.
Args:
params: A single tensor representing the complete embedding tensor, or a
list of P tensors all of same shape except for the first dimension,
representing sharded embedding tensors. Alternatively, a
`PartitionedVariable`, created by partitioning along dimension 0. Each
element must be appropriately sized for the given `partition_strategy`.
sp_ids: N x M SparseTensor of int64 ids (typically from FeatureValueToId),
where N is typically batch size and M is arbitrary.
sp_weights: either a SparseTensor of float / double weights, or None to
indicate all weights should be taken to be 1. If specified, sp_weights
must have exactly the same shape and indices as sp_ids.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
is `"mod"`. See `tf.nn.embedding_lookup` for more details.
name: Optional name for the op.
combiner: A string specifying the reduction op. Currently "mean", "sqrtn"
and "sum" are supported. "sum" computes the weighted sum of the embedding
results for each row. "mean" is the weighted sum divided by the total
weight. "sqrtn" is the weighted sum divided by the square root of the sum
of the squares of the weights.
max_norm: If not None, each embedding is normalized to have l2 norm equal to
max_norm before combining.
Returns:
A dense tensor representing the combined embeddings for the
sparse ids. For each row in the dense tensor represented by sp_ids, the op
looks up the embeddings for all ids in that row, multiplies them by the
corresponding weight, and combines these embeddings as specified.
Raises:
TypeError: If sp_ids is not a SparseTensor, or if sp_weights is neither
None nor SparseTensor.
ValueError: If combiner is not one of {"mean", "sqrtn", "sum"}.
"""
if combiner is None:
logging.warn("The default value of combiner will change from \"mean\" "
"to \"sqrtn\" after 2016/11/01.")
combiner = "mean"
if combiner not in ("mean", "sqrtn", "sum"):
raise ValueError("combiner must be one of 'mean', 'sqrtn' or 'sum'")
if isinstance(params, variables.PartitionedVariable):
params = list(params) # Iterate to get the underlying Variables.
if not isinstance(params, list):
params = [params]
if not isinstance(sp_ids, sparse_tensor.SparseTensor):
raise TypeError("sp_ids must be SparseTensor")
ignore_weights = sp_weights is None
if not ignore_weights:
if not isinstance(sp_weights, sparse_tensor.SparseTensor):
raise TypeError("sp_weights must be either None or SparseTensor")
sp_ids.values.get_shape().assert_is_compatible_with(
sp_weights.values.get_shape())
sp_ids.indices.get_shape().assert_is_compatible_with(
sp_weights.indices.get_shape())
sp_ids.dense_shape.get_shape().assert_is_compatible_with(
sp_weights.dense_shape.get_shape())
# TODO(yleon): Add enhanced node assertions to verify that sp_ids and
# sp_weights have equal indices and shapes.
with ops.name_scope(name, "embedding_lookup_sparse",
params + [sp_ids]) as name:
segment_ids = sp_ids.indices[:, 0]
if segment_ids.dtype != dtypes.int32:
segment_ids = math_ops.cast(segment_ids, dtypes.int32)
ids = sp_ids.values
if ignore_weights:
ids, idx = array_ops.unique(ids)
else:
idx = None
weights = None if ignore_weights else sp_weights.values
embeddings = _embedding_lookup_with_distributed_aggregation(
params,
ids,
partition_strategy=partition_strategy,
max_norm=max_norm,
weights=weights,
idx=idx,
segment_ids=segment_ids)
    # Set weights to all ones if ignoring weights.
if ignore_weights:
weights = array_ops.fill([array_ops.shape(segment_ids)[0]], 1)
if weights.dtype != embeddings.dtype:
weights = math_ops.cast(weights, embeddings.dtype)
# Reshape weights.
ones = array_ops.fill(
array_ops.expand_dims(array_ops.rank(embeddings) - 1, 0), 1)
bcast_weights_shape = array_ops.concat([array_ops.shape(weights), ones], 0)
orig_weights_shape = weights.get_shape()
weights = array_ops.reshape(weights, bcast_weights_shape)
if embeddings.get_shape().ndims is not None:
weights.set_shape(
orig_weights_shape.concatenate(
[1 for _ in range(embeddings.get_shape().ndims - 1)]))
if combiner == "mean":
weight_sum = math_ops.segment_sum(weights, segment_ids)
embeddings = math_ops.div(embeddings, weight_sum)
elif combiner == "sqrtn":
weights_squared = math_ops.pow(weights, 2)
weight_sum = math_ops.segment_sum(weights_squared, segment_ids)
weight_sum_sqrt = math_ops.sqrt(weight_sum)
embeddings = math_ops.div(embeddings, weight_sum_sqrt)
elif combiner != "sum":
assert False, "Unrecognized combiner"
return embeddings
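# Illustrative usage sketch (hypothetical names); the call mirrors
# `tf.nn.embedding_lookup_sparse`, but partial sums are computed on the
# device holding each shard before the final combination:
#
#   shards = [tf.get_variable("emb_%d" % i, shape=[250, 16]) for i in range(4)]
#   sp_ids = tf.SparseTensor(indices=[[0, 0], [1, 0], [1, 1]],
#                            values=[3, 7, 255], dense_shape=[2, 2])
#   combined = embedding_lookup_sparse_with_distributed_aggregation(
#       shards, sp_ids, sp_weights=None, combiner="mean")
#   # `combined` has shape [2, 16]: one aggregated embedding per row of sp_ids.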
def _do_gather(params, ids, name=None):
"""Deals with doing gather differently for resource variables."""
if isinstance(params, resource_variable_ops.ResourceVariable):
return params.sparse_read(ids, name=name)
return array_ops.gather(params, ids, name=name)
def _embedding_lookup_with_distributed_aggregation(params,
ids,
partition_strategy="mod",
name=None,
max_norm=None,
weights=None,
idx=None,
segment_ids=None):
"""Lookup helper for embedding_lookup_sparse_with_distributed_aggregation."""
if params is None or params == []: # pylint: disable=g-explicit-bool-comparison
raise ValueError("Need at least one param")
if isinstance(params, variables.PartitionedVariable):
params = list(params) # Iterate to get the underlying Variables.
if not isinstance(params, list):
params = [params]
def maybe_normalize(x):
if max_norm is not None:
if x.get_shape().ndims is not None:
ndims = x.get_shape().ndims
else:
ndims = array_ops.size(array_ops.shape(x))
return clip_ops.clip_by_norm(x, max_norm, axes=list(range(1, ndims)))
return x
with ops.name_scope(name, "embedding_lookup_with_distributed_aggregation",
params + [ids]) as name:
np = len(params) # Number of partitions
# Preserve the resource variable status to avoid accidental dense reads.
if not any(
isinstance(p, resource_variable_ops.ResourceVariable) for p in params):
params = ops.convert_n_to_tensor_or_indexed_slices(params, name="params")
if np == 1:
with ops.colocate_with(params[0]):
ret = maybe_normalize(_do_gather(params[0], ids))
ignore_weights = weights is None
if not ignore_weights:
if weights.dtype != ret.dtype:
weights = math_ops.cast(weights, ret.dtype)
# Reshape to allow broadcast
ones = array_ops.fill(
array_ops.expand_dims(array_ops.rank(ret) - 1, 0), 1)
bcast_weights_shape = array_ops.concat(
[array_ops.shape(weights), ones], 0)
orig_weights_shape = weights.get_shape()
weights = array_ops.reshape(weights, bcast_weights_shape)
# Set weights shape after reshape
if ret.get_shape().ndims is not None:
weights.set_shape(
orig_weights_shape.concatenate(
[1 for _ in range(ret.get_shape().ndims - 1)]))
ret *= weights
return math_ops.segment_sum(ret, segment_ids, name=name)
else:
return math_ops.sparse_segment_sum(ret, idx, segment_ids, name=name)
else:
ids = ops.convert_to_tensor(ids, name="ids")
flat_ids = array_ops.reshape(ids, [-1])
original_indices = math_ops.range(array_ops.size(flat_ids))
# Create p_assignments and set new_ids depending on the strategy.
if partition_strategy == "mod":
p_assignments = flat_ids % np
new_ids = flat_ids // np
elif partition_strategy == "div":
# Compute num_total_ids as the sum of dim-0 of params, then assign to
# partitions based on a constant number of ids per partition. Optimize
# if we already know the full shape statically.
dim_0_size = params[0].get_shape().dims[0]
for p in xrange(1, np):
dim_0_size += params[p].get_shape().dims[0]
if dim_0_size.value:
num_total_ids = constant_op.constant(dim_0_size, flat_ids.dtype)
else:
dim_0_sizes = []
for p in xrange(np):
if params[p].get_shape().dims[0].value is not None:
dim_0_sizes.append(params[p].get_shape().dims[0].value)
else:
with ops.colocate_with(params[p]):
dim_0_sizes.append(array_ops.shape(params[p])[0])
num_total_ids = math_ops.reduce_sum(
math_ops.cast(array_ops.stack(dim_0_sizes), flat_ids.dtype))
ids_per_partition = num_total_ids // np
extras = num_total_ids % np
p_assignments = math_ops.maximum(flat_ids // (ids_per_partition + 1),
(flat_ids - extras) //
ids_per_partition)
# Emulate a conditional using a boolean indicator tensor
is_in_first_extras_partitions = math_ops.cast(p_assignments < extras,
flat_ids.dtype)
new_ids = (
is_in_first_extras_partitions * (flat_ids %
(ids_per_partition + 1)) +
(1 - is_in_first_extras_partitions) *
((flat_ids - extras) % ids_per_partition))
else:
raise ValueError("Unrecognized partition strategy: " +
partition_strategy)
# Cast partition assignments to int32 for use in dynamic_partition.
# There really should not be more than 2^32 partitions.
p_assignments = math_ops.cast(p_assignments, dtypes.int32)
# Partition list of ids based on assignments into np separate lists
gather_ids = data_flow_ops.dynamic_partition(new_ids, p_assignments, np)
# Similarly, partition the original indices.
pindices = data_flow_ops.dynamic_partition(original_indices,
p_assignments, np)
# Do np separate lookups, finding embeddings for plist[p] in params[p]
partitioned_result = []
for p in xrange(np):
with ops.colocate_with(params[p]):
partitioned_result.append(_do_gather(params[p], gather_ids[p]))
ignore_weights = weights is None
if not ignore_weights:
# Partition weights according to pindices.
partitioned_weight = []
for p in xrange(np):
partitioned_weight.append(array_ops.gather(weights, pindices[p]))
# Reshape each partition result.
element_shape = params[0].get_shape()[1:]
for p in params[1:]:
element_shape = element_shape.merge_with(p.get_shape()[1:])
if element_shape.is_fully_defined():
for p in xrange(np):
with ops.colocate_with(params[p]):
partitioned_result[p] = array_ops.reshape(
partitioned_result[p],
array_ops.concat([array_ops.shape(pindices[p]), element_shape],
0))
else:
with ops.colocate_with(params[0]):
params_shape = array_ops.shape(params[0])
for p in xrange(np):
with ops.colocate_with(params[p]):
partitioned_result[p] = array_ops.reshape(
partitioned_result[p],
array_ops.concat([
array_ops.shape(pindices[p]),
array_ops.slice(params_shape, [1], [-1])
], 0))
# Normalize each partition result.
for p in xrange(np):
with ops.colocate_with(params[p]):
partitioned_result[p] = maybe_normalize(partitioned_result[p])
if not ignore_weights:
# Multiply each partition result with partition weights.
for p in xrange(np):
with ops.colocate_with(params[p]):
if partitioned_weight[p].dtype != partitioned_result[p].dtype:
partitioned_weight[p] = math_ops.cast(partitioned_weight[p],
partitioned_result[p].dtype)
# Reshape partition weights.
ones = array_ops.fill(
array_ops.expand_dims(
array_ops.rank(partitioned_result[p]) - 1, 0), 1)
bcast_weights_shape = array_ops.concat(
[array_ops.shape(partitioned_weight[p]), ones], 0)
orig_weights_shape = partitioned_weight[p].get_shape()
partitioned_weight[p] = array_ops.reshape(partitioned_weight[p],
bcast_weights_shape)
if partitioned_result[p].get_shape().ndims is not None:
partitioned_weight[p].set_shape(
orig_weights_shape.concatenate([
1 for _ in range(partitioned_result[p].get_shape().ndims -
1)
]))
partitioned_result[p] *= partitioned_weight[p]
partitioned_segment_ids = []
for p in xrange(np):
if not ignore_weights:
# Partition segment_ids according to pindices.
p_segment_ids = array_ops.gather(segment_ids, pindices[p])
# Number the p_segment_ids to meet segment_sum's requirements. Note
# that unique_p_segment_ids contains unique segment ids of this
# partition and these ids' order is unchanged.
unique_p_segment_ids, unique_p_segment_idx = array_ops.unique(
p_segment_ids)
partitioned_segment_ids.append(unique_p_segment_ids)
# segment_sum this partition's result.
with ops.colocate_with(params[p]):
partitioned_result[p] = math_ops.segment_sum(
partitioned_result[p], unique_p_segment_idx)
else:
          # When ignoring weights, we need to get the indices of the elements
          # in idx and segment_ids that belong to this partition.
_, exclude_idx = array_ops.setdiff1d(idx, pindices[p])
all_idx = math_ops.range(array_ops.shape(idx)[0])
_, include_idx = array_ops.setdiff1d(all_idx, exclude_idx)
          # Gather segment_ids and idx according to those indices.
p_segment_ids = array_ops.gather(segment_ids, include_idx)
p_idx = array_ops.gather(idx, include_idx)
          # Number the p_segment_ids, as in the weighted case above.
unique_p_segment_ids, unique_p_segment_idx = array_ops.unique(
p_segment_ids)
_, unique_p_idx_idx = array_ops.unique(p_idx)
partitioned_segment_ids.append(unique_p_segment_ids)
with ops.colocate_with(params[p]):
partitioned_result[p] = math_ops.sparse_segment_sum(
partitioned_result[p], unique_p_idx_idx, unique_p_segment_idx)
# Concat each partition's segment_ids and result for final segment_sum.
concat_segment_ids = array_ops.concat(partitioned_segment_ids, 0)
concat_partitioned_result = array_ops.concat(partitioned_result, 0)
return math_ops.unsorted_segment_sum(
concat_partitioned_result,
concat_segment_ids,
math_ops.reduce_max(concat_segment_ids) + 1,
name=name)
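# Worked example of the two partition strategies above (illustrative): with
# np = 3 partitions and 10 total ids, ids_per_partition = 3 and extras = 1.
#   "mod": p_assignments = id % 3 and new_ids = id // 3, so ids 0..9 land in
#          partitions [0, 1, 2, 0, 1, 2, 0, 1, 2, 0].
#   "div": the first partition absorbs the extra id and receives ids 0..3,
#          while the remaining partitions receive ids 4..6 and 7..9.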
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/embedding_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TargetColumn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import target_column as target_column_lib
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class RegressionTargetColumnTest(test.TestCase):
# TODO(zakaria): test multilabel regression.
def testRegression(self):
target_column = target_column_lib.regression_target()
with ops.Graph().as_default(), session.Session() as sess:
prediction = constant_op.constant([[1.], [1.], [3.]])
labels = constant_op.constant([[0.], [1.], [1.]])
self.assertAlmostEqual(
5. / 3, sess.run(target_column.loss(prediction, labels, {})))
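      # Worked arithmetic for the expected value (illustrative): the squared
      # errors are (1-0)^2 + (1-1)^2 + (3-1)^2 = 1 + 0 + 4 = 5, and the mean
      # over the 3 examples is 5/3.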
def testRegressionWithWeights(self):
target_column = target_column_lib.regression_target(
weight_column_name="label_weight")
with ops.Graph().as_default(), session.Session() as sess:
features = {"label_weight": constant_op.constant([[2.], [5.], [0.]])}
prediction = constant_op.constant([[1.], [1.], [3.]])
labels = constant_op.constant([[0.], [1.], [1.]])
self.assertAlmostEqual(
2. / 7,
sess.run(target_column.loss(prediction, labels, features)),
places=3)
self.assertAlmostEqual(
2. / 3,
sess.run(target_column.training_loss(prediction, labels, features)),
places=3)
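      # Worked arithmetic (illustrative): the weighted squared errors are
      # 2*1 + 5*0 + 0*4 = 2. `loss` normalizes by the weight sum 2+5+0 = 7,
      # giving 2/7, while `training_loss` divides by the batch size 3,
      # giving 2/3.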
class MultiClassTargetColumnTest(test.TestCase):
def testBinaryClassification(self):
target_column = target_column_lib.multi_class_target(n_classes=2)
with ops.Graph().as_default(), session.Session() as sess:
logits = constant_op.constant([[1.], [1.]])
labels = constant_op.constant([[1.], [0.]])
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
self.assertAlmostEqual(
0.81326175,
sess.run(target_column.loss(logits, labels, {})),
delta=1e-6)
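      # Worked arithmetic (illustrative): sigmoid(1) ~= 0.7311, so the two
      # per-example loglosses are -log(0.7311) ~= 0.3133 (label 1) and
      # -log(1 - 0.7311) ~= 1.3133 (label 0); their mean is ~0.81326.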
def testBinaryClassificationWithWeights(self):
target_column = target_column_lib.multi_class_target(
n_classes=2, weight_column_name="label_weight")
with ops.Graph().as_default(), session.Session() as sess:
features = {"label_weight": constant_op.constant([[1.], [0.]])}
logits = constant_op.constant([[1.], [1.]])
labels = constant_op.constant([[1.], [0.]])
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
self.assertAlmostEqual(
.31326166,
sess.run(target_column.loss(logits, labels, features)),
delta=1e-6)
def testBinaryEvalMetrics(self):
target_column = target_column_lib.multi_class_target(n_classes=2)
with ops.Graph().as_default(), session.Session() as sess:
logits = constant_op.constant([[1.], [1.], [-1.]])
labels = constant_op.constant([[1.], [0.], [1.]])
eval_dict = target_column.get_eval_ops({}, logits, labels)
# TODO(zakaria): test all metrics
accuracy_op, update_op = eval_dict["accuracy/threshold_0.500000_mean"]
sess.run(variables.global_variables_initializer())
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(1.0 / 3, sess.run(accuracy_op))
def testMultiClass(self):
target_column = target_column_lib.multi_class_target(n_classes=3)
with ops.Graph().as_default(), session.Session() as sess:
logits = constant_op.constant([[1., 0., 0.]])
labels = constant_op.constant([2])
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
self.assertAlmostEqual(1.5514446,
sess.run(target_column.loss(logits, labels, {})))
def testMultiClassWithWeight(self):
target_column = target_column_lib.multi_class_target(
n_classes=3, weight_column_name="label_weight")
with ops.Graph().as_default(), session.Session() as sess:
features = {"label_weight": constant_op.constant([0.1])}
logits = constant_op.constant([[1., 0., 0.]])
labels = constant_op.constant([2])
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
self.assertAlmostEqual(
1.5514446, sess.run(target_column.loss(logits, labels, features)))
def testMultiClassWithInvalidNClass(self):
try:
target_column_lib.multi_class_target(n_classes=1)
self.fail("Softmax with no n_classes did not raise error.")
except ValueError:
# Expected
pass
def testMultiClassEvalMetrics(self):
target_column = target_column_lib.multi_class_target(n_classes=3)
with ops.Graph().as_default(), session.Session() as sess:
logits = constant_op.constant([[1., 0., 0.]])
labels = constant_op.constant([2])
eval_dict = target_column.get_eval_ops({}, logits, labels)
loss_op, update_op = eval_dict["loss"]
sess.run(variables.global_variables_initializer())
sess.run(variables.local_variables_initializer())
sess.run(update_op)
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
self.assertAlmostEqual(1.5514446, sess.run(loss_op))
def testBinarySVMDefaultWeights(self):
target_column = target_column_lib.binary_svm_target()
predictions = constant_op.constant([[-0.5], [1.2]])
labels = constant_op.constant([0, 1])
loss = target_column.loss(predictions, labels, {})
    # The prediction for the first example is on the correct side of the
    # hyperplane (i.e., < 0) but within the [-1, 1] margin, so it incurs a 0.5
    # loss. The second prediction is outside the margin and incurs no loss.
    # The overall (normalized) loss is therefore 0.5 / (1 + 1) = 0.25.
with session.Session() as sess:
self.assertAlmostEqual(0.25, sess.run(loss))
def testBinarySVMWithWeights(self):
target_column = target_column_lib.binary_svm_target(
weight_column_name="weights")
predictions = constant_op.constant([[-0.7], [0.2]])
labels = constant_op.constant([0, 1])
features = {"weights": constant_op.constant([2.0, 10.0])}
loss = target_column.loss(predictions, labels, features)
training_loss = target_column.training_loss(predictions, labels, features)
    # The predictions for both examples are on the correct side of the
    # hyperplane but within the margin. The weighted losses incurred are
    # 2*0.3=0.6 and 10*0.8=8, respectively. The overall (normalized) loss is
    # therefore 8.6/12.
with session.Session() as sess:
self.assertAlmostEqual(8.6 / 12, sess.run(loss), places=3)
self.assertAlmostEqual(8.6 / 2, sess.run(training_loss), places=3)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/layers/python/layers/target_column_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.layers.sparse_feature_cross."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.contrib import layers
from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SparseCrossOpTest(test.TestCase):
def test_simple(self):
"""Tests a simple scenario.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([['batch1-FC1-F1'],
['batch2-FC1-F1', 'batch2-FC1-F2']]),
self._sparse_tensor([['batch1-FC2-F1'],
['batch2-FC2-F1', 'batch2-FC2-F2']])
])
expected_out = self._sparse_tensor([['batch1-FC1-F1_X_batch1-FC2-F1'], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_dense(self):
"""Tests only dense inputs.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
constant_op.constant([['batch1-FC1-F1', 'batch1-FC1-F2'],
['batch2-FC1-F1', 'batch2-FC1-F2']],
dtypes.string),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2',
'batch1-FC1-F2_X_batch1-FC2-F1', 'batch1-FC1-F2_X_batch1-FC2-F2'
], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_integer_mixed_string_sparse(self):
"""Tests mixed type."""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([[11], [333, 55555]]),
self._sparse_tensor([['batch1-FC2-F1'],
['batch2-FC2-F1', 'batch2-FC2-F2']])
])
expected_out = self._sparse_tensor([['11_X_batch1-FC2-F1'], [
'333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2', '55555_X_batch2-FC2-F1',
'55555_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_integer_mixed_string_dense(self):
"""Tests mixed dense inputs.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
constant_op.constant([[11, 333], [55555, 999999]], dtypes.int64),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor([[
'11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2', '333_X_batch1-FC2-F1',
'333_X_batch1-FC2-F2'
], [
'55555_X_batch2-FC2-F1', '55555_X_batch2-FC2-F2',
'999999_X_batch2-FC2-F1', '999999_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_sparse_cross_dense(self):
"""Tests sparse and dense inputs.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([['batch1-FC1-F1'],
['batch2-FC1-F1', 'batch2-FC1-F2']]),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor(
[['batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2'], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_integer_sparse_input(self):
"""Tests mixed type sparse and dense inputs."""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([[11], [333, 5555]]),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor(
[['11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2'], [
'333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2',
'5555_X_batch2-FC2-F1', '5555_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_permutation_3x3x3(self):
"""Tests 3x3x3 permutation.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
self._sparse_tensor(
[['batch1-FC2-F1', 'batch1-FC2-F2', 'batch1-FC2-F3']]),
self._sparse_tensor(
[['batch1-FC3-F1', 'batch1-FC3-F2', 'batch1-FC3-F3']])
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F3',
'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F2',
'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F3',
'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F2',
'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F3',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F3',
'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F3',
'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F3',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F3',
'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F3',
'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F3'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_permutation_3x1x2(self):
"""Tests 3x1x2 permutation.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_large_batch(self):
"""Tests with large batch size to force multithreading.
"""
batch_size = 5000
col1 = []
col2 = []
col3 = []
for b in range(batch_size):
col1.append(
['batch%d-FC1-F1' % b, 'batch%d-FC1-F2' % b, 'batch%d-FC1-F3' % b])
col2.append(['batch%d-FC2-F1' % b])
col3.append(['batch%d-FC3-F1' % b, 'batch%d-FC3-F2' % b])
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor(col1), self._sparse_tensor(col2),
self._sparse_tensor(col3)
])
col_out = []
for b in range(batch_size):
col_out.append([
'batch%d-FC1-F1_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
'batch%d-FC1-F1_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b),
'batch%d-FC1-F2_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
'batch%d-FC1-F2_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b),
'batch%d-FC1-F3_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
'batch%d-FC1-F3_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b)
])
expected_out = self._sparse_tensor(col_out)
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_one_column_empty(self):
"""Tests when one column is empty.
The crossed tensor should be empty.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([['batch1-FC1-F1', 'batch1-FC1-F2']]),
self._sparse_tensor([], 1),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
])
with self.cached_session() as sess:
self._assert_sparse_tensor_empty(sess.run(op))
def test_some_columns_empty(self):
"""Tests when more than one columns are empty.
Cross for the corresponding batch should be empty.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([['batch1-FC1-F1', 'batch1-FC1-F2']], 2),
self._sparse_tensor([['batch1-FC2-F1'], ['batch2-FC2-F1']], 2),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']], 2)
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2'
]], 2)
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_all_columns_empty(self):
"""Tests when all columns are empty.
The crossed tensor should be empty.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([]), self._sparse_tensor([]),
self._sparse_tensor([])
])
with self.cached_session() as sess:
self._assert_sparse_tensor_empty(sess.run(op))
def test_hashed_output_zero_bucket(self):
"""Tests a simple scenario.
"""
op = sparse_feature_cross_op.sparse_feature_cross(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
hashed_output=True)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[3735511728867393167]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_hashed_output_zero_bucket_v2(self):
"""Tests a simple scenario.
"""
op = sparse_feature_cross_op.sparse_feature_cross(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
hashed_output=True,
hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[1971693436396284976]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
# TODO(sibyl-Aix6ihai): Add benchmark to compare Hashed vs Non-hashed.
def test_hashed_output(self):
"""Tests a simple scenario.
"""
op = sparse_feature_cross_op.sparse_feature_cross(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
hashed_output=True,
num_buckets=100)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[74]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_hashed_output_v2(self):
"""Tests a simple scenario.
"""
op = sparse_feature_cross_op.sparse_feature_cross(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
hashed_output=True,
num_buckets=100,
hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[83]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_hashed_output_v1_has_collision(self):
"""Tests the old version of the fingerprint concatenation has collisions.
"""
# The last 10 bits of 359 and 1024+359 are identical.
# As a result, all the crosses collide.
t1 = constant_op.constant([[359], [359 + 1024]])
t2 = constant_op.constant([list(range(10)), list(range(10))])
cross = sparse_feature_cross_op.sparse_feature_cross(
[t2, t1], hashed_output=True, num_buckets=1024)
cross_dense = sparse_ops.sparse_tensor_to_dense(cross)
with session.Session():
values = cross_dense.eval()
self.assertTrue(numpy.equal(values[0], values[1]).all())
def test_hashed_output_v2_has_no_collision(self):
"""Tests the new version of the fingerprint concatenation has no collisions.
"""
    # Although the last 10 bits of 359 and 1024+359 are identical, the crosses
    # should not collide because the new fingerprint function is used.
t1 = constant_op.constant([[359], [359 + 1024]])
t2 = constant_op.constant([list(range(10)), list(range(10))])
cross = sparse_feature_cross_op.sparse_feature_cross(
[t2, t1],
hashed_output=True,
num_buckets=1024,
hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)
cross_dense = sparse_ops.sparse_tensor_to_dense(cross)
with session.Session():
values = cross_dense.eval()
self.assertTrue(numpy.not_equal(values[0], values[1]).all())
def test_hashed_3x1x2(self):
"""Tests 3x1x2 permutation with hashed output.
"""
op = sparse_feature_cross_op.sparse_feature_cross(
[
self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
],
hashed_output=True,
num_buckets=1000)
with self.cached_session() as sess:
out = sess.run(op)
self.assertEqual(6, len(out.values))
self.assertAllEqual([[0, i] for i in range(6)], out.indices)
self.assertTrue(all(x < 1000 and x >= 0 for x in out.values))
all_values_are_different = len(out.values) == len(set(out.values))
self.assertTrue(all_values_are_different)
def _assert_sparse_tensor_empty(self, sp):
    self.assertEqual(0, sp.indices.size)
    self.assertEqual(0, sp.values.size)
    # TODO(zakaria): check if we can ignore the first dim of the shape.
    self.assertEqual(0, sp.dense_shape[1])
def _assert_sparse_tensor_equals(self, sp1, sp2):
self.assertAllEqual(sp1.indices.eval(), sp2.indices)
self.assertAllEqual(sp1.values.eval(), sp2.values)
self.assertAllEqual(sp1.dense_shape.eval(), sp2.dense_shape)
def _sparse_tensor(self, data, batch_size=-1):
"""Generates a SparseTensor.
Args:
      data: Should be a list of lists of strings or int64 values. Each item of
        the outer list represents a batch. Each item of the batch is a feature
        of a specific feature column.
      batch_size: Optional batch size, useful for cases when `data` has no
        entry for some batches.
Returns:
A SparseTensor.
"""
indices = []
values = []
max_col_count = 0
for batch, batch_ix in zip(data, range(len(data))):
for column, column_ix in zip(batch, range(len(batch))):
indices.append([batch_ix, column_ix])
values.append(column)
max_col_count = max(max_col_count, column_ix + 1)
shape = [batch_size if batch_size != -1 else len(data), max_col_count]
value_type = (dtypes.string if not values or isinstance(values[0], str) else
dtypes.int64)
return sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64, [len(indices), 2]),
constant_op.constant(values, value_type, [len(indices)]),
constant_op.constant(shape, dtypes.int64))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/layers/python/kernel_tests/sparse_feature_cross_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for bucketization operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import math_ops
def bucketize(input_tensor, boundaries, name=None):
"""Bucketizes input_tensor by given boundaries.
See bucketize_op.cc for more details.
Args:
    input_tensor: A `Tensor` which will be bucketized.
    boundaries: A sorted list of floats giving the bucket boundaries.
name: A name prefix for the returned tensors (optional).
Returns:
A `Tensor` with type int32 which indicates the corresponding bucket for
each value in `input_tensor`.
Raises:
TypeError: If boundaries is not a list.
"""
return math_ops._bucketize( # pylint: disable=protected-access
input_tensor, boundaries=boundaries, name=name)
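# Illustrative usage sketch (hypothetical values):
#
#   buckets = bucketize(tf.constant([-1.0, 0.5, 3.0, 10.0]),
#                       boundaries=[0.0, 1.0, 5.0])
#   # -1.0 is below the first boundary -> bucket 0, 0.5 -> bucket 1,
#   # 3.0 -> bucket 2, and 10.0 is past the last boundary -> bucket 3.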
|
tensorflow-master
|
tensorflow/contrib/layers/python/ops/bucketization_op.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops to work with `SparseTensor`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import compat
def _multiplier_helper(shape):
"""Returns moving offset for each dimension given shape."""
multipliers = []
for dim in reversed(shape):
if multipliers:
multipliers.append(dim * multipliers[-1])
else:
multipliers.append(dim)
multipliers.reverse()
return multipliers
def _ignore_value_tensor(dtype, ignore_value=None):
"""Create `Tensor` from provided `ignore_value` and `dtype`."""
if ignore_value is None:
if dtype == dtypes.string:
      # Special case: TF strings are converted to numpy objects by default.
ignore_value = ""
else:
# NOTE: `as_numpy_dtype` is a property, so with the parentheses this is
# constructing a new numpy object of the given type, which yields the
# default value for that type.
ignore_value = dtype.as_numpy_dtype()
return math_ops.cast(ignore_value, dtype, name="ignore_value")
def dense_to_sparse_tensor(dense_tensor, ignore_value=None):
"""Converts dense `Tensor` to `SparseTensor`, dropping `ignore_value` cells.
Args:
dense_tensor: A `Tensor`.
ignore_value: Entries in `dense_tensor` equal to this value will be
      absent from the returned `SparseTensor`. If `None`, the default value of
      the `dense_tensor` dtype will be used (e.g. '' for `str`, 0 for `int`).
Returns:
A `SparseTensor` with the same shape as `dense_tensor`.
Raises:
ValueError: when `dense_tensor`'s rank is `None`.
"""
with ops.name_scope("DenseToSparseTensor"):
dense_tensor = ops.convert_to_tensor(dense_tensor)
ignore_value = _ignore_value_tensor(dense_tensor.dtype, ignore_value)
indices = array_ops.where(
math_ops.not_equal(dense_tensor, ignore_value), name="indices")
return sparse_tensor.SparseTensor(
indices=indices,
values=array_ops.gather_nd(dense_tensor, indices, name="values"),
dense_shape=array_ops.shape(
dense_tensor, out_type=dtypes.int64, name="dense_shape"))
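# Illustrative usage sketch (hypothetical values):
#
#   st = dense_to_sparse_tensor([[1, 0], [0, 3]])
#   # st.indices == [[0, 0], [1, 1]], st.values == [1, 3],
#   # st.dense_shape == [2, 2]; the zeros are dropped because 0 is the
#   # default ignore_value for integer dtypes.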
def indicators_to_sparse_ids(indicators, ignore_value=None, dtype=dtypes.int64):
"""Convert a dense indicator tensor to sparse IDs.
This is commonly used for converting a dense classification label to sparse.
In the following example, we have an input of shape (2, 2, num_classes),
where num_classes=4.
```python
indicators = [
[
[0, 0, 1, 0],
[0, 0, 0, 0]
], [
[1, 0, 1, 1],
[0, 0, 1, 0]
]
]
sparse_ids = indicator_to_sparse_ids(indicators)
```
`sparse_ids` in "jagged" format:
[
[
[2],
[]
], [
[0, 2, 3],
[2]
]
]
`sparse_ids` in `SparseTensor` format:
```python
{
indices: [[0, 0, 1], [1, 0, 0], [1, 0, 1], [1, 0, 2], [1, 1, 0]],
values: [2, 0, 2, 3, 2],
dense_shape: [2, 2, 3]
}
```
Args:
indicators: Dense `Tensor` of shape `(d0, ..., dn, num_classes)`.
`ignore_value` values are ignored. For other values (typically, ones), the
index along the last dimension is returned.
ignore_value: Entries in `indicators` equal to this value will be
absent from the returned `SparseTensor`. If `None`, default value of
`indicators` dtype will be used (e.g. '' for `str`, 0 for `int`).
dtype: Type of result, must be integer type.
Returns:
`SparseTensor` of type `dtype` and shape `(d0, ..., dn, max_num_labels)`,
where `max_num_labels` is the maximum number of non-zero values in any
    row (in the example above, row (1, 0) has 3 non-zero values, so the result
shape is (2, 2, 3)). The values of this `SparseTensor` are in the range
`[0, num_classes)` and correspond to the index of non-ignore values along
the last dimension of `indicators`.
Raises:
ValueError: if `dtype` is not integer.
"""
if not dtype.is_integer:
raise ValueError("Invalid dtype {} not integer.".format(dtype))
with ops.name_scope(
None, "indicators_to_sparse_ids", (indicators, ignore_value)):
# Convert indicators to binary ones and zeros. We use int64 since
# SparseTensor requires int64 indices.
indicators = ops.convert_to_tensor(indicators, name="indicators")
missing_indicators = math_ops.equal(
indicators, _ignore_value_tensor(indicators.dtype, ignore_value),
name="missing")
zeros_like_indicators = array_ops.zeros_like(
indicators, dtype=dtypes.int64, name="zeros")
binary_indicators = array_ops.where(
missing_indicators, zeros_like_indicators,
array_ops.ones_like(indicators, dtype=dtypes.int64, name="ones"),
name="binary_indicators")
# Use cumsum along the last dimension to generate per-row indexes.
# Note that these are 1-based (since 0 indicates missing values), so they're
# off-by-1 from the actual indices. We'll subtract 1 below. Since they're
# off-by-one, the max value is the size of the last dimension (i.e.,
# last_index + 1).
row_index_indicators = array_ops.where(
missing_indicators, zeros_like_indicators,
math_ops.cumsum(binary_indicators, axis=-1), "row_index_indicators")
result_last_dim = array_ops.reshape(
math_ops.reduce_max(row_index_indicators), shape=(1,),
name="result_last_dim")
# Convert to a SparseTensor. The values of this SparseTensor are the last
# indices of our result, and the last indices of this SparseTensor (i.e.,
# the class IDs indicated by `indicators`) are the values of our result, so
# we use tensor slicing and concat to swap them.
sparse_row_index_indicators = dense_to_sparse_tensor(
row_index_indicators, ignore_value=0)
return sparse_tensor.SparseTensor(
indices=array_ops.concat((
sparse_row_index_indicators.indices[:, :-1],
array_ops.reshape(sparse_row_index_indicators.values - 1, (-1, 1))
), axis=1, name="indices"),
values=math_ops.cast(
sparse_row_index_indicators.indices[:, -1], dtype=dtype,
name="values"),
dense_shape=array_ops.concat(
(sparse_row_index_indicators.dense_shape[0:-1], result_last_dim),
axis=0, name="dense_shape"))
def sparse_row_envelope(sparse_input, row_axis=0, col_axis=1, name=None):
"""Returns the length of each 'row' in a `SparseTensor`.
For example, if `sparse_input` has indices `[[0,0], [2, 0], [2, 1], [2, 2]]`
and shape `[3, 3]`, this function will return `[1, 0, 3]`.
Args:
sparse_input: a `SparseTensor` of rank at least 2.
row_axis: An integer. The axis for the row of the envelope matrix. Default
is 0.
col_axis: An integer. The axis for the col of the envelope matrix. Default
is 1.
name: A name for the operation (optional).
Returns:
A one-dimensional `Tensor` whose entries correspond to the length of each
row of `SparseTensor`.
Raises:
ValueError: If row_axis and col_axis are the same axis or they are not
integers.
"""
if not (isinstance(row_axis, compat.integral_types) and
isinstance(col_axis, compat.integral_types)):
raise ValueError("`row_axis` and `col_axis` must be integers.")
if row_axis == col_axis:
raise ValueError("Row and column can not be the same axis.")
with ops.name_scope(name, "sparse_row_envelope", [sparse_input]):
indices = sparse_input.indices
row_indices = indices[:, row_axis]
col_indices = indices[:, col_axis]
num_rows = math_ops.cast(sparse_input.dense_shape[row_axis], dtypes.int32)
row_envelope = math_ops.unsorted_segment_max(
col_indices + 1, row_indices, num_rows, name=name)
zeros = array_ops.zeros_like(row_envelope)
return array_ops.where(row_envelope > zeros, row_envelope, zeros)
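# A small NumPy sketch of the same "row envelope" computation for the 2-D
# docstring example above. Illustration only, not part of the module.
import numpy as np
indices_example = np.array([[0, 0], [2, 0], [2, 1], [2, 2]])
envelope = np.zeros(3, dtype=np.int64)  # dense_shape[row_axis] is 3
# Each entry at (row, col) implies that row is at least col + 1 long.
np.maximum.at(envelope, indices_example[:, 0], indices_example[:, 1] + 1)
# envelope -> [1, 0, 3], matching the docstring example.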
|
tensorflow-master
|
tensorflow/contrib/layers/python/ops/sparse_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.layers.python.ops.sparse_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.layers.python.ops import sparse_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
def _assert_sparse_tensor_value(test_case, expected, actual):
test_case.assertEqual(np.int64, np.array(actual.indices).dtype)
test_case.assertAllEqual(expected.indices, actual.indices)
test_case.assertEqual(
np.array(expected.values).dtype, np.array(actual.values).dtype)
test_case.assertAllEqual(expected.values, actual.values)
test_case.assertEqual(np.int64, np.array(actual.dense_shape).dtype)
test_case.assertAllEqual(expected.dense_shape, actual.dense_shape)
class DenseToSparseTensorTest(test.TestCase):
def test_dense_to_sparse_tensor_1d(self):
with self.cached_session() as sess:
st = sparse_ops.dense_to_sparse_tensor([1, 0, 2, 0])
result = sess.run(st)
self.assertEqual(result.indices.dtype, np.int64)
self.assertEqual(result.values.dtype, np.int32)
self.assertEqual(result.dense_shape.dtype, np.int64)
self.assertAllEqual([[0], [2]], result.indices)
self.assertAllEqual([1, 2], result.values)
self.assertAllEqual([4], result.dense_shape)
def test_dense_to_sparse_tensor_1d_float(self):
with self.cached_session() as sess:
st = sparse_ops.dense_to_sparse_tensor([1.5, 0.0, 2.3, 0.0])
result = sess.run(st)
self.assertEqual(result.indices.dtype, np.int64)
self.assertEqual(result.values.dtype, np.float32)
self.assertEqual(result.dense_shape.dtype, np.int64)
self.assertAllEqual([[0], [2]], result.indices)
self.assertAllClose([1.5, 2.3], result.values)
self.assertAllEqual([4], result.dense_shape)
def test_dense_to_sparse_tensor_1d_bool(self):
with self.cached_session() as sess:
st = sparse_ops.dense_to_sparse_tensor([True, False, True, False])
result = sess.run(st)
self.assertEqual(result.indices.dtype, np.int64)
self.assertEqual(result.values.dtype, np.bool)
self.assertEqual(result.dense_shape.dtype, np.int64)
self.assertAllEqual([[0], [2]], result.indices)
self.assertAllEqual([True, True], result.values)
self.assertAllEqual([4], result.dense_shape)
def test_dense_to_sparse_tensor_1d_str(self):
with self.cached_session() as sess:
st = sparse_ops.dense_to_sparse_tensor([b'qwe', b'', b'ewq', b''])
result = sess.run(st)
self.assertEqual(result.indices.dtype, np.int64)
self.assertEqual(result.values.dtype, np.object)
self.assertEqual(result.dense_shape.dtype, np.int64)
self.assertAllEqual([[0], [2]], result.indices)
self.assertAllEqual([b'qwe', b'ewq'], result.values)
self.assertAllEqual([4], result.dense_shape)
def test_dense_to_sparse_tensor_1d_str_special_ignore(self):
with self.cached_session() as sess:
st = sparse_ops.dense_to_sparse_tensor(
[b'qwe', b'', b'ewq', b''], ignore_value=b'qwe')
result = sess.run(st)
self.assertEqual(result.indices.dtype, np.int64)
self.assertEqual(result.values.dtype, np.object)
self.assertEqual(result.dense_shape.dtype, np.int64)
self.assertAllEqual([[1], [2], [3]], result.indices)
self.assertAllEqual([b'', b'ewq', b''], result.values)
self.assertAllEqual([4], result.dense_shape)
def test_dense_to_sparse_tensor_2d(self):
with self.cached_session() as sess:
st = sparse_ops.dense_to_sparse_tensor([[1, 2, 0, 0], [3, 4, 5, 0]])
result = sess.run(st)
self.assertAllEqual([[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]],
result.indices)
self.assertAllEqual([1, 2, 3, 4, 5], result.values)
self.assertAllEqual([2, 4], result.dense_shape)
def test_dense_to_sparse_tensor_3d(self):
with self.cached_session() as sess:
st = sparse_ops.dense_to_sparse_tensor([[[1, 2, 0, 0], [3, 4, 5, 0]],
[[7, 8, 0, 0], [9, 0, 0, 0]]])
result = sess.run(st)
self.assertAllEqual([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [0, 1, 2],
[1, 0, 0], [1, 0, 1], [1, 1, 0]], result.indices)
self.assertAllEqual([1, 2, 3, 4, 5, 7, 8, 9], result.values)
self.assertAllEqual([2, 2, 4], result.dense_shape)
def test_dense_to_sparse_tensor_unknown_1d_shape(self):
with self.cached_session() as sess:
tensor = array_ops.placeholder(shape=[None], dtype=dtypes.int32)
st = sparse_ops.dense_to_sparse_tensor(tensor)
result = sess.run(st, feed_dict={tensor: [0, 100, 0, 3]})
self.assertAllEqual([[1], [3]], result.indices)
self.assertAllEqual([100, 3], result.values)
self.assertAllEqual([4], result.dense_shape)
def test_dense_to_sparse_tensor_unknown_3d_shape(self):
with self.cached_session() as sess:
tensor = array_ops.placeholder(
shape=[None, None, None], dtype=dtypes.int32)
st = sparse_ops.dense_to_sparse_tensor(tensor)
result = sess.run(st,
feed_dict={
tensor: [[[1, 2, 0, 0], [3, 4, 5, 0]],
[[7, 8, 0, 0], [9, 0, 0, 0]]]
})
self.assertAllEqual([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [0, 1, 2],
[1, 0, 0], [1, 0, 1], [1, 1, 0]], result.indices)
self.assertAllEqual([1, 2, 3, 4, 5, 7, 8, 9], result.values)
self.assertAllEqual([2, 2, 4], result.dense_shape)
def test_dense_to_sparse_unknown_rank(self):
ph = array_ops.placeholder(dtype=dtypes.int32)
with self.cached_session() as sess:
st = sparse_ops.dense_to_sparse_tensor(ph)
result = sess.run(st, feed_dict={ph: [[1, 2, 0, 0], [3, 4, 5, 0]]})
self.assertAllEqual([[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]],
result.indices)
self.assertAllEqual([1, 2, 3, 4, 5], result.values)
self.assertAllEqual([2, 4], result.dense_shape)
class SparseRowEnvelopeTest(test.TestCase):
def test_sparse_row_envelope(self):
expected_sparse_row_envelope = [1, 0, 3]
with self.cached_session() as sess:
sparse_input = sparse_tensor.SparseTensor(
indices=[[0, 0], [2, 0], [2, 1], [2, 2]],
values=[0, 1, 2, 3],
dense_shape=[3, 3])
sparse_row_envelope = sess.run(
sparse_ops.sparse_row_envelope(sparse_input))
self.assertAllEqual(expected_sparse_row_envelope,
sparse_row_envelope)
def test_sparse_row_envelope_unsorted_indices(self):
expected_sparse_row_envelope = [1, 0, 3]
with self.cached_session() as sess:
sparse_input = sparse_tensor.SparseTensor(
indices=[[2, 0], [2, 2], [2, 1], [0, 0]],
values=[0, 1, 2, 3],
dense_shape=[3, 3])
sparse_row_envelope = sess.run(
sparse_ops.sparse_row_envelope(sparse_input))
self.assertAllEqual(expected_sparse_row_envelope,
sparse_row_envelope)
def test_sparse_row_envelope_empty_in_the_end(self):
expected_sparse_row_envelope = [1, 0, 3, 0, 0]
with self.cached_session() as sess:
sparse_input = sparse_tensor.SparseTensor(
indices=[[0, 0], [2, 0], [2, 1], [2, 2]],
values=[0, 1, 2, 3],
dense_shape=[5, 3])
sparse_row_envelope = sess.run(
sparse_ops.sparse_row_envelope(sparse_input))
self.assertAllEqual(expected_sparse_row_envelope,
sparse_row_envelope)
def test_sparse_row_envelope_empty_3d(self):
expected_sparse_row_envelope = [1, 0, 3, 0, 0]
with self.cached_session() as sess:
sparse_input = sparse_tensor.SparseTensor(
indices=[[0, 0, 0], [0, 2, 0], [0, 2, 1], [0, 2, 2]],
values=[0, 1, 2, 3],
dense_shape=[1, 5, 3])
sparse_row_envelope = sess.run(
sparse_ops.sparse_row_envelope(sparse_input, 1, 2))
self.assertAllEqual(expected_sparse_row_envelope,
sparse_row_envelope)
class IndicatorToSparseIdsTest(test.TestCase):
def test_indicators_to_sparse_ids_1d(self):
indicators = (0, 0, 1, 0)
sparse_ids = sparse_ops.indicators_to_sparse_ids(indicators)
with self.cached_session():
_assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
indices=((0,),),
values=(2,),
dense_shape=(1,),
), sparse_ids.eval())
def test_indicators_to_sparse_ids_2d(self):
indicators = (
(0, 0, 1, 0),
(1, 0, 0, 1),
)
sparse_ids = sparse_ops.indicators_to_sparse_ids(indicators)
with self.cached_session():
_assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 3),
dense_shape=(2, 2),
), sparse_ids.eval())
def test_indicators_to_sparse_ids_3d(self):
indicators = (
((0, 0, 1, 0, 0), (0, 0, 0, 0, 0)),
((1, 0, 0, 1, 0), (0, 0, 1, 0, 0)),
((0, 0, 0, 0, 0), (0, 0, 0, 0, 0)),
((1, 0, 0, 1, 1), (0, 0, 1, 0, 0)),
)
sparse_ids = sparse_ops.indicators_to_sparse_ids(indicators)
with self.cached_session():
_assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
indices=(
(0, 0, 0),
(1, 0, 0), (1, 0, 1), (1, 1, 0),
(3, 0, 0), (3, 0, 1), (3, 0, 2), (3, 1, 0)
), values=(
2,
0, 3, 2,
0, 3, 4, 2
), dense_shape=(4, 2, 3),
), sparse_ids.eval())
def test_int16_to_sparse_ids_2d(self):
indicators = (
(0, 0, 1, 0),
(1, 0, 0, 1),
)
sparse_ids = sparse_ops.indicators_to_sparse_ids(
indicators, dtype=dtypes.int16)
with self.cached_session():
_assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=np.array((2, 0, 3), dtype=np.int16),
dense_shape=(2, 2),
), sparse_ids.eval())
def test_indicators_to_sparse_ids_ignore_value(self):
indicators = (
((-1, -1, 10, -1), (-1, -1, -1, -1)),
((11, -1, -1, 12), (-1, -1, 13, -1)),
)
sparse_ids = sparse_ops.indicators_to_sparse_ids(
indicators, ignore_value=-1)
with self.cached_session():
_assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
indices=((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
values=(2, 0, 3, 2),
dense_shape=(2, 2, 2),
), sparse_ids.eval())
def test_string_indicators_to_sparse_ids(self):
indicators = (
(('', '', 'A', ''), ('', '', '', '')),
(('B', '', '', 'C'), ('', '', 'D', '')),
)
sparse_ids = sparse_ops.indicators_to_sparse_ids(indicators)
with self.cached_session():
_assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
indices=((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
values=(2, 0, 3, 2),
dense_shape=(2, 2, 2),
), sparse_ids.eval())
def test_string_indicators_to_sparse_ids_ignore_value(self):
indicators = (
(('x', 'x', 'A', 'x'), ('x', 'x', 'x', 'x')),
(('B', 'x', 'x', 'C'), ('x', 'x', 'D', 'x')),
)
sparse_ids = sparse_ops.indicators_to_sparse_ids(
indicators, ignore_value='x')
with self.cached_session():
_assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
indices=((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
values=(2, 0, 3, 2),
dense_shape=(2, 2, 2),
), sparse_ids.eval())
def test_indicators_to_sparse_ids_unknown_3d_shape(self):
indicators_values = (
((0, 0, 1, 0), (0, 0, 0, 0)),
((1, 0, 0, 1), (0, 0, 1, 0)),
)
indicators = array_ops.placeholder(
dtype=dtypes.int32, shape=(None, None, None))
sparse_ids = sparse_ops.indicators_to_sparse_ids(indicators)
with self.cached_session():
_assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
indices=((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
values=(2, 0, 3, 2),
dense_shape=(2, 2, 2),
), sparse_ids.eval(feed_dict={indicators: indicators_values}))
def test_indicators_to_sparse_ids_unknown_rank(self):
indicators_values = (
((0, 0, 1, 0), (0, 0, 0, 0)),
((1, 0, 0, 1), (0, 0, 1, 0)),
)
indicators = array_ops.placeholder(dtype=dtypes.int32)
sparse_ids = sparse_ops.indicators_to_sparse_ids(indicators)
with self.cached_session():
_assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
indices=((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
values=(2, 0, 3, 2),
dense_shape=(2, 2, 2),
), sparse_ids.eval(feed_dict={indicators: indicators_values}))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/layers/python/ops/sparse_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for sparse cross operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.layers.ops import gen_sparse_feature_cross_op
from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import resource_loader
_sparse_feature_cross_op = loader.load_op_library(
resource_loader.get_path_to_datafile("_sparse_feature_cross_op.so"))
# Default hash key for the FingerprintCat64.
SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY = 0xDECAFCAFFE
@deprecated_arg_values(
"2016-11-20",
"The default behavior of sparse_feature_cross is changing, the default\n"
"value for hash_key will change to SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY.\n"
"From that point on sparse_feature_cross will always use FingerprintCat64\n"
"to concatenate the feature fingerprints. And the underlying\n"
"_sparse_feature_cross_op.sparse_feature_cross operation will be marked\n"
"as deprecated.",
hash_key=None)
def sparse_feature_cross(inputs, hashed_output=False, num_buckets=0,
name=None, hash_key=None):
"""Crosses a list of Tensor or SparseTensor objects.
See sparse_feature_cross_kernel.cc for more details.
Args:
inputs: List of `SparseTensor` or `Tensor` to be crossed.
hashed_output: If true, returns the hash of the cross instead of the string.
This allows us to avoid string manipulations.
num_buckets: Used only if hashed_output is true. The output is
hashed_value % num_buckets if num_buckets > 0, else hashed_value.
name: A name prefix for the returned tensors (optional).
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the fingerprints of the crossed features in
SparseFeatureCrossOp.
The default value is None, but will become
SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY after 2016-11-20 (optional).
Returns:
A `SparseTensor` with the crossed features.
Return type is string if hashed_output=False, int64 otherwise.
Raises:
TypeError: If the inputs aren't either SparseTensor or Tensor.
"""
if not isinstance(inputs, list):
raise TypeError("Inputs must be a list")
if not all(isinstance(i, sparse_tensor.SparseTensor) or
isinstance(i, ops.Tensor) for i in inputs):
raise TypeError("All inputs must be SparseTensors")
sparse_inputs = [i for i in inputs
if isinstance(i, sparse_tensor.SparseTensor)]
dense_inputs = [i for i in inputs
if not isinstance(i, sparse_tensor.SparseTensor)]
indices = [sp_input.indices for sp_input in sparse_inputs]
values = [sp_input.values for sp_input in sparse_inputs]
shapes = [sp_input.dense_shape for sp_input in sparse_inputs]
out_type = dtypes.int64 if hashed_output else dtypes.string
internal_type = dtypes.string
for i in range(len(values)):
if values[i].dtype != dtypes.string:
values[i] = math_ops.cast(values[i], dtypes.int64)
internal_type = dtypes.int64
for i in range(len(dense_inputs)):
if dense_inputs[i].dtype != dtypes.string:
dense_inputs[i] = math_ops.cast(dense_inputs[i], dtypes.int64)
internal_type = dtypes.int64
if hash_key:
indices_out, values_out, shape_out = (
gen_sparse_feature_cross_op.sparse_feature_cross_v2(
indices,
values,
shapes,
dense_inputs,
hashed_output,
num_buckets,
hash_key=hash_key,
out_type=out_type,
internal_type=internal_type,
name=name))
else:
indices_out, values_out, shape_out = (
gen_sparse_feature_cross_op.sparse_feature_cross(
indices,
values,
shapes,
dense_inputs,
hashed_output,
num_buckets,
out_type=out_type,
internal_type=internal_type,
name=name))
return sparse_tensor.SparseTensor(indices_out, values_out, shape_out)
ops.NotDifferentiable("SparseFeatureCross")
ops.NotDifferentiable("SparseFeatureCrossV2")
|
tensorflow-master
|
tensorflow/contrib/layers/python/ops/sparse_feature_cross_op.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for variants of ops in tf.nn.
@@alpha_dropout
@@conv1d_transpose
@@deprecated_flipped_softmax_cross_entropy_with_logits
@@deprecated_flipped_sparse_softmax_cross_entropy_with_logits
@@deprecated_flipped_sigmoid_cross_entropy_with_logits
@@nth_element
@@rank_sampled_softmax_loss
@@sampled_sparse_softmax_loss
@@scaled_softplus
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.nn.python.ops.alpha_dropout import *
from tensorflow.contrib.nn.python.ops.cross_entropy import *
from tensorflow.contrib.nn.python.ops.sampling_ops import *
from tensorflow.contrib.nn.python.ops.scaled_softplus import *
from tensorflow.python.ops.nn_ops import conv1d_transpose
from tensorflow.python.ops.nn_ops import nth_element
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
tensorflow-master
|
tensorflow/contrib/nn/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for variants of ops in tf.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/nn/python/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deprecated Wrappers for Neural Net (NN) Cross Entropy Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import nn
# TODO(b/33392402): Formally deprecate this API.
# After LSC (see b/33392402#comment1), this API will be deprecated and callers
# will be suggested to use the (updated version of)
# tf.nn.softmax_cross_entropy_with_logits.
def deprecated_flipped_softmax_cross_entropy_with_logits(logits,
labels,
dim=-1,
name=None):
"""Computes softmax cross entropy between `logits` and `labels`.
This function differs from tf.nn.softmax_cross_entropy_with_logits only in
the argument order.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
`logits` and `labels` must have the same shape `[batch_size, num_classes]`
and the same dtype (either `float16`, `float32`, or `float64`).
Args:
logits: Unscaled log probabilities.
labels: Each row `labels[i]` must be a valid probability distribution.
dim: The class dimension. Defaulted to -1 which is the last dimension.
name: A name for the operation (optional).
Returns:
A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the
softmax cross entropy loss.
"""
return nn.softmax_cross_entropy_with_logits(
labels=labels, logits=logits, dim=dim, name=name)
# TODO(b/33392402): Formally deprecate this API.
# After LSC (see b/33392402#comment1), this API will be deprecated and callers
# will be suggested to use the (updated version of)
# tf.nn.sparse_softmax_cross_entropy_with_logits.
def deprecated_flipped_sparse_softmax_cross_entropy_with_logits(logits,
labels,
name=None):
"""Computes sparse softmax cross entropy between `logits` and `labels`.
This function differs from tf.nn.sparse_softmax_cross_entropy_with_logits
only in the argument order.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** For this operation, the probability of a given label is considered
exclusive. That is, soft classes are not allowed, and the `labels` vector
must provide a single specific index for the true class for each row of
`logits` (each minibatch entry). For soft softmax classification with
a probability distribution for each entry, see
`softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a softmax
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits of shape `[batch_size, num_classes]` and
labels of shape `[batch_size]`. But higher dimensions are supported.
Args:
logits: Unscaled log probabilities of rank `r` and shape
`[d_0, d_1, ..., d_{r-2}, num_classes]` and dtype `float32` or `float64`.
labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-2}]` and dtype `int32` or
`int64`. Each entry in `labels` must be an index in `[0, num_classes)`.
Other values will raise an exception when this op is run on CPU, and
return `NaN` for the corresponding loss and gradient rows
on GPU.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `labels` and of the same type as `logits`
with the softmax cross entropy loss.
Raises:
ValueError: If logits are scalars (need to have rank >= 1) or if the rank
of the labels is not equal to the rank of the logits minus one.
"""
return nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name=name)
# TODO(b/33392402): Formally deprecate this API.
# After LSC (see b/33392402#comment1), this API will be deprecated and callers
# will be suggested to use the (updated version of)
# tf.nn.sigmoid_cross_entropy_with_logits.
def deprecated_flipped_sigmoid_cross_entropy_with_logits(logits,
targets,
name=None):
"""Computes sigmoid cross entropy given `logits`.
This function differs from tf.nn.sigmoid_cross_entropy_with_logits only in
the argument order.
Measures the probability error in discrete classification tasks in which each
class is independent and not mutually exclusive. For instance, one could
perform multilabel classification where a picture can contain both an elephant
and a dog at the same time.
For brevity, let `x = logits`, `z = targets`. The logistic loss is
z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
= z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
= z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
= z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)))
= (1 - z) * x + log(1 + exp(-x))
= x - x * z + log(1 + exp(-x))
For x < 0, to avoid overflow in exp(-x), we reformulate the above
x - x * z + log(1 + exp(-x))
= log(exp(x)) - x * z + log(1 + exp(-x))
= - x * z + log(1 + exp(x))
Hence, to ensure stability and avoid overflow, the implementation uses this
equivalent formulation
max(x, 0) - x * z + log(1 + exp(-abs(x)))
`logits` and `targets` must have the same type and shape.
Args:
logits: A `Tensor` of type `float32` or `float64`.
targets: A `Tensor` of the same type and shape as `logits`.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `logits` with the componentwise
logistic losses.
Raises:
ValueError: If `logits` and `targets` do not have the same shape.
"""
return nn.sigmoid_cross_entropy_with_logits(
labels=targets, logits=logits, name=name)
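# A NumPy check that the stable formulation quoted above,
# max(x, 0) - x*z + log(1 + exp(-abs(x))), matches the naive loss
# -(z*log(sigmoid(x)) + (1-z)*log(1-sigmoid(x))) for moderate logits.
# Illustration only, not part of the module.
import numpy as np
x = np.array([-3.0, -0.5, 0.0, 2.0])  # logits
z = np.array([0.0, 1.0, 1.0, 0.0])    # targets
p = 1.0 / (1.0 + np.exp(-x))
naive = -(z * np.log(p) + (1.0 - z) * np.log(1.0 - p))
stable = np.maximum(x, 0.0) - x * z + np.log1p(np.exp(-np.abs(x)))
assert np.allclose(naive, stable)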
|
tensorflow-master
|
tensorflow/contrib/nn/python/ops/cross_entropy.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for forward_ad.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.nn.python.ops import fwd_gradients
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ForwardAdTest(test.TestCase):
def testSquare(self):
x = constant_op.constant(1.)
y = math_ops.square(x)
grad_x = 3.
dydx_tf = fwd_gradients.fwd_gradients([y], [x], [grad_x])[0]
dydx_py = 2. * grad_x
with self.cached_session() as sess:
self.assertAllClose(sess.run(dydx_tf), dydx_py, 1e-6)
def testGather(self):
x = constant_op.constant([1., 2., 3.])
y = array_ops.gather(x, [0, 1])
y.set_shape([2])
dydx = fwd_gradients.fwd_gradients([y], [x], assert_unused=True)
with self.cached_session() as sess:
sess.run(dydx)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/nn/python/ops/fwd_gradients_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Forward-mode derivatives."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops.gradients_impl import gradients
def fwd_gradients(ys, xs, grad_xs=None, assert_unused=False):
"""Computes forward-mode derivatives.
This is accomplished in pure-python using tensorflow's existing (reverse-mode)
gradients. There is additional overhead on graph construction, but runtime
performance should be equal to a manual implementation [citation needed].
See https://j-towns.github.io/2017/06/12/A-new-trick.html and
https://github.com/HIPS/autograd/pull/175 for the original discussion of this
method, and https://github.com/renmengye/tensorflow-forward-ad for a "direct"
implementation.
Args:
ys: A list of tensors.
xs: A list of tensors.
grad_xs: An optional list of tensors. If provided, it must have the same
length as xs, with shapes compatible with xs.
assert_unused: Add assertions that intermediate values are not computed.
Returns:
A list of tensors of the same shapes as ys. The directional derivatives of
ys with respect to xs in the direction grad_xs. Leaving grad_xs unspecified
is equivalent to passing in 1s for each x in xs.
"""
# This version of forward-mode autodiff is based on code by Tim Cooijmans
# and handles list arguments and certain special cases such as when the
ys don't depend on one or more of the xs, and when tf.IndexedSlices are
# generated by the first tf.gradients call.
us = [array_ops.zeros_like(y) + float('nan') for y in ys]
dydxs = gradients(ys, xs, grad_ys=us)
# Convert the IndexedSlices that tf.gradients sometimes returns into plain tensors.
dydxs = [ops.convert_to_tensor(dydx) if isinstance(dydx, ops.IndexedSlices)
else dydx for dydx in dydxs]
if assert_unused:
with ops.control_dependencies(dydxs):
assert_unused = control_flow_ops.Assert(False, [1], name='fwd_gradients')
with ops.control_dependencies([assert_unused]):
dydxs = array_ops.identity_n(dydxs)
dydxs = [array_ops.zeros_like(x) if dydx is None else dydx
for x, dydx in zip(xs, dydxs)]
for x, dydx in zip(xs, dydxs):
dydx.set_shape(x.shape)
dysdx = gradients(dydxs, us, grad_ys=grad_xs)
return dysdx
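# A NumPy sketch of the identity behind the double reverse-mode trick above.
# Reverse mode maps a seed u to J^T u; because that map is linear in u,
# differentiating it once more against a direction v recovers the forward-mode
# product J v. The second gradient is written out by hand for a linear map
# here, purely to show the algebra; no autodiff is involved.
import numpy as np
rng = np.random.RandomState(0)
J = rng.randn(3, 4)   # Jacobian of some ys (size 3) w.r.t. xs (size 4).
v = rng.randn(4)      # The direction grad_xs.
# J.T.dot(u) is what gradients(ys, xs, grad_ys=u) computes; the gradient of
# its inner product with v, taken w.r.t. u, is J.dot(v), independent of u.
jvp = np.array([J.T.dot(np.eye(3)[i]).dot(v) for i in range(3)])
assert np.allclose(jvp, J.dot(v))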
|
tensorflow-master
|
tensorflow/contrib/nn/python/ops/fwd_gradients.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for scaled softplus, a smoothed version of ReLU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
def _reduce_and_reshape_grad(g, t):
"""Returns the gradient, sum-reduced and reshaped to `t`'s shape."""
shape = array_ops.shape(t)
g_shape = array_ops.shape(g)
bcast_dims, _ = gen_array_ops.broadcast_gradient_args(shape, g_shape)
return array_ops.reshape(math_ops.reduce_sum(g, bcast_dims), shape)
def scaled_softplus(x, alpha, clip=None, name=None):
"""Returns `y = alpha * ln(1 + exp(x / alpha))` or `min(y, clip)`.
This can be seen as a softplus applied to the scaled input, with the output
appropriately scaled. As `alpha` tends to 0, `scaled_softplus(x, alpha)` tends
to `relu(x)`, and `scaled_softplus(x, alpha, clip=6)` tends to `relu6(x)`.
The clipping is optional.
Note: the gradient for this operation is defined to depend on the backprop
inputs as well as the outputs of this operation.
Args:
x: A `Tensor` of inputs.
alpha: A `Tensor`, indicating the amount of smoothness. The caller
must ensure that `alpha > 0`.
clip: (optional) A `Tensor`, the upper bound to clip the values.
name: A name for the scope of the operations (optional).
Returns:
A tensor of the size and type determined by broadcasting of the inputs.
"""
clipping = clip is not None
with ops.name_scope(name, 'scaled_softplus',
[x, alpha] + ([clip] if clipping else [])):
x = ops.convert_to_tensor(x, name='x')
dtype = x.dtype
alpha = ops.convert_to_tensor(alpha, dtype=dtype, name='alpha')
# Compute the forward value.
y = alpha * nn.softplus(x / alpha)
if clipping:
clip = ops.convert_to_tensor(clip, dtype=dtype, name='clip')
y = math_ops.minimum(y, clip)
def _grad(op, g):
"""Backprop for scaled softplus, with optional clipping."""
y, x, alpha = op.inputs[:3]
# Prevent the memory-expensive computations from happening before g is
# available.
with ops.control_dependencies([g]):
y = array_ops.identity(y)
clip_grad = []
if clipping:
clip = op.inputs[3]
unclipped = math_ops.cast(y < clip, g.dtype)
clip_grad = [_reduce_and_reshape_grad(g * (1. - unclipped), clip)]
g *= unclipped
y /= alpha
emy = math_ops.exp(-y)
dy_dx = 1. - emy
# The eps below avoids log(0). Note that t*log(t) -> 0 as t->0.
eps = 1e-8
dy_dalpha = y * emy - dy_dx * math_ops.log(dy_dx + eps)
# Backprop to the actual inputs, but not to the output.
return [None,
_reduce_and_reshape_grad(g * dy_dx, x),
_reduce_and_reshape_grad(g * dy_dalpha, alpha)] + clip_grad
if clipping:
@function.Defun(dtype, dtype, dtype, dtype,
func_name='ScaledSoftplusHelper_clip_%s' % dtype.name,
shape_func=lambda op: [op.inputs[0].shape],
python_grad_func=_grad)
def _forward_helper_clip(y, x, alpha, clip):
del x, alpha, clip # Unused.
return y
return _forward_helper_clip(y, x, alpha, clip)
# No clipping.
@function.Defun(dtype, dtype, dtype,
func_name='ScaledSoftplusHelper_%s' % dtype.name,
shape_func=lambda op: [op.inputs[0].shape],
python_grad_func=_grad)
def _forward_helper(y, x, alpha):
del x, alpha # Unused.
return y
return _forward_helper(y, x, alpha)
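# A NumPy sketch of the forward formula above, showing that
# alpha * log(1 + exp(x / alpha)) approaches relu(x) as alpha shrinks, and
# relu6(x) when clip=6. Illustration only; it does not reproduce the custom
# gradient defined by the library function.
import numpy as np
x = np.array([-2.0, -0.1, 0.0, 0.5, 8.0])
for a in (1.0, 0.1, 0.01):
  y = np.minimum(a * np.logaddexp(0.0, x / a), 6.0)  # stable log(1 + exp(.))
  print(a, y)
# As a -> 0 the printed rows approach [0, 0, 0, 0.5, 6], i.e. relu6(x).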
|
tensorflow-master
|
tensorflow/contrib/nn/python/ops/scaled_softplus.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sampling_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.nn.python.ops import sampling_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
class RankSampledSoftmaxLossTest(test.TestCase):
def setUp(self):
self._sampled = [3, 4, 5, 6, 7]
self._num_sampled = len(self._sampled)
# Because values of all matrices increase with indices, logits increase with
# class id. So, for the above sampled classes, adaptive sampling will select
# these resampled classes.
self._resampled = [5, 6, 7]
self._num_resampled = len(self._resampled)
self._num_classes = 10
self._num_true = 2
self._sampled_values = (self._sampled, [[0.5], [0.5]],
[0.5, 0.5, 0.5, 0.5, 0.5])
self._resampled_values = (self._resampled, [[0.5], [0.5]], [0.5, 0.5, 0.5])
self._remove_accidental_hits = False
self._embed_dim = 5
self._batch_size = 2
def _weights(self):
return constant_op.constant([
[0.0, 0.1, 0.2, 0.3, 0.4],
[1.0, 1.1, 1.2, 1.3, 1.4],
[2.0, 2.1, 2.2, 2.3, 2.4],
[3.0, 3.1, 3.2, 3.3, 3.4],
[4.0, 4.1, 4.2, 4.3, 4.4],
[5.0, 5.1, 5.2, 5.3, 5.4],
[6.0, 6.1, 6.2, 6.3, 6.4],
[7.0, 7.1, 7.2, 7.3, 7.4],
[8.0, 8.1, 8.2, 8.3, 8.4],
[9.0, 9.1, 9.2, 9.3, 9.4],
])
def _div_sharded_weights(self):
return [
constant_op.constant([
[0.0, 0.1, 0.2, 0.3, 0.4],
[1.0, 1.1, 1.2, 1.3, 1.4],
]),
constant_op.constant([
[2.0, 2.1, 2.2, 2.3, 2.4],
[3.0, 3.1, 3.2, 3.3, 3.4],
]),
constant_op.constant([
[4.0, 4.1, 4.2, 4.3, 4.4],
[5.0, 5.1, 5.2, 5.3, 5.4],
]),
constant_op.constant([
[6.0, 6.1, 6.2, 6.3, 6.4],
[7.0, 7.1, 7.2, 7.3, 7.4],
]),
constant_op.constant([
[8.0, 8.1, 8.2, 8.3, 8.4],
[9.0, 9.1, 9.2, 9.3, 9.4],
]),
]
def _mod_sharded_weights(self):
return [
constant_op.constant([
[0.0, 0.1, 0.2, 0.3, 0.4],
[5.0, 5.1, 5.2, 5.3, 5.4],
]),
constant_op.constant([
[1.0, 1.1, 1.2, 1.3, 1.4],
[6.0, 6.1, 6.2, 6.3, 6.4],
]),
constant_op.constant([
[2.0, 2.1, 2.2, 2.3, 2.4],
[7.0, 7.1, 7.2, 7.3, 7.4],
]),
constant_op.constant([
[3.0, 3.1, 3.2, 3.3, 3.4],
[8.0, 8.1, 8.2, 8.3, 8.4],
]),
constant_op.constant([
[4.0, 4.1, 4.2, 4.3, 4.4],
[9.0, 9.1, 9.2, 9.3, 9.4],
]),
]
def _biases(self):
return constant_op.constant(
[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
def _div_sharded_biases(self):
return [
constant_op.constant([0.0, 0.1]),
constant_op.constant([0.2, 0.3]),
constant_op.constant([0.4, 0.5]),
constant_op.constant([0.6, 0.7]),
constant_op.constant([0.8, 0.9]),
]
def _mod_sharded_biases(self):
return [
constant_op.constant([0.0, 0.5]),
constant_op.constant([0.1, 0.6]),
constant_op.constant([0.2, 0.7]),
constant_op.constant([0.3, 0.8]),
constant_op.constant([0.4, 0.9]),
]
def _labels(self):
return constant_op.constant(
[[0, 1], [1, 2]],
shape=(self._batch_size, self._num_true),
name='labels',
dtype=dtypes.int64)
def _inputs(self):
return constant_op.constant(
[
[0., 1., 2., 3., 4.],
[10., 11., 12., 13., 14.],
],
shape=(self._batch_size, self._embed_dim),
name='inputs')
def testInvalidNumSampled0(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(
ValueError,
r'num_resampled \(3\) must be less than num_sampled \(3\)'):
sampling_ops.rank_sampled_softmax_loss(
weights=self._weights(),
biases=self._biases(),
labels=self._labels(),
inputs=self._inputs(),
num_sampled=3,
num_resampled=3,
num_classes=self._num_classes,
num_true=self._num_true,
sampled_values=None,
resampling_temperature=1.,
remove_accidental_hits=True,
partition_strategy='div')
def testInvalidNumSampled1(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(
ValueError,
r'num_resampled \(3\) must be less than num_sampled \(2\)'):
sampling_ops.rank_sampled_softmax_loss(
weights=self._weights(),
biases=self._biases(),
labels=self._labels(),
inputs=self._inputs(),
num_sampled=2,
num_resampled=3,
num_classes=self._num_classes,
num_true=self._num_true,
sampled_values=None,
resampling_temperature=1.,
remove_accidental_hits=True,
partition_strategy='div')
def testMissingPartitionStrategy(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError,
r'unsupported partition_strategy \(None\)'):
sampling_ops.rank_sampled_softmax_loss(
weights=self._weights(),
biases=self._biases(),
labels=self._labels(),
inputs=self._inputs(),
num_sampled=2,
num_resampled=1,
num_classes=self._num_classes,
num_true=self._num_true,
sampled_values=None,
resampling_temperature=1.,
remove_accidental_hits=True,
partition_strategy=None)
def _testCompareWithNN(self, weights, biases, partition_strategy):
with ops.Graph().as_default():
loss = sampling_ops.rank_sampled_softmax_loss(
weights=weights(),
biases=biases(),
labels=self._labels(),
inputs=self._inputs(),
num_sampled=self._num_sampled,
num_resampled=self._num_resampled,
num_classes=self._num_classes,
num_true=self._num_true,
sampled_values=self._sampled_values,
resampling_temperature=1.,
remove_accidental_hits=self._remove_accidental_hits,
partition_strategy=partition_strategy)
loss_nn = nn.sampled_softmax_loss(
weights=weights(),
biases=biases(),
labels=self._labels(),
inputs=self._inputs(),
num_sampled=self._num_resampled,
num_classes=self._num_classes,
num_true=self._num_true,
sampled_values=self._resampled_values,
remove_accidental_hits=self._remove_accidental_hits,
partition_strategy=partition_strategy)
with self.cached_session() as sess:
loss_val = sess.run(loss)
loss_nn_val = sess.run(loss_nn)
self.assertAllClose(loss_val, loss_nn_val)
def testCompareWithNNUnsharded(self):
self._testCompareWithNN(self._weights, self._biases, 'div')
def testCompareWithNNShardWeightsDiv(self):
self._testCompareWithNN(self._div_sharded_weights, self._biases, 'div')
def testCompareWithNNShardWeightsAndBiasesDiv(self):
self._testCompareWithNN(self._div_sharded_weights, self._div_sharded_biases,
'div')
def testCompareWithNNShardWeightsMod(self):
self._testCompareWithNN(self._mod_sharded_weights, self._biases, 'mod')
def testCompareWithNNShardWeightsAndBiasesMod(self):
self._testCompareWithNN(self._mod_sharded_weights, self._mod_sharded_biases,
'mod')
def _testCompareWithNNTemperature(self, temperature, resampled):
weights = [[1., 2.], [3., 4.]] # two sampled classes
inputs = [[6., -5. / 2.], [-11., 21. / 2.]]
# Let w0, w1 = weights of sampled classes (biases set to 0 for simplicity)
# Let x0, x1 = inputs
# logits:
# w0.x0 = 1
# w0.x1 = 10
# w1.x0 = 8
# w1.x1 = 9
# Resampling 1 class with temperature = t will pick the larger of:
# exp(1/t) + exp(10/t) ==> w0, for values of t < 2.12
# exp(8/t) + exp(9/t) ==> w1, for values of t > 2.13
num_sampled = 2
num_resampled = 1
num_classes = 2
num_true = 1
sampled_values = [0, 1], [[1.], [1.]], [1., 1.]
resampled_values = [resampled], [[1.], [1.]], [1.]
remove_accidental_hits = False
with ops.Graph().as_default():
weights = constant_op.constant(weights)
biases = constant_op.constant([0., 0.])
labels = constant_op.constant([[0], [1]], dtype=dtypes.int64)
inputs = constant_op.constant(inputs)
loss = sampling_ops.rank_sampled_softmax_loss(
weights=weights,
biases=biases,
labels=labels,
inputs=inputs,
num_sampled=num_sampled,
num_resampled=num_resampled,
num_classes=num_classes,
num_true=num_true,
sampled_values=sampled_values,
resampling_temperature=constant_op.constant(temperature),
remove_accidental_hits=remove_accidental_hits,
partition_strategy='div')
loss_nn = nn.sampled_softmax_loss(
weights=weights,
biases=biases,
labels=labels,
inputs=inputs,
num_sampled=num_resampled,
num_classes=num_classes,
num_true=num_true,
sampled_values=resampled_values,
remove_accidental_hits=remove_accidental_hits,
partition_strategy='div')
with self.cached_session() as sess:
loss_val = sess.run(loss)
loss_nn_val = sess.run(loss_nn)
self.assertAllClose(loss_val, loss_nn_val)
def testCompareWithNNTemperatureLo1(self):
self._testCompareWithNNTemperature(1., 0)
def testCompareWithNNTemperatureLo2(self):
self._testCompareWithNNTemperature(2.12, 0)
def testCompareWithNNTemperatureHi1(self):
self._testCompareWithNNTemperature(2.13, 1)
def testCompareWithNNTemperatureHi2(self):
self._testCompareWithNNTemperature(3., 1)
if __name__ == '__main__':
test.main()
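# A quick NumPy check of the temperature threshold quoted in the comment
# inside _testCompareWithNNTemperature above: with per-class logits {1, 10}
# for w0 and {8, 9} for w1, w0 has the larger log-sum-exp score below roughly
# t = 2.12 and w1 wins above roughly t = 2.13. Illustration only.
import numpy as np
for t in (1.0, 2.12, 2.13, 3.0):
  score_w0 = np.log(np.exp(1.0 / t) + np.exp(10.0 / t))
  score_w1 = np.log(np.exp(8.0 / t) + np.exp(9.0 / t))
  print(t, score_w0 > score_w1)
# Prints True for t = 1.0 and 2.12, and False for t = 2.13 and 3.0.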
|
tensorflow-master
|
tensorflow/contrib/nn/python/ops/sampling_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sampling_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.nn.python.ops.alpha_dropout import alpha_dropout
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.platform import test
class AlphaDropoutTest(test.TestCase):
def testAlphaDropout(self):
x_dim, y_dim = 40, 30
for keep_prob in [0.1, 0.5, 0.8]:
with self.cached_session():
t = random_ops.random_normal([x_dim, y_dim])
output = alpha_dropout(t, keep_prob)
self.assertEqual([x_dim, y_dim], output.get_shape())
t_mean, t_std = nn_impl.moments(t, axes=[0, 1])
output_mean, output_std = nn_impl.moments(output, axes=[0, 1])
self.assertLess(abs(t_mean.eval() - output_mean.eval()), 0.1)
self.assertLess(abs(t_std.eval() - output_std.eval()), 0.1)
def testShapedDropoutShapeError(self):
# Runs shaped dropout and verifies an error is thrown on misshapen noise.
x_dim = 40
y_dim = 30
keep_prob = 0.5
t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
with self.assertRaises(ValueError):
_ = alpha_dropout(t, keep_prob, noise_shape=[x_dim, y_dim + 10])
with self.assertRaises(ValueError):
_ = alpha_dropout(t, keep_prob, noise_shape=[x_dim, y_dim, 5])
with self.assertRaises(ValueError):
_ = alpha_dropout(t, keep_prob, noise_shape=[x_dim + 3])
with self.assertRaises(ValueError):
_ = alpha_dropout(t, keep_prob, noise_shape=[x_dim])
# test that broadcasting proceeds
_ = alpha_dropout(t, keep_prob, noise_shape=[y_dim])
_ = alpha_dropout(t, keep_prob, noise_shape=[1, y_dim])
_ = alpha_dropout(t, keep_prob, noise_shape=[x_dim, 1])
_ = alpha_dropout(t, keep_prob, noise_shape=[1, 1])
def testInvalidKeepProb(self):
x_dim, y_dim = 40, 30
t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
with self.assertRaises(ValueError):
alpha_dropout(t, -1.0)
with self.assertRaises(ValueError):
alpha_dropout(t, 1.1)
with self.assertRaises(ValueError):
alpha_dropout(t, [0.0, 1.0])
with self.assertRaises(ValueError):
alpha_dropout(t, array_ops.placeholder(dtypes.float64))
with self.assertRaises(ValueError):
alpha_dropout(t, array_ops.placeholder(dtypes.float32, shape=[2]))
def testNoDropoutFast(self):
x = array_ops.zeros((5,))
for p in 1, constant_op.constant(1.0):
y = alpha_dropout(x, keep_prob=p)
self.assertTrue(x is y)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/nn/python/ops/alpha_dropout_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for scaled_softplus.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.nn.python.ops.scaled_softplus import scaled_softplus
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test
class ScaledSoftplusTest(test.TestCase):
def test(self):
np.random.seed(1) # Make it reproducible.
x = np.random.randn(3, 4).astype(np.float32)
x64 = np.random.randn(3, 4).astype(np.float64)
alpha = np.random.rand() + 0.01
clip = np.float32(0.1)
y = np.minimum(alpha * np.log(1. + np.exp(x / alpha)), clip)
y64 = alpha * np.log(1. + np.exp(x64 / alpha))
with self.test_session(use_gpu=True) as sess:
z = scaled_softplus(constant_op.constant(x), alpha, clip)
z64 = scaled_softplus(constant_op.constant(x64), alpha)
z, z64 = sess.run([z, z64])
eps = 1e-6
self.assertAllClose(y, z, eps)
self.assertAllClose(y64, z64, eps)
def testGradient(self):
np.random.seed(1) # Make it reproducible.
x_shape = [5, 10]
x_np = np.random.randn(*x_shape).astype(np.float32)
alpha_np = np.float32(np.random.rand(1, x_shape[1]) + 0.01)
clip_np = np.float32(np.random.rand(x_shape[0], 1) * 5.)
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np)
alpha_tf = constant_op.constant(alpha_np)
clip_tf = constant_op.constant(clip_np)
y_tf = scaled_softplus(x_tf, alpha_tf)
z_tf = scaled_softplus(x_tf, alpha_tf, clip_tf * 0.1)
err = gradient_checker.compute_gradient_error([x_tf, alpha_tf],
[x_shape, alpha_np.shape],
y_tf, x_shape,
[x_np, alpha_np],
delta=0.002)
err_clip = gradient_checker.compute_gradient_error(
[x_tf, alpha_tf, clip_tf],
[x_shape, alpha_np.shape, clip_np.shape],
z_tf, x_shape,
[x_np, alpha_np, clip_np],
delta=0.002)
eps = 2e-4
self.assertLess(err, eps)
self.assertLess(err_clip, eps)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/nn/python/ops/scaled_softplus_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for variants of ops in tf.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/nn/python/ops/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops related to candidate sampling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
def _rank_resample(weights, biases, inputs, sampled_values, num_resampled,
resampling_temperature, partition_strategy):
"""A helper function for rank_sampled_softmax_loss.
This computes, for each i in `sampled_values`,
log(sum_j exp((w_i * x_j + b_i) / resampling_temperature))
where w_i, b_i are the weight and bias of the i-th class, respectively,
and j ranges over the rows of `inputs`. For efficiency, we rearrange the
computation to
log(sum_j exp(w_i * (x_j / resampling_temperature))) +
b_i / resampling_temperature.
This translates to the following batched computation using tensorflow ops:
reduce_logsumexp(matmul(embeddings,
transpose(inputs / resampling_temperature))) +
biases / resampling_temperature
The computation of the first term is colocated with the embeddings using
`transform_fn` in `embedding_ops._embedding_lookup_and_transform`. The second
term, not the bottleneck, is computed at the worker.
Args:
weights: From `rank_sampled_softmax_loss`.
biases: From `rank_sampled_softmax_loss`.
inputs: From `rank_sampled_softmax_loss`.
sampled_values: A tuple of (`sampled_candidates`, `true_expected_count`,
`sampled_expected_count`) returned by a `*_candidate_sampler` function.
num_resampled: An `int`. This many values are selected from
`sampled_values` using the adaptive resampling algorithm. The caller
must ensure that `num_resampled` is less than the size of
`sampled_values`.
resampling_temperature: A scalar `Tensor` with the temperature parameter
for the adaptive resampling algorithm.
partition_strategy: From `rank_sampled_softmax_loss`.
Returns:
A tuple of (`resampled_candidates`, `true_expected_count`,
`resampled_expected_count`), similar to `sampled_values` but sampled
down to `num_resampled` values.
"""
# This code supports passing a Tensor for num_resampled, but since it is only
# called with an int, that's what we specify in the arg list. If this
# function is ever externalized, we should change the doc to support Tensor.
sampled, true_expected_count, sampled_expected_count = sampled_values
sampled = math_ops.cast(array_ops.stop_gradient(sampled), dtypes.int64)
true_expected_count = array_ops.stop_gradient(true_expected_count)
sampled_expected_count = array_ops.stop_gradient(sampled_expected_count)
reweighted_inputs = inputs / resampling_temperature
def logsumexp_logit(embeddings):
return math_ops.reduce_logsumexp(
math_ops.matmul(embeddings, reweighted_inputs, transpose_b=True),
axis=1,
keepdims=False)
# Calling this protected form of embedding_lookup allows co-locating
# the logsumexp computation with the partitioned weights, which yields
# a large speedup in practice.
sampled_logits = embedding_ops._embedding_lookup_and_transform( # pylint: disable=protected-access
weights, sampled, partition_strategy, transform_fn=logsumexp_logit)
sampled_b = array_ops.reshape(
embedding_ops.embedding_lookup(biases, sampled, partition_strategy), [-1])
sampled_logits += sampled_b / resampling_temperature
_, resampled_indices = nn.top_k(sampled_logits, k=num_resampled, sorted=False)
resampled = array_ops.gather(sampled, indices=resampled_indices)
resampled_expected_count = array_ops.gather(
sampled_expected_count, indices=resampled_indices)
return resampled, true_expected_count, resampled_expected_count
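# A NumPy sketch of the per-candidate score computed above,
# log(sum_j exp((w_i . x_j + b_i) / temperature)), using the rearranged form
# from the docstring. Shapes and names are illustrative; sharded weights and
# the embedding-lookup co-location are not modeled.
import numpy as np
rng = np.random.RandomState(0)
weights = rng.randn(5, 3)   # [num_sampled, dim]
biases = rng.randn(5)       # [num_sampled]
inputs = rng.randn(2, 3)    # [batch, dim]
temperature = 2.0
logits = weights.dot((inputs / temperature).T)           # [num_sampled, batch]
scores = np.log(np.sum(np.exp(logits), axis=1)) + biases / temperature
top2 = np.argsort(-scores)[:2]  # the candidates kept when num_resampled is 2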
def rank_sampled_softmax_loss(weights,
biases,
labels,
inputs,
num_sampled,
num_resampled,
num_classes,
num_true,
sampled_values,
resampling_temperature,
remove_accidental_hits,
partition_strategy,
name=None):
"""Computes softmax loss using rank-based adaptive resampling.
This has been shown to improve rank loss after training compared to
`tf.nn.sampled_softmax_loss`. For a description of the algorithm and some
experimental results, please see: [TAPAS: Two-pass Approximate Adaptive
Sampling for Softmax](https://arxiv.org/abs/1707.03073).
Sampling proceeds in two phases:
* In the first phase, `num_sampled` classes are selected using
`tf.nn.learned_unigram_candidate_sampler` or supplied `sampled_values`.
The logits are calculated on those sampled classes. This phase is
similar to `tf.nn.sampled_softmax_loss`.
* In the second phase, the `num_resampled` classes with highest predicted
probability are kept. Probabilities are
`LogSumExp(logits / resampling_temperature)`, where the sum is over
`inputs`.
The `resampling_temperature` parameter controls the "adaptiveness" of the
resampling. At lower temperatures, resampling is more adaptive because it
picks more candidates close to the predicted classes. A common strategy is
to decrease the temperature as training proceeds.
See `tf.nn.sampled_softmax_loss` for more documentation on sampling and
for typical default values for some of the parameters.
This operation is for training only. It is generally an underestimate of
the full softmax loss.
A common use case is to use this method for training, and calculate the full
softmax loss for evaluation or inference. In this case, you must set
`partition_strategy="div"` for the two losses to be consistent, as in the
following example:
```python
if mode == "train":
loss = rank_sampled_softmax_loss(
weights=weights,
biases=biases,
labels=labels,
inputs=inputs,
...,
partition_strategy="div")
elif mode == "eval":
logits = tf.matmul(inputs, tf.transpose(weights))
logits = tf.nn.bias_add(logits, biases)
labels_one_hot = tf.one_hot(labels, n_classes)
loss = tf.nn.softmax_cross_entropy_with_logits(
labels=labels_one_hot,
logits=logits)
```
Args:
weights: A `Tensor` or `PartitionedVariable` of shape `[num_classes, dim]`,
or a list of `Tensor` objects whose concatenation along dimension 0
has shape [num_classes, dim]. The (possibly-sharded) class embeddings.
biases: A `Tensor` or `PartitionedVariable` of shape `[num_classes]`.
The (possibly-sharded) class biases.
labels: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes. Note that this format differs from
the `labels` argument of `nn.softmax_cross_entropy_with_logits`.
inputs: A `Tensor` of shape `[batch_size, dim]`. The forward
activations of the input network.
num_sampled: An `int`. The number of classes to randomly sample per batch.
num_resampled: An `int`. The number of classes to select from the
`num_sampled` classes using the adaptive resampling algorithm. Must be
less than `num_sampled`.
num_classes: An `int`. The number of possible classes.
num_true: An `int`. The number of target classes per training example.
sampled_values: A tuple of (`sampled_candidates`, `true_expected_count`,
`sampled_expected_count`) returned by a `*_candidate_sampler` function.
If None, default to `nn.learned_unigram_candidate_sampler`.
resampling_temperature: A scalar `Tensor` with the temperature parameter
for the adaptive resampling algorithm.
remove_accidental_hits: A `bool`. Whether to remove "accidental hits"
where a sampled class equals one of the target classes.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
See `tf.nn.embedding_lookup` for more details.
name: A name for the operation (optional).
Returns:
A `batch_size` 1-D tensor of per-example sampled softmax losses.
Raises:
ValueError: If `num_sampled > num_classes`, if `num_sampled <= num_resampled`,
or if `partition_strategy` is not `"div"` or `"mod"`.
"""
if num_sampled > num_classes:
raise ValueError("num_sampled ({}) cannot be greater than num_classes ({})".
format(num_sampled, num_classes))
if num_sampled <= num_resampled:
raise ValueError("num_resampled ({}) must be less than num_sampled ({})".
format(num_resampled, num_sampled))
if partition_strategy not in ("div", "mod"):
raise ValueError(
"unsupported partition_strategy ({})".format(partition_strategy))
with ops.name_scope(name, "rank_sampled_softmax_loss", [
weights, biases, labels, inputs, sampled_values, resampling_temperature
]) as name:
if not sampled_values:
sampled_values = nn.learned_unigram_candidate_sampler(
true_classes=labels,
num_true=num_true,
num_sampled=num_sampled,
unique=True,
range_max=num_classes)
# From sampled_values, select the top num_resampled values using the
# adaptive rank resampling strategy.
resampled_values = _rank_resample(weights, biases, inputs, sampled_values,
num_resampled, resampling_temperature,
partition_strategy)
return nn.sampled_softmax_loss(
weights=weights,
biases=biases,
labels=labels,
inputs=inputs,
num_sampled=num_resampled,
num_classes=num_classes,
num_true=num_true,
sampled_values=resampled_values,
remove_accidental_hits=remove_accidental_hits,
partition_strategy=partition_strategy,
name=name)
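# The docstring above suggests decreasing `resampling_temperature` as training
# proceeds. Below is a minimal sketch of one such schedule; the start value,
# end value, and step count are hypothetical, and the helper is not used
# elsewhere in this module.
def _resampling_temperature_schedule_sketch(global_step):
  from tensorflow.python.training import learning_rate_decay
  # Linearly anneal the temperature from 2.0 down to 0.5 over 100k steps.
  return learning_rate_decay.polynomial_decay(
      learning_rate=2.0,
      global_step=global_step,
      decay_steps=100000,
      end_learning_rate=0.5,
      power=1.0)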
def sampled_sparse_softmax_loss(weights,
biases,
labels,
inputs,
num_sampled,
num_classes,
sampled_values=None,
remove_accidental_hits=True,
partition_strategy="mod",
name="sampled_sparse_softmax_loss"):
"""Computes and returns the sampled sparse softmax training loss.
This is a faster way to train a softmax classifier over a huge number of
classes.
This operation is for training only. It is generally an underestimate of
the full softmax loss.
A common use case is to use this method for training, and calculate the full
softmax loss for evaluation or inference. In this case, you must set
`partition_strategy="div"` for the two losses to be consistent, as in the
following example:
```python
if mode == "train":
loss = tf.nn.sampled_sparse_softmax_loss(
weights=weights,
biases=biases,
labels=labels,
inputs=inputs,
...,
partition_strategy="div")
elif mode == "eval":
logits = tf.matmul(inputs, tf.transpose(weights))
logits = tf.nn.bias_add(logits, biases)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.squeeze(labels),
logits=logits)
```
See our [Candidate Sampling Algorithms Reference](https://www.tensorflow.org/extras/candidate_sampling.pdf).
Also see Section 3 of [Jean et al., 2014](http://arxiv.org/abs/1412.2007)
([pdf](http://arxiv.org/pdf/1412.2007.pdf)) for the math.
Args:
weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
objects whose concatenation along dimension 0 has shape
[num_classes, dim]. The (possibly-sharded) class embeddings.
biases: A `Tensor` of shape `[num_classes]`. The class biases.
labels: A `Tensor` of type `int64` and shape `[batch_size, 1]`.
The index of the single target class for each row of logits. Note that
this format differs from the `labels` argument of
`nn.sparse_softmax_cross_entropy_with_logits`.
inputs: A `Tensor` of shape `[batch_size, dim]`. The forward
activations of the input network.
num_sampled: An `int`. The number of classes to randomly sample per batch.
num_classes: An `int`. The number of possible classes.
sampled_values: A tuple of (`sampled_candidates`, `true_expected_count`,
`sampled_expected_count`) returned by a `*_candidate_sampler` function.
If None, defaults to `log_uniform_candidate_sampler`.
remove_accidental_hits: A `bool`. Whether to remove "accidental hits"
where a sampled class equals one of the target classes. Default is
True.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.
name: A name for the operation (optional).
Returns:
A `batch_size` 1-D tensor of per-example sampled softmax losses.
"""
logits, _ = nn_impl._compute_sampled_logits(
weights=weights,
biases=biases,
labels=labels,
inputs=inputs,
num_sampled=num_sampled,
num_classes=num_classes,
num_true=1,
sampled_values=sampled_values,
subtract_log_q=True,
remove_accidental_hits=remove_accidental_hits,
partition_strategy=partition_strategy,
name=name)
# There is only one true label. _compute_sampled_logits puts the true logit
# at index 0.
labels = array_ops.zeros([array_ops.shape(logits)[0], 1], dtype=dtypes.int64)
sampled_losses = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=array_ops.squeeze(labels), logits=logits)
# sampled_losses is a [batch_size] tensor.
return sampled_losses
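# This loss expects int64 labels of shape `[batch_size, 1]`. A minimal sketch
# (hypothetical helper, assuming a flat `[batch_size]` vector of class ids)
# of producing labels in that shape:
def _expand_labels_sketch(class_ids):
  return array_ops.expand_dims(math_ops.cast(class_ids, dtypes.int64), axis=1)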
|
tensorflow-master
|
tensorflow/contrib/nn/python/ops/sampling_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
def alpha_dropout(x, keep_prob, noise_shape=None, seed=None, name=None): # pylint: disable=invalid-name
"""Computes alpha dropout.
Alpha Dropout is a dropout that maintains the self-normalizing property. For
an input with zero mean and unit standard deviation, the output of
Alpha Dropout maintains the original mean and standard deviation of the input.
See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
Args:
x: A tensor.
keep_prob: A scalar `Tensor` with the same type as x. The probability
that each element is kept.
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
seed: A Python integer. Used to create random seeds. See
`tf.compat.v1.set_random_seed` for behavior.
name: A name for this operation (optional).
Returns:
A Tensor of the same shape as `x`.
Raises:
ValueError: If `keep_prob` is not in `(0, 1]`.
"""
with ops.name_scope(name, "alpha_dropout", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1.:
raise ValueError("keep_prob must be a scalar tensor or a float in the "
"range (0, 1], got %g" % keep_prob)
keep_prob = ops.convert_to_tensor(keep_prob,
dtype=x.dtype,
name="keep_prob")
keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())
# Do nothing if we know keep_prob == 1
if tensor_util.constant_value(keep_prob) == 1:
return x
alpha = -1.7580993408473766
noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
random_tensor = random_ops.random_uniform(noise_shape,
seed=seed,
dtype=x.dtype)
kept_idx = gen_math_ops.greater_equal(random_tensor, 1 - keep_prob)
kept_idx = math_ops.cast(kept_idx, x.dtype)
# Mask
x = x * kept_idx + alpha * (1 - kept_idx)
# Affine transformation parameters
a = (keep_prob + keep_prob * (1 - keep_prob) * alpha ** 2) ** -0.5
b = -a * alpha * (1 - keep_prob)
# Affine transformation
return a * x + b
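# A minimal usage sketch with hypothetical sizes and keep probability: for a
# roughly standard-normal input, the output of alpha_dropout should keep its
# mean and variance close to 0 and 1, up to sampling noise.
def _alpha_dropout_sketch():
  from tensorflow.python.client import session as session_lib
  from tensorflow.python.ops import nn_impl
  x = random_ops.random_normal([10000])
  y = alpha_dropout(x, keep_prob=0.8)
  mean, variance = nn_impl.moments(y, axes=[0])
  with session_lib.Session() as sess:
    return sess.run([mean, variance])  # Approximately 0.0 and 1.0.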
|
tensorflow-master
|
tensorflow/contrib/nn/python/ops/alpha_dropout.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops and modules related to input_pipeline.
@@obtain_next
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.input_pipeline.python.ops.input_pipeline_ops import obtain_next
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
tensorflow-master
|
tensorflow/contrib/input_pipeline/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for input_pipeline_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.input_pipeline.python.ops import input_pipeline_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class InputPipelineOpsTest(test.TestCase):
def testObtainNext(self):
with self.cached_session():
var = state_ops.variable_op([], dtypes.int64)
state_ops.assign(var, -1).op.run()
c = constant_op.constant(["a", "b"])
sample1 = input_pipeline_ops.obtain_next(c, var)
self.assertEqual(b"a", sample1.eval())
self.assertEqual(0, var.eval())
sample2 = input_pipeline_ops.obtain_next(c, var)
self.assertEqual(b"b", sample2.eval())
self.assertEqual(1, var.eval())
sample3 = input_pipeline_ops.obtain_next(c, var)
self.assertEqual(b"a", sample3.eval())
self.assertEqual(0, var.eval())
def testSeekNext(self):
string_list = ["a", "b", "c"]
with self.cached_session() as session:
elem = input_pipeline_ops.seek_next(string_list)
session.run([variables.global_variables_initializer()])
self.assertEqual(b"a", session.run(elem))
self.assertEqual(b"b", session.run(elem))
self.assertEqual(b"c", session.run(elem))
# Make sure we loop.
self.assertEqual(b"a", session.run(elem))
# Helper method that runs the op len(expected_list) times, asserts that the
# results match the elements of expected_list in order, and then expects the
# next run to raise an OutOfRangeError.
def _assert_output(self, expected_list, session, op):
for element in expected_list:
self.assertEqual(element, session.run(op))
with self.assertRaises(errors.OutOfRangeError):
session.run(op)
def testSeekNextLimitEpochs(self):
string_list = ["a", "b", "c"]
with self.cached_session() as session:
elem = input_pipeline_ops.seek_next(string_list, num_epochs=1)
session.run([
variables.local_variables_initializer(),
variables.global_variables_initializer()
])
self._assert_output([b"a", b"b", b"c"], session, elem)
def testSeekNextLimitEpochsThree(self):
string_list = ["a", "b", "c"]
with self.cached_session() as session:
elem = input_pipeline_ops.seek_next(string_list, num_epochs=3)
session.run([
variables.local_variables_initializer(),
variables.global_variables_initializer()
])
# Expect to see [a, b, c] three times.
self._assert_output([b"a", b"b", b"c"] * 3, session, elem)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/input_pipeline/python/ops/input_pipeline_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for input_pipeline_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
from tensorflow.contrib.input_pipeline.ops import gen_input_pipeline_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import resource_loader
_input_pipeline_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_input_pipeline_ops.so"))
def obtain_next(string_list_tensor, counter):
"""Basic wrapper for the ObtainNextOp.
Args:
string_list_tensor: A tensor that is a list of strings.
counter: An int64 ref tensor to keep track of which element is returned.
Returns:
An op that produces the element at counter + 1 in the list, round
robin style.
"""
return gen_input_pipeline_ops.obtain_next(string_list_tensor, counter)
def _maybe_randomize_list(string_list, shuffle):
if shuffle:
random.shuffle(string_list)
return string_list
def _create_list(string_list, shuffle, seed, num_epochs):
if shuffle and seed:
random.seed(seed)
expanded_list = _maybe_randomize_list(string_list, shuffle)[:]
if num_epochs:
for _ in range(num_epochs - 1):
expanded_list.extend(_maybe_randomize_list(string_list, shuffle))
return expanded_list
def seek_next(string_list, shuffle=False, seed=None, num_epochs=None):
"""Returns an op that seeks the next element in a list of strings.
Seeking happens in a round robin fashion. This op creates a variable called
obtain_next_counter that is initialized to -1 and is used to keep track of
which element in the list was returned, and a variable
obtain_next_expanded_list to hold the list. If num_epochs is not None, an
additional counter variable limits the number of times string_list is
traversed before an OutOfRangeError is raised.
Args:
string_list: A list of strings.
shuffle: If true, we shuffle the string_list differently for each epoch.
seed: Seed used for shuffling.
num_epochs: Returns OutOfRangeError once string_list has been repeated
num_epoch times. If unspecified then keeps on looping.
Returns:
An op that produces the next element in the provided list.
"""
expanded_list = _create_list(string_list, shuffle, seed, num_epochs)
with variable_scope.variable_scope("obtain_next"):
counter = variable_scope.get_variable(
name="obtain_next_counter",
initializer=constant_op.constant(
-1, dtype=dtypes.int64),
dtype=dtypes.int64,
trainable=False)
with ops.colocate_with(counter):
string_tensor = variable_scope.get_variable(
name="obtain_next_expanded_list",
initializer=constant_op.constant(expanded_list),
dtype=dtypes.string,
trainable=False)
if num_epochs:
filename_counter = variable_scope.get_variable(
name="obtain_next_filename_counter",
initializer=constant_op.constant(
0, dtype=dtypes.int64),
dtype=dtypes.int64,
trainable=False)
c = filename_counter.count_up_to(len(expanded_list))
with ops.control_dependencies([c]):
return obtain_next(string_tensor, counter)
else:
return obtain_next(string_tensor, counter)
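# A minimal usage sketch with hypothetical strings: seek_next yields one
# element per session.run call and loops over the list until num_epochs is
# exhausted, after which OutOfRangeError is raised.
def _seek_next_sketch():
  from tensorflow.python.client import session as session_lib
  from tensorflow.python.ops import variables
  elem = seek_next(["a", "b", "c"], num_epochs=1)
  with session_lib.Session() as sess:
    sess.run([variables.local_variables_initializer(),
              variables.global_variables_initializer()])
    # Successive runs yield b"a", b"b", b"c"; the run after that raises
    # OutOfRangeError.
    return [sess.run(elem) for _ in range(3)]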
|
tensorflow-master
|
tensorflow/contrib/input_pipeline/python/ops/input_pipeline_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops and modules related to proto.
@@decode_proto
@@encode_proto
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.proto_ops import decode_proto
from tensorflow.python.ops.proto_ops import encode_proto
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
tensorflow-master
|
tensorflow/contrib/proto/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Backwards compatibility tests for imports of tf.contrib.proto."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import proto
from tensorflow.python.platform import test
class ProtoImportTest(test.TestCase):
def testImport(self):
self.assertTrue(proto.decode_proto) # Should be accessible
self.assertTrue(proto.encode_proto) # Should be accessible
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/proto/import_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.ops.proto_ops import encode_proto
|
tensorflow-master
|
tensorflow/contrib/proto/python/ops/encode_proto_op.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.ops.proto_ops import decode_proto
|
tensorflow-master
|
tensorflow/contrib/proto/python/ops/decode_proto_op.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Complain about invalid or missing entries in python_*.txt files.
Problematic entries can be commented for temporary whitelisting.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
def abs_path(path):
root = os.path.dirname(__file__)
for _ in range(3):
root = os.path.join(root, os.pardir)
path = os.path.join(root, path)
path = os.path.abspath(path)
return path
def read_entries(test):
with open(abs_path(test.entries_file), "r") as f:
lines = f.readlines()
lines = [line.strip() for line in lines]
lines = [line for line in lines if line]
test.entries = []
test.whitelist = []
for line in lines:
# line is comment
if line.startswith("#"):
line = line[1:].strip()
# whitelist entry
if line.startswith("tensorflow/"):
test.whitelist.append(line)
# line has comment -> strip comment
elif line.find("#") != -1:
line = line[:line.find("#")].strip()
test.entries.append(line)
else:
test.entries.append(line)
def test_invalid_directories(test):
for entry in test.entries:
if not os.path.isdir(abs_path(entry)):
problem = "'" + test.entries_file + "' contains invalid '" + entry + "'"
solution = ("Please remove the invalid entry (or add the missing "
"directory).")
raise AssertionError(problem + "\n" + solution)
def test_missing_directory(test, path):
if path in test.whitelist:
return
dir_exists = os.path.isdir(abs_path(path))
entry_exists = path in test.entries
if dir_exists and not entry_exists:
problem = "'" + test.entries_file + "' is missing '" + path + "'"
solution = "Please add the missing entry (comment to whitelist if needed)."
raise AssertionError(problem + "\n" + solution)
class PythonModuleTest(unittest.TestCase):
def setUp(self):
self.entries_file = "tensorflow/contrib/cmake/python_modules.txt"
read_entries(self)
def testInvalidEntries(self):
test_invalid_directories(self)
def testMissingModules(self):
module_names = next(os.walk(abs_path("tensorflow/contrib")))[1]
for module_name in module_names:
path = "tensorflow/contrib/" + module_name
test_missing_directory(self, path + "/python")
test_missing_directory(self, path + "/python/ops")
test_missing_directory(self, path + "/python/kernels")
test_missing_directory(self, path + "/python/layers")
class PythonProtoTest(unittest.TestCase):
def setUp(self):
self.entries_file = "tensorflow/contrib/cmake/python_protos.txt"
read_entries(self)
def testInvalidEntries(self):
test_invalid_directories(self)
class PythonProtoCCTest(unittest.TestCase):
def setUp(self):
self.entries_file = "tensorflow/contrib/cmake/python_protos_cc.txt"
read_entries(self)
def testInvalidEntries(self):
test_invalid_directories(self)
if __name__ == "__main__":
unittest.main()
|
tensorflow-master
|
tensorflow/contrib/cmake/python_sanity_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""create_def_file.py - tool to create a windows def file.
The def file can be used to export symbols from the tensorflow dll to enable
tf.load_library().
Because the linker allows only 64K symbols to be exported per dll,
we filter the symbols down to the essentials. The regular expressions
we use for this are specific to tensorflow.
TODO: this works fine but there is an issue with exporting
'const char * const' and importing it from a user_ops. The problem is
on the importing end and using __declspec(dllimport) works around it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import codecs
import os
import re
import subprocess
import sys
import tempfile
# External tools we use that come with visual studio sdk and
# we assume that the caller has the correct PATH to the sdk
UNDNAME = "undname.exe"
DUMPBIN = "dumpbin.exe"
# Exclude if matched
EXCLUDE_RE = re.compile(r"RTTI|deleting destructor|::internal::|Internal|"
r"python_op_gen_internal|grappler")
# Include if matched before exclude
INCLUDEPRE_RE = re.compile(r"google::protobuf::internal::ExplicitlyConstructed|"
r"tensorflow::internal::LogMessage|"
r"tensorflow::internal::LogString|"
r"tensorflow::internal::CheckOpMessageBuilder|"
r"tensorflow::internal::PickUnusedPortOrDie|"
r"tensorflow::internal::ValidateDevice|"
r"tensorflow::ops::internal::Enter|"
r"tensorflow::strings::internal::AppendPieces|"
r"tensorflow::strings::internal::CatPieces|"
r"tensorflow::errors::Internal|"
r"tensorflow::Tensor::CopyFromInternal|"
r"tensorflow::kernel_factory::"
r"OpKernelRegistrar::InitInternal|"
r"tensorflow::io::internal::JoinPathImpl")
# Include if matched after exclude
INCLUDE_RE = re.compile(r"^(TF_\w*)$|"
r"^(TFE_\w*)$|"
r"tensorflow::|"
r"functor::|"
r"\?nsync_|"
r"stream_executor::")
# We want to identify data members explicitly in the DEF file, so that no one
# can implicitly link against the DLL if they use one of the variables exported
# from the DLL and the header they use does not decorate the symbol with
# __declspec(dllimport). It is easier to detect what a data symbol does
# NOT look like, so we do that with the regex below.
DATA_EXCLUDE_RE = re.compile(r"[)(]|"
r"vftable|"
r"vbtable|"
r"vcall|"
r"RTTI|"
r"protobuf::internal::ExplicitlyConstructed")
def get_args():
"""Parse command line."""
filename_list = lambda x: x.split(";")
parser = argparse.ArgumentParser()
parser.add_argument("--input", type=filename_list,
help="paths to input libraries separated by semicolons",
required=True)
parser.add_argument("--output", help="output deffile", required=True)
parser.add_argument("--target", help="name of the target", required=True)
parser.add_argument("--bitness", help="build target bitness", required=True)
args = parser.parse_args()
return args
def main():
"""main."""
args = get_args()
# Pipe dumpbin to extract all linkable symbols from libs.
# Good symbols are collected in candidates and also written to
# a temp file.
candidates = []
tmpfile = tempfile.NamedTemporaryFile(mode="w", delete=False)
for lib_path in args.input:
proc = subprocess.Popen([DUMPBIN, "/nologo", "/linkermember:1", lib_path],
stdout=subprocess.PIPE)
for line in codecs.getreader("utf-8")(proc.stdout):
cols = line.split()
if len(cols) < 2:
continue
sym = cols[1]
tmpfile.file.write(sym + "\n")
candidates.append(sym)
exit_code = proc.wait()
if exit_code != 0:
print("{} failed, exit={}".format(DUMPBIN, exit_code))
return exit_code
tmpfile.file.close()
# Run the symbols through undname to get their undecorated name
# so we can filter on something readable.
with open(args.output, "w") as def_fp:
# track dupes
taken = set()
# Header for the def file.
def_fp.write("LIBRARY " + args.target + "\n")
def_fp.write("EXPORTS\n")
if args.bitness == "64":
def_fp.write("\t??1OpDef@tensorflow@@UEAA@XZ\n")
else:
def_fp.write("\t??1OpDef@tensorflow@@UAE@XZ\n")
# Each symbol returned by undname matches the same position in candidates.
# We compare on undname but use the decorated name from candidates.
dupes = 0
proc = subprocess.Popen([UNDNAME, tmpfile.name], stdout=subprocess.PIPE)
for idx, line in enumerate(codecs.getreader("utf-8")(proc.stdout)):
decorated = candidates[idx]
if decorated in taken:
# Symbol is already in output, done.
dupes += 1
continue
if not INCLUDEPRE_RE.search(line):
if EXCLUDE_RE.search(line):
continue
if not INCLUDE_RE.search(line):
continue
if "deleting destructor" in line:
# Some of the symbols covered by INCLUDEPRE_RE export deleting
# destructor symbols, which is a bad idea.
# So we filter out such symbols here.
continue
if DATA_EXCLUDE_RE.search(line):
def_fp.write("\t" + decorated + "\n")
else:
def_fp.write("\t" + decorated + " DATA\n")
taken.add(decorated)
exit_code = proc.wait()
if exit_code != 0:
print("{} failed, exit={}".format(UNDNAME, exit_code))
return exit_code
os.unlink(tmpfile.name)
print("symbols={}, taken={}, dupes={}"
.format(len(candidates), len(taken), dupes))
return 0
if __name__ == "__main__":
sys.exit(main())
|
tensorflow-master
|
tensorflow/contrib/cmake/tools/create_def_file.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for dealing with Tensors.
@@constant_value
@@make_tensor_proto
@@make_ndarray
@@ops_used_by_graph_def
@@stripped_op_list_for_graph
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.framework.meta_graph import ops_used_by_graph_def
from tensorflow.python.framework.meta_graph import stripped_op_list_for_graph
from tensorflow.python.framework.tensor_util import constant_value
from tensorflow.python.framework.tensor_util import make_tensor_proto
from tensorflow.python.framework.tensor_util import MakeNdarray as make_ndarray
# pylint: disable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
tensorflow-master
|
tensorflow/contrib/util/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for loading op libraries.
@@load_op_library
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
from tensorflow.python.framework import load_library
from tensorflow.python.platform import resource_loader
def load_op_library(path):
"""Loads a contrib op library from the given path.
NOTE(mrry): On Windows, we currently assume that some contrib op
libraries are statically linked into the main TensorFlow Python
extension DLL - use dynamically linked ops if the .so is present.
Args:
path: An absolute path to a shared object file.
Returns:
A Python module containing the Python wrappers for Ops defined in the
plugin.
"""
if os.name == 'nt':
# To avoid making every user_ops aware of windows, re-write
# the file extension from .so to .dll if the .so file doesn't exist.
if not os.path.exists(path):
path = re.sub(r'\.so$', '.dll', path)
# Currently we have only some user_ops as dlls on windows - don't try
# to load them if the dll is not found.
# TODO(mrry): Once we have all of them this check should be removed.
if not os.path.exists(path):
return None
path = resource_loader.get_path_to_datafile(path)
ret = load_library.load_op_library(path)
assert ret, 'Could not load %s' % path
return ret
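# A minimal usage sketch with a hypothetical library name, mirroring how
# contrib packages load their kernels relative to their Python wrappers:
def _load_op_library_sketch():
  return load_op_library(
      resource_loader.get_path_to_datafile("_my_contrib_ops.so"))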
|
tensorflow-master
|
tensorflow/contrib/util/loader.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for (approximate) nearest neighbor look-ups.
## Ops for (approximate) nearest neighbor look-ups
This package provides several ops for efficient (approximate) nearest
neighbor look-ups.
### LSH multiprobe ops
The following ops generate multiprobe sequences for various hash families.
@@hyperplane_lsh_hash
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,line-too-long
from tensorflow.contrib.nearest_neighbor.python.ops.nearest_neighbor_ops import *
# pylint: enable=unused-import,wildcard-import,line-too-long
|
tensorflow-master
|
tensorflow/contrib/nearest_neighbor/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for hyperplane_lsh_probes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.nearest_neighbor.python.ops.nearest_neighbor_ops import hyperplane_lsh_probes
from tensorflow.python.platform import test
class HyperplaneLshProbesTest(test.TestCase):
# We only test the batch functionality of the op here because the multiprobe
# tests in hyperplane_lsh_probes_test.cc already cover most of the LSH
# functionality.
def testSimpleBatch(self):
with self.cached_session():
hyperplanes = np.eye(4)
points = np.array([[1.2, 0.5, -0.9, -1.0], [2.0, -3.0, 1.0, -1.5]])
product = np.dot(points, hyperplanes)
num_tables = 2
num_hyperplanes_per_table = 2
num_probes = 4
hashes, tables = hyperplane_lsh_probes(product,
num_tables,
num_hyperplanes_per_table,
num_probes)
self.assertAllEqual(hashes.eval(), [[3, 0, 2, 2], [2, 2, 0, 3]])
self.assertAllEqual(tables.eval(), [[0, 1, 0, 1], [0, 1, 1, 1]])
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/nearest_neighbor/python/kernel_tests/hyperplane_lsh_probes_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for nearest neighbor operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
_nearest_neighbor_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_nearest_neighbor_ops.so"))
def hyperplane_lsh_probes(point_hyperplane_product,
num_tables,
num_hyperplanes_per_table,
num_probes,
name=None):
"""Computes probes for the hyperplane hash.
The op supports multiprobing, i.e., the number of requested probes can be
larger than the number of tables. In that case, the same table can be probed
multiple times.
The first `num_tables` probes are always the primary hashes for each table.
Args:
point_hyperplane_product: a matrix of inner products between the hyperplanes
and the points to be hashed. These values should not be quantized so that
we can correctly compute the probing sequence. The expected shape is
`batch_size` times `num_tables * num_hyperplanes_per_table`, i.e., each
element of the batch corresponds to one row of the matrix.
num_tables: the number of tables to compute probes for.
num_hyperplanes_per_table: the number of hyperplanes per table.
num_probes: the requested number of probes per table.
name: A name prefix for the returned tensors (optional).
Returns:
probes: the output matrix of probes. Size `batch_size` times `num_probes`.
table_ids: the output matrix of tables ids. Size `batch_size` times
`num_probes`.
"""
return _nearest_neighbor_ops.hyperplane_lsh_probes(point_hyperplane_product,
num_tables,
num_hyperplanes_per_table,
num_probes,
name=name)
ops.NotDifferentiable("HyperplaneLSHProbes")
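# A minimal usage sketch with hypothetical table and probe counts, mirroring
# the kernel test in hyperplane_lsh_probes_test.py:
def _hyperplane_lsh_probes_sketch(point_hyperplane_product):
  num_tables = 2
  num_hyperplanes_per_table = 2
  num_probes = 4
  probes, table_ids = hyperplane_lsh_probes(point_hyperplane_product,
                                            num_tables,
                                            num_hyperplanes_per_table,
                                            num_probes)
  # Both outputs have shape [batch_size, num_probes].
  return probes, table_ids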
|
tensorflow-master
|
tensorflow/contrib/nearest_neighbor/python/ops/nearest_neighbor_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Framework utilities.
@@assert_same_float_dtype
@@assert_scalar
@@assert_scalar_int
@@convert_to_tensor_or_sparse_tensor
@@get_graph_from_inputs
@@is_numeric_tensor
@@is_non_decreasing
@@is_strictly_increasing
@@is_tensor
@@reduce_sum_n
@@remove_squeezable_dimensions
@@with_shape
@@with_same_shape
@@deprecated
@@deprecated_args
@@deprecated_arg_values
@@arg_scope
@@add_arg_scope
@@current_arg_scope
@@has_arg_scope
@@arg_scoped_arguments
@@prepend_name_scope
@@strip_name_scope
@@add_model_variable
@@assert_global_step
@@assert_or_get_global_step
@@assign_from_checkpoint
@@assign_from_checkpoint_fn
@@assign_from_values
@@assign_from_values_fn
@@create_global_step
@@filter_variables
@@fuse_op
@@get_global_step
@@get_or_create_global_step
@@get_local_variables
@@get_model_variables
@@get_name_scope
@@get_trainable_variables
@@get_unique_variable
@@get_variables_by_name
@@get_variables_by_suffix
@@get_variable_full_name
@@get_variables_to_restore
@@get_variables
@@global_variable
@@local_variable
@@model_variable
@@variable
@@VariableDeviceChooser
@@convolutional_delta_orthogonal
@@convolutional_orthogonal_1d
@@convolutional_orthogonal_2d
@@convolutional_orthogonal_3d
@@zero_initializer
@@load_checkpoint
@@list_variables
@@load_variable
@@init_from_checkpoint
@@load_and_remap_matrix_initializer
@@load_embedding_initializer
@@load_linear_multiclass_bias_initializer
@@load_variable_slot_initializer
@@argsort
@@py_func
@@sort
@@get_placeholders
@@smart_cond
@@smart_constant_value
@@smart_case
@@BoundedTensorSpec
@@TensorSpec
@@RecordInput
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.framework.python.framework import *
from tensorflow.contrib.framework.python.framework import nest
from tensorflow.contrib.framework.python.ops import *
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.framework.ops import prepend_name_scope
from tensorflow.python.framework.ops import strip_name_scope
from tensorflow.python.framework.smart_cond import smart_case
from tensorflow.python.framework.smart_cond import smart_cond
from tensorflow.python.framework.smart_cond import smart_constant_value
from tensorflow.python.framework.tensor_spec import BoundedTensorSpec
from tensorflow.python.framework.tensor_spec import TensorSpec
from tensorflow.python.ops.data_flow_ops import RecordInput
from tensorflow.python.ops.init_ops import convolutional_delta_orthogonal
from tensorflow.python.ops.init_ops import convolutional_orthogonal_1d
from tensorflow.python.ops.init_ops import convolutional_orthogonal_2d
from tensorflow.python.ops.init_ops import convolutional_orthogonal_3d
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['nest']
_nest_allowed_symbols = [
'assert_same_structure',
'is_nested',
'is_sequence',
'is_sequence_or_composite',
'flatten',
'flatten_dict_items',
'pack_sequence_as',
'map_structure',
'map_structure_with_paths',
'map_structure_with_tuple_paths',
'assert_shallow_structure',
'flatten_up_to',
'flatten_with_tuple_paths_up_to',
'map_structure_up_to',
'map_structure_with_tuple_paths_up_to',
'get_traverse_shallow_structure',
'yield_flat_paths',
'flatten_with_joined_string_paths',
'flatten_with_tuple_paths',
]
remove_undocumented(nest.__name__, allowed_exception_list=_nest_allowed_symbols)
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/framework/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""@graph_util tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.framework import graph_util
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
def GetNewNode(name, op, input_nodes):
new_node = node_def_pb2.NodeDef()
new_node.op = op
new_node.name = name
for node in input_nodes:
new_node.input.append(node)
return new_node
class GraphUtilTest(test.TestCase):
def testGraphUtil(self):
graph_def = graph_pb2.GraphDef()
node_a = GetNewNode('A', 'Placeholder', [])
node_b = GetNewNode('B', 'Op1', ['A'])
# A loop in the part that will be fused.
node_c = GetNewNode('C', 'Op1', ['B', 'C'])
node_d = GetNewNode('D', 'Op1', ['C'])
node_e = GetNewNode('E', 'Op1', ['D'])
graph_def.node.extend([node_a, node_b, node_c, node_d, node_e])
fused_graph_def = graph_util.fuse_op(
graph_def, ['A'], ['D'], [types_pb2.DT_FLOAT], True, 'FusedOp', 'Op2')
self.assertEqual(len(fused_graph_def.node), 4)
self.assertEqual(fused_graph_def.node[0].name, 'A')
self.assertEqual(fused_graph_def.node[1].name, 'FusedOp')
self.assertEqual(fused_graph_def.node[1].input[0], 'A')
self.assertEqual(fused_graph_def.node[1].op, 'Op2')
self.assertEqual(fused_graph_def.node[1].attr['_output_quantized'].b, True)
self.assertEqual(fused_graph_def.node[1].attr['_output_types'].list.type,
[types_pb2.DT_FLOAT])
self.assertEqual(fused_graph_def.node[2].name, 'D')
self.assertEqual(fused_graph_def.node[3].name, 'E')
def testGraphUtilArtificialDependencyInjection(self):
graph_def = graph_pb2.GraphDef()
node_a = GetNewNode('A', 'Placeholder', [])
node_a1 = GetNewNode('A1', 'Placeholder', [])
node_b = GetNewNode('B', 'Op1', ['A'])
node_c = GetNewNode('C', 'Op1', ['B'])
node_d = GetNewNode('D', 'Op1', ['C'])
node_e = GetNewNode('E', 'Op1', ['D'])
graph_def.node.extend([node_a, node_a1, node_b, node_c, node_d, node_e])
fused_graph_def = graph_util.fuse_op(graph_def, ['A', 'A1'], ['D'],
[types_pb2.DT_FLOAT], True, 'FusedOp',
'Op2')
self.assertEqual(len(fused_graph_def.node), 5)
self.assertEqual(fused_graph_def.node[0].name, 'A')
self.assertEqual(fused_graph_def.node[1].name, 'A1')
self.assertEqual(fused_graph_def.node[2].name, 'FusedOp')
self.assertEqual(fused_graph_def.node[2].input[0], 'A')
self.assertEqual(fused_graph_def.node[2].op, 'Op2')
self.assertEqual(fused_graph_def.node[2].attr['_output_quantized'].b, True)
self.assertEqual(fused_graph_def.node[2].attr['_output_types'].list.type,
[types_pb2.DT_FLOAT])
self.assertEqual(fused_graph_def.node[3].name, 'D')
self.assertEqual(fused_graph_def.node[4].name, 'E')
class GetPlaceholdersTest(test.TestCase):
def test_get_placeholders(self):
with ops.Graph().as_default() as g:
placeholders = [array_ops.placeholder(dtypes.float32) for _ in range(5)]
results = graph_util.get_placeholders(g)
self.assertEqual(
sorted(placeholders, key=lambda x: x._id), # pylint: disable=protected-access
sorted(results, key=lambda x: x._id)) # pylint: disable=protected-access
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/framework/python/framework/graph_util_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import decorator_utils
def _add_experimental_function_notice_to_docstring(doc):
"""Adds an experimental notice to a docstring for experimental functions."""
return decorator_utils.add_notice_to_docstring(
doc, '',
'EXPERIMENTAL FUNCTION',
'(experimental)', ['THIS FUNCTION IS EXPERIMENTAL. It may change or '
'be removed at any time, and without warning.'])
def experimental(func):
"""Decorator for marking functions or methods experimental.
This decorator logs an experimental warning whenever the decorated function is
called. It has the following format:
<function> (from <module>) is experimental and may change or be removed at
any time, and without warning.
<function> will include the class name if it is a method.
It also edits the docstring of the function: ' (experimental)' is appended
to the first line of the docstring and a notice is prepended to the rest of
the docstring.
Args:
func: A function or method to mark experimental.
Returns:
Decorated function or method.
"""
decorator_utils.validate_callable(func, 'experimental')
@functools.wraps(func)
def new_func(*args, **kwargs):
logging.warning(
'%s (from %s) is experimental and may change or be removed at '
'any time, and without warning.',
decorator_utils.get_qualified_name(func), func.__module__)
return func(*args, **kwargs)
new_func.__doc__ = _add_experimental_function_notice_to_docstring(
func.__doc__)
return new_func
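# A minimal usage sketch with a hypothetical function name: decorating with
# @experimental appends "(experimental)" to the docstring and logs a warning
# on every call.
@experimental
def _experimental_usage_sketch(x):
  """Returns its input unchanged."""
  return x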
|
tensorflow-master
|
tensorflow/contrib/framework/python/framework/experimental.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for checkpoints tools."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
def _create_checkpoints(sess, checkpoint_dir):
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
checkpoint_state_name = "checkpoint"
v1 = variable_scope.get_variable("var1", [1, 10])
v2 = variable_scope.get_variable("var2", [10, 10])
v3 = variable_scope.get_variable("var3", [100, 100])
with variable_scope.variable_scope("useful_scope"):
v4 = variable_scope.get_variable("var4", [9, 9])
sess.run(variables.global_variables_initializer())
v1_value, v2_value, v3_value, v4_value = sess.run([v1, v2, v3, v4])
saver = saver_lib.Saver()
saver.save(
sess,
checkpoint_prefix,
global_step=0,
latest_filename=checkpoint_state_name)
return v1_value, v2_value, v3_value, v4_value
def _create_partition_checkpoints(sess, checkpoint_dir):
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
checkpoint_state_name = "checkpoint"
with variable_scope.variable_scope("scope"):
v1 = variable_scope.get_variable(
name="var1",
shape=[100, 100],
initializer=init_ops.truncated_normal_initializer(0.5),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=5, axis=0, min_slice_size=8 << 10))
sess.run(variables.global_variables_initializer())
v1_value = sess.run(v1._get_variable_list())
saver = saver_lib.Saver()
saver.save(
sess,
checkpoint_prefix,
global_step=0,
latest_filename=checkpoint_state_name)
return v1_value
class CheckpointsTest(test.TestCase):
def testNoCheckpoints(self):
checkpoint_dir = self.get_temp_dir() + "/no_checkpoints"
with self.assertRaises(errors_impl.OpError):
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var1"), [])
def testNoTensor(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
_, _, _, _ = _create_checkpoints(session, checkpoint_dir)
with self.assertRaises(errors_impl.OpError):
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var5"), [])
def testGetTensor(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var1"), v1)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var2"), v2)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var3"), v3)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "useful_scope/var4"), v4)
def testGetAllVariables(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
_create_checkpoints(session, checkpoint_dir)
self.assertEqual(
checkpoint_utils.list_variables(checkpoint_dir),
[("useful_scope/var4", [9, 9]), ("var1", [1, 10]), ("var2", [10, 10]),
("var3", [100, 100])])
def testInitFromCheckpoint(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
my1 = variable_scope.get_variable("my1", [1, 10])
with variable_scope.variable_scope("some_other_scope"):
my2 = variable_scope.get_variable("my2", [10, 10])
with variable_scope.variable_scope("other_useful_scope"):
my4 = variable_scope.get_variable("var4", [9, 9])
my3 = variable_scope.get_variable("my3", [100, 100])
checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
"var1": "some_scope/my1",
"useful_scope/": "some_scope/some_other_scope/other_useful_scope/",
})
checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
"var2": "some_scope/some_other_scope/my2",
"var3": my3,
})
session.run(variables.global_variables_initializer())
self.assertAllEqual(my1.eval(session), v1)
self.assertAllEqual(my2.eval(session), v2)
self.assertAllEqual(my3.eval(session), v3)
self.assertAllEqual(my4.eval(session), v4)
# Check that tensors are not explicitly in the graph.
self.assertLess(len(str(session.graph.as_graph_def())), 27000)
def testInitWithScopeDoesNotCaptureSuffixes(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
_, _, _, v4 = _create_checkpoints(session, checkpoint_dir)
with ops.Graph().as_default() as g:
with variable_scope.variable_scope("useful_scope"):
my4 = variable_scope.get_variable("var4", [9, 9])
with variable_scope.variable_scope("useful_scope_1"):
my5_init = [[1.0, 2.0], [3.0, 4.0]]
my5 = variable_scope.get_variable("var5", initializer=my5_init)
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"useful_scope/": "useful_scope/"})
with self.session(graph=g) as session:
session.run(variables.global_variables_initializer())
self.assertAllEqual(my4.eval(session), v4)
self.assertAllEqual(my5.eval(session), my5_init)
def testInitFromRootCheckpoint(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
my1 = variable_scope.get_variable("var1", [1, 10])
my2 = variable_scope.get_variable("var2", [10, 10])
my3 = variable_scope.get_variable("var3", [100, 100])
with variable_scope.variable_scope("useful_scope"):
my4 = variable_scope.get_variable("var4", [9, 9])
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"/": "some_scope/",})
session.run(variables.global_variables_initializer())
self.assertAllEqual(my1.eval(session), v1)
self.assertAllEqual(my2.eval(session), v2)
self.assertAllEqual(my3.eval(session), v3)
self.assertAllEqual(my4.eval(session), v4)
def testInitToRootCheckpoint(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
my1 = variable_scope.get_variable("var1", [1, 10])
my2 = variable_scope.get_variable("var2", [10, 10])
my3 = variable_scope.get_variable("var3", [100, 100])
with variable_scope.variable_scope("useful_scope"):
my4 = variable_scope.get_variable("var4", [9, 9])
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"/": "/",})
session.run(variables.global_variables_initializer())
self.assertAllEqual(my1.eval(session), v1)
self.assertAllEqual(my2.eval(session), v2)
self.assertAllEqual(my3.eval(session), v3)
self.assertAllEqual(my4.eval(session), v4)
def testInitFromPartitionVar(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1 = _create_partition_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
my1 = variable_scope.get_variable(
name="my1",
shape=[100, 100],
initializer=init_ops.truncated_normal_initializer(0.5),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=5, axis=0, min_slice_size=8 << 10))
my1_var_list = my1._get_variable_list()
with variable_scope.variable_scope("some_other_scope"):
my2 = variable_scope.get_variable(
name="var1",
shape=[100, 100],
initializer=init_ops.truncated_normal_initializer(0.5),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=5, axis=0, min_slice_size=8 << 10))
my2_var_list = my2._get_variable_list()
checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
"scope/var1": "some_scope/my1",
"scope/": "some_other_scope/"})
session.run(variables.global_variables_initializer())
my1_values = session.run(my1_var_list)
self.assertAllEqual(my1_values, v1)
my2_values = session.run(my2_var_list)
self.assertAllEqual(my2_values, v1)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
my1 = variable_scope.get_variable(
name="my1",
shape=[100, 100],
initializer=init_ops.truncated_normal_initializer(0.5),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=5, axis=0, min_slice_size=8 << 10))
my1_var_list = my1._get_variable_list()
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"scope/var1": my1_var_list,})
session.run(variables.global_variables_initializer())
my1_values = session.run(my1_var_list)
self.assertAllEqual(my1_values, v1)
def testInitFromCheckpointMissing(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
_, _, _, _ = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
_ = variable_scope.get_variable("my1", [10, 10])
_ = variable_scope.get_variable(
"my2", [1, 10],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer())
# No directory.
with self.assertRaises(errors_impl.OpError):
checkpoint_utils.init_from_checkpoint("no_dir",
{"var1": "some_scope/my1"})
# No variable in checkpoint.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"no_var": "some_scope/my1"})
# No variable in the graph.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"var3": "some_scope/no_var"})
# Shape mismatch.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"var1": "some_scope/my1"})
        # Variables 'my1' and 'my2' are missing in the given checkpoint scope.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(
checkpoint_dir, {"useful_scope/": "some_scope/"})
# Mapping is not to scope name.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"useful_scope": "some_scope/"})
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/framework/python/framework/checkpoint_utils_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.deprecation import deprecated
__all__ = [
'assert_same_float_dtype',
'assert_scalar',
'assert_scalar_int',
'convert_to_tensor_or_sparse_tensor',
'is_tensor',
'reduce_sum_n',
'remove_squeezable_dimensions',
'with_shape',
'with_same_shape']
# Temporary for backwards compatibility
is_tensor = tensor_util.is_tensor
assert_same_float_dtype = check_ops.assert_same_float_dtype
assert_scalar = check_ops.assert_scalar
convert_to_tensor_or_sparse_tensor = (
sparse_tensor.convert_to_tensor_or_sparse_tensor)
def reduce_sum_n(tensors, name=None):
"""Reduce tensors to a scalar sum.
This reduces each tensor in `tensors` to a scalar via `tf.reduce_sum`, then
adds them via `tf.add_n`.
Args:
tensors: List of tensors, all of the same numeric type.
name: Tensor name, and scope for all other ops.
  Returns:
    A scalar tensor equal to the sum of all the reduced input tensors.
  Raises:
    ValueError: if `tensors` is missing or empty.
"""
if not tensors:
raise ValueError('No tensors provided.')
with ops.name_scope(name, 'reduce_sum_n', tensors) as name_scope:
tensors = [
math_ops.reduce_sum(t, name='%s/sum' % t.op.name) for t in tensors]
if len(tensors) == 1:
return tensors[0]
return math_ops.add_n(tensors, name=name_scope)
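# --- Illustrative usage sketch (editor's addition, not part of the original
# module): each tensor is reduced to a scalar and the scalars are then added.
# The values are hypothetical.
def _example_reduce_sum_n():
  from tensorflow.python.framework import constant_op  # local import for the sketch
  a = constant_op.constant([1, 2])    # reduces to 3
  b = constant_op.constant([[3, 4]])  # reduces to 7
  return reduce_sum_n([a, b])         # scalar tensor evaluating to 10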
@deprecated(
None, "Please switch to remove_squeezable_dimensions from "
"tf.confusion_matrix. Note that the order of the inputs and outputs of "
"labels and predictions have also been switched.")
def remove_squeezable_dimensions(predictions, labels, name=None):
"""Squeeze last dim if ranks of `predictions` and `labels` differ by 1.
This will use static shape if available. Otherwise, it will add graph
operations, which could result in a performance hit.
Args:
predictions: Predicted values, a `Tensor` of arbitrary dimensions.
labels: Label values, a `Tensor` whose dimensions match `predictions`.
name: Name of the op.
Returns:
Tuple of `predictions` and `labels`, possibly with last dim squeezed.
"""
with ops.name_scope(name, 'remove_squeezable_dimensions',
[predictions, labels]):
predictions = ops.convert_to_tensor(predictions)
labels = ops.convert_to_tensor(labels)
predictions_shape = predictions.get_shape()
predictions_rank = predictions_shape.ndims
labels_shape = labels.get_shape()
labels_rank = labels_shape.ndims
if (labels_rank is not None) and (predictions_rank is not None):
# Use static rank.
rank_diff = predictions_rank - labels_rank
if rank_diff == -1:
labels = array_ops.squeeze(labels, [-1])
elif rank_diff == 1:
predictions = array_ops.squeeze(predictions, [-1])
return predictions, labels
# Use dynamic rank.
rank_diff = array_ops.rank(predictions) - array_ops.rank(labels)
if (predictions_rank is None) or (
predictions_shape.dims[-1].is_compatible_with(1)):
predictions = control_flow_ops.cond(
math_ops.equal(1, rank_diff),
lambda: array_ops.squeeze(predictions, [-1]),
lambda: predictions)
if (labels_rank is None) or (
labels_shape.dims[-1].is_compatible_with(1)):
labels = control_flow_ops.cond(
math_ops.equal(-1, rank_diff),
lambda: array_ops.squeeze(labels, [-1]),
lambda: labels)
return predictions, labels
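# --- Illustrative usage sketch (editor's addition, not part of the original
# module): `labels` carries a trailing dimension of size 1, so it is squeezed
# to match the rank of `predictions`. Shapes and values are hypothetical.
def _example_remove_squeezable_dimensions():
  from tensorflow.python.framework import constant_op  # local import for the sketch
  predictions = constant_op.constant([0.2, 0.8, 0.5])   # shape [3]
  labels = constant_op.constant([[0.0], [1.0], [1.0]])  # shape [3, 1]
  # Returns (predictions, labels) with labels squeezed to shape [3].
  return remove_squeezable_dimensions(predictions, labels)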
def _shape_tensor_compatible(expected_shape, actual_shape):
"""Returns whether actual_shape is compatible with expected_shape.
  Note that -1 in `expected_shape` is recognized as an unknown dimension.
Args:
expected_shape: Integer list defining the expected shape, or tensor of same.
actual_shape: Shape of the tensor to test.
Returns:
New tensor.
"""
with ops.name_scope('shape_tensor_equal',
values=[expected_shape, actual_shape]) as scope:
return math_ops.reduce_all(
math_ops.logical_or(
math_ops.equal(expected_shape, -1),
math_ops.equal(expected_shape, actual_shape, 'equal'),
name='exclude_partial_shape'),
name=scope)
def _is_rank(expected_rank, actual_tensor):
"""Returns whether actual_tensor's rank is expected_rank.
Args:
expected_rank: Integer defining the expected rank, or tensor of same.
actual_tensor: Tensor to test.
Returns:
New tensor.
"""
with ops.name_scope('is_rank', values=[actual_tensor]) as scope:
expected = ops.convert_to_tensor(expected_rank, name='expected')
actual = array_ops.rank(actual_tensor, name='actual')
return math_ops.equal(expected, actual, name=scope)
def _is_shape(expected_shape, actual_tensor, actual_shape=None):
"""Returns whether actual_tensor's shape is expected_shape.
  Note that -1 in `expected_shape` is recognized as an unknown dimension.
Args:
expected_shape: Integer list defining the expected shape, or tensor of same.
actual_tensor: Tensor to test.
actual_shape: Shape of actual_tensor, if we already have it.
Returns:
New tensor.
"""
with ops.name_scope('is_shape', values=[actual_tensor]) as scope:
is_rank = _is_rank(array_ops.size(expected_shape), actual_tensor)
if actual_shape is None:
actual_shape = array_ops.shape(actual_tensor, name='actual')
shape_equal = _shape_tensor_compatible(expected_shape, actual_shape)
return math_ops.logical_and(is_rank, shape_equal, name=scope)
def _assert_shape_op(expected_shape, actual_tensor):
"""Asserts actual_tensor's shape is expected_shape.
  Note that unknown dimensions in `expected_shape` will be ignored.
Args:
expected_shape: List of integers defining the expected shape, or tensor of
same.
actual_tensor: Tensor to test.
Returns:
New assert tensor.
"""
with ops.name_scope('assert_shape', values=[actual_tensor]) as scope:
actual_shape = array_ops.shape(actual_tensor, name='actual')
if (isinstance(expected_shape, tensor_shape.TensorShape)
and not expected_shape.is_fully_defined()):
expected_shape = [d if d else -1 for d in expected_shape.as_list()]
is_shape = _is_shape(expected_shape, actual_tensor, actual_shape)
return control_flow_ops.Assert(
is_shape, [
'Wrong shape for %s [expected] [actual].' % actual_tensor.name,
expected_shape,
actual_shape
], name=scope)
def with_same_shape(expected_tensor, tensor):
"""Assert tensors are the same shape, from the same graph.
Args:
expected_tensor: Tensor with expected shape.
tensor: Tensor of actual values.
Returns:
The original tensor argument, possibly with assert ops added.
"""
with ops.name_scope('%s/' % tensor.op.name, values=[expected_tensor, tensor]):
tensor_shape = expected_tensor.get_shape()
expected_shape = (
tensor_shape.as_list() if tensor_shape.is_fully_defined()
else array_ops.shape(expected_tensor, name='expected_shape'))
return with_shape(expected_shape, tensor)
def with_shape(expected_shape, tensor):
"""Asserts tensor has expected shape.
  If tensor shape and expected_shape are fully defined, assert they match.
Otherwise, add assert op that will validate the shape when tensor is
evaluated, and set shape on tensor.
Args:
expected_shape: Expected shape to assert, as a 1D array of ints, or tensor
of same.
tensor: Tensor whose shape we're validating.
Returns:
tensor, perhaps with a dependent assert operation.
Raises:
ValueError: if tensor has an invalid shape.
"""
if isinstance(tensor, sparse_tensor.SparseTensor):
raise ValueError('SparseTensor not supported.')
# Shape type must be 1D int32.
if tensor_util.is_tensor(expected_shape):
if expected_shape.dtype.base_dtype != dtypes.int32:
raise ValueError(
'Invalid dtype %s for shape %s expected of tensor %s.' % (
expected_shape.dtype, expected_shape, tensor.name))
if isinstance(expected_shape, (list, tuple)):
if not expected_shape:
expected_shape = np.asarray([], dtype=np.int32)
else:
np_expected_shape = np.asarray(expected_shape)
expected_shape = (
np.asarray(expected_shape, dtype=np.int32)
if np_expected_shape.dtype == np.int64 else np_expected_shape)
if isinstance(expected_shape, np.ndarray):
if expected_shape.ndim > 1:
raise ValueError(
'Invalid rank %s for shape %s expected of tensor %s.' % (
expected_shape.ndim, expected_shape, tensor.name))
if expected_shape.dtype != np.int32:
raise ValueError(
'Invalid dtype %s for shape %s expected of tensor %s.' % (
expected_shape.dtype, expected_shape, tensor.name))
actual_shape = tensor.get_shape()
if (not actual_shape.is_fully_defined()
or tensor_util.is_tensor(expected_shape)):
with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
if (not tensor_util.is_tensor(expected_shape)
and (len(expected_shape) < 1)):
# TODO(irving): Remove scalar special case
return array_ops.reshape(tensor, [])
with ops.control_dependencies([_assert_shape_op(expected_shape, tensor)]):
result = array_ops.identity(tensor)
if not tensor_util.is_tensor(expected_shape):
result.set_shape(expected_shape)
return result
if (not tensor_util.is_tensor(expected_shape) and
not actual_shape.is_compatible_with(expected_shape)):
if (len(expected_shape) < 1) and actual_shape.is_compatible_with([1]):
# TODO(irving): Remove scalar special case.
with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
return array_ops.reshape(tensor, [])
raise ValueError('Invalid shape for tensor %s, expected %s, got %s.' % (
tensor.name, expected_shape, actual_shape))
return tensor
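# --- Illustrative usage sketch (editor's addition, not part of the original
# module): with fully defined static shapes, `with_shape` validates at
# graph-construction time and returns the tensor unchanged. Values are
# hypothetical.
def _example_with_shape():
  from tensorflow.python.framework import constant_op  # local import for the sketch
  t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])  # static shape [2, 2]
  checked = with_shape([2, 2], t)  # shapes match, so `t` is returned as-is
  # with_shape([3, 2], t) would raise ValueError here, before the graph runs.
  return checked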
def assert_scalar_int(tensor, name=None):
"""Assert `tensor` is 0-D, of type `tf.int32` or `tf.int64`.
Args:
tensor: `Tensor` to test.
name: Name of the op and of the new `Tensor` if one is created.
Returns:
`tensor`, for chaining.
Raises:
ValueError: if `tensor` is not 0-D, of integer type.
"""
with ops.name_scope(name, 'assert_scalar_int', [tensor]) as name_scope:
tensor = ops.convert_to_tensor(tensor)
data_type = tensor.dtype
if not data_type.base_dtype.is_integer:
raise ValueError('Expected integer type for %s, received type: %s.'
% (tensor.name, data_type))
return check_ops.assert_scalar(tensor, name=name_scope)
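# --- Illustrative usage sketch (editor's addition, not part of the original
# module): `assert_scalar_int` accepts 0-D integer tensors and rejects floats
# or non-scalars at graph-construction time. The value is hypothetical.
def _example_assert_scalar_int():
  from tensorflow.python.framework import constant_op  # local import for the sketch
  step = constant_op.constant(7, dtype=dtypes.int64)
  return assert_scalar_int(step)  # returns the tensor, so calls can be chained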
|
tensorflow-master
|
tensorflow/contrib/framework/python/framework/tensor_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module containing TensorFlow ops whose API may change in the future."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.framework.python.framework.checkpoint_utils import *
from tensorflow.contrib.framework.python.framework.experimental import experimental
from tensorflow.contrib.framework.python.framework.graph_util import *
from tensorflow.contrib.framework.python.framework.tensor_util import *
# pylint: enable=wildcard-import
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import nest
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.deprecation import deprecated_arg_values
from tensorflow.python.util.deprecation import deprecated_args
|
tensorflow-master
|
tensorflow/contrib/framework/python/framework/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import training as train
__all__ = [
"load_checkpoint",
"load_variable",
"list_variables",
"init_from_checkpoint"]
def _get_checkpoint_filename(filepattern):
"""Returns checkpoint filename given directory or specific filepattern."""
if gfile.IsDirectory(filepattern):
return checkpoint_management.latest_checkpoint(filepattern)
return filepattern
def load_checkpoint(filepattern):
"""Returns CheckpointReader for latest checkpoint.
Args:
filepattern: Directory with checkpoints file or path to checkpoint.
Returns:
`CheckpointReader` object.
Raises:
    ValueError: if `filepattern` is a directory that doesn't contain a
      'checkpoint' file or any checkpoints.
"""
filename = _get_checkpoint_filename(filepattern)
if filename is None:
raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
"given directory %s" % filepattern)
return train.NewCheckpointReader(filename)
def load_variable(checkpoint_dir, name):
"""Returns a Tensor with the contents of the given variable in the checkpoint.
Args:
checkpoint_dir: Directory with checkpoints file or path to checkpoint.
name: Name of the tensor to return.
Returns:
    A numpy `ndarray` with the value of the variable.
"""
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
name = name[:-2]
reader = load_checkpoint(checkpoint_dir)
return reader.get_tensor(name)
def list_variables(checkpoint_dir):
"""Returns list of all variables in the latest checkpoint.
Args:
checkpoint_dir: Directory with checkpoints file or path to checkpoint.
Returns:
List of tuples `(name, shape)`.
"""
reader = load_checkpoint(checkpoint_dir)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
result = []
for name in names:
result.append((name, variable_map[name]))
return result
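# --- Illustrative usage sketch (editor's addition, not part of the original
# module): combines `list_variables` and `load_variable` to dump a checkpoint's
# contents. The checkpoint directory is supplied by the caller.
def _example_dump_checkpoint(checkpoint_dir):
  for name, shape in list_variables(checkpoint_dir):
    value = load_variable(checkpoint_dir, name)
    print("%s %s %s" % (name, shape, value.dtype))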
# pylint: disable=protected-access
# Currently variable_scope doesn't provide very good APIs to access
# all variables under scope and retrieve and check existing scopes.
# TODO(ipolosukhin): Refactor variable_scope module to provide nicer APIs.
def _set_checkpoint_initializer(variable, file_pattern, tensor_name, slice_spec,
name="checkpoint_initializer"):
"""Sets variable initializer to assign op form value in checkpoint's tensor.
Args:
variable: `Variable` object.
file_pattern: string, where to load checkpoints from.
tensor_name: Name of the `Tensor` to load from checkpoint reader.
slice_spec: Slice specification for loading partitioned variables.
name: Name of the operation.
"""
base_type = variable.dtype.base_dtype
restore_op = io_ops.restore_v2(
file_pattern, [tensor_name], [slice_spec], [base_type], name=name)[0]
variable._initializer_op = state_ops.assign(variable, restore_op)
def _set_variable_or_list_initializer(variable_or_list, file_pattern,
tensor_name):
if isinstance(variable_or_list, (list, tuple)):
# A set of slices.
slice_name = None
for v in variable_or_list:
if slice_name is None:
slice_name = v._save_slice_info.full_name
elif slice_name != v._save_slice_info.full_name:
raise ValueError("Slices must all be from the same tensor: %s != %s" %
(slice_name, v._save_slice_info.full_name))
_set_checkpoint_initializer(v, file_pattern, tensor_name,
v._save_slice_info.spec)
else:
_set_checkpoint_initializer(variable_or_list, file_pattern, tensor_name, "")
def _collect_partitioned_variable(name, var_scope):
if name + "/part_0" in var_scope._vars:
var = []
i = 0
while name + "/part_%d" % i in var_scope._vars:
var.append(var_scope._vars[name + "/part_%d" % i])
i += 1
return var
return None
def init_from_checkpoint(checkpoint_dir, assignment_map):
"""Using assignment map initializes current variables with loaded tensors.
Note: This overrides default initialization ops of specified variables and
redefines dtype.
  The assignment map supports the following syntax:
* `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
current `scope_name` from `checkpoint_scope_name` with matching variable
names.
* `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
will initialize `scope_name/variable_name` variable
from `checkpoint_scope_name/some_other_variable`.
* `'scope_variable_name': variable` - will initialize given `tf.Variable`
object with variable from the checkpoint.
* `'scope_variable_name': list(variable)` - will initialize list of
partitioned variables with variable from the checkpoint.
* `'/': 'scope_name/'` - will load all variables in current `scope_name` from
    checkpoint's root (i.e. no scope).
Supports loading into partitioned variables, which are represented as
`'<variable>/part_<part #>'`.
Example:
```python
# Create variables.
with tf.compat.v1.variable_scope('test'):
m = tf.compat.v1.get_variable('my_var')
with tf.compat.v1.variable_scope('test2'):
var2 = tf.compat.v1.get_variable('my_var')
var3 = tf.compat.v1.get_variable(name="my1", shape=[100, 100],
partitioner=lambda shape, dtype: [5, 1])
...
# Specify which variables to initialize from checkpoint.
init_from_checkpoint(checkpoint_dir, {
'some_var': 'test/my_var',
'some_scope/': 'test2/'})
...
# Or use `Variable` objects to identify what to initialize.
init_from_checkpoint(checkpoint_dir, {
'some_scope/var2': var2,
})
# Initialize partitioned variables
init_from_checkpoint(checkpoint_dir, {
'some_var_from_ckpt': 'part_var',
})
# Or specifying the list of `Variable` objects.
init_from_checkpoint(checkpoint_dir, {
'some_var_from_ckpt': var3._get_variable_list(),
})
...
# Initialize variables as usual.
  session.run(tf.compat.v1.global_variables_initializer())
```
Args:
checkpoint_dir: Directory with checkpoints file or path to checkpoint.
assignment_map: Dict, where keys are names of the variables in the
checkpoint and values are current variables or names of current variables
(in default graph).
Raises:
tf.errors.OpError: If missing checkpoints or tensors in checkpoints.
ValueError: If missing variables in current graph.
"""
filepattern = _get_checkpoint_filename(checkpoint_dir)
reader = load_checkpoint(checkpoint_dir)
variable_map = reader.get_variable_to_shape_map()
for tensor_name_in_ckpt, current_var_or_name in six.iteritems(assignment_map):
var = None
# Check if this is Variable object or list of Variable objects (in case of
# partitioned variables).
is_var = lambda x: isinstance(x, variables.Variable)
if is_var(current_var_or_name) or (
isinstance(current_var_or_name, list)
and all(is_var(v) for v in current_var_or_name)):
var = current_var_or_name
else:
var_scope = vs._get_default_variable_store()
# Check if this variable is in var_store.
var = var_scope._vars.get(current_var_or_name, None)
# Also check if variable is partitioned as list.
if var is None:
var = _collect_partitioned_variable(current_var_or_name, var_scope)
if var is not None:
      # If a 1-to-1 mapping was provided, find the variable in the checkpoint.
if tensor_name_in_ckpt not in variable_map:
raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
tensor_name_in_ckpt, checkpoint_dir, variable_map
))
if is_var(var):
# Additional at-call-time checks.
if not var.get_shape().is_compatible_with(
variable_map[tensor_name_in_ckpt]):
raise ValueError(
"Shape of variable %s (%s) doesn't match with shape of "
"tensor %s (%s) from checkpoint reader." % (
var.name, str(var.get_shape()),
tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
))
var_name = var.name
else:
var_name = ",".join([v.name for v in var])
_set_variable_or_list_initializer(var, filepattern, tensor_name_in_ckpt)
logging.info("Initialize variable %s from checkpoint %s with %s" % (
var_name, checkpoint_dir, tensor_name_in_ckpt
))
else:
scopes = ""
# TODO(vihanjain): Support list of 'current_var_or_name' here.
if "/" in current_var_or_name:
scopes = current_var_or_name[:current_var_or_name.rindex("/")]
if not tensor_name_in_ckpt.endswith("/"):
raise ValueError(
"Assignment map with scope only name {} should map to scope only "
"{}. Should be 'scope/': 'other_scope/'.".format(
scopes, tensor_name_in_ckpt))
# If scope to scope mapping was provided, find all variables in the scope
# and create variable to variable mapping.
scope_variables = set()
for var_name in var_scope._vars:
if not scopes or var_name.startswith(scopes + "/"):
# Consume /part_ if partitioned variable.
if "/part_" in var_name:
var_name = var_name[:var_name.index("/part_")]
scope_variables.add(var_name)
for var_name in scope_variables:
# Lookup name with specified prefix and suffix from current variable.
# If tensor_name given is '/' (root), don't use it for full name.
full_tensor_name = var_name[len(scopes):]
if current_var_or_name != "/":
full_tensor_name = full_tensor_name[1:]
if tensor_name_in_ckpt != "/":
full_tensor_name = tensor_name_in_ckpt + full_tensor_name
if full_tensor_name not in variable_map:
raise ValueError(
"Tensor %s (%s in %s) is not found in %s checkpoint" % (
full_tensor_name, var_name[len(scopes) + 1:],
tensor_name_in_ckpt, checkpoint_dir
))
var = var_scope._vars.get(var_name, None)
if var is None:
var = _collect_partitioned_variable(var_name, var_scope)
_set_variable_or_list_initializer(var, filepattern, full_tensor_name)
logging.info("Initialize variable %s from checkpoint %s with %s" % (
var_name, checkpoint_dir, full_tensor_name
))
# pylint: enable=protected-access
|
tensorflow-master
|
tensorflow/contrib/framework/python/framework/checkpoint_utils.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers to manipulate a tensor graph in python.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import six
# pylint: disable=unused-import
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import ops
from tensorflow.python.framework.graph_util_impl import _assert_nodes_are_present
from tensorflow.python.framework.graph_util_impl import _bfs_for_reachable_nodes
from tensorflow.python.framework.graph_util_impl import _extract_graph_summary
from tensorflow.python.framework.graph_util_impl import _node_name
__all__ = ["fuse_op", "get_placeholders"]
def fuse_op(graph_def, input_nodes, output_nodes, output_dtypes,
output_quantized, op_name, op_type):
"""Fuse subgraph between input_nodes and output_nodes into a single custom op.
Args:
graph_def: A graph_pb2.GraphDef proto.
input_nodes: input nodes to the subgraph to be fused.
output_nodes: output nodes to the subgraph to be fused.
    output_dtypes: A list of output datatypes for the custom op.
    output_quantized: A boolean flag that indicates if the output is quantized.
op_name: fused op name.
op_type: fused op type.
Returns:
The GraphDef of the new graph.
Raises:
TypeError: If 'graph_def' is not a graph_pb2.GraphDef proto.
"""
if not isinstance(graph_def, graph_pb2.GraphDef):
raise TypeError("graph_def must be a graph_pb2.GraphDef proto.")
if isinstance(input_nodes, six.string_types):
raise TypeError("input_nodes must be a list.")
if isinstance(output_nodes, six.string_types):
raise TypeError("output_nodes must be a list.")
name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary(
graph_def)
_assert_nodes_are_present(name_to_node, input_nodes + output_nodes)
  # Nodes up to and including input_nodes
reachable_by_input = _bfs_for_reachable_nodes(input_nodes, name_to_input_name)
  # Nodes up to and including output_nodes
reachable_by_output = _bfs_for_reachable_nodes(output_nodes,
name_to_input_name)
# Set of nodes in the list input_nodes
input_nodes_set = set(input_nodes)
# Set of nodes in the list output_nodes
output_nodes_set = set(output_nodes)
nodes_post_output = []
for node in graph_def.node:
n = _node_name(node.name)
if n in reachable_by_output:
if n not in reachable_by_input and n not in output_nodes_set:
# n is between input and output, i.e., part of the fused op
next_to_visit = [n]
visited = set()
while next_to_visit:
cur_node = next_to_visit[0]
visited.add(cur_node)
del next_to_visit[0]
if cur_node in reachable_by_input and cur_node not in input_nodes_set:
raise TypeError("Node %s uses input %s not in input_nodes." %
(n, cur_node))
if cur_node not in input_nodes_set:
next_to_visit += [
input_node for input_node in name_to_input_name[cur_node]
if input_node not in visited
]
elif n not in reachable_by_input:
nodes_post_output.append(n)
  # Add all nodes up to the input nodes
out = graph_pb2.GraphDef()
reachable_by_input_sorted = sorted(
list(reachable_by_input), key=lambda n: name_to_seq_num[n])
for node in reachable_by_input_sorted:
out.node.extend([copy.deepcopy(name_to_node[node])])
# Add the custom op
new_node = node_def_pb2.NodeDef()
for node in input_nodes:
new_node.input.append(node)
new_node.attr["_output_types"].list.type[:] = output_dtypes
new_node.attr["_output_quantized"].b = output_quantized
new_node.op = op_type
new_node.name = op_name
out.node.extend([new_node])
# Add the nodes in the output of the custom op
for index, n in enumerate(output_nodes):
assert len(name_to_node[n].input) == 1
new_node = copy.deepcopy(name_to_node[n])
del new_node.input[:]
new_node.input.append(op_name + (":" + str(index) if index != 0 else ""))
out.node.extend([new_node])
# Add the nodes post output_nodes
for n in nodes_post_output:
out.node.extend([copy.deepcopy(name_to_node[n])])
out.library.CopyFrom(graph_def.library)
out.versions.CopyFrom(graph_def.versions)
return out
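# --- Illustrative usage sketch (editor's addition, not part of the original
# module): builds a tiny placeholder -> identity -> identity graph and fuses
# the interior node into one custom op. The op type "MyCustomOp" is
# hypothetical; running the fused graph would require a registered kernel.
def _example_fuse_identity_chain():
  from tensorflow.python.framework import dtypes  # local imports for the sketch
  from tensorflow.python.ops import array_ops
  g = ops.Graph()
  with g.as_default():
    a = array_ops.placeholder(dtypes.float32, shape=[2], name="a")
    b = array_ops.identity(a, name="b")
    _ = array_ops.identity(b, name="c")
  return fuse_op(
      g.as_graph_def(),
      input_nodes=["a"],
      output_nodes=["c"],
      output_dtypes=[dtypes.float32.as_datatype_enum],
      output_quantized=False,
      op_name="fused_b_c",
      op_type="MyCustomOp")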
def get_placeholders(graph):
"""Get placeholders of a graph.
For example:
```python
a = tf.compat.v1.placeholder(dtype=tf.float32, shape=[2, 2], name='a')
a = tf.compat.v1.placeholder(dtype=tf.int32, shape=[3, 2], name='b')
tf.contrib.framework.get_placeholders(tf.compat.v1.get_default_graph())
# Returns:
# [<tf.Tensor 'a:0' shape=(2, 2) dtype=float32>,
# <tf.Tensor 'b:0' shape=(3, 2) dtype=int32>]
```
Args:
graph: A tf.Graph.
Returns:
    A list containing all the placeholders of the given graph.
Raises:
TypeError: If `graph` is not a tensorflow graph.
"""
if not isinstance(graph, ops.Graph):
raise TypeError("Input graph needs to be a Graph: %s" % graph)
# For each placeholder() call, there is a corresponding
# operation of type 'Placeholder' registered to the graph.
  # In fact, the return value (a Tensor) of placeholder() is the first
  # output of this operation.
operations = graph.get_operations()
result = [i.outputs[0] for i in operations if i.type == "Placeholder"]
return result
|
tensorflow-master
|
tensorflow/contrib/framework/python/framework/graph_util.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tensor_util tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import numpy as np
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.contrib.framework.python.ops import variables as variables_lib2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
class LocalVariableTest(test.TestCase):
def test_local_variable(self):
with self.cached_session() as sess:
self.assertEquals([], variables_lib.local_variables())
value0 = 42
variables_lib2.local_variable(value0)
value1 = 43
variables_lib2.local_variable(value1)
variables = variables_lib.local_variables()
self.assertEquals(2, len(variables))
self.assertRaises(errors_impl.OpError, sess.run, variables)
variables_lib.variables_initializer(variables).run()
self.assertAllEqual(set([value0, value1]), set(sess.run(variables)))
class ReduceSumNTest(test.TestCase):
def test_reduce_sum_n(self):
with self.cached_session():
a = constant_op.constant(1)
b = constant_op.constant([2])
c = constant_op.constant([[3, 4], [5, 6]])
self.assertEqual(21, tensor_util.reduce_sum_n([a, b, c]).eval())
class AssertScalarIntTest(test.TestCase):
def test_assert_scalar_int(self):
tensor_util.assert_scalar_int(constant_op.constant(3, dtype=dtypes.int32))
tensor_util.assert_scalar_int(constant_op.constant(3, dtype=dtypes.int64))
tensor_util.assert_scalar_int(3)
with self.assertRaisesRegexp(ValueError, "Expected integer"):
tensor_util.assert_scalar_int(
constant_op.constant(
3, dtype=dtypes.float32))
with self.assertRaisesRegexp(ValueError, "Expected scalar"):
tensor_util.assert_scalar_int(
constant_op.constant(
[3, 4], dtype=dtypes.int32))
class WithShapeTest(test.TestCase):
def _assert_with_shape(self, tensor, expected_value, expected_shape,
unexpected_shapes):
for unexpected_shape in unexpected_shapes:
self.assertRaises(ValueError, tensor_util.with_shape, unexpected_shape,
tensor)
pattern = (
r"\[Wrong shape for %s \[expected\] \[actual\].\] \[%s\] \[%s\]" %
(tensor.name, " ".join([str(dim) for dim in unexpected_shape]),
" ".join([str(dim) for dim in expected_shape])))
self.assertRaisesRegexp(errors_impl.OpError,
re.compile(pattern),
tensor_util.with_shape(
constant_op.constant(unexpected_shape),
tensor).eval)
expected_placeholder = array_ops.placeholder(dtypes.float32)
self.assertRaisesRegexp(errors_impl.OpError,
re.compile(pattern),
tensor_util.with_same_shape(expected_placeholder,
tensor).eval,
{expected_placeholder: np.ones(unexpected_shape)})
self.assertIs(tensor, tensor_util.with_shape(expected_shape, tensor))
self.assertIs(
tensor,
tensor_util.with_same_shape(
constant_op.constant(
1, shape=expected_shape), tensor))
tensor_with_shape = tensor_util.with_shape(
constant_op.constant(expected_shape), tensor)
np.testing.assert_array_equal(expected_value, tensor_with_shape.eval())
tensor_with_same_shape = tensor_util.with_same_shape(expected_placeholder,
tensor)
np.testing.assert_array_equal(expected_value,
tensor_with_same_shape.eval({
expected_placeholder:
np.ones(expected_shape)
}))
def test_with_shape_invalid_expected_shape(self):
with self.cached_session():
self.assertRaisesRegexp(ValueError, "Invalid rank",
tensor_util.with_shape, [[1], [2]],
constant_op.constant(1.0))
def test_with_shape_invalid_type(self):
with self.cached_session():
self.assertRaisesRegexp(ValueError, "Invalid dtype",
tensor_util.with_shape, [1.1],
constant_op.constant([1.0]))
self.assertRaisesRegexp(ValueError, "Invalid dtype",
tensor_util.with_shape,
np.array([1.1]), constant_op.constant(1.0))
self.assertRaisesRegexp(ValueError, "Invalid dtype",
tensor_util.with_shape,
constant_op.constant(np.array([1.1])),
constant_op.constant(1.0))
def test_with_shape_0(self):
with self.cached_session():
value = 42
shape = [0]
unexpected_shapes = [[1], [2], [1, 1]]
self._assert_with_shape(
constant_op.constant(
value, shape=shape),
value,
shape,
unexpected_shapes)
def test_with_shape_1(self):
with self.cached_session():
value = [42]
shape = [1]
unexpected_shapes = [[0], [2], [1, 1]]
self._assert_with_shape(
constant_op.constant(
value, shape=shape),
value,
shape,
unexpected_shapes)
def test_with_shape_2(self):
with self.cached_session():
value = [42, 43]
shape = [2]
unexpected_shapes = [[0], [1], [2, 1]]
self._assert_with_shape(
constant_op.constant(
value, shape=shape),
value,
shape,
unexpected_shapes)
def test_with_shape_2x2(self):
with self.cached_session():
value = [[42, 43], [44, 45]]
shape = [2, 2]
unexpected_shapes = [[0], [1], [2, 1]]
self._assert_with_shape(
constant_op.constant(
value, shape=shape),
value,
shape,
unexpected_shapes)
def test_with_shape_2x2_with_partial_expected_shape(self):
with self.cached_session():
value = [[42, 43], [44, 45]]
actual_shape = [2, 2]
tensor = constant_op.constant(value, shape=actual_shape)
partial_expected_shape = tensor_shape.TensorShape([None, 2])
# Won't raise any exception here:
tensor_with_shape = tensor_util.with_shape(partial_expected_shape, tensor)
np.testing.assert_array_equal(value, tensor_with_shape.eval())
def test_with_shape_none(self):
with self.cached_session():
tensor_no_shape = array_ops.placeholder(dtypes.float32)
compatible_shape = [2, 2]
with_present_2x2 = tensor_util.with_shape(compatible_shape,
tensor_no_shape)
self.assertEquals(compatible_shape, with_present_2x2.get_shape().dims)
with_future_2x2 = tensor_util.with_shape(
constant_op.constant(compatible_shape), tensor_no_shape)
array_2x2 = [[42.0, 43.0], [44.0, 45.0]]
for tensor_2x2 in [with_present_2x2, with_future_2x2]:
np.testing.assert_array_equal(array_2x2,
tensor_2x2.eval({
tensor_no_shape: array_2x2
}))
self.assertRaisesRegexp(errors_impl.OpError, "Wrong shape",
tensor_2x2.eval,
{tensor_no_shape: [42.0, 43.0]})
self.assertRaisesRegexp(errors_impl.OpError, "Wrong shape",
tensor_2x2.eval, {tensor_no_shape: [42.0]})
def test_with_shape_partial(self):
with self.cached_session():
tensor_partial_shape = array_ops.placeholder(dtypes.float32)
tensor_partial_shape.set_shape([None, 2])
for incompatible_shape in [[0], [1]]:
self.assertRaisesRegexp(
ValueError, "Shapes must be equal rank, but are 2 and 1",
tensor_util.with_shape, incompatible_shape, tensor_partial_shape)
for incompatible_shape in [[1, 2, 1]]:
self.assertRaisesRegexp(ValueError, "Dimensions must be equal",
tensor_util.with_shape, incompatible_shape,
tensor_partial_shape)
for incompatible_shape in [[2, 1]]:
self.assertRaisesRegexp(
ValueError,
r"Dimension 1 in both shapes must be equal, but are 2 and 1. "
r"Shapes are \[\?,2\] and \[2,1\].",
tensor_util.with_shape, incompatible_shape, tensor_partial_shape)
compatible_shape = [2, 2]
with_present_2x2 = tensor_util.with_shape(compatible_shape,
tensor_partial_shape)
self.assertEquals(compatible_shape, with_present_2x2.get_shape().dims)
with_future_2x2 = tensor_util.with_shape(
constant_op.constant(compatible_shape), tensor_partial_shape)
array_2x2 = [[42.0, 43.0], [44.0, 45.0]]
for tensor_2x2 in [with_present_2x2, with_future_2x2]:
np.testing.assert_array_equal(array_2x2,
tensor_2x2.eval({
tensor_partial_shape: array_2x2
}))
self.assertRaises(ValueError, tensor_2x2.eval,
{tensor_partial_shape: [42.0, 43.0]})
self.assertRaises(ValueError, tensor_2x2.eval,
{tensor_partial_shape: [42.0]})
class RemoveSqueezableDimensionsTest(test.TestCase):
def testRemoveSqueezableDimensions(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=False,
labels_have_static_shape=False,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=False,
labels_have_static_shape=False,
labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_staticLabel(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=False,
labels_have_static_shape=True,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_staticLabel_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=False,
labels_have_static_shape=True,
labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_extraPredictionDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=True,
labels_have_static_shape=False,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_extraPredictionDim_staticLabel(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=True,
labels_have_static_shape=True,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_staticPrediction(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=False,
labels_have_static_shape=False,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_staticPrediction_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=False,
labels_have_static_shape=False,
labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_static(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=False,
labels_have_static_shape=True,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_static_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=False,
labels_have_static_shape=True,
labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_staticPrediction_extraPredictionDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=True,
labels_have_static_shape=False,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_static_extraPredictionDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=True,
labels_have_static_shape=True,
labels_have_extra_dim=False)
# TODO(ptucker): Replace this with parameterized test.
def _testRemoveSqueezableDimensions(self, predictions_have_static_shape,
predictions_have_extra_dim,
labels_have_static_shape,
labels_have_extra_dim):
assert not (predictions_have_extra_dim and labels_have_extra_dim)
predictions_value = (0, 1, 1, 0, 0, 1, 0)
labels_value = (0, 0, 1, 1, 0, 0, 0)
input_predictions_value = ([[p] for p in predictions_value] if
predictions_have_extra_dim else
predictions_value)
input_labels_value = ([[l] for l in labels_value] if labels_have_extra_dim
else labels_value)
with ops.Graph().as_default() as g:
feed_dict = {}
if predictions_have_static_shape:
predictions = constant_op.constant(
input_predictions_value, dtype=dtypes.int32)
else:
predictions = array_ops.placeholder(
dtype=dtypes.int32, name="predictions")
feed_dict[predictions] = input_predictions_value
if labels_have_static_shape:
labels = constant_op.constant(input_labels_value, dtype=dtypes.int32)
else:
labels = array_ops.placeholder(dtype=dtypes.int32, name="labels")
feed_dict[labels] = input_labels_value
squeezed_predictions, squeezed_labels = (
tensor_util.remove_squeezable_dimensions(predictions, labels))
with self.session(g):
variables_lib.local_variables_initializer().run()
self.assertAllClose(
predictions_value, squeezed_predictions.eval(feed_dict=feed_dict))
self.assertAllClose(
labels_value, squeezed_labels.eval(feed_dict=feed_dict))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/framework/python/framework/tensor_util_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""@experimental tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class ExperimentalTest(test.TestCase):
@test.mock.patch.object(logging, "warning", autospec=True)
def test_warning(self, mock_warning):
@experimental
def _fn(arg0, arg1):
"""fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
Returns:
Sum of args.
"""
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (experimental)"
"\n"
"\nWarning: THIS FUNCTION IS EXPERIMENTAL. It may change "
"or be removed at any time, and without warning."
"\n"
"\nArgs:"
"\n arg0: Arg 0."
"\n arg1: Arg 1."
"\n"
"\nReturns:"
"\n Sum of args.", _fn.__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"is experimental and may change")
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/framework/python/framework/experimental_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.framework.python.ops import prettyprint_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class PrettyPrintOpsTest(test.TestCase):
def testPrintTensorPassthrough(self):
a = constant_op.constant([1])
a = prettyprint_ops.print_op(a)
with self.cached_session():
self.assertEqual(a.eval(), constant_op.constant([1]).eval())
def testPrintSparseTensorPassthrough(self):
a = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
b = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
a = prettyprint_ops.print_op(a)
with self.cached_session():
self.assertAllEqual(
sparse_ops.sparse_tensor_to_dense(a).eval(),
sparse_ops.sparse_tensor_to_dense(b).eval())
def testPrintTensorArrayPassthrough(self):
a = tensor_array_ops.TensorArray(
size=2, dtype=dtypes.int32, clear_after_read=False)
a = a.write(1, 1)
a = a.write(0, 0)
a = prettyprint_ops.print_op(a)
with self.cached_session():
self.assertAllEqual(a.stack().eval(), constant_op.constant([0, 1]).eval())
def testPrintVariable(self):
a = variables.Variable(1.0)
a = prettyprint_ops.print_op(a)
with self.cached_session():
variables.global_variables_initializer().run()
a.eval()
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/framework/python/ops/prettyprint_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script Language Operators.
@@py_func
"""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops.script_ops import py_func as _py_func
from tensorflow.python.util import nest
__all__ = ['py_func']
def py_func(func,
args=(),
kwargs=None,
output_types=None,
output_shapes=None,
stateful=True,
name=None):
"""Wraps a python function and uses it as a TensorFlow op.
  This function is a wrapper around `tf.compat.v1.py_func` that adds support
  for kwargs and output_shapes, and renames some of the arguments.
Given a python function `func`, which takes numpy arrays as its
inputs and returns numpy arrays as its outputs, wrap this function as an
operation in a TensorFlow graph. The following snippet constructs a simple
  TensorFlow graph that invokes the `np.sinh()` NumPy function as an operation
in the graph:
```python
def my_func(x):
# x will be a numpy array with the contents of the placeholder below
return np.sinh(x)
inp = tf.compat.v1.placeholder(tf.float32)
y = tf.compat.v1.py_func(my_func, [inp], tf.float32)
```
**N.B.** The `tf.compat.v1.py_func()` operation has the following known
limitations:
* The body of the function (i.e. `func`) will not be serialized in a
`GraphDef`. Therefore, you should not use this function if you need to
serialize your model and restore it in a different environment.
  * The operation must run in the same address space as the Python program
    that calls `tf.compat.v1.py_func()`. If you are using distributed
    TensorFlow, you must run a `tf.distribute.Server` in the same process as
    the program that calls `tf.compat.v1.py_func()`, and you must pin the
    created operation to a device in that server (e.g. using
    `with tf.device():`).
Args:
func: A Python function, which accepts a list of NumPy `ndarray` objects
having element types that match the corresponding `tf.Tensor` objects in
`inp`, and returns a list of `ndarray` objects (or a single `ndarray`)
having element types that match the corresponding values in `Tout`.
args: A list of `Tensor` objects.
kwargs: A dict with `Tensor` objects as values.
output_types: A nested structure of tensorflow data types or a single
tensorflow data type if there is only one, indicating what `func` returns.
    output_shapes: Same as output_types, except the types are replaced with
shapes (optional).
stateful: (Boolean.) If True, the function should be considered stateful. If
a function is stateless, when given the same input it will return the same
output and have no observable side effects. Optimizations such as common
subexpression elimination are only performed on stateless operations.
name: A name for the operation (optional).
Returns:
    A TensorFlow op that wraps the input Python function.
"""
if kwargs is None:
kwargs = {}
  if not isinstance(args, (list, tuple)):
    raise TypeError('args must be a list or tuple, not {}. args: {}'.format(
        type(args), args))
  if not isinstance(kwargs, dict):
    raise TypeError('kwargs must be a dict, not {}. kwargs: {}'.format(
        type(kwargs), kwargs))
# For dynamic type inference use callable output_types and output_shapes
if callable(output_types):
# If callable assume same signature and call with tensors and get the types
output_types = output_types(*args, **kwargs)
if callable(output_shapes):
# If callable assume same signature and call with tensors and get the shapes
output_shapes = output_shapes(*args, **kwargs)
flat_output_types = nest.flatten(output_types)
args = (args, kwargs)
flat_args = nest.flatten(args)
def python_function_wrapper(*py_args):
py_args, py_kwargs = nest.pack_sequence_as(args, py_args)
ret = func(*py_args, **py_kwargs)
    # TODO(alextp): Catch exceptions and improve the message, because
    # TensorFlow is not able to preserve the traceback, i.e. the exception
    # does not contain any information about where it was raised.
nest.assert_shallow_structure(output_types, ret)
return nest.flatten(ret)
flat_values = _py_func(
python_function_wrapper,
flat_args,
flat_output_types,
stateful=stateful,
name=name)
if output_shapes is not None:
    # I am not sure if this is necessary
output_shapes = nest.map_structure_up_to(output_types,
tensor_shape.as_shape,
output_shapes)
flattened_shapes = nest.flatten(output_shapes)
for ret_t, shape in zip(flat_values, flattened_shapes):
ret_t.set_shape(shape)
return nest.pack_sequence_as(output_types, flat_values)
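# The sketch below is an illustrative usage example for the wrapper above, not
# part of the public API. It assumes TF 1.x graph mode (the era this module
# targets); the helper name `_example_scaled_sinh` and all values are made up.
def _example_scaled_sinh():
  import numpy as np
  import tensorflow.compat.v1 as tf
  def scaled_sinh(x, scale):
    # Receives NumPy values, returns a NumPy array.
    return scale * np.sinh(x)
  inp = tf.placeholder(tf.float32, shape=[3])
  # kwargs and output_shapes are the two extensions this wrapper adds.
  out = py_func(scaled_sinh,
                args=(inp,),
                kwargs={'scale': tf.constant(2.0)},
                output_types=tf.float32,
                output_shapes=[3])
  with tf.Session() as sess:
    return sess.run(out, feed_dict={inp: [0.0, 1.0, 2.0]})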
|
tensorflow-master
|
tensorflow/contrib/framework/python/ops/script_ops.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variable functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import re
from tensorflow.contrib.framework.python.ops import add_arg_scope as contrib_add_arg_scope
from tensorflow.contrib.framework.python.ops import gen_variable_ops
from tensorflow.contrib.util import loader
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import training_util
from tensorflow.python.util.deprecation import deprecated
__all__ = ['add_model_variable',
'assert_global_step',
'assert_or_get_global_step',
'assign_from_checkpoint',
'assign_from_checkpoint_fn',
'assign_from_values',
'assign_from_values_fn',
'create_global_step',
'filter_variables',
'get_global_step',
'get_or_create_global_step',
'get_local_variables',
'get_model_variables',
'get_trainable_variables',
'get_unique_variable',
'get_variables_by_name',
'get_variables_by_suffix',
'get_variable_full_name',
'get_variables_to_restore',
'get_variables',
'global_variable',
'local_variable',
'model_variable',
'variable',
'VariableDeviceChooser',
'zero_initializer']
def zero_initializer(ref, use_locking=True, name="zero_initializer"):
"""Initialize 'ref' with all zeros, ref tensor should be uninitialized.
If already initialized, you will get ValueError. This op is intended to
save memory during initialization.
Args:
ref: ref of the tensor need to be zero initialized.
name: optional name for this operation.
Returns:
ref that initialized.
Raises:
ValueError: If ref tensor is initialized.
"""
loader.load_op_library(
resource_loader.get_path_to_datafile('_variable_ops.so'))
if resource_variable_ops.is_resource_variable(ref):
return gen_variable_ops.zero_var_initializer(
ref.handle, shape=ref.shape, dtype=ref.dtype, name=name)
else:
return gen_variable_ops.zero_initializer(ref, name=name)
@deprecated(None, 'Please switch to tf.train.assert_global_step')
def assert_global_step(global_step_tensor):
training_util.assert_global_step(global_step_tensor)
def assert_or_get_global_step(graph=None, global_step_tensor=None):
"""Verifies that a global step tensor is valid or gets one if None is given.
If `global_step_tensor` is not None, check that it is a valid global step
tensor (using `assert_global_step`). Otherwise find a global step tensor using
`get_global_step` and return it.
Args:
graph: The graph to find the global step tensor for.
global_step_tensor: The tensor to check for suitability as a global step. If
None is given (the default), find a global step tensor.
Returns:
A tensor suitable as a global step, or `None` if none was provided and none
was found.
"""
if global_step_tensor is None:
# Get the global step tensor the same way the supervisor would.
global_step_tensor = get_global_step(graph)
else:
assert_global_step(global_step_tensor)
return global_step_tensor
@deprecated(None, 'Please switch to tf.train.get_global_step')
def get_global_step(graph=None):
return training_util.get_global_step(graph)
@deprecated(None, 'Please switch to tf.train.create_global_step')
def create_global_step(graph=None):
"""Create global step tensor in graph.
This API is deprecated. Use core framework training version instead.
Args:
graph: The graph in which to create the global step tensor. If missing, use
default graph.
Returns:
Global step tensor.
Raises:
ValueError: if global step tensor is already defined.
"""
return training_util.create_global_step(graph)
@deprecated(None, 'Please switch to tf.train.get_or_create_global_step')
def get_or_create_global_step(graph=None):
"""Returns and create (if necessary) the global step tensor.
Args:
graph: The graph in which to create the global step tensor. If missing, use
default graph.
Returns:
The global step tensor.
"""
return training_util.get_or_create_global_step(graph)
def local_variable(initial_value,
validate_shape=True,
name=None,
use_resource=None):
"""Create a variable with a value and add it to `GraphKeys.LOCAL_VARIABLES`.
Args:
initial_value: See variables.Variable.__init__.
validate_shape: See variables.Variable.__init__.
name: See variables.Variable.__init__.
use_resource: If `True` use a ResourceVariable instead of a Variable.
Returns:
New variable.
"""
return variable_scope.variable(
initial_value,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
validate_shape=validate_shape,
use_resource=use_resource,
name=name)
def global_variable(initial_value,
validate_shape=True,
name=None,
use_resource=None):
"""Create a variable with a value and add it to `GraphKeys.GLOBAL_VARIABLES`.
Args:
initial_value: See variables.Variable.__init__.
validate_shape: See variables.Variable.__init__.
name: See variables.Variable.__init__.
use_resource: If `True` use a ResourceVariable instead of a Variable.
Returns:
New variable.
"""
return variable_scope.variable(
initial_value,
trainable=False,
collections=[ops.GraphKeys.GLOBAL_VARIABLES],
validate_shape=validate_shape,
use_resource=use_resource,
name=name)
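# Illustrative sketch (assumes TF 1.x graph mode): shows which collection each
# helper above registers its variable in. The function name and values are
# made up for demonstration purposes only.
def _example_variable_collections():
  import tensorflow.compat.v1 as tf
  with tf.Graph().as_default():
    local_variable(0, name='counter')    # joins GraphKeys.LOCAL_VARIABLES
    global_variable(0.0, name='total')   # joins GraphKeys.GLOBAL_VARIABLES
    return tf.local_variables(), tf.global_variables()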
@contrib_add_arg_scope
def variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
device=None,
partitioner=None,
custom_getter=None,
use_resource=None,
synchronization=variables.VariableSynchronization.AUTO,
aggregation=variables.VariableAggregation.NONE):
"""Gets an existing variable with these parameters or creates a new one.
Args:
name: the name of the new or existing variable.
shape: shape of the new or existing variable.
dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: initializer for the variable if one is created.
regularizer: a (Tensor -> Tensor or None) function; the result of applying
it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    collections: A list of collection names to which the Variable will be added.
      If None, it defaults to `tf.GraphKeys.GLOBAL_VARIABLES`.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's device.
    device: Optional device to place the variable. It can be a string or a
      function that is called to get the device for the variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
custom_getter: Callable that allows overwriting the internal get_variable
method and has to have the same signature.
use_resource: If `True` use a ResourceVariable instead of a Variable.
    synchronization: Indicates when a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing variable.
"""
collections = list(collections if collections is not None else
[ops.GraphKeys.GLOBAL_VARIABLES])
# Remove duplicates
collections = list(set(collections))
getter = variable_scope.get_variable
if custom_getter is not None:
getter = functools.partial(
custom_getter, reuse=variable_scope.get_variable_scope().reuse)
with ops.device(device or ''):
return getter(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation)
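# Illustrative sketch (assumes TF 1.x graph mode): because `variable` is
# decorated with `add_arg_scope`, shared defaults such as `device` or `dtype`
# can be supplied once via `arg_scope`. Names and values are made up.
def _example_variable_with_arg_scope():
  import tensorflow.compat.v1 as tf
  from tensorflow.contrib.framework.python.ops import arg_scope
  with tf.Graph().as_default():
    with arg_scope([variable], device='/cpu:0', dtype=dtypes.float32):
      w = variable('w', shape=[2], initializer=tf.zeros_initializer())
      b = variable('b', shape=[2], initializer=tf.zeros_initializer())
    return w, b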
@contrib_add_arg_scope
def model_variable(name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
device=None,
partitioner=None,
custom_getter=None,
use_resource=None,
synchronization=variables.VariableSynchronization.AUTO,
aggregation=variables.VariableAggregation.NONE):
"""Gets an existing model variable with these parameters or creates a new one.
Args:
name: the name of the new or existing variable.
shape: shape of the new or existing variable.
dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: initializer for the variable if one is created.
regularizer: a (Tensor -> Tensor or None) function; the result of applying
it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: A list of collection names to which the Variable will be added.
Note that the variable is always also added to the
`GraphKeys.GLOBAL_VARIABLES` and `GraphKeys.MODEL_VARIABLES` collections.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's device.
    device: Optional device to place the variable. It can be a string or a
      function that is called to get the device for the variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
custom_getter: Callable that allows overwriting the internal get_variable
method and has to have the same signature.
use_resource: If `True` use a ResourceVariable instead of a Variable.
    synchronization: Indicates when a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing variable.
"""
collections = list(collections or [])
collections += [ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.MODEL_VARIABLES]
var = variable(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
device=device,
partitioner=partitioner,
custom_getter=custom_getter,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation)
return var
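# Illustrative sketch (assumes TF 1.x graph mode): a model variable is always
# registered in both the GLOBAL_VARIABLES and MODEL_VARIABLES collections,
# which is what `get_model_variables()` below filters on. Names are made up.
def _example_model_variable():
  import tensorflow.compat.v1 as tf
  with tf.Graph().as_default():
    weights = model_variable('weights', shape=[3, 2],
                             initializer=tf.zeros_initializer())
    model_vars = ops.get_collection(ops.GraphKeys.MODEL_VARIABLES)
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    return weights, model_vars, global_vars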
def add_model_variable(var):
"""Adds a variable to the `GraphKeys.MODEL_VARIABLES` collection.
Args:
var: a variable.
"""
if var not in ops.get_collection(ops.GraphKeys.MODEL_VARIABLES):
ops.add_to_collection(ops.GraphKeys.MODEL_VARIABLES, var)
def get_variables(scope=None,
suffix=None,
collection=ops.GraphKeys.GLOBAL_VARIABLES):
"""Gets the list of variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return. Can be a
variable scope or a string.
suffix: an optional suffix for filtering the variables to return.
    collection: the collection to search in. Defaults to
      `GraphKeys.GLOBAL_VARIABLES`.
Returns:
a list of variables in collection with scope and suffix.
"""
if isinstance(scope, variable_scope.VariableScope):
scope = scope.name
if suffix is not None:
if ':' not in suffix:
suffix += ':'
scope = (scope or '') + '.*' + suffix
return ops.get_collection(collection, scope)
def get_model_variables(scope=None, suffix=None):
"""Gets the list of model variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a list of variables in collection with scope and suffix.
"""
return get_variables(scope, suffix, ops.GraphKeys.MODEL_VARIABLES)
def get_local_variables(scope=None, suffix=None):
"""Gets the list of local variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a list of variables in collection with scope and suffix.
"""
return get_variables(scope, suffix, ops.GraphKeys.LOCAL_VARIABLES)
def get_trainable_variables(scope=None, suffix=None):
"""Gets the list of trainable variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a list of variables in the trainable collection with scope and suffix.
"""
return get_variables(scope, suffix, ops.GraphKeys.TRAINABLE_VARIABLES)
def get_variables_to_restore(include=None, exclude=None):
"""Gets the list of the variables to restore.
Args:
    include: an optional list/tuple of scope strings for filtering which
      variables from the VARIABLES collection to include. If None, all
      variables are included.
    exclude: an optional list/tuple of scope strings for filtering which
      variables from the VARIABLES collection to exclude. If None, no
      variables are excluded.
Returns:
a list of variables to restore.
Raises:
TypeError: include or exclude is provided but is not a list or a tuple.
"""
if include is None:
# Include all variables.
vars_to_include = get_variables()
else:
if not isinstance(include, (list, tuple)):
raise TypeError('include is provided but is not a list or a tuple.')
vars_to_include = []
for scope in include:
vars_to_include += get_variables(scope)
vars_to_exclude = set()
if exclude is not None:
if not isinstance(exclude, (list, tuple)):
raise TypeError('exclude is provided but is not a list or a tuple.')
for scope in exclude:
vars_to_exclude |= set(get_variables(scope))
# Exclude the variables in vars_to_exclude
return [v for v in vars_to_include if v not in vars_to_exclude]
def get_variables_by_suffix(suffix, scope=None):
"""Gets the list of variables that end with the given suffix.
Args:
suffix: suffix for filtering the variables to return.
scope: an optional scope for filtering the variables to return.
Returns:
    a copied list of variables with the given suffix and scope.
"""
return get_variables(scope=scope, suffix=suffix)
def get_variables_by_name(given_name, scope=None):
"""Gets the list of variables that were given that name.
Args:
given_name: name given to the variable without any scope.
scope: an optional scope for filtering the variables to return.
Returns:
a copied list of variables with the given name and scope.
"""
suffix = '/' + given_name + ':|^' + given_name + ':'
return get_variables(scope=scope, suffix=suffix)
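# Illustrative sketch (assumes TF 1.x graph mode): shows how the scope/suffix
# filters above resolve against names such as 'layer1/weights:0'. The scope
# and variable names are made up.
def _example_get_variables_filters():
  import tensorflow.compat.v1 as tf
  with tf.Graph().as_default():
    with tf.variable_scope('layer1'):
      tf.get_variable('weights', shape=[2])
      tf.get_variable('biases', shape=[2])
    in_scope = get_variables('layer1')                       # both variables
    by_name = get_variables_by_name('weights')               # [layer1/weights]
    by_suffix = get_variables_by_suffix('biases', 'layer1')  # [layer1/biases]
    return in_scope, by_name, by_suffix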
def get_unique_variable(var_op_name):
"""Gets the variable uniquely identified by that var_op_name.
Args:
var_op_name: the full name of the variable op, including the scope.
Returns:
a tensorflow variable.
Raises:
ValueError: if no variable uniquely identified by the name exists.
"""
candidates = get_variables(scope=var_op_name)
if not candidates:
raise ValueError('Couldn\'t find variable %s' % var_op_name)
for candidate in candidates:
if candidate.op.name == var_op_name:
return candidate
raise ValueError('Variable %s does not uniquely identify a variable' %
var_op_name)
def assign_from_values(var_names_to_values):
"""Creates an assignment operation from a given mapping.
This function provides a mechanism for performing assignment of variables
to values in a way that does not fill the graph with large assignment values.
Args:
var_names_to_values: A map from variable names to values.
Returns:
assign_op: An `Operation` that assigns each of the given variables to the
requested values.
feed_dict: The feed dictionary to use when evaluating `assign_op`.
Raises:
ValueError: if any of the given variable names were not found.
"""
feed_dict = {}
assign_ops = []
for var_name in var_names_to_values:
var_value = var_names_to_values[var_name]
var = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, var_name)
if not var:
raise ValueError('Variable %s wasn\'t found' % var_name)
elif len(var) > 1:
# tf.compat.v1.get_collection is just a filter on the prefix: find the exact match:
found = False
for v in var:
if v.op.name == var_name:
var = v
found = True
break
if not found:
raise ValueError('Variable %s doesn\'t uniquely identify a variable' %
var_name)
else:
var = var[0]
# TODO(nsilberman): ensure placeholder and assign are on the same device.
# Assign a placeholder to the value that will be filled later.
placeholder_name = 'placeholder/' + var.op.name
placeholder_value = array_ops.placeholder(
dtype=var.dtype.base_dtype,
shape=var.get_shape(),
name=placeholder_name)
assign_ops.append(var.assign(placeholder_value))
feed_dict[placeholder_value] = var_value.reshape(var.get_shape())
assign_op = control_flow_ops.group(*assign_ops)
return assign_op, feed_dict
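# Illustrative sketch (assumes TF 1.x graph mode): the (assign_op, feed_dict)
# pair returned above is run once in a session, so the large values live only
# in the feed and never become graph constants. Names are made up.
def _example_assign_from_values():
  import numpy as np
  import tensorflow.compat.v1 as tf
  with tf.Graph().as_default():
    weights = tf.get_variable('weights', shape=[2, 2])
    assign_op, feed_dict = assign_from_values(
        {'weights': np.ones((2, 2), dtype=np.float32)})
    with tf.Session() as sess:
      sess.run(assign_op, feed_dict)
      return sess.run(weights)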
def assign_from_values_fn(var_names_to_values):
"""Returns a function that assigns specific variables from the given values.
This function provides a mechanism for performing assignment of variables
to values in a way that does not fill the graph with large assignment values.
Args:
var_names_to_values: A map from variable names to values.
Returns:
    A function that takes a single argument, a `tf.compat.v1.Session`, and
    applies the assignment operation.
Raises:
ValueError: if any of the given variable names were not found.
"""
assign_op, feed_dict = assign_from_values(var_names_to_values)
def callback(session):
return session.run(assign_op, feed_dict)
return callback
# pylint: disable=protected-access
# Currently variable_scope doesn't provide very good APIs to access
# all variables under scope and retrieve and check existing scopes.
def get_variable_full_name(var):
"""Returns the full name of a variable.
For normal Variables, this is the same as the var.op.name. For
sliced or PartitionedVariables, this name is the same for all the
slices/partitions. In both cases, this is normally the name used in
a checkpoint file.
Args:
var: A `Variable` object.
Returns:
A string that is the full name.
"""
if var._save_slice_info:
return var._save_slice_info.full_name
else:
return var.op.name
# TODO(nsilberman): add flag to load exponential moving averages instead
#
# TODO(sguada): Update docs in slim/g3doc/index.md to describe
# the new feature where the var_list dictionary can have values that
# are each a list of Variables.
def assign_from_checkpoint(model_path, var_list, ignore_missing_vars=False):
"""Creates an operation to assign specific variables from a checkpoint.
Args:
model_path: The full path to the model checkpoint. To get latest checkpoint
use `model_path = tf.train.latest_checkpoint(checkpoint_dir)`
var_list: A list of (possibly partitioned) `Variable` objects or a
dictionary mapping names in the checkpoint to the corresponding variables
or list of variables to initialize from that checkpoint value. For
partitioned Variables, the name in the checkpoint must be the full
variable, not the name of the partitioned variable, eg. "my_var" rather
than "my_var/part_4". If empty, returns no_op(), {}.
ignore_missing_vars: Boolean, if True ignore variables missing in the
checkpoint with a warning instead of failing.
Returns:
the restore_op and the feed_dict that need to be run to restore var_list.
Raises:
ValueError: If `ignore_missing_vars` is False and the checkpoint specified
at `model_path` is missing one of the variables in `var_list`.
"""
# Normalize var_list into a dictionary mapping names in the
# checkpoint to the list of variables to initialize from that
# checkpoint variable. Sliced (including partitioned) variables will
# end up under the same key.
grouped_vars = {}
if isinstance(var_list, (tuple, list)):
for var in var_list:
ckpt_name = get_variable_full_name(var)
if ckpt_name not in grouped_vars:
grouped_vars[ckpt_name] = []
grouped_vars[ckpt_name].append(var)
else:
for ckpt_name, value in var_list.items():
if isinstance(value, (tuple, list)):
grouped_vars[ckpt_name] = value
else:
grouped_vars[ckpt_name] = [value]
# Read each checkpoint entry. Create a placeholder variable and
# add the (possibly sliced) data from the checkpoint to the feed_dict.
reader = pywrap_tensorflow.NewCheckpointReader(model_path)
feed_dict = {}
assign_ops = []
for ckpt_name in grouped_vars:
if not reader.has_tensor(ckpt_name):
log_str = 'Checkpoint is missing variable [%s]' % ckpt_name
if ignore_missing_vars:
logging.warning(log_str)
continue
else:
raise ValueError(log_str)
ckpt_value = reader.get_tensor(ckpt_name)
for var in grouped_vars[ckpt_name]:
placeholder_tensor = array_ops.placeholder(
dtype=var.dtype.base_dtype,
shape=var.get_shape(),
name='placeholder/' + var.op.name)
assign_ops.append(var.assign(placeholder_tensor))
if not var._save_slice_info:
if var.get_shape() != ckpt_value.shape:
raise ValueError(
'Total size of new array must be unchanged for %s '
'lh_shape: [%s], rh_shape: [%s]' %
(ckpt_name, str(ckpt_value.shape), str(var.get_shape())))
feed_dict[placeholder_tensor] = ckpt_value.reshape(ckpt_value.shape)
else:
slice_dims = zip(var._save_slice_info.var_offset,
var._save_slice_info.var_shape)
slice_dims = [(start, start + size) for (start, size) in slice_dims]
        # Use a tuple of slices for NumPy multi-dimensional indexing.
        slice_dims = tuple(slice(*x) for x in slice_dims)
        slice_value = ckpt_value[slice_dims]
slice_value = slice_value.reshape(var._save_slice_info.var_shape)
feed_dict[placeholder_tensor] = slice_value
assign_op = control_flow_ops.group(*assign_ops)
return assign_op, feed_dict
# pylint: enable=protected-access
def assign_from_checkpoint_fn(model_path,
var_list,
ignore_missing_vars=False,
reshape_variables=False):
"""Returns a function that assigns specific variables from a checkpoint.
If ignore_missing_vars is True and no variables are found in the checkpoint
it returns None.
Args:
model_path: The full path to the model checkpoint. To get latest checkpoint
use `model_path = tf.train.latest_checkpoint(checkpoint_dir)`
var_list: A list of `Variable` objects or a dictionary mapping names in the
checkpoint to the corresponding variables to initialize. If empty or
`None`, it would return `no_op(), None`.
    ignore_missing_vars: Boolean, if True variables missing in the checkpoint
      are ignored with a warning instead of causing an error.
    reshape_variables: Boolean, if True variables whose shape differs from the
      shape stored in the checkpoint but which have the same number of
      elements are automatically reshaped.
Returns:
    A function that takes a single argument, a `tf.compat.v1.Session`, and
    applies the assignment operation. If no matching variables were found in
    the checkpoint, `None` is returned.
Raises:
ValueError: If var_list is empty.
"""
if not var_list:
raise ValueError('var_list cannot be empty')
if ignore_missing_vars:
reader = pywrap_tensorflow.NewCheckpointReader(model_path)
if isinstance(var_list, dict):
var_dict = var_list
else:
var_dict = {var.op.name: var for var in var_list}
available_vars = {}
for var in var_dict:
if reader.has_tensor(var):
available_vars[var] = var_dict[var]
else:
logging.warning('Variable %s missing in checkpoint %s', var, model_path)
var_list = available_vars
if var_list:
saver = tf_saver.Saver(
var_list,
reshape=reshape_variables,
write_version=saver_pb2.SaverDef.V1)
def callback(session):
saver.restore(session, model_path)
return callback
else:
logging.warning('No Variables to restore')
return None
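# Illustrative sketch (assumes TF 1.x graph mode and that a model graph has
# already been built): the returned callback is typically used as an init_fn
# and called once with a live session. The 'logits' scope and the checkpoint
# path argument are made up for demonstration.
def _example_assign_from_checkpoint_fn(checkpoint_path):
  import tensorflow.compat.v1 as tf
  variables_to_restore = get_variables_to_restore(exclude=['logits'])
  init_fn = assign_from_checkpoint_fn(checkpoint_path, variables_to_restore,
                                      ignore_missing_vars=True)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    if init_fn is not None:
      init_fn(sess)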
class VariableDeviceChooser(object):
"""Device chooser for variables.
  When using parameter servers, it assigns variables to parameter-server tasks
  in a round-robin fashion. When not using parameter servers, it allows GPU or
  CPU placement.
"""
def __init__(self,
num_tasks=0,
job_name='ps',
device_type='CPU',
device_index=0,
replica=None):
"""Initialize VariableDeviceChooser.
Usage:
To use with 2 parameter servers:
VariableDeviceChooser(2)
To use without parameter servers:
VariableDeviceChooser()
VariableDeviceChooser(device_type='GPU') # For GPU placement
Args:
num_tasks: number of tasks.
job_name: String, a name for the parameter server job.
      device_type: Optional device type string (e.g. "CPU" or "GPU").
      device_index: int. Optional device index. If left unspecified, device
        represents 'any' device_index.
      replica: Optional replica index for the device specification.
    """
self._job_name = job_name
self._device_type = device_type
self._device_index = device_index
self._replica = replica
self._num_tasks = num_tasks
self._next_task_id = 0
def __call__(self, op):
device_spec = tf_device.DeviceSpec(
replica=self._replica,
device_type=self._device_type,
device_index=self._device_index)
if self._num_tasks > 0:
task_id = self._next_task_id
self._next_task_id = (self._next_task_id + 1) % self._num_tasks
device_spec.job = self._job_name
device_spec.task = task_id
return device_spec.to_string()
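# Illustrative sketch: calling the chooser directly shows the round-robin
# placement it produces; in practice it is passed as the `device` argument of
# `variable()`/`model_variable()` above. Two parameter-server tasks assumed.
def _example_variable_device_chooser():
  chooser = VariableDeviceChooser(num_tasks=2)
  first = chooser(None)   # e.g. '/job:ps/task:0/device:CPU:0'
  second = chooser(None)  # e.g. '/job:ps/task:1/device:CPU:0'
  return first, second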
def filter_variables(var_list,
include_patterns=None,
exclude_patterns=None,
reg_search=True):
"""Filter a list of variables using regular expressions.
First includes variables according to the list of include_patterns.
Afterwards, eliminates variables according to the list of exclude_patterns.
For example, one can obtain a list of variables with the weights of all
convolutional layers (depending on the network definition) by:
```python
variables = tf.contrib.framework.get_model_variables()
conv_weight_variables = tf.contrib.framework.filter_variables(
variables,
include_patterns=['Conv'],
exclude_patterns=['biases', 'Logits'])
```
Args:
var_list: list of variables.
    include_patterns: list of regular expressions to include. Defaults to None,
      which means all variables pass the include filter. A variable is included
      if it matches any of the include_patterns.
    exclude_patterns: list of regular expressions to exclude. Defaults to None,
      which means no variables are excluded. A variable is excluded if it
      matches any of the exclude_patterns.
reg_search: boolean. If True (default), performs re.search to find matches
(i.e. pattern can match any substring of the variable name). If False,
performs re.match (i.e. regexp should match from the beginning of the
variable name).
Returns:
filtered list of variables.
"""
if reg_search:
reg_exp_func = re.search
else:
reg_exp_func = re.match
# First include variables.
if include_patterns is None:
included_variables = list(var_list)
else:
included_variables = []
for var in var_list:
if any(reg_exp_func(ptrn, var.name) for ptrn in include_patterns):
included_variables.append(var)
# Afterwards, exclude variables.
if exclude_patterns is None:
filtered_variables = included_variables
else:
filtered_variables = []
for var in included_variables:
if not any(reg_exp_func(ptrn, var.name) for ptrn in exclude_patterns):
filtered_variables.append(var)
return filtered_variables
|
tensorflow-master
|
tensorflow/contrib/framework/python/ops/variables.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""arg_scope tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.framework.python.ops import arg_scoped_arguments
from tensorflow.python.platform import test
@add_arg_scope
def func1(*args, **kwargs):
return (args, kwargs)
@add_arg_scope
def func2(*args, **kwargs):
return (args, kwargs)
@add_arg_scope
def func3(args, a=None, b=1, c=2):
"""Some cool doc string."""
return (args, a, b, c)
@add_arg_scope
def func4(x='x', y='y'):
if x:
pass
if y:
pass
def _key_op(op):
return getattr(op, '_key_op', str(op))
class ArgScopeTest(test.TestCase):
def testEmptyArgScope(self):
with self.cached_session():
with arg_scope([]) as sc:
self.assertEqual(sc, {})
def testClearArgScope(self):
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
key_op = _key_op(func1)
func1_scope = {key_op: func1_kwargs.copy()}
with self.cached_session():
with arg_scope([func1], a=1, b=None, c=[1]) as sc1:
self.assertEqual(sc1, func1_scope)
with arg_scope({}) as sc2:
self.assertEqual(sc2, {})
with arg_scope([]) as current_arg_scope:
self.assertEqual(current_arg_scope, func1_scope)
def testNonDecorated(self):
def my_func(t, a=None):
return (t, a)
with self.assertRaises(ValueError):
with arg_scope([my_func], a=1):
pass
def testUnexpectedArg(self):
with self.assertRaises(TypeError):
with arg_scope([func3], d=1):
func3(1)
def testCurrentArgScope(self):
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
key_op = _key_op(func1)
current_scope = {key_op: func1_kwargs.copy()}
with self.cached_session():
with arg_scope([func1], a=1, b=None, c=[1]) as scope:
self.assertDictEqual(scope, current_scope)
def testArgScopedArguments(self):
func3_kwargs = ('a', 'b', 'c')
    self.assertEqual(arg_scoped_arguments(func3), func3_kwargs)
def testCurrentArgScopeNested(self):
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
func2_kwargs = {'b': 2, 'd': [2]}
key = _key_op
current_scope = {
key(func1): func1_kwargs.copy(),
key(func2): func2_kwargs.copy()
}
with self.cached_session():
with arg_scope([func1], a=1, b=None, c=[1]):
with arg_scope([func2], b=2, d=[2]) as scope:
self.assertDictEqual(scope, current_scope)
def testReuseArgScope(self):
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
key_op = _key_op(func1)
current_scope = {key_op: func1_kwargs.copy()}
with self.cached_session():
with arg_scope([func1], a=1, b=None, c=[1]) as scope1:
pass
with arg_scope(scope1) as scope:
self.assertDictEqual(scope, current_scope)
def testReuseArgScopeNested(self):
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
func2_kwargs = {'b': 2, 'd': [2]}
key = _key_op
current_scope1 = {key(func1): func1_kwargs.copy()}
current_scope2 = {
key(func1): func1_kwargs.copy(),
key(func2): func2_kwargs.copy()
}
with self.cached_session():
with arg_scope([func1], a=1, b=None, c=[1]) as scope1:
with arg_scope([func2], b=2, d=[2]) as scope2:
pass
with arg_scope(scope1):
with arg_scope([]) as current_arg_scope:
self.assertDictEqual(current_arg_scope, current_scope1)
with arg_scope(scope2):
with arg_scope([]) as current_arg_scope:
self.assertDictEqual(current_arg_scope, current_scope2)
def testSimpleArgScope(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
with self.cached_session():
with arg_scope([func1], a=1, b=None, c=[1]):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
def testSimpleArgScopeWithTuple(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
with self.cached_session():
with arg_scope((func1,), a=1, b=None, c=[1]):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
def testOverwriteArgScope(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': 2, 'c': [1]}
with arg_scope([func1], a=1, b=None, c=[1]):
args, kwargs = func1(0, b=2)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
def testNestedArgScope(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
with arg_scope([func1], a=1, b=None, c=[1]):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
func1_kwargs['b'] = 2
with arg_scope([func1], b=2):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
def testNestedArgScopeObjectCreatedOutsideScopeOverridesArgScope(self):
def get_scope_object():
with arg_scope([func1], a=1, b=None, c=[1]) as sc:
return sc
scope_object = get_scope_object()
with arg_scope([func1], b=2, d=10):
with arg_scope(scope_object):
args, kwargs = func1(0)
self.assertTupleEqual(args, (0,))
self.assertDictEqual(kwargs, {'a': 1, 'b': None, 'c': [1]})
def testArgScopeObjectCreatedWithinScopeInheritsArgScope(self):
def get_scope_object():
with arg_scope([func1], a=1, b=None, c=[1]) as sc:
return sc
with arg_scope([func1], b=2, d=10):
with arg_scope(get_scope_object()):
args, kwargs = func1(0)
self.assertTupleEqual(args, (0,))
self.assertDictEqual(kwargs, {'a': 1, 'b': None, 'c': [1], 'd': 10})
def testSharedArgScope(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
with arg_scope([func1, func2], a=1, b=None, c=[1]):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
args, kwargs = func2(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
def testSharedArgScopeTuple(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
with arg_scope((func1, func2), a=1, b=None, c=[1]):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
args, kwargs = func2(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
def testPartiallySharedArgScope(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
func2_args = (1,)
func2_kwargs = {'a': 1, 'b': None, 'd': [2]}
with arg_scope([func1, func2], a=1, b=None):
with arg_scope([func1], c=[1]):
with arg_scope([func2], d=[2]):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
args, kwargs = func2(1)
self.assertTupleEqual(args, func2_args)
self.assertDictEqual(kwargs, func2_kwargs)
def testAddArgScopeRaceCondition(self):
func4_kwargs = ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h')
for i in range(4):
# redefine the function with different args
@add_arg_scope
def func4(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8):
pass
self.assertTupleEqual(arg_scoped_arguments(func4), func4_kwargs)
def testDocString(self):
self.assertEqual(func3.__doc__, 'Some cool doc string.')
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/framework/python/ops/arg_scope_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to provide simpler and prettier logging."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
__all__ = ["print_op"]
def _get_tensor_repr(t,
print_tensor_name=True,
print_tensor_type=True,
print_shape=True,
summarize_indicator_vector=True):
"""Return a list of Tensors that summarize the given tensor t."""
tensor_list = []
if print_tensor_name and isinstance(t, ops.Tensor):
tensor_list.append(constant_op.constant("Name: " + t.name))
if print_tensor_type:
if isinstance(t, ops.Tensor):
t_type_str = "Type: Tensor ({})".format(t.dtype.name)
elif isinstance(t, sparse_tensor.SparseTensor):
t_type_str = "Type: SparseTensor ({})".format(t.dtype.name)
elif isinstance(t, tensor_array_ops.TensorArray):
t_type_str = "Type: TensorArray ({})".format(t.dtype.name)
elif isinstance(t, variables.Variable):
t_type_str = "Type: Variable ({})".format(t.dtype.name)
else:
raise ValueError("t must be a Tensor, SparseTensor, TensorArray or "
"Variable.")
tensor_list.append(constant_op.constant(t_type_str))
if print_shape:
if isinstance(t, sparse_tensor.SparseTensor):
tensor_list.append(constant_op.constant("Shape:"))
tensor_list.append(t.dense_shape)
elif isinstance(t, ops.Tensor):
tensor_list.append(constant_op.constant("Shape: " + str(t.get_shape(
).dims)))
elif isinstance(t, tensor_array_ops.TensorArray):
tensor_list.append(constant_op.constant("Size:"))
tensor_list.append(t.size())
if summarize_indicator_vector and t.dtype == dtypes.bool:
int_tensor = math_ops.cast(t, dtypes.uint8)
tensor_list.append(constant_op.constant("First True in Boolean tensor at:"))
tensor_list.append(math_ops.argmax(int_tensor, 0))
if isinstance(t, sparse_tensor.SparseTensor):
tensor_list.append(constant_op.constant("Sparse indices:"))
tensor_list.append(t.indices)
tensor_list.append(constant_op.constant("Sparse values:"))
tensor_list.append(t.values)
elif isinstance(t, ops.Tensor):
tensor_list.append(constant_op.constant("Value:"))
tensor_list.append(t)
elif isinstance(t, tensor_array_ops.TensorArray):
tensor_list.append(constant_op.constant("Value:"))
tensor_list.append(t.stack())
return tensor_list
def print_op(input_,
data=None,
message=None,
first_n=None,
summarize=20,
print_tensor_name=True,
print_tensor_type=True,
print_shape=True,
summarize_indicator_vector=True,
name=None):
"""Creates a print op that will print when a tensor is accessed.
Wraps the tensor passed in so that whenever that tensor is accessed,
  the message `message` is printed, along with the current value of the
  tensor `input_` and an optional list of other tensors.
Args:
input_: A Tensor/SparseTensor/TensorArray to print when it is evaluated.
data: A list of other tensors to print.
message: A string message to print as a prefix.
first_n: Only log `first_n` number of times. Negative numbers log always;
this is the default.
summarize: Print this number of elements in the tensor.
print_tensor_name: Print the tensor name.
print_tensor_type: Print the tensor type.
print_shape: Print the tensor's shape.
summarize_indicator_vector: Whether to print the index of the first true
value in an indicator vector (a Boolean tensor).
name: The name to give this op.
Returns:
A Print op. The Print op returns `input_`.
Raises:
ValueError: If the tensor `input_` is not a Tensor, SparseTensor or
TensorArray.
"""
message = message or ""
if input_ is None:
raise ValueError("input_ must be of type "
"Tensor, SparseTensor or TensorArray")
tensor_list = _get_tensor_repr(input_, print_tensor_name, print_tensor_type,
print_shape, summarize_indicator_vector)
if data is not None:
for t in data:
tensor_list.extend(_get_tensor_repr(t, print_tensor_name,
print_tensor_type, print_shape,
summarize_indicator_vector))
if isinstance(input_, ops.Tensor) or isinstance(input_, variables.Variable):
input_ = logging_ops.Print(input_, tensor_list, message, first_n, summarize,
name)
elif isinstance(input_, sparse_tensor.SparseTensor):
p = logging_ops.Print(
constant_op.constant([]), tensor_list, message, first_n, summarize,
name)
with ops.control_dependencies([p]):
input_ = sparse_tensor.SparseTensor(
array_ops.identity(input_.indices),
array_ops.identity(input_.values),
array_ops.identity(input_.dense_shape))
elif isinstance(input_, tensor_array_ops.TensorArray):
p = logging_ops.Print(
constant_op.constant([]), tensor_list, message, first_n, summarize,
name)
with ops.control_dependencies([p]):
input_ = tensor_array_ops.TensorArray(dtype=input_.dtype,
handle=input_.handle,
flow=input_.flow)
else:
raise ValueError("input_ must be of type "
"Tensor, SparseTensor or TensorArray")
return input_
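# Illustrative sketch (assumes TF 1.x graph mode): wrapping a tensor with
# `print_op` leaves its value unchanged but logs the name/type/shape/value
# summaries whenever it is evaluated. Names and values are made up.
def _example_print_op():
  import tensorflow.compat.v1 as tf
  with tf.Graph().as_default():
    x = constant_op.constant([1.0, 2.0, 3.0])
    x = print_op(x, message='inspecting x: ')
    y = x * 2.0
    with tf.Session() as sess:
      return sess.run(y)  # printing happens as a side effect of evaluating x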
|
tensorflow-master
|
tensorflow/contrib/framework/python/ops/prettyprint_ops.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module containing TensorFlow ops whose API may change in the future."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# TODO(ptucker): Add these to tf.contrib.variables?
# pylint: disable=wildcard-import
from tensorflow.contrib.framework.python.ops.arg_scope import *
from tensorflow.contrib.framework.python.ops.checkpoint_ops import *
from tensorflow.contrib.framework.python.ops.ops import *
from tensorflow.contrib.framework.python.ops.prettyprint_ops import *
from tensorflow.contrib.framework.python.ops.script_ops import *
from tensorflow.contrib.framework.python.ops.sort_ops import *
from tensorflow.contrib.framework.python.ops.variables import *
# pylint: enable=wildcard-import
|
tensorflow-master
|
tensorflow/contrib/framework/python/ops/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for the op to generate vocab remapping."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.contrib import framework as contrib_framework
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import saver
FLAGS = flags.FLAGS
_TESTDATA_PATH = 'contrib/framework/testdata'
class LoadMulticlassBiasTest(test.TestCase):
"""Tests for the load_linear_multiclass_bias_initializer functionality."""
def setUp(self):
ops.reset_default_graph()
dim = 1
num = 3
with ops.name_scope('some_scope'):
# Basically from 0 to dim*num-1.
flat_data = math_ops.linspace(0.0, dim * num - 1, dim * num)
bias = variables.Variable(
array_ops.reshape(flat_data, (num, dim)), name='bias')
save = saver.Saver([bias])
with self.cached_session() as sess:
variables.global_variables_initializer().run()
self.bundle_file = os.path.join(test.get_temp_dir(), 'bias_checkpoint')
save.save(sess, self.bundle_file)
self.new_class_vocab_file = os.path.join(
test.test_src_dir_path(_TESTDATA_PATH), 'keyword_new.txt')
self.old_class_vocab_file = os.path.join(
test.test_src_dir_path(_TESTDATA_PATH), 'keyword.txt')
self.init_val = 42
def _init_val_initializer(shape, dtype=None, partition_info=None):
del dtype, partition_info # Unused by this unit-testing initializer.
return array_ops.tile(
constant_op.constant([[self.init_val]], dtype=dtypes.float32), shape)
self.initializer = _init_val_initializer
def test_load_linear_multiclass_bias_initializer(self):
"""Tests for the bias initializer wrapper."""
bias_loading_initializer = (
contrib_framework.load_linear_multiclass_bias_initializer(
new_class_vocab_file=self.new_class_vocab_file,
old_class_vocab_file=self.old_class_vocab_file,
new_class_vocab_size=4,
bias_tensor_name='some_scope/bias',
ckpt_path=[self.bundle_file],
num_class_oov_buckets=1,
initializer=self.initializer))
expected_remapped_bias_vector = np.reshape(
[2, 0, self.init_val, 1, self.init_val], [5, 1])
# The new bias vector is of size [4 class vocab + 1 class OOV, 1].
remapped_bias_vector = variable_scope.get_variable(
name='bias/obtained_bias_vector',
shape=[5, 1],
initializer=bias_loading_initializer,
partitioner=partitioned_variables.fixed_size_partitioner(3))
with self.cached_session():
variables.global_variables_initializer().run()
self.assertAllClose(expected_remapped_bias_vector,
remapped_bias_vector.as_tensor().eval())
class LoadVariableSlotTest(test.TestCase):
"""Tests for the load_variable_slot_initializer functionality."""
def setUp(self):
ops.reset_default_graph()
dim = 1
num = 3
with ops.name_scope('some_scope'):
# Basically from 0 to dim*num-1.
flat_data = math_ops.linspace(0.0, dim * num - 1, dim * num)
accum = variables.Variable(
array_ops.reshape(flat_data, (num, dim)), name='accum')
save = saver.Saver([accum])
with self.cached_session() as sess:
variables.global_variables_initializer().run()
self.bundle_file = os.path.join(test.get_temp_dir(), 'accum_checkpoint')
save.save(sess, self.bundle_file)
self.new_class_vocab_file = os.path.join(
test.test_src_dir_path(_TESTDATA_PATH), 'keyword_new.txt')
self.old_class_vocab_file = os.path.join(
test.test_src_dir_path(_TESTDATA_PATH), 'keyword.txt')
self.init_val = 42
def _init_val_initializer(shape, dtype=None, partition_info=None):
del dtype, partition_info # Unused by this unit-testing initializer.
return array_ops.tile(
constant_op.constant([[self.init_val]], dtype=dtypes.float32), shape)
self.initializer = _init_val_initializer
def test_load_variable_slot_initializer(self):
"""Tests for the slot initializer wrapper."""
# We have an initializer for each of two partitioned variables, which will
# be [3, 1] and [2, 1]. The partitioning information is passed here in
# initializer construction, as opposed to through a variable scope during
# variable creation.
variable_slot_initializer_part_0 = (
contrib_framework.load_variable_slot_initializer(
new_row_vocab_file=self.new_class_vocab_file,
old_row_vocab_file=self.old_class_vocab_file,
new_row_vocab_size=4,
new_col_vocab_size=1,
primary_partition_info=variable_scope._PartitionInfo(
full_shape=[5, 1], var_offset=[0, 0]),
old_tensor_name='some_scope/accum',
ckpt_path=[self.bundle_file],
num_row_oov_buckets=1,
initializer=self.initializer))
variable_slot_initializer_part_1 = (
contrib_framework.load_variable_slot_initializer(
new_row_vocab_file=self.new_class_vocab_file,
old_row_vocab_file=self.old_class_vocab_file,
new_row_vocab_size=4,
new_col_vocab_size=1,
primary_partition_info=variable_scope._PartitionInfo(
full_shape=[5, 1], var_offset=[3, 0]),
old_tensor_name='some_scope/accum',
ckpt_path=[self.bundle_file],
num_row_oov_buckets=1,
initializer=self.initializer))
expected_remapped_accum_vector_part_0 = np.reshape([2, 0, self.init_val],
[3, 1])
expected_remapped_accum_vector_part_1 = np.reshape([1, self.init_val],
[2, 1])
# Since there is no variable scope here, partition_info will be None, so
# if variable_slot_initializer_part_0 and variable_slot_initializer_part_1
# were instead instances of load_and_remap_matrix_initializer, the part_0
# obtained vector would still be [2, 0, self.init_val], but the part_1
# obtained vector would be [2, 0], since the partition_info would default to
# assuming a single partition.
remapped_accum_vector_part_0 = variable_scope.get_variable(
name='accum/obtained_accum_vector_part_0',
shape=[3, 1],
initializer=variable_slot_initializer_part_0)
remapped_accum_vector_part_1 = variable_scope.get_variable(
name='accum/obtained_accum_vector_part_1',
shape=[2, 1],
initializer=variable_slot_initializer_part_1)
with self.cached_session():
variables.global_variables_initializer().run()
self.assertAllClose(expected_remapped_accum_vector_part_0,
remapped_accum_vector_part_0.eval())
self.assertAllClose(expected_remapped_accum_vector_part_1,
remapped_accum_vector_part_1.eval())
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/framework/python/ops/checkpoint_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for sorting tensors.
@@argsort
@@sort
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import sort_ops
sort = sort_ops.sort
argsort = sort_ops.argsort
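# Illustrative sketch: the names above are thin aliases, so they accept the
# same arguments as the underlying `sort_ops` functions. Values are made up.
def _example_sort():
  import tensorflow.compat.v1 as tf
  values = tf.constant([3.0, 1.0, 2.0])
  return sort(values, direction='ASCENDING'), argsort(values)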
|
tensorflow-master
|
tensorflow/contrib/framework/python/ops/sort_ops.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions used to construct graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
__all__ = ['get_graph_from_inputs',
'get_name_scope']
def get_graph_from_inputs(op_input_list, graph=None):
"""Returns the appropriate graph to use for the given inputs.
1. If `graph` is provided, we validate that all inputs in `op_input_list` are
from the same graph.
2. Otherwise, we attempt to select a graph from the first Operation- or
Tensor-valued input in `op_input_list`, and validate that all other
such inputs are in the same graph.
3. If the graph was not specified and it could not be inferred from
`op_input_list`, we attempt to use the default graph.
Args:
op_input_list: A list of inputs to an operation, which may include `Tensor`,
`Operation`, and other objects that may be converted to a graph element.
graph: (Optional) The explicit graph to use.
Raises:
TypeError: If `op_input_list` is not a list or tuple, or if graph is not a
Graph.
ValueError: If a graph is explicitly passed and not all inputs are from it,
or if the inputs are from multiple graphs, or we could not find a graph
and there was no default graph.
Returns:
The appropriate graph to use for the given inputs.
"""
# pylint: disable=protected-access
return ops._get_graph_from_inputs(op_input_list, graph)
def get_name_scope():
"""Returns the current name scope of the default graph.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.contrib.framework.get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope.
"""
return ops.get_default_graph().get_name_scope()
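# Illustrative sketch (assumes TF 1.x graph mode): demonstrates rule 2 above,
# the graph being inferred from the first graph-valued input. Names are made up.
def _example_get_graph_from_inputs():
  import tensorflow.compat.v1 as tf
  g = ops.Graph()
  with g.as_default():
    a = tf.constant(1.0)
    b = tf.constant(2.0)
  assert get_graph_from_inputs([a, b]) is g
  return g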
|
tensorflow-master
|
tensorflow/contrib/framework/python/ops/ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for generating and loading vocab remappings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import init_ops
from tensorflow.python.training import checkpoint_ops
# pylint: disable=protected-access,line-too-long
load_and_remap_matrix_initializer = checkpoint_ops._load_and_remap_matrix_initializer
# pylint: enable=line-too-long
load_embedding_initializer = checkpoint_ops._load_embedding_initializer
# pylint: enable=protected-access
def load_linear_multiclass_bias_initializer(ckpt_path,
bias_tensor_name,
new_class_vocab_size,
old_class_vocab_file,
new_class_vocab_file,
num_class_oov_buckets=0,
initializer=None,
max_rows_in_memory=-1):
"""Loads pre-trained multi-class biases for linear models from checkpoint.
Wrapper around `load_and_remap_matrix_initializer()` specialized for loading
multi-class bias and remapping according to the provided vocab files. See docs
for `load_and_remap_matrix_initializer()` for more details. In this case, the
provided row_vocab is the class vocabulary, and the expected shape is
`[new_class_vocab_size, 1]`.
Args:
ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
from which the old matrix `Tensor` will be loaded.
    bias_tensor_name: Tensor name to load the bias from in the checkpoint.
new_class_vocab_size: Number of entries in the new class vocab.
old_class_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old class vocabulary file.
new_class_vocab_file: A scalar `Tensor` of type `string` containing the
path to the new class vocabulary file.
num_class_oov_buckets: `int` specifying the number of out-of-vocabulary
buckets to use for the classes. Must be >= 0.
initializer: Initializer function that accepts a 1-D tensor as the arg to
specify the shape of the returned tensor. If `None`, defaults to using
`zeros_initializer()`.
max_rows_in_memory: `int` specifying the maximum number of rows to load from
the checkpoint at once. If less than or equal to 0, the entire matrix will
be loaded into memory. Setting this arg trades increased disk reads for
lower memory usage.
Returns:
A variable initializer function.
"""
# Linear multi-class biases should be zero-initialized.
if initializer is None:
initializer = init_ops.zeros_initializer()
return load_and_remap_matrix_initializer(
ckpt_path=ckpt_path,
old_tensor_name=bias_tensor_name,
new_row_vocab_size=new_class_vocab_size,
new_col_vocab_size=1,
old_row_vocab_file=old_class_vocab_file,
new_row_vocab_file=new_class_vocab_file,
old_col_vocab_file=None,
new_col_vocab_file=None,
num_row_oov_buckets=num_class_oov_buckets,
num_col_oov_buckets=0,
initializer=initializer,
max_rows_in_memory=max_rows_in_memory)
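# Illustrative sketch (not part of the original module): every path and tensor
# name below is a hypothetical placeholder. It only shows how the arguments fit
# together; the returned initializer is what gets passed to variable creation,
# and the expected variable shape is
# [new_class_vocab_size + num_class_oov_buckets, 1].
def _example_load_bias_initializer():  # pragma: no cover
  return load_linear_multiclass_bias_initializer(
      ckpt_path='/tmp/old_model/model.ckpt',            # hypothetical checkpoint
      bias_tensor_name='linear/bias_weight',            # hypothetical tensor name
      new_class_vocab_size=100,
      old_class_vocab_file='/tmp/old_class_vocab.txt',  # hypothetical vocab file
      new_class_vocab_file='/tmp/new_class_vocab.txt',  # hypothetical vocab file
      num_class_oov_buckets=1)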
def load_variable_slot_initializer(ckpt_path,
old_tensor_name,
primary_partition_info,
new_row_vocab_size,
new_col_vocab_size,
old_row_vocab_file=None,
new_row_vocab_file=None,
old_col_vocab_file=None,
new_col_vocab_file=None,
num_row_oov_buckets=0,
num_col_oov_buckets=0,
initializer=None,
max_rows_in_memory=-1):
"""Loads pre-trained multi-class slots for linear models from checkpoint.
Wrapper around `load_and_remap_matrix_initializer()` specialized for loading
multi-class slots (such as optimizer accumulators) and remapping them
according to the provided vocab files. See docs for
`load_and_remap_matrix_initializer()` for more details. Takes in a
`variable_scope._PartitionInfo` representing the slot's primary `Variable`'s
partitioning. This is necessary since accumulator `Variable` creation ignores
primary scoping and partitioning information.
Args:
ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
from which the old matrix `Tensor` will be loaded.
old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
primary_partition_info: A `variable_scope._PartitionInfo` containing this
slot's primary `Variable`'s partitioning information. This is used to
calculate the offset and override the partition_info passed to the call to
_initialize.
new_row_vocab_size: `int` specifying the number of entries in
`new_row_vocab_file`. If no row remapping is needed (no row vocab
provided), this should be equal to the number of rows to load from the old
matrix (which can theoretically be smaller than the number of rows in the
old matrix).
new_col_vocab_size: `int` specifying the number of entries in
`new_col_vocab_file`. If no column remapping is needed (no column vocab
provided), this should be equal to the number of columns in the old
matrix.
old_row_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old row vocabulary file. Can be None, which represents no
remapping on the row axis.
new_row_vocab_file: A scalar `Tensor` of type `string` containing the path
to the new row vocabulary file. Can be None, which represents no remapping
on the row axis.
old_col_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old column vocabulary file. Can be None, which represents no
remapping on the column axis.
new_col_vocab_file: A scalar `Tensor` of type `string` containing the path
to the new column vocabulary file. Can be None, which represents no
remapping on the column axis.
num_row_oov_buckets: `int` specifying the number of out-of-vocabulary rows
to append. Must be >= 0.
num_col_oov_buckets: `int` specifying the number of out-of-vocabulary
columns to append. Must be >= 0.
initializer: Initializer function to initialize missing values. Accepts a
1-D tensor as the arg to specify the shape of the returned tensor. If
`None`, defaults to using `zeros_initializer()`.
max_rows_in_memory: `int` specifying the maximum number of rows to load from
the checkpoint at once. If less than or equal to 0, the entire matrix will
be loaded into memory. Setting this arg trades increased disk reads for
lower memory usage.
Returns:
A variable initializer function that should be used to initialize a
(potentially partitioned) `Variable` whose complete shape is
`[new_row_vocab_size + num_row_oov_buckets, new_col_vocab_size +
num_col_oov_buckets]`.
Raises:
TypeError: If `initializer` is specified but not callable.
"""
initializer_fn = load_and_remap_matrix_initializer(
ckpt_path=ckpt_path,
old_tensor_name=old_tensor_name,
new_row_vocab_size=new_row_vocab_size,
new_col_vocab_size=new_col_vocab_size,
old_row_vocab_file=old_row_vocab_file,
new_row_vocab_file=new_row_vocab_file,
old_col_vocab_file=old_col_vocab_file,
new_col_vocab_file=new_col_vocab_file,
num_row_oov_buckets=num_row_oov_buckets,
num_col_oov_buckets=num_col_oov_buckets,
initializer=initializer,
max_rows_in_memory=max_rows_in_memory)
def _initializer(shape, dtype=dtypes.float32, partition_info=None):
del partition_info # Unused by this override.
return initializer_fn(shape, dtype, partition_info=primary_partition_info)
return _initializer
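# Illustrative sketch (not part of the original module): `partition_info` is the
# primary `Variable`'s `variable_scope._PartitionInfo`, obtained wherever the
# primary variable is created; the checkpoint path, tensor name, and vocab files
# are hypothetical placeholders.
def _example_load_slot_initializer(partition_info):  # pragma: no cover
  return load_variable_slot_initializer(
      ckpt_path='/tmp/old_model/model.ckpt',        # hypothetical checkpoint
      old_tensor_name='linear/weights/Adagrad',     # hypothetical slot tensor
      primary_partition_info=partition_info,
      new_row_vocab_size=100,
      new_col_vocab_size=10,
      old_row_vocab_file='/tmp/old_row_vocab.txt',  # hypothetical vocab file
      new_row_vocab_file='/tmp/new_row_vocab.txt',  # hypothetical vocab file
      num_row_oov_buckets=1)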
|
tensorflow-master
|
tensorflow/contrib/framework/python/ops/checkpoint_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ops tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import ops as ops_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class OpsTest(test.TestCase):
def testGetGraphFromEmptyInputs(self):
with ops.Graph().as_default() as g0:
self.assertIs(g0, ops_lib.get_graph_from_inputs([]))
def testGetGraphFromValidInputs(self):
g0 = ops.Graph()
with g0.as_default():
values = [constant_op.constant(0.0), constant_op.constant(1.0)]
self.assertIs(g0, ops_lib.get_graph_from_inputs(values))
self.assertIs(g0, ops_lib.get_graph_from_inputs(values, g0))
with ops.Graph().as_default():
self.assertIs(g0, ops_lib.get_graph_from_inputs(values))
self.assertIs(g0, ops_lib.get_graph_from_inputs(values, g0))
def testGetGraphFromInvalidInputs(self):
g0 = ops.Graph()
with g0.as_default():
values = [constant_op.constant(0.0), constant_op.constant(1.0)]
g1 = ops.Graph()
with self.assertRaisesRegexp(ValueError, "not from the passed-in graph"):
ops_lib.get_graph_from_inputs(values, g1)
with g1.as_default():
values.append(constant_op.constant(2.0))
with self.assertRaisesRegexp(ValueError, "must be from the same graph"):
ops_lib.get_graph_from_inputs(values)
with self.assertRaisesRegexp(ValueError, "not from the passed-in graph"):
ops_lib.get_graph_from_inputs(values, g0)
with self.assertRaisesRegexp(ValueError, "not from the passed-in graph"):
ops_lib.get_graph_from_inputs(values, g1)
def testGetNameScope(self):
with ops.name_scope("scope1"):
with ops.name_scope("scope2"):
with ops.name_scope("scope3"):
self.assertEqual("scope1/scope2/scope3", ops_lib.get_name_scope())
self.assertEqual("scope1/scope2", ops_lib.get_name_scope())
self.assertEqual("scope1", ops_lib.get_name_scope())
self.assertEqual("", ops_lib.get_name_scope())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/framework/python/ops/ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the arg_scope used for scoping layers arguments.
Allows one to define models much more compactly by eliminating boilerplate
code. This is accomplished through the use of argument scoping (arg_scope).
Example of how to use tf.contrib.framework.arg_scope:
```
from third_party.tensorflow.contrib.layers.python import layers
arg_scope = tf.contrib.framework.arg_scope
with arg_scope([layers.conv2d], padding='SAME',
initializer=layers.variance_scaling_initializer(),
regularizer=layers.l2_regularizer(0.05)):
net = layers.conv2d(inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
net = layers.conv2d(net, 256, [5, 5], scope='conv2')
```
The first call to conv2d will behave as follows:
layers.conv2d(inputs, 64, [11, 11], 4, padding='VALID',
initializer=layers.variance_scaling_initializer(),
regularizer=layers.l2_regularizer(0.05), scope='conv1')
The second call to conv2d will also use the arg_scope's default for padding:
layers.conv2d(inputs, 256, [5, 5], padding='SAME',
initializer=layers.variance_scaling_initializer(),
regularizer=layers.l2_regularizer(0.05), scope='conv2')
Example of how to reuse an arg_scope:
```
with arg_scope([layers.conv2d], padding='SAME',
initializer=layers.variance_scaling_initializer(),
regularizer=layers.l2_regularizer(0.05)) as sc:
net = layers.conv2d(net, 256, [5, 5], scope='conv1')
....
with arg_scope(sc):
net = layers.conv2d(net, 256, [5, 5], scope='conv2')
```
Example of how to use tf.contrib.framework.add_arg_scope to enable your
function to be called within an arg_scope later:
@tf.contrib.framework.add_arg_scope
  def conv2d(*args, **kwargs):
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
__all__ = [
'arg_scope', 'add_arg_scope', 'current_arg_scope', 'has_arg_scope',
'arg_scoped_arguments', 'arg_scope_func_key'
]
_ARGSTACK = [{}]
_DECORATED_OPS = {}
def _get_arg_stack():
if _ARGSTACK:
return _ARGSTACK
else:
_ARGSTACK.append({})
return _ARGSTACK
def current_arg_scope():
stack = _get_arg_stack()
return stack[-1]
def arg_scope_func_key(op):
return getattr(op, '_key_op', str(op))
def _name_op(op):
return (op.__module__, op.__name__)
def _kwarg_names(func):
kwargs_length = len(func.__defaults__) if func.__defaults__ else 0
return func.__code__.co_varnames[-kwargs_length:func.__code__.co_argcount]
def _add_op(op):
key_op = arg_scope_func_key(op)
_DECORATED_OPS[key_op] = _kwarg_names(op)
@tf_contextlib.contextmanager
def arg_scope(list_ops_or_scope, **kwargs):
"""Stores the default arguments for the given set of list_ops.
For usage, please see examples at top of the file.
Args:
list_ops_or_scope: List or tuple of operations to set argument scope for or
a dictionary containing the current scope. When list_ops_or_scope is a
dict, kwargs must be empty. When list_ops_or_scope is a list or tuple,
      then every op in it needs to be decorated with @add_arg_scope to work.
**kwargs: keyword=value that will define the defaults for each op in
list_ops. All the ops need to accept the given set of arguments.
Yields:
the current_scope, which is a dictionary of {op: {arg: value}}
Raises:
TypeError: if list_ops is not a list or a tuple.
    ValueError: if any op in list_ops has not been decorated with @add_arg_scope.
"""
if isinstance(list_ops_or_scope, dict):
# Assumes that list_ops_or_scope is a scope that is being reused.
if kwargs:
      raise ValueError('When attempting to re-use a scope by supplying a '
                       'dictionary, kwargs must be empty.')
current_scope = list_ops_or_scope.copy()
try:
_get_arg_stack().append(current_scope)
yield current_scope
finally:
_get_arg_stack().pop()
else:
# Assumes that list_ops_or_scope is a list/tuple of ops with kwargs.
if not isinstance(list_ops_or_scope, (list, tuple)):
raise TypeError('list_ops_or_scope must either be a list/tuple or reused '
'scope (i.e. dict)')
try:
current_scope = current_arg_scope().copy()
for op in list_ops_or_scope:
key = arg_scope_func_key(op)
if not has_arg_scope(op):
          raise ValueError('%s is not decorated with @add_arg_scope' %
                           (_name_op(op),))
if key in current_scope:
current_kwargs = current_scope[key].copy()
current_kwargs.update(kwargs)
current_scope[key] = current_kwargs
else:
current_scope[key] = kwargs.copy()
_get_arg_stack().append(current_scope)
yield current_scope
finally:
_get_arg_stack().pop()
def add_arg_scope(func):
"""Decorates a function with args so it can be used within an arg_scope.
Args:
func: function to decorate.
Returns:
    The decorated function func_with_args().
"""
def func_with_args(*args, **kwargs):
current_scope = current_arg_scope()
current_args = kwargs
key_func = arg_scope_func_key(func)
if key_func in current_scope:
current_args = current_scope[key_func].copy()
current_args.update(kwargs)
return func(*args, **current_args)
_add_op(func)
setattr(func_with_args, '_key_op', arg_scope_func_key(func))
return tf_decorator.make_decorator(func, func_with_args)
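# Illustrative sketch (not part of the original module): a toy op decorated with
# @add_arg_scope, showing how arg_scope supplies its `scale` default. Wrapped in
# a helper so nothing runs at import time.
def _example_arg_scope_usage():  # pragma: no cover
  @add_arg_scope
  def scaled(x, scale=1.0):
    return x * scale
  with arg_scope([scaled], scale=3.0):
    assert scaled(2.0) == 6.0  # `scale` is taken from the enclosing arg_scope.
  assert scaled(2.0) == 2.0  # Outside the scope the original default applies.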
def has_arg_scope(func):
"""Checks whether a func has been decorated with @add_arg_scope or not.
Args:
func: function to check.
Returns:
a boolean.
"""
return arg_scope_func_key(func) in _DECORATED_OPS
def arg_scoped_arguments(func):
  """Returns the list of kwargs that arg_scope can set for a func.
Args:
func: function which has been decorated with @add_arg_scope.
Returns:
a list of kwargs names.
"""
assert has_arg_scope(func)
return _DECORATED_OPS[arg_scope_func_key(func)]
|
tensorflow-master
|
tensorflow/contrib/framework/python/ops/arg_scope.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""variables tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.framework.python.ops import variables as variables_lib2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import device_setter
from tensorflow.python.training import saver as saver_lib
class LocalVariableTest(test.TestCase):
def test_local_variable(self):
with self.cached_session() as sess:
self.assertEquals([], variables_lib.local_variables())
value0 = 42
variables_lib2.local_variable(value0)
value1 = 43
variables_lib2.local_variable(value1)
variables = variables_lib.local_variables()
self.assertEquals(2, len(variables))
self.assertRaises(errors_impl.OpError, sess.run, variables)
variables_lib.variables_initializer(variables).run()
self.assertAllEqual(set([value0, value1]), set(sess.run(variables)))
def testLocalVariableNameAndShape(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.local_variable([1, 1, 1, 1, 1], name='a')
self.assertEquals(a.op.name, 'A/a')
self.assertListEqual(a.get_shape().as_list(), [5])
self.assertListEqual([a], variables_lib2.get_local_variables())
def testLocalVariableNotInAllVariables(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.local_variable(0)
self.assertFalse(a in variables_lib.global_variables())
self.assertTrue(a in variables_lib.local_variables())
def testLocalVariableNotInVariablesToRestore(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.local_variable(0)
self.assertFalse(a in variables_lib2.get_variables_to_restore())
self.assertTrue(a in variables_lib.local_variables())
def testGetVariablesDontReturnsTransients(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
variables_lib2.local_variable(0)
with variable_scope.variable_scope('B'):
variables_lib2.local_variable(0)
self.assertEquals([], variables_lib2.get_variables('A'))
self.assertEquals([], variables_lib2.get_variables('B'))
def testGetLocalVariablesReturnsTransients(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.local_variable(0)
with variable_scope.variable_scope('B'):
b = variables_lib2.local_variable(0)
self.assertEquals([a], variables_lib2.get_local_variables('A'))
self.assertEquals([b], variables_lib2.get_local_variables('B'))
def testInitializedVariableValue(self):
with self.cached_session() as sess:
a = variables_lib2.local_variable([0, 0, 0, 0, 0], name='a')
sess.run(variables_lib.local_variables_initializer())
self.assertAllEqual(a.eval(), [0] * 5)
def testResourceVariable(self):
a = variables_lib2.local_variable(0)
b = variables_lib2.local_variable(0, use_resource=True)
self.assertTrue(isinstance(a, variables_lib.Variable))
self.assertFalse(isinstance(a, resource_variable_ops.ResourceVariable))
self.assertTrue(isinstance(b, resource_variable_ops.ResourceVariable))
class GlobalVariableTest(test.TestCase):
def test_global_variable(self):
with self.cached_session() as sess:
self.assertEquals([], variables_lib.global_variables())
value0 = 42
variables_lib2.global_variable(value0)
value1 = 43
variables_lib2.global_variable(value1)
variables = variables_lib.global_variables()
self.assertEquals(2, len(variables))
with self.assertRaisesOpError(
'Attempting to use uninitialized value Variable'):
sess.run(variables)
variables_lib.variables_initializer(variables).run()
self.assertAllEqual(set([value0, value1]), set(sess.run(variables)))
def testVariableNameAndShape(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.global_variable([1, 1, 1, 1, 1], name='a')
self.assertEquals(a.op.name, 'A/a')
self.assertListEqual(a.get_shape().as_list(), [5])
self.assertListEqual([a], variables_lib.global_variables())
def testGlobalVariableNotInLocalVariables(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.global_variable(0)
self.assertFalse(a in variables_lib.local_variables())
self.assertTrue(a in variables_lib.global_variables())
def testGlobalVariableInVariablesToRestore(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.global_variable(0)
self.assertFalse(a in variables_lib.local_variables())
self.assertTrue(a in variables_lib2.get_variables_to_restore())
def testGetVariablesReturnsThem(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.global_variable(0)
with variable_scope.variable_scope('B'):
b = variables_lib2.global_variable(0)
self.assertEquals([a], variables_lib2.get_variables('A'))
self.assertEquals([b], variables_lib2.get_variables('B'))
def testGetLocalVariablesDontReturnsThem(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
variables_lib2.global_variable(0)
with variable_scope.variable_scope('B'):
variables_lib2.global_variable(0)
self.assertEquals([], variables_lib2.get_local_variables('A'))
self.assertEquals([], variables_lib2.get_local_variables('B'))
def testInitializedVariableValue(self):
with self.cached_session() as sess:
a = variables_lib2.global_variable([0, 0, 0, 0, 0], name='a')
sess.run(variables_lib.global_variables_initializer())
self.assertAllEqual(a.eval(), [0] * 5)
def testResourceVariable(self):
a = variables_lib2.global_variable(0)
b = variables_lib2.global_variable(0, use_resource=True)
self.assertTrue(isinstance(a, variables_lib.Variable))
self.assertFalse(isinstance(a, resource_variable_ops.ResourceVariable))
self.assertTrue(isinstance(b, resource_variable_ops.ResourceVariable))
class GlobalStepTest(test.TestCase):
def _assert_global_step(self, global_step, expected_dtype=dtypes.int64):
self.assertEquals('%s:0' % ops.GraphKeys.GLOBAL_STEP, global_step.name)
self.assertEquals(expected_dtype, global_step.dtype.base_dtype)
self.assertEquals([], global_step.get_shape().as_list())
def test_invalid_dtype(self):
with ops.Graph().as_default() as g:
self.assertEquals(None, variables_lib2.get_global_step())
variables_lib.VariableV1(
0.0,
trainable=False,
dtype=dtypes.float32,
name=ops.GraphKeys.GLOBAL_STEP)
self.assertRaisesRegexp(TypeError, 'does not have integer type',
variables_lib2.get_global_step)
self.assertRaisesRegexp(TypeError, 'does not have integer type',
variables_lib2.get_global_step, g)
def test_invalid_shape(self):
with ops.Graph().as_default() as g:
self.assertEquals(None, variables_lib2.get_global_step())
variables_lib.VariableV1(
[0],
trainable=False,
dtype=dtypes.int32,
name=ops.GraphKeys.GLOBAL_STEP)
self.assertRaisesRegexp(TypeError, 'not scalar',
variables_lib2.get_global_step)
self.assertRaisesRegexp(TypeError, 'not scalar',
variables_lib2.get_global_step, g)
def test_create_global_step(self):
self.assertEquals(None, variables_lib2.get_global_step())
with ops.Graph().as_default() as g:
global_step = variables_lib2.create_global_step()
self._assert_global_step(global_step)
self.assertRaisesRegexp(ValueError, 'already exists',
variables_lib2.create_global_step)
self.assertRaisesRegexp(ValueError, 'already exists',
variables_lib2.create_global_step, g)
self._assert_global_step(variables_lib2.create_global_step(ops.Graph()))
def test_get_global_step(self):
with ops.Graph().as_default() as g:
self.assertEquals(None, variables_lib2.get_global_step())
variables_lib.VariableV1(
0,
trainable=False,
dtype=dtypes.int32,
name=ops.GraphKeys.GLOBAL_STEP)
self._assert_global_step(
variables_lib2.get_global_step(), expected_dtype=dtypes.int32)
self._assert_global_step(
variables_lib2.get_global_step(g), expected_dtype=dtypes.int32)
def test_get_or_create_global_step(self):
with ops.Graph().as_default() as g:
self.assertEquals(None, variables_lib2.get_global_step())
self._assert_global_step(variables_lib2.get_or_create_global_step())
self._assert_global_step(variables_lib2.get_or_create_global_step(g))
class VariablesTest(test.TestCase):
def testCreateVariable(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
self.assertEquals(a.op.name, 'A/a')
self.assertListEqual(a.get_shape().as_list(), [5])
self.assertTrue(a in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
self.assertFalse(a in ops.get_collection(ops.GraphKeys.MODEL_VARIABLES))
self.assertFalse(a in variables_lib.local_variables())
def testGetVariables(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
with variable_scope.variable_scope('B'):
b = variables_lib2.variable('a', [5])
self.assertEquals([a, b], variables_lib2.get_variables())
self.assertEquals([a], variables_lib2.get_variables('A'))
self.assertEquals([b], variables_lib2.get_variables('B'))
def testGetVariablesWithScope(self):
with self.cached_session():
with variable_scope.variable_scope('A') as var_scope:
a = variables_lib2.variable('a', [5])
b = variables_lib2.variable('b', [5])
self.assertSetEqual(
set([a, b]), set(variables_lib2.get_variables(var_scope)))
def testGetVariablesSuffix(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
with variable_scope.variable_scope('A'):
b = variables_lib2.variable('b', [5])
self.assertEquals([a], variables_lib2.get_variables(suffix='a'))
self.assertEquals([b], variables_lib2.get_variables(suffix='b'))
def testGetVariableWithSingleVar(self):
with self.cached_session():
with variable_scope.variable_scope('parent'):
a = variables_lib2.variable('child', [5])
self.assertEquals(a, variables_lib2.get_unique_variable('parent/child'))
def testGetVariableWithDistractors(self):
with self.cached_session():
with variable_scope.variable_scope('parent'):
a = variables_lib2.variable('child', [5])
with variable_scope.variable_scope('child'):
variables_lib2.variable('grandchild1', [7])
variables_lib2.variable('grandchild2', [9])
self.assertEquals(a, variables_lib2.get_unique_variable('parent/child'))
def testGetVariableThrowsExceptionWithNoMatch(self):
var_name = 'cant_find_me'
with self.cached_session():
with self.assertRaises(ValueError):
variables_lib2.get_unique_variable(var_name)
def testGetThrowsExceptionWithChildrenButNoMatch(self):
var_name = 'parent/child'
with self.cached_session():
with variable_scope.variable_scope(var_name):
variables_lib2.variable('grandchild1', [7])
variables_lib2.variable('grandchild2', [9])
with self.assertRaises(ValueError):
variables_lib2.get_unique_variable(var_name)
def testGetVariablesToRestore(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
with variable_scope.variable_scope('B'):
b = variables_lib2.variable('a', [5])
self.assertEquals([a, b], variables_lib2.get_variables_to_restore())
def testIncludeGetVariablesToRestore(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
with variable_scope.variable_scope('B'):
b = variables_lib2.variable('a', [5])
self.assertEquals([a, b], variables_lib2.get_variables())
self.assertEquals([a], variables_lib2.get_variables_to_restore(['A']))
def testExcludeGetVariablesToRestore(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
with variable_scope.variable_scope('B'):
b = variables_lib2.variable('a', [5])
self.assertEquals([a, b], variables_lib2.get_variables())
self.assertEquals(
[a], variables_lib2.get_variables_to_restore(exclude=['B']))
def testWrongIncludeGetVariablesToRestore(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
with variable_scope.variable_scope('B'):
b = variables_lib2.variable('a', [5])
self.assertEquals([a, b], variables_lib2.get_variables())
self.assertEquals([], variables_lib2.get_variables_to_restore(['a']))
def testGetMixedVariablesToRestore(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
b = variables_lib2.variable('b', [5])
with variable_scope.variable_scope('B'):
c = variables_lib2.variable('c', [5])
d = variables_lib2.variable('d', [5])
self.assertEquals([a, b, c, d], variables_lib2.get_variables())
self.assertEquals(
[a, c],
variables_lib2.get_variables_to_restore(include=['A/a', 'B/c']))
def testExcludeGetMixedVariablesToRestore(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
b = variables_lib2.variable('b', [5])
with variable_scope.variable_scope('B'):
c = variables_lib2.variable('c', [5])
d = variables_lib2.variable('d', [5])
self.assertEquals([a, b, c, d], variables_lib2.get_variables())
self.assertEquals(
[b, d],
variables_lib2.get_variables_to_restore(exclude=['A/a', 'B/c']))
def testReuseVariable(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [])
with variable_scope.variable_scope('A', reuse=True):
b = variables_lib2.variable('a', [])
self.assertEquals(a, b)
self.assertListEqual([a], variables_lib2.get_variables())
def testVariableWithRegularizer(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [], regularizer=nn_ops.l2_loss)
loss = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertDeviceEqual(loss.device, a.device)
def testVariableWithRegularizerColocate(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable(
'a', [], device='gpu:0', regularizer=nn_ops.l2_loss)
loss = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertDeviceEqual(loss.device, a.device)
def testVariableWithDevice(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [], device='cpu:0')
b = variables_lib2.variable('b', [], device='cpu:1')
self.assertDeviceEqual(a.device, 'cpu:0')
self.assertDeviceEqual(b.device, 'cpu:1')
def testVariableWithDeviceFromScope(self):
with self.cached_session():
with ops.device('/cpu:0'):
a = variables_lib2.variable('a', [])
b = variables_lib2.variable('b', [], device='cpu:1')
self.assertDeviceEqual(a.device, 'cpu:0')
self.assertDeviceEqual(b.device, 'cpu:1')
def testVariableWithDeviceFunction(self):
class DevFn(object):
def __init__(self):
self.counter = -1
def __call__(self, op):
self.counter += 1
return 'cpu:%d' % self.counter
with self.cached_session():
with arg_scope([variables_lib2.variable], device=DevFn()):
a = variables_lib2.variable('a', [])
b = variables_lib2.variable('b', [])
c = variables_lib2.variable('c', [], device='cpu:12')
d = variables_lib2.variable('d', [])
with ops.device('cpu:99'):
e_init = constant_op.constant(12)
e = variables_lib2.variable('e', initializer=e_init)
self.assertDeviceEqual(a.device, 'cpu:0')
self.assertEqual(a.initial_value.op.colocation_groups(),
a.op.colocation_groups())
self.assertDeviceEqual(b.device, 'cpu:1')
self.assertEqual(b.initial_value.op.colocation_groups(),
b.op.colocation_groups())
self.assertDeviceEqual(c.device, 'cpu:12')
self.assertEqual(c.initial_value.op.colocation_groups(),
c.op.colocation_groups())
self.assertDeviceEqual(d.device, 'cpu:2')
self.assertEqual(d.initial_value.op.colocation_groups(),
d.op.colocation_groups())
self.assertDeviceEqual(e.device, 'cpu:3')
self.assertDeviceEqual(e.initial_value.device, 'cpu:99')
def testVariableWithReplicaDeviceSetter(self):
with self.cached_session():
with ops.device(device_setter.replica_device_setter(ps_tasks=2)):
a = variables_lib2.variable('a', [])
b = variables_lib2.variable('b', [])
c = variables_lib2.variable('c', [], device='cpu:12')
d = variables_lib2.variable('d', [])
with ops.device('cpu:99'):
e_init = constant_op.constant(12)
e = variables_lib2.variable('e', initializer=e_init)
# The values below highlight how the replica_device_setter puts initial
# values on the worker job, and how it merges explicit devices.
self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0')
self.assertEqual(a.initial_value.op.colocation_groups(),
a.op.colocation_groups())
self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0')
self.assertEqual(b.initial_value.op.colocation_groups(),
b.op.colocation_groups())
self.assertDeviceEqual(c.device, '/job:ps/task:0/cpu:12')
self.assertEqual(c.initial_value.op.colocation_groups(),
c.op.colocation_groups())
self.assertDeviceEqual(d.device, '/job:ps/task:1/cpu:0')
self.assertEqual(d.initial_value.op.colocation_groups(),
d.op.colocation_groups())
self.assertDeviceEqual(e.device, '/job:ps/task:0/cpu:0')
self.assertDeviceEqual(e.initial_value.device, '/job:worker/cpu:99')
def testVariableWithVariableDeviceChooser(self):
with ops.Graph().as_default():
device_fn = variables_lib2.VariableDeviceChooser(num_tasks=2)
with arg_scope([variables_lib2.variable], device=device_fn):
a = variables_lib2.variable('a', [])
b = variables_lib2.variable('b', [])
c = variables_lib2.variable('c', [], device='cpu:12')
d = variables_lib2.variable('d', [])
with ops.device('cpu:99'):
e_init = constant_op.constant(12)
e = variables_lib2.variable('e', initializer=e_init)
# The values below highlight how the VariableDeviceChooser puts initial
# values on the same device as the variable job.
self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0')
self.assertEqual(a.initial_value.op.colocation_groups(),
a.op.colocation_groups())
self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0')
self.assertEqual(b.initial_value.op.colocation_groups(),
b.op.colocation_groups())
self.assertDeviceEqual(c.device, '/cpu:12')
self.assertEqual(c.initial_value.op.colocation_groups(),
c.op.colocation_groups())
self.assertDeviceEqual(d.device, '/job:ps/task:0/cpu:0')
self.assertEqual(d.initial_value.op.colocation_groups(),
d.op.colocation_groups())
self.assertDeviceEqual(e.device, '/job:ps/task:1/cpu:0')
self.assertDeviceEqual(e.initial_value.device, '/cpu:99')
def testVariableWithVariableDeviceChooserWithReplica(self):
with ops.Graph().as_default():
device_fn = variables_lib2.VariableDeviceChooser(replica=3, num_tasks=2)
with arg_scope([variables_lib2.variable], device=device_fn):
a = variables_lib2.variable('a', [])
b = variables_lib2.variable('b', [])
c = variables_lib2.variable('c', [], device='cpu:12')
d = variables_lib2.variable('d', [])
with ops.device('cpu:99'):
e_init = constant_op.constant(12)
e = variables_lib2.variable('e', initializer=e_init)
# The values below highlight how the VariableDeviceChooser puts initial
# values on the same device as the variable job.
self.assertDeviceEqual(a.device, '/job:ps/replica:3/task:0/cpu:0')
self.assertEqual(a.initial_value.op.colocation_groups(),
a.op.colocation_groups())
self.assertDeviceEqual(b.device, '/job:ps/replica:3/task:1/cpu:0')
self.assertEqual(b.initial_value.op.colocation_groups(),
b.op.colocation_groups())
self.assertDeviceEqual(c.device, '/cpu:12')
self.assertEqual(c.initial_value.op.colocation_groups(),
c.op.colocation_groups())
self.assertDeviceEqual(d.device, '/job:ps/replica:3/task:0/cpu:0')
self.assertEqual(d.initial_value.op.colocation_groups(),
d.op.colocation_groups())
self.assertDeviceEqual(e.device, '/job:ps/replica:3/task:1/cpu:0')
self.assertDeviceEqual(e.initial_value.device, '/cpu:99')
def testVariableGPUPlacement(self):
with ops.Graph().as_default():
device_fn = variables_lib2.VariableDeviceChooser(device_type='GPU')
with arg_scope([variables_lib2.variable], device=device_fn):
a = variables_lib2.variable('a', [])
b = variables_lib2.variable('b', [])
c = variables_lib2.variable('c', [], device='cpu:12')
d = variables_lib2.variable('d', [])
with ops.device('cpu:99'):
e_init = constant_op.constant(12)
e = variables_lib2.variable('e', initializer=e_init)
# The values below highlight how the VariableDeviceChooser puts initial
# values on the same device as the variable job.
self.assertDeviceEqual(a.device, '/device:GPU:0')
self.assertEqual(a.initial_value.op.colocation_groups(),
a.op.colocation_groups())
self.assertDeviceEqual(b.device, '/device:GPU:0')
self.assertEqual(b.initial_value.op.colocation_groups(),
b.op.colocation_groups())
self.assertDeviceEqual(c.device, '/cpu:12')
self.assertEqual(c.initial_value.op.colocation_groups(),
c.op.colocation_groups())
self.assertDeviceEqual(d.device, '/device:GPU:0')
self.assertEqual(d.initial_value.op.colocation_groups(),
d.op.colocation_groups())
self.assertDeviceEqual(e.device, '/device:GPU:0')
self.assertDeviceEqual(e.initial_value.device, '/cpu:99')
class ModelVariablesTest(test.TestCase):
def testNameAndShape(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.model_variable('a', [5])
self.assertEquals(a.op.name, 'A/a')
self.assertListEqual(a.get_shape().as_list(), [5])
self.assertListEqual([a], variables_lib2.get_model_variables('A'))
def testNotInLocalVariables(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.model_variable('a', [5])
self.assertTrue(a in variables_lib.global_variables())
self.assertTrue(a in ops.get_collection(ops.GraphKeys.MODEL_VARIABLES))
self.assertFalse(a in variables_lib.local_variables())
def testGetVariablesReturns(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.model_variable('a', [5])
with variable_scope.variable_scope('B'):
b = variables_lib2.model_variable('a', [5])
self.assertEquals([a], variables_lib2.get_variables('A'))
self.assertEquals([b], variables_lib2.get_variables('B'))
def testGetModelVariables(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.model_variable('a', [5])
with variable_scope.variable_scope('B'):
b = variables_lib2.model_variable('a', [5])
self.assertEquals([a], variables_lib2.get_model_variables('A'))
self.assertEquals([b], variables_lib2.get_model_variables('B'))
def testGetTrainableVariables(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
variables_lib2.local_variable([5])
a = variables_lib.VariableV1([5])
with variable_scope.variable_scope('B'):
variables_lib2.local_variable([5])
b = variables_lib.VariableV1([5])
self.assertEquals([a], variables_lib2.get_trainable_variables('A'))
self.assertEquals([b], variables_lib2.get_trainable_variables('B'))
def testGetLocalVariables(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
_ = variables_lib2.model_variable('a', [5])
with variable_scope.variable_scope('B'):
_ = variables_lib2.model_variable('a', [5])
self.assertEquals([], variables_lib2.get_local_variables('A'))
self.assertEquals([], variables_lib2.get_local_variables('B'))
def testInitializedVariableValue(self):
with self.cached_session() as sess:
a = variables_lib2.model_variable(
'a', [5], initializer=init_ops.ones_initializer())
sess.run(variables_lib.global_variables_initializer())
self.assertAllEqual(a.eval(), [1] * 5)
def testDeviceFn(self):
class DevFn(object):
def __init__(self):
self.counter = -1
def __call__(self, op):
self.counter += 1
return '/cpu:%d' % self.counter
with ops.Graph().as_default():
with arg_scope([variables_lib2.model_variable], device=DevFn()):
a = variables_lib2.model_variable('a', [5])
b = variables_lib2.model_variable('b', [20])
self.assertDeviceEqual(a.device, '/cpu:0')
self.assertEqual(a.initial_value.op.colocation_groups(),
a.op.colocation_groups())
self.assertDeviceEqual(b.device, '/cpu:1')
self.assertEqual(b.initial_value.op.colocation_groups(),
b.op.colocation_groups())
def testVariableWithVariableDeviceChooser(self):
with ops.Graph().as_default():
device_fn = variables_lib2.VariableDeviceChooser()
with arg_scope([variables_lib2.model_variable], device=device_fn):
a = variables_lib2.model_variable('a', [5])
b = variables_lib2.model_variable('b', [20])
self.assertDeviceEqual(a.device, 'cpu:0')
self.assertEqual(a.initial_value.op.colocation_groups(),
a.op.colocation_groups())
self.assertDeviceEqual(b.device, 'cpu:0')
        self.assertEqual(b.initial_value.op.colocation_groups(),
                         b.op.colocation_groups())
class GetVariablesCollections(test.TestCase):
def testVariableCollection(self):
with self.cached_session():
a = variables_lib2.variable('a', [], collections='A')
b = variables_lib2.variable('b', [], collections='B')
self.assertEquals(a, ops.get_collection('A')[0])
self.assertEquals(b, ops.get_collection('B')[0])
def testVariableCollections(self):
with self.cached_session():
a = variables_lib2.variable('a', [], collections=['A', 'C'])
b = variables_lib2.variable('b', [], collections=['B', 'C'])
self.assertEquals(a, ops.get_collection('A')[0])
self.assertEquals(b, ops.get_collection('B')[0])
self.assertListEqual([a, b], ops.get_collection('C'))
def testVariableCollectionsWithArgScope(self):
with self.cached_session():
with arg_scope([variables_lib2.variable], collections='A'):
a = variables_lib2.variable('a', [])
b = variables_lib2.variable('b', [])
self.assertListEqual([a, b], ops.get_collection('A'))
def testVariableCollectionsWithArgScopeNested(self):
with self.cached_session():
with arg_scope([variables_lib2.variable], collections='A'):
a = variables_lib2.variable('a', [])
with arg_scope([variables_lib2.variable], collections='B'):
b = variables_lib2.variable('b', [])
self.assertEquals(a, ops.get_collection('A')[0])
self.assertEquals(b, ops.get_collection('B')[0])
def testVariableCollectionsWithArgScopeNonNested(self):
with self.cached_session():
with arg_scope([variables_lib2.variable], collections='A'):
a = variables_lib2.variable('a', [])
with arg_scope([variables_lib2.variable], collections='B'):
b = variables_lib2.variable('b', [])
variables_lib2.variable('c', [])
self.assertListEqual([a], ops.get_collection('A'))
self.assertListEqual([b], ops.get_collection('B'))
def testVariableRestoreWithArgScopeNested(self):
with self.cached_session():
a = variables_lib2.variable('a', [])
with arg_scope(
[variables_lib2.variable], trainable=False, collections=['A', 'B']):
b = variables_lib2.variable('b', [])
c = variables_lib2.variable('c', [], trainable=False)
self.assertEquals([a, c], variables_lib2.get_variables_to_restore())
self.assertEquals([a], variables_lib.trainable_variables())
self.assertEquals([b], ops.get_collection('A'))
self.assertEquals([b], ops.get_collection('B'))
class GetVariablesBySuffixTest(test.TestCase):
def testGetVariableGivenNameScoped(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
b = variables_lib2.variable('b', [5])
self.assertEquals([a], variables_lib2.get_variables_by_suffix('a'))
self.assertEquals([b], variables_lib2.get_variables_by_suffix('b'))
def testGetVariableWithScope(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
fooa = variables_lib2.variable('fooa', [5])
with variable_scope.variable_scope('B'):
a2 = variables_lib2.variable('a', [5])
matched_variables = variables_lib2.get_variables_by_suffix('a')
self.assertEquals([a, fooa, a2], matched_variables)
matched_variables = variables_lib2.get_variables_by_suffix('/a')
self.assertEquals([a, a2], matched_variables)
matched_variables = variables_lib2.get_variables_by_suffix('a', scope='A')
self.assertEquals([a, fooa], matched_variables)
def testGetVariableWithoutScope(self):
with self.cached_session():
a = variables_lib2.variable('a', [5])
fooa = variables_lib2.variable('fooa', [5])
b_a = variables_lib2.variable('B/a', [5])
matched_variables = variables_lib2.get_variables_by_suffix('a')
self.assertEquals([a, fooa, b_a], matched_variables)
matched_variables = variables_lib2.get_variables_by_suffix('fooa')
self.assertEquals([fooa], matched_variables)
class GetVariablesByNameTest(test.TestCase):
def testGetVariableGivenNameScoped(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
b = variables_lib2.variable('b', [5])
self.assertEquals([a], variables_lib2.get_variables_by_name('a'))
self.assertEquals([b], variables_lib2.get_variables_by_name('b'))
def testGetVariableWithScope(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
fooa = variables_lib2.variable('fooa', [5])
with variable_scope.variable_scope('B'):
a2 = variables_lib2.variable('a', [5])
matched_variables = variables_lib2.get_variables_by_name('a')
self.assertEquals([a, a2], matched_variables)
matched_variables = variables_lib2.get_variables_by_name('fooa')
self.assertEquals([fooa], matched_variables)
matched_variables = variables_lib2.get_variables_by_name('/a')
self.assertEquals([], matched_variables)
matched_variables = variables_lib2.get_variables_by_name('a', scope='A')
self.assertEquals([a], matched_variables)
def testGetVariableWithoutScope(self):
with self.cached_session():
a = variables_lib2.variable('a', [5])
fooa = variables_lib2.variable('fooa', [5])
b_a = variables_lib2.variable('B/a', [5])
matched_variables = variables_lib2.get_variables_by_name('a')
self.assertEquals([a, b_a], matched_variables)
matched_variables = variables_lib2.get_variables_by_name('fooa')
self.assertEquals([fooa], matched_variables)
class GetVariableFullNameTest(test.TestCase):
def testVariable(self):
my_var0 = variables_lib2.variable('my_var0', shape=[])
full_name = variables_lib2.get_variable_full_name(my_var0)
self.assertEquals(full_name, my_var0.op.name)
def testPartitionedVariable(self):
input_full_name = 'my_var0'
partitioner = partitioned_variables.variable_axis_size_partitioner(2)
my_var0 = variables_lib2.variable(
'my_var0', shape=[2, 2], partitioner=partitioner)
for part_var in list(my_var0):
computed_full_name = variables_lib2.get_variable_full_name(part_var)
self.assertEquals(input_full_name, computed_full_name)
class AssignFromValuesTest(test.TestCase):
def testNoScopes(self):
init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))
init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))
with self.cached_session() as sess:
initializer = init_ops.truncated_normal_initializer(stddev=.1)
var0 = variables_lib2.variable(
'my_var0', shape=[1, 3, 1], initializer=initializer)
var1 = variables_lib2.variable(
'my_var1', shape=[2, 1, 2], initializer=initializer)
var_names_to_values = {'my_var0': init_value0, 'my_var1': init_value1}
assign_op, feed_dict = variables_lib2.assign_from_values(
var_names_to_values)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
sess.run(assign_op, feed_dict)
# Request and test the variable values:
var0, var1 = sess.run([var0, var1])
self.assertAllEqual(init_value0, var0)
self.assertAllEqual(init_value1, var1)
def testWithScopes(self):
init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))
init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))
with self.cached_session() as sess:
initializer = init_ops.truncated_normal_initializer(stddev=.1)
with variable_scope.variable_scope('my_model/my_layer0'):
var0 = variables_lib2.variable(
'my_var0', shape=[1, 3, 1], initializer=initializer)
with variable_scope.variable_scope('my_model/my_layer1'):
var1 = variables_lib2.variable(
'my_var1', shape=[2, 1, 2], initializer=initializer)
var_names_to_values = {
'my_model/my_layer0/my_var0': init_value0,
'my_model/my_layer1/my_var1': init_value1
}
assign_op, feed_dict = variables_lib2.assign_from_values(
var_names_to_values)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
sess.run(assign_op, feed_dict)
# Request and test the variable values:
var0, var1 = sess.run([var0, var1])
self.assertAllEqual(init_value0, var0)
self.assertAllEqual(init_value1, var1)
class AssignFromValuesFnTest(test.TestCase):
def testNoScopes(self):
init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))
init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))
with self.cached_session() as sess:
initializer = init_ops.truncated_normal_initializer(stddev=.1)
var0 = variables_lib2.variable(
'my_var0', shape=[1, 3, 1], initializer=initializer)
var1 = variables_lib2.variable(
'my_var1', shape=[2, 1, 2], initializer=initializer)
var_names_to_values = {'my_var0': init_value0, 'my_var1': init_value1}
init_fn = variables_lib2.assign_from_values_fn(var_names_to_values)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
init_fn(sess)
# Request and test the variable values:
var0, var1 = sess.run([var0, var1])
self.assertAllEqual(init_value0, var0)
self.assertAllEqual(init_value1, var1)
def testWithScopes(self):
init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))
init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))
with self.cached_session() as sess:
initializer = init_ops.truncated_normal_initializer(stddev=.1)
with variable_scope.variable_scope('my_model/my_layer0'):
var0 = variables_lib2.variable(
'my_var0', shape=[1, 3, 1], initializer=initializer)
with variable_scope.variable_scope('my_model/my_layer1'):
var1 = variables_lib2.variable(
'my_var1', shape=[2, 1, 2], initializer=initializer)
var_names_to_values = {
'my_model/my_layer0/my_var0': init_value0,
'my_model/my_layer1/my_var1': init_value1
}
init_fn = variables_lib2.assign_from_values_fn(var_names_to_values)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
init_fn(sess)
# Request and test the variable values:
var0, var1 = sess.run([var0, var1])
self.assertAllEqual(init_value0, var0)
self.assertAllEqual(init_value1, var1)
class AssignFromCheckpointTest(test.TestCase):
def create_checkpoint_from_values(self,
var_names_to_values,
checkpoint_dir,
global_step=None):
"""Creates a checkpoint from a mapping of name to values in model_dir.
Args:
var_names_to_values: a map from variable names to values.
checkpoint_dir: the directory where the checkpoint will be saved.
global_step: the global step used to save the checkpoint.
Returns:
the model_path to the checkpoint.
"""
var_list = []
with session.Session('', graph=ops.Graph()) as sess:
# Create a set of variables to save in the checkpoint.
for var_name in var_names_to_values:
var_value = var_names_to_values[var_name]
var_list.append(variables_lib.VariableV1(var_value, name=var_name))
saver = saver_lib.Saver(var_list)
init_op = variables_lib.variables_initializer(var_list)
sess.run(init_op)
# Save the initialized values in the file at 'checkpoint_dir'
return saver.save(sess, checkpoint_dir, global_step=global_step)
def testLoadExistingVariables(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(), 'load_existing_variables'))
init_value0 = 10.0
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = variables_lib2.variable('my_var0', shape=[])
var1 = variables_lib2.variable('my_var1', shape=[])
vars_to_restore = {'v0': var0, 'v1': var1}
op, feed_dict = variables_lib2.assign_from_checkpoint(
model_path, vars_to_restore)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
sess.run(op, feed_dict)
# Request and test the variable values:
self.assertEqual(init_value0, var0.eval())
self.assertEqual(init_value1, var1.eval())
# Tests restoring PartitionedVariables and tests using a dictionary
# of lists as the assign_from_checkpoint() var_list param.
def testLoadPartitionedVariables(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(), 'load_partitioned_variables'))
init_value0 = np.array([[10.0, 11.0], [12.0, 13.0]])
init_value1 = np.array([20.0]) # Partitioned into 1 part, edge case.
var_names_to_values = {'var0': init_value0, 'var1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
# var0 and var1 are PartitionedVariables.
partitioner = partitioned_variables.variable_axis_size_partitioner(2)
var0 = variables_lib2.variable(
'var0', shape=init_value0.shape, partitioner=partitioner)
var0full = variables_lib2.variable('var0full', shape=init_value0.shape)
var1 = variables_lib2.variable(
'var1', shape=init_value1.shape, partitioner=partitioner)
# Convert var0 and var1 into a list of underlying variables.
vars_to_restore = {'var0': list(var0) + [var0full], 'var1': list(var1)}
op, feed_dict = variables_lib2.assign_from_checkpoint(
model_path, vars_to_restore)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
sess.run(op, feed_dict)
      # Request and test the variable values. PartitionedVariables can't
      # be evaluated directly, so we wrap them in an identity.
self.assertTrue(
np.array_equal(init_value0,
array_ops.identity(var0).eval()))
self.assertTrue(np.array_equal(init_value0, var0full.eval()))
self.assertTrue(
np.array_equal(init_value1,
array_ops.identity(var1).eval()))
def testRaisesValueErrorIfAVariableIsntFound(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(),
'raises_value_error_if_var_isnt_found'))
init_value0 = 10.0
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
with self.cached_session():
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = variables_lib2.variable('my_var0', shape=[])
var1 = variables_lib2.variable('my_var1', shape=[])
vars_to_restore = {'v0_fake': var0, 'v1': var1}
with self.assertRaises(ValueError):
variables_lib2.assign_from_checkpoint(model_path, vars_to_restore)
def testInitFromCheckpointWithScopes(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(),
'init_from_checkpoint_with_scopes'))
init_value0 = np.asarray(
[1.0, 3.0, 9.0], dtype=np.float32).reshape((1, 3, 1))
init_value1 = np.asarray(
[2.0, 4.0, 6.0, 8.0], dtype=np.float32).reshape((2, 1, 2))
var_names_to_values = {'layer0/v0': init_value0, 'layer1/v1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
with variable_scope.variable_scope('my_model/my_layer0'):
var0 = variables_lib2.variable('my_var0', shape=init_value0.shape)
with variable_scope.variable_scope('my_model/my_layer1'):
var1 = variables_lib2.variable('my_var1', shape=init_value1.shape)
vars_to_restore = {'layer0/v0': var0, 'layer1/v1': var1}
op, feed_dict = variables_lib2.assign_from_checkpoint(
model_path, vars_to_restore)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
sess.run(op, feed_dict)
# Request and test the variable values:
self.assertAllEqual(init_value0, var0.eval())
self.assertAllEqual(init_value1, var1.eval())
class AssignFromCheckpointFnTest(test.TestCase):
def create_checkpoint_from_values(self,
var_names_to_values,
checkpoint_dir,
global_step=None):
"""Creates a checkpoint from a mapping of name to values in model_dir.
Args:
var_names_to_values: a map from variable names to values.
checkpoint_dir: the directory where the checkpoint will be saved.
global_step: the global step used to save the checkpoint.
Returns:
the model_path to the checkpoint.
"""
var_list = []
with session.Session('', graph=ops.Graph()) as sess:
# Create a set of variables to save in the checkpoint.
for var_name in var_names_to_values:
var_value = var_names_to_values[var_name]
var_list.append(variables_lib.VariableV1(var_value, name=var_name))
saver = saver_lib.Saver(var_list)
init_op = variables_lib.variables_initializer(var_list)
sess.run(init_op)
# Save the initialized values in the file at 'checkpoint_dir'
return saver.save(sess, checkpoint_dir, global_step=global_step)
def testLoadExistingVariables(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(), 'load_existing_variables'))
if gfile.Exists(model_dir):
gfile.DeleteRecursively(model_dir)
init_value0 = 10.0
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = variables_lib2.variable('my_var0', shape=[])
var1 = variables_lib2.variable('my_var1', shape=[])
vars_to_restore = {'v0': var0, 'v1': var1}
init_fn = variables_lib2.assign_from_checkpoint_fn(
model_path, vars_to_restore)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
init_fn(sess)
# Request and test the variable values:
self.assertEqual(init_value0, var0.eval())
self.assertEqual(init_value1, var1.eval())
def testLoadExistingVariablesDifferentShapeDefaultDoesNotAllowReshape(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(),
'load_existing_vars_no_reshape'))
if gfile.Exists(model_dir):
gfile.DeleteRecursively(model_dir)
init_value0 = [[10.0, 11.0]]
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = variables_lib2.variable('my_var0', shape=[2, 1])
var1 = variables_lib2.variable('my_var1', shape=[])
vars_to_restore = {'v0': var0, 'v1': var1}
init_fn = variables_lib2.assign_from_checkpoint_fn(
model_path, vars_to_restore)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
with self.assertRaises(errors_impl.InvalidArgumentError):
init_fn(sess)
def testLoadExistingVariablesDifferentShapeAllowReshape(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(
self.get_temp_dir(),
'load_existing_variables_different_shape_allow_reshape'))
if gfile.Exists(model_dir):
gfile.DeleteRecursively(model_dir)
init_value0 = [[10.0, 11.0]]
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = variables_lib2.variable('my_var0', shape=[2, 1])
var1 = variables_lib2.variable('my_var1', shape=[])
vars_to_restore = {'v0': var0, 'v1': var1}
init_fn = variables_lib2.assign_from_checkpoint_fn(
model_path, vars_to_restore, reshape_variables=True)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
init_fn(sess)
# Request and test the variable values:
self.assertAllEqual(np.transpose(np.array(init_value0)), var0.eval())
self.assertEqual(init_value1, var1.eval())
def testNotFoundError(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(), 'not_found_error'))
if gfile.Exists(model_dir):
gfile.DeleteRecursively(model_dir)
init_value0 = 10.0
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = variables_lib2.variable('my_var0', shape=[])
var1 = variables_lib2.variable('my_var1', shape=[])
var2 = variables_lib2.variable('my_var2', shape=[])
vars_to_restore = {'v0': var0, 'v1': var1, 'v2': var2}
init_fn = variables_lib2.assign_from_checkpoint_fn(
model_path, vars_to_restore)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
with self.assertRaises(errors_impl.NotFoundError):
init_fn(sess)
def testMissingVariablesList(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(), 'missing_variables_list'))
if gfile.Exists(model_dir):
gfile.DeleteRecursively(model_dir)
init_value0 = 10.0
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = variables_lib2.variable('v0', shape=[])
var1 = variables_lib2.variable('v1', shape=[])
var2 = variables_lib2.variable('v2', shape=[])
vars_to_restore = [var0, var1, var2]
init_fn = variables_lib2.assign_from_checkpoint_fn(
model_path, vars_to_restore, ignore_missing_vars=True)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
init_fn(sess)
# Request and test the variable values:
self.assertEqual(init_value0, var0.eval())
self.assertEqual(init_value1, var1.eval())
def testMissingVariablesDict(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(), 'missing_variables_dict'))
if gfile.Exists(model_dir):
gfile.DeleteRecursively(model_dir)
init_value0 = 10.0
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = variables_lib2.variable('my_var0', shape=[])
var1 = variables_lib2.variable('my_var1', shape=[])
var2 = variables_lib2.variable('my_var2', shape=[])
vars_to_restore = {'v0': var0, 'v1': var1, 'v2': var2}
init_fn = variables_lib2.assign_from_checkpoint_fn(
model_path, vars_to_restore, ignore_missing_vars=True)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
init_fn(sess)
# Request and test the variable values:
self.assertEqual(init_value0, var0.eval())
self.assertEqual(init_value1, var1.eval())
class ZeroInitializerOpTest(test.TestCase):
def _testZeroInitializer(self, shape, initializer, use_init):
var = variables_lib.VariableV1(initializer)
var_zero = variables_lib2.zero_initializer(var)
with self.cached_session() as sess:
with self.assertRaisesOpError('Attempting to use uninitialized value'):
var.eval()
if use_init:
sess.run(var.initializer)
with self.assertRaisesOpError('input is already initialized'):
var_zero.eval()
self.assertAllClose(np.ones(shape), var.eval())
else:
var_zero.eval()
self.assertAllClose(np.zeros(shape), var.eval())
def testZeroInitializer(self):
for dtype in (dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64):
for use_init in (False, True):
self._testZeroInitializer([10, 20], array_ops.ones(
[10, 20], dtype=dtype), use_init)
class ZeroVarInitializerOpTest(test.TestCase):
def _testZeroVarInitializer(self, shape, initializer, use_init):
var = resource_variable_ops.ResourceVariable(initializer)
var_zero = variables_lib2.zero_initializer(var)
with self.cached_session() as sess:
with self.assertRaisesOpError('Error while reading resource variable'):
var.eval()
if use_init:
sess.run(var.initializer)
with self.assertRaisesOpError('input is already initialized'):
var_zero.eval()
self.assertAllClose(np.ones(shape), var.eval())
else:
var_zero.eval()
self.assertAllClose(np.zeros(shape), var.eval())
def testZeroVarInitializer(self):
for dtype in (dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64):
for use_init in (False, True):
self._testZeroVarInitializer([10, 20],
array_ops.ones([10, 20], dtype=dtype),
use_init)
class FilterVariablesTest(test.TestCase):
def setUp(self):
g = ops.Graph()
with g.as_default():
var_list = []
var_list.append(variables_lib.VariableV1(0, name='conv1/weights'))
var_list.append(variables_lib.VariableV1(0, name='conv1/biases'))
var_list.append(variables_lib.VariableV1(0, name='conv2/weights'))
var_list.append(variables_lib.VariableV1(0, name='conv2/biases'))
var_list.append(variables_lib.VariableV1(0, name='clfs/weights'))
var_list.append(variables_lib.VariableV1(0, name='clfs/biases'))
self._var_list = var_list
def _test_filter_variables(self,
expected_var_names,
include_patterns=None,
exclude_patterns=None,
reg_search=True):
filtered_var_list = variables_lib2.filter_variables(
self._var_list,
include_patterns=include_patterns,
exclude_patterns=exclude_patterns,
reg_search=reg_search)
filtered_var_names = [var.op.name for var in filtered_var_list]
for name in filtered_var_names:
self.assertIn(name, expected_var_names)
for name in expected_var_names:
self.assertIn(name, filtered_var_names)
self.assertEqual(len(filtered_var_names), len(expected_var_names))
def testNoFiltering(self):
self._test_filter_variables(expected_var_names=[
'conv1/weights', 'conv1/biases', 'conv2/weights', 'conv2/biases',
'clfs/weights', 'clfs/biases'
])
def testIncludeBiases(self):
self._test_filter_variables(
expected_var_names=['conv1/biases', 'conv2/biases', 'clfs/biases'],
include_patterns=['biases'])
def testExcludeWeights(self):
self._test_filter_variables(
expected_var_names=['conv1/biases', 'conv2/biases', 'clfs/biases'],
exclude_patterns=['weights'])
def testExcludeWeightsAndConv1(self):
self._test_filter_variables(
expected_var_names=['conv2/biases', 'clfs/biases'],
exclude_patterns=['weights', 'conv1'])
def testTwoIncludePatternsEnsureNoVariablesTwiceInFilteredList(self):
self._test_filter_variables(
expected_var_names=[
'conv1/weights', 'conv1/biases', 'conv2/weights', 'clfs/weights'
],
include_patterns=['conv1', 'weights'])
def testIncludeConv1ExcludeBiases(self):
self._test_filter_variables(
expected_var_names=['conv1/weights'],
include_patterns=['conv1'],
exclude_patterns=['biases'])
def testRegMatchIncludeBiases(self):
self._test_filter_variables(
expected_var_names=['conv1/biases', 'conv2/biases', 'clfs/biases'],
include_patterns=['.*biases'],
reg_search=False)
def testRegMatchIncludeBiasesWithIncompleteRegExpHasNoMatches(self):
self._test_filter_variables(
expected_var_names=[], include_patterns=['biases'], reg_search=False)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/framework/python/ops/variables_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Audio processing and decoding ops.
@@decode_wav
@@encode_wav
@@audio_spectrogram
@@mfcc
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_audio_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__, [])
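# --- Illustrative usage sketch (appended; not part of the original module) ---
# A minimal, hedged example of chaining these ops: synthesize a short sine
# tone, round-trip it through encode_wav/decode_wav, then compute a
# spectrogram and MFCC features from it. The window/stride values are
# arbitrary illustrative choices and the keyword names follow the generated
# op signatures as an assumption; intended for TF 1.x graph mode.
if __name__ == "__main__":
  import numpy as np
  from tensorflow.python.client import session
  rate = 16000
  times = np.arange(rate, dtype=np.float32) / rate
  tone = np.sin(2.0 * np.pi * 440.0 * times).reshape(-1, 1)  # [samples, channels]
  wav_bytes = encode_wav(tone, sample_rate=rate)
  decoded = decode_wav(wav_bytes)
  spectrogram = audio_spectrogram(
      decoded.audio, window_size=1024, stride=512, magnitude_squared=True)
  features = mfcc(spectrogram, decoded.sample_rate)
  with session.Session() as sess:
    print("MFCC feature shape:", sess.run(features).shape)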
|
tensorflow-master
|
tensorflow/contrib/framework/python/ops/audio_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration and ODE solvers.
See the
[Contrib Integrate](https://tensorflow.org/api_guides/python/contrib.integrate)
guide.
@@odeint
@@odeint_fixed
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.integrate.python.ops.odes import *
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
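# --- Illustrative usage sketch (appended; not part of the original module) ---
# A hedged, minimal example of the two exported solvers on dy/dt = -y with
# y(0) = 1, whose exact solution is exp(-t). The time grid below is an
# arbitrary illustrative choice; intended for TF 1.x graph mode.
if __name__ == "__main__":
  import numpy as np
  from tensorflow.python.client import session
  t = np.linspace(0.0, 2.0, 5)
  y_adaptive = odeint(lambda y, _: -y, 1.0, t)                   # adaptive dopri5
  y_fixed = odeint_fixed(lambda y, _: -y, 1.0, t, method="rk4")  # fixed-grid RK4
  with session.Session() as sess:
    print(sess.run(y_adaptive))  # both should be close to np.exp(-t)
    print(sess.run(y_fixed))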
|
tensorflow-master
|
tensorflow/contrib/integrate/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ODE solvers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.integrate.python.ops import odes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class OdeIntTest(test.TestCase):
def setUp(self):
super(OdeIntTest, self).setUp()
# simple defaults (solution is a sin-wave)
matrix = constant_op.constant([[0, 1], [-1, 0]], dtype=dtypes.float64)
self.func = lambda y, t: math_ops.matmul(matrix, y)
self.y0 = np.array([[1.0], [0.0]])
def test_odeint_exp(self):
# Test odeint by an exponential function:
# dy / dt = y, y(0) = 1.0.
# Its analytical solution is y = exp(t).
func = lambda y, t: y
y0 = constant_op.constant(1.0, dtype=dtypes.float64)
t = np.linspace(0.0, 1.0, 11)
y_solved = odes.odeint(func, y0, t)
self.assertIn('odeint', y_solved.name)
self.assertEqual(y_solved.get_shape(), tensor_shape.TensorShape([11]))
with self.cached_session() as sess:
y_solved = sess.run(y_solved)
y_true = np.exp(t)
self.assertAllClose(y_true, y_solved)
def test_odeint_complex(self):
# Test a complex, linear ODE:
# dy / dt = k * y, y(0) = 1.0.
# Its analytical solution is y = exp(k * t).
k = 1j - 0.1
func = lambda y, t: k * y
t = np.linspace(0.0, 1.0, 11)
y_solved = odes.odeint(func, 1.0 + 0.0j, t)
with self.cached_session() as sess:
y_solved = sess.run(y_solved)
y_true = np.exp(k * t)
self.assertAllClose(y_true, y_solved)
def test_odeint_riccati(self):
    # The Riccati equation is:
# dy / dt = (y - t) ** 2 + 1.0, y(0) = 0.5.
# Its analytical solution is y = 1.0 / (2.0 - t) + t.
func = lambda t, y: (y - t)**2 + 1.0
t = np.linspace(0.0, 1.0, 11)
y_solved = odes.odeint(func, np.float64(0.5), t)
with self.cached_session() as sess:
y_solved = sess.run(y_solved)
y_true = 1.0 / (2.0 - t) + t
self.assertAllClose(y_true, y_solved)
def test_odeint_2d_linear(self):
# Solve the 2D linear differential equation:
# dy1 / dt = 3.0 * y1 + 4.0 * y2,
# dy2 / dt = -4.0 * y1 + 3.0 * y2,
# y1(0) = 0.0,
# y2(0) = 1.0.
# Its analytical solution is
# y1 = sin(4.0 * t) * exp(3.0 * t),
# y2 = cos(4.0 * t) * exp(3.0 * t).
matrix = constant_op.constant(
[[3.0, 4.0], [-4.0, 3.0]], dtype=dtypes.float64)
func = lambda y, t: math_ops.matmul(matrix, y)
y0 = constant_op.constant([[0.0], [1.0]], dtype=dtypes.float64)
t = np.linspace(0.0, 1.0, 11)
y_solved = odes.odeint(func, y0, t)
with self.cached_session() as sess:
y_solved = sess.run(y_solved)
y_true = np.zeros((len(t), 2, 1))
y_true[:, 0, 0] = np.sin(4.0 * t) * np.exp(3.0 * t)
y_true[:, 1, 0] = np.cos(4.0 * t) * np.exp(3.0 * t)
self.assertAllClose(y_true, y_solved, atol=1e-5)
def test_odeint_higher_rank(self):
func = lambda y, t: y
y0 = constant_op.constant(1.0, dtype=dtypes.float64)
t = np.linspace(0.0, 1.0, 11)
for shape in [(), (1,), (1, 1)]:
expected_shape = (len(t),) + shape
y_solved = odes.odeint(func, array_ops.reshape(y0, shape), t)
self.assertEqual(y_solved.get_shape(),
tensor_shape.TensorShape(expected_shape))
with self.cached_session() as sess:
y_solved = sess.run(y_solved)
      self.assertEqual(y_solved.shape, expected_shape)
def test_odeint_all_dtypes(self):
func = lambda y, t: y
t = np.linspace(0.0, 1.0, 11)
for y0_dtype in [
dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128
]:
for t_dtype in [dtypes.float32, dtypes.float64]:
y0 = math_ops.cast(1.0, y0_dtype)
y_solved = odes.odeint(func, y0, math_ops.cast(t, t_dtype))
with self.cached_session() as sess:
y_solved = sess.run(y_solved)
expected = np.asarray(np.exp(t))
self.assertAllClose(y_solved, expected, rtol=1e-5)
self.assertEqual(dtypes.as_dtype(y_solved.dtype), y0_dtype)
def test_odeint_required_dtypes(self):
with self.assertRaisesRegexp(TypeError, '`y0` must have a floating point'):
odes.odeint(self.func, math_ops.cast(self.y0, dtypes.int32), [0, 1])
with self.assertRaisesRegexp(TypeError, '`t` must have a floating point'):
odes.odeint(self.func, self.y0, math_ops.cast([0, 1], dtypes.int32))
def test_odeint_runtime_errors(self):
with self.assertRaisesRegexp(ValueError, 'cannot supply `options` without'):
odes.odeint(self.func, self.y0, [0, 1], options={'first_step': 1.0})
y = odes.odeint(
self.func,
self.y0, [0, 1],
method='dopri5',
options={'max_num_steps': 0})
with self.cached_session() as sess:
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
'max_num_steps'):
sess.run(y)
y = odes.odeint(self.func, self.y0, [1, 0])
with self.cached_session() as sess:
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
'monotonic increasing'):
sess.run(y)
def test_odeint_different_times(self):
# integrate steps should be independent of interpolation times
times0 = np.linspace(0, 10, num=11, dtype=float)
times1 = np.linspace(0, 10, num=101, dtype=float)
with self.cached_session() as sess:
y_solved_0, info_0 = sess.run(
odes.odeint(self.func, self.y0, times0, full_output=True))
y_solved_1, info_1 = sess.run(
odes.odeint(self.func, self.y0, times1, full_output=True))
self.assertAllClose(y_solved_0, y_solved_1[::10])
self.assertEqual(info_0['num_func_evals'], info_1['num_func_evals'])
self.assertAllEqual(info_0['integrate_points'], info_1['integrate_points'])
self.assertAllEqual(info_0['error_ratio'], info_1['error_ratio'])
def test_odeint_5th_order_accuracy(self):
t = [0, 20]
kwargs = dict(
full_output=True, method='dopri5', options=dict(max_num_steps=2000))
with self.cached_session() as sess:
_, info_0 = sess.run(
odes.odeint(self.func, self.y0, t, rtol=0, atol=1e-6, **kwargs))
_, info_1 = sess.run(
odes.odeint(self.func, self.y0, t, rtol=0, atol=1e-9, **kwargs))
self.assertAllClose(
info_0['integrate_points'].size * 1000**0.2,
float(info_1['integrate_points'].size),
rtol=0.01)
class StepSizeTest(test.TestCase):
def test_error_ratio_one(self):
new_step = odes._optimal_step_size(
last_step=constant_op.constant(1.0),
error_ratio=constant_op.constant(1.0))
with self.cached_session() as sess:
new_step = sess.run(new_step)
self.assertAllClose(new_step, 0.9)
def test_ifactor(self):
new_step = odes._optimal_step_size(
last_step=constant_op.constant(1.0),
error_ratio=constant_op.constant(0.0))
with self.cached_session() as sess:
new_step = sess.run(new_step)
self.assertAllClose(new_step, 10.0)
def test_dfactor(self):
new_step = odes._optimal_step_size(
last_step=constant_op.constant(1.0),
error_ratio=constant_op.constant(1e6))
with self.cached_session() as sess:
new_step = sess.run(new_step)
self.assertAllClose(new_step, 0.2)
class InterpolationTest(test.TestCase):
def test_5th_order_polynomial(self):
# this should be an exact fit
f = lambda x: x**4 + x**3 - 2 * x**2 + 4 * x + 5
f_prime = lambda x: 4 * x**3 + 3 * x**2 - 4 * x + 4
coeffs = odes._interp_fit(
f(0.0), f(10.0), f(5.0), f_prime(0.0), f_prime(10.0), 10.0)
times = np.linspace(0, 10, dtype=np.float32)
y_fit = array_ops.stack(
[odes._interp_evaluate(coeffs, 0.0, 10.0, t) for t in times])
y_expected = f(times)
with self.cached_session() as sess:
y_actual = sess.run(y_fit)
self.assertAllClose(y_expected, y_actual)
# attempt interpolation outside bounds
y_invalid = odes._interp_evaluate(coeffs, 0.0, 10.0, 100.0)
with self.cached_session() as sess:
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(y_invalid)
class OdeIntFixedTest(test.TestCase):
def _test_integrate_sine(self, method, t, dt=None):
def evol_func(y, t):
del t
return array_ops.stack([y[1], -y[0]])
y0 = [0., 1.]
y_grid = odes.odeint_fixed(evol_func, y0, t, dt, method=method)
with self.cached_session() as sess:
y_grid_array = sess.run(y_grid)
np.testing.assert_allclose(
y_grid_array[:, 0], np.sin(t), rtol=1e-2, atol=1e-2)
def _test_integrate_gaussian(self, method, t, dt=None):
def evol_func(y, t):
return -math_ops.cast(t, dtype=y.dtype) * y[0]
y0 = [1.]
y_grid = odes.odeint_fixed(evol_func, y0, t, dt, method=method)
with self.cached_session() as sess:
y_grid_array = sess.run(y_grid)
np.testing.assert_allclose(
y_grid_array[:, 0], np.exp(-t**2 / 2), rtol=1e-2, atol=1e-2)
def _test_integrate_sine_all(self, method):
uniform_time_grid = np.linspace(0., 10., 200)
non_uniform_time_grid = np.asarray([0.0, 0.4, 4.7, 5.2, 7.0])
uniform_dt = 0.02
non_uniform_dt = np.asarray([0.01, 0.001, 0.05, 0.03])
self._test_integrate_sine(method, uniform_time_grid)
self._test_integrate_sine(method, non_uniform_time_grid, uniform_dt)
self._test_integrate_sine(method, non_uniform_time_grid, non_uniform_dt)
def _test_integrate_gaussian_all(self, method):
uniform_time_grid = np.linspace(0., 2., 100)
non_uniform_time_grid = np.asarray([0.0, 0.1, 0.7, 1.2, 2.0])
uniform_dt = 0.01
non_uniform_dt = np.asarray([0.01, 0.001, 0.1, 0.03])
self._test_integrate_gaussian(method, uniform_time_grid)
self._test_integrate_gaussian(method, non_uniform_time_grid, uniform_dt)
self._test_integrate_gaussian(method, non_uniform_time_grid, non_uniform_dt)
def _test_everything(self, method):
self._test_integrate_sine_all(method)
self._test_integrate_gaussian_all(method)
def test_midpoint(self):
self._test_everything('midpoint')
def test_rk4(self):
self._test_everything('rk4')
def test_dt_size_exceptions(self):
times = np.linspace(0., 2., 100)
dt = np.ones(99) * 0.01
dt_wrong_length = np.asarray([0.01, 0.001, 0.1, 0.03])
dt_wrong_dim = np.expand_dims(np.linspace(0., 2., 99), axis=0)
times_wrong_dim = np.expand_dims(np.linspace(0., 2., 100), axis=0)
with self.assertRaises(ValueError):
self._test_integrate_gaussian('midpoint', times, dt_wrong_length)
with self.assertRaises(ValueError):
self._test_integrate_gaussian('midpoint', times, dt_wrong_dim)
with self.assertRaises(ValueError):
self._test_integrate_gaussian('midpoint', times_wrong_dim, dt)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/integrate/python/ops/odes_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ODE solvers for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
_ButcherTableau = collections.namedtuple('_ButcherTableau',
'alpha beta c_sol c_mid c_error')
# Parameters from Shampine (1986), section 4.
_DORMAND_PRINCE_TABLEAU = _ButcherTableau(
alpha=[1 / 5, 3 / 10, 4 / 5, 8 / 9, 1., 1.],
beta=[
[1 / 5],
[3 / 40, 9 / 40],
[44 / 45, -56 / 15, 32 / 9],
[19372 / 6561, -25360 / 2187, 64448 / 6561, -212 / 729],
[9017 / 3168, -355 / 33, 46732 / 5247, 49 / 176, -5103 / 18656],
[35 / 384, 0, 500 / 1113, 125 / 192, -2187 / 6784, 11 / 84],
],
c_sol=[35 / 384, 0, 500 / 1113, 125 / 192, -2187 / 6784, 11 / 84, 0],
c_mid=[
6025192743 / 30085553152 / 2, 0, 51252292925 / 65400821598 / 2,
-2691868925 / 45128329728 / 2, 187940372067 / 1594534317056 / 2,
-1776094331 / 19743644256 / 2, 11237099 / 235043384 / 2
],
c_error=[
1951 / 21600 - 35 / 384,
0,
22642 / 50085 - 500 / 1113,
451 / 720 - 125 / 192,
-12231 / 42400 - -2187 / 6784,
649 / 6300 - 11 / 84,
1 / 60,
],)
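# Illustrative (hedged) consistency check of the tableau above; it is not
# called by the solver. For an explicit Runge-Kutta method, each row of
# `beta` sums to the matching `alpha` node and the solution weights `c_sol`
# sum to 1.
def _check_dormand_prince_tableau(tableau=_DORMAND_PRINCE_TABLEAU, tol=1e-10):
  for alpha_i, beta_i in zip(tableau.alpha, tableau.beta):
    assert abs(sum(beta_i) - alpha_i) < tol, (alpha_i, beta_i)
  assert abs(sum(tableau.c_sol) - 1.0) < tol, tableau.c_sol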
def _possibly_nonzero(x):
return isinstance(x, ops.Tensor) or x != 0
def _scaled_dot_product(scale, xs, ys, name=None):
"""Calculate a scaled, vector inner product between lists of Tensors."""
with ops.name_scope(name, 'scaled_dot_product', [scale, xs, ys]) as scope:
# Some of the parameters in our Butcher tableau include zeros. Using
# _possibly_nonzero lets us avoid wasted computation.
return math_ops.add_n(
[(scale * x) * y for x, y in zip(xs, ys)
if _possibly_nonzero(x) and _possibly_nonzero(y)],
name=scope)
def _dot_product(xs, ys, name=None):
"""Calculate the vector inner product between two lists of Tensors."""
with ops.name_scope(name, 'dot_product', [xs, ys]) as scope:
return math_ops.add_n([x * y for x, y in zip(xs, ys)], name=scope)
def _runge_kutta_step(func,
y0,
f0,
t0,
dt,
tableau=_DORMAND_PRINCE_TABLEAU,
name=None):
"""Take an arbitrary Runge-Kutta step and estimate error.
Args:
func: Function to evaluate like `func(y, t)` to compute the time derivative
of `y`.
y0: Tensor initial value for the state.
f0: Tensor initial value for the derivative, computed from `func(y0, t0)`.
t0: float64 scalar Tensor giving the initial time.
dt: float64 scalar Tensor giving the size of the desired time step.
tableau: optional _ButcherTableau describing how to take the Runge-Kutta
step.
name: optional name for the operation.
Returns:
Tuple `(y1, f1, y1_error, k)` giving the estimated function value after
the Runge-Kutta step at `t1 = t0 + dt`, the derivative of the state at `t1`,
estimated error at `t1`, and a list of Runge-Kutta coefficients `k` used for
calculating these terms.
"""
with ops.name_scope(name, 'runge_kutta_step', [y0, f0, t0, dt]) as scope:
y0 = ops.convert_to_tensor(y0, name='y0')
f0 = ops.convert_to_tensor(f0, name='f0')
t0 = ops.convert_to_tensor(t0, name='t0')
dt = ops.convert_to_tensor(dt, name='dt')
dt_cast = math_ops.cast(dt, y0.dtype)
k = [f0]
for alpha_i, beta_i in zip(tableau.alpha, tableau.beta):
ti = t0 + alpha_i * dt
yi = y0 + _scaled_dot_product(dt_cast, beta_i, k)
k.append(func(yi, ti))
if not (tableau.c_sol[-1] == 0 and tableau.c_sol[:-1] == tableau.beta[-1]):
# This property (true for Dormand-Prince) lets us save a few FLOPs.
yi = y0 + _scaled_dot_product(dt_cast, tableau.c_sol, k)
y1 = array_ops.identity(yi, name='%s/y1' % scope)
f1 = array_ops.identity(k[-1], name='%s/f1' % scope)
y1_error = _scaled_dot_product(
dt_cast, tableau.c_error, k, name='%s/y1_error' % scope)
return (y1, f1, y1_error, k)
def _interp_fit(y0, y1, y_mid, f0, f1, dt):
"""Fit coefficients for 4th order polynomial interpolation.
Args:
y0: function value at the start of the interval.
y1: function value at the end of the interval.
y_mid: function value at the mid-point of the interval.
f0: derivative value at the start of the interval.
f1: derivative value at the end of the interval.
dt: width of the interval.
Returns:
List of coefficients `[a, b, c, d, e]` for interpolating with the polynomial
`p = a * x ** 4 + b * x ** 3 + c * x ** 2 + d * x + e` for values of `x`
between 0 (start of interval) and 1 (end of interval).
"""
# a, b, c, d, e = sympy.symbols('a b c d e')
# x, dt, y0, y1, y_mid, f0, f1 = sympy.symbols('x dt y0 y1 y_mid f0 f1')
# p = a * x ** 4 + b * x ** 3 + c * x ** 2 + d * x + e
# sympy.solve([p.subs(x, 0) - y0,
# p.subs(x, 1 / 2) - y_mid,
# p.subs(x, 1) - y1,
# (p.diff(x) / dt).subs(x, 0) - f0,
# (p.diff(x) / dt).subs(x, 1) - f1],
# [a, b, c, d, e])
# {a: -2.0*dt*f0 + 2.0*dt*f1 - 8.0*y0 - 8.0*y1 + 16.0*y_mid,
# b: 5.0*dt*f0 - 3.0*dt*f1 + 18.0*y0 + 14.0*y1 - 32.0*y_mid,
# c: -4.0*dt*f0 + dt*f1 - 11.0*y0 - 5.0*y1 + 16.0*y_mid,
# d: dt*f0,
# e: y0}
a = _dot_product([-2 * dt, 2 * dt, -8, -8, 16], [f0, f1, y0, y1, y_mid])
b = _dot_product([5 * dt, -3 * dt, 18, 14, -32], [f0, f1, y0, y1, y_mid])
c = _dot_product([-4 * dt, dt, -11, -5, 16], [f0, f1, y0, y1, y_mid])
d = dt * f0
e = y0
return [a, b, c, d, e]
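# Illustrative (hedged) NumPy mirror of the closed-form fit above; it is not
# used by the solver. It checks that the fitted quartic reproduces the
# samples y0, y_mid, y1 at x = 0, 1/2, 1 for arbitrary scalar inputs, which
# is exactly the system solved symbolically in the comments of `_interp_fit`.
def _interp_fit_numpy_check(y0=1.0, y1=2.0, y_mid=1.7, f0=0.3, f1=-0.4, dt=0.5):
  import numpy as np
  a = np.dot([-2 * dt, 2 * dt, -8, -8, 16], [f0, f1, y0, y1, y_mid])
  b = np.dot([5 * dt, -3 * dt, 18, 14, -32], [f0, f1, y0, y1, y_mid])
  c = np.dot([-4 * dt, dt, -11, -5, 16], [f0, f1, y0, y1, y_mid])
  d, e = dt * f0, y0
  p = lambda x: ((a * x + b) * x + c) * x * x + d * x + e  # a*x**4 + ... + e
  assert np.allclose([p(0.0), p(0.5), p(1.0)], [y0, y_mid, y1])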
def _interp_fit_rk(y0, y1, k, dt, tableau=_DORMAND_PRINCE_TABLEAU):
"""Fit an interpolating polynomial to the results of a Runge-Kutta step."""
with ops.name_scope('interp_fit_rk'):
dt = math_ops.cast(dt, y0.dtype)
y_mid = y0 + _scaled_dot_product(dt, tableau.c_mid, k)
f0 = k[0]
f1 = k[-1]
return _interp_fit(y0, y1, y_mid, f0, f1, dt)
def _interp_evaluate(coefficients, t0, t1, t):
"""Evaluate polynomial interpolation at the given time point.
Args:
coefficients: list of Tensor coefficients as created by `interp_fit`.
t0: scalar float64 Tensor giving the start of the interval.
t1: scalar float64 Tensor giving the end of the interval.
t: scalar float64 Tensor giving the desired interpolation point.
Returns:
Polynomial interpolation of the coefficients at time `t`.
"""
with ops.name_scope('interp_evaluate'):
t0 = ops.convert_to_tensor(t0)
t1 = ops.convert_to_tensor(t1)
t = ops.convert_to_tensor(t)
dtype = coefficients[0].dtype
assert_op = control_flow_ops.Assert(
(t0 <= t) & (t <= t1),
['invalid interpolation, fails `t0 <= t <= t1`:', t0, t, t1])
with ops.control_dependencies([assert_op]):
x = math_ops.cast((t - t0) / (t1 - t0), dtype)
xs = [constant_op.constant(1, dtype), x]
for _ in range(2, len(coefficients)):
xs.append(xs[-1] * x)
return _dot_product(coefficients, reversed(xs))
def _optimal_step_size(last_step,
error_ratio,
safety=0.9,
ifactor=10.0,
dfactor=0.2,
order=5,
name=None):
"""Calculate the optimal size for the next Runge-Kutta step."""
with ops.name_scope(name, 'optimal_step_size', [last_step,
error_ratio]) as scope:
error_ratio = math_ops.cast(error_ratio, last_step.dtype)
exponent = math_ops.cast(1 / order, last_step.dtype)
# this looks more complex than necessary, but importantly it keeps
# error_ratio in the numerator so we can't divide by zero:
factor = math_ops.maximum(1 / ifactor,
math_ops.minimum(error_ratio**exponent / safety,
1 / dfactor))
return math_ops.div(last_step, factor, name=scope)
def _abs_square(x):
if x.dtype.is_complex:
return math_ops.square(math_ops.real(x)) + math_ops.square(math_ops.imag(x))
else:
return math_ops.square(x)
def _ta_append(tensor_array, value):
"""Append a value to the end of a tf.TensorArray."""
return tensor_array.write(tensor_array.size(), value)
class _RungeKuttaState(
collections.namedtuple('_RungeKuttaState',
'y1, f1, t0, t1, dt, interp_coeff')):
"""Saved state of the Runge Kutta solver.
Attributes:
y1: Tensor giving the function value at the end of the last time step.
f1: Tensor giving derivative at the end of the last time step.
t0: scalar float64 Tensor giving start of the last time step.
t1: scalar float64 Tensor giving end of the last time step.
dt: scalar float64 Tensor giving the size for the next time step.
    interp_coeff: list of Tensors giving coefficients for polynomial
interpolation between `t0` and `t1`.
"""
class _History(
collections.namedtuple('_History', 'integrate_points, error_ratio')):
"""Saved integration history for use in `info_dict`.
Attributes:
integrate_points: tf.TensorArray storing integrating time points.
error_ratio: tf.TensorArray storing computed error ratios at each
integration step.
"""
def _assert_increasing(t):
assert_increasing = control_flow_ops.Assert(
math_ops.reduce_all(t[1:] > t[:-1]), ['`t` must be monotonic increasing'])
return ops.control_dependencies([assert_increasing])
def _check_input_types(y0, t, dt=None):
if not (y0.dtype.is_floating or y0.dtype.is_complex):
raise TypeError('`y0` must have a floating point or complex floating '
'point dtype')
if not t.dtype.is_floating:
raise TypeError('`t` must have a floating point dtype')
if dt is not None and not dt.dtype.is_floating:
raise TypeError('`dt` must have a floating point dtype')
def _check_input_sizes(t, dt):
if len(t.get_shape().as_list()) > 1:
raise ValueError('t must be a 1D tensor')
if len(dt.get_shape().as_list()) > 1:
    raise ValueError('dt must be a 1D tensor')
if t.get_shape()[0] != dt.get_shape()[0] + 1:
raise ValueError('t and dt have incompatible lengths, must be N and N-1')
def _dopri5(func,
y0,
t,
rtol,
atol,
full_output=False,
first_step=None,
safety=0.9,
ifactor=10.0,
dfactor=0.2,
max_num_steps=1000,
name=None):
"""Solve an ODE for `odeint` using method='dopri5'."""
if first_step is None:
# at some point, we might want to switch to picking the step size
# automatically
first_step = 1.0
with ops.name_scope(name, 'dopri5', [
y0, t, rtol, atol, safety, ifactor, dfactor, max_num_steps
]) as scope:
first_step = ops.convert_to_tensor(
first_step, dtype=t.dtype, name='first_step')
safety = ops.convert_to_tensor(safety, dtype=t.dtype, name='safety')
ifactor = ops.convert_to_tensor(ifactor, dtype=t.dtype, name='ifactor')
dfactor = ops.convert_to_tensor(dfactor, dtype=t.dtype, name='dfactor')
max_num_steps = ops.convert_to_tensor(
max_num_steps, dtype=dtypes.int32, name='max_num_steps')
def adaptive_runge_kutta_step(rk_state, history, n_steps):
"""Take an adaptive Runge-Kutta step to integrate the ODE."""
y0, f0, _, t0, dt, interp_coeff = rk_state
with ops.name_scope('assertions'):
check_underflow = control_flow_ops.Assert(t0 + dt > t0,
['underflow in dt', dt])
check_max_num_steps = control_flow_ops.Assert(
n_steps < max_num_steps, ['max_num_steps exceeded'])
check_numerics = control_flow_ops.Assert(
math_ops.reduce_all(math_ops.is_finite(abs(y0))),
['non-finite values in state `y`', y0])
with ops.control_dependencies(
[check_underflow, check_max_num_steps, check_numerics]):
y1, f1, y1_error, k = _runge_kutta_step(func, y0, f0, t0, dt)
with ops.name_scope('error_ratio'):
# We use the same approach as the dopri5 fortran code.
error_tol = atol + rtol * math_ops.maximum(abs(y0), abs(y1))
tensor_error_ratio = _abs_square(y1_error) / _abs_square(error_tol)
# Could also use reduce_maximum here.
error_ratio = math_ops.sqrt(math_ops.reduce_mean(tensor_error_ratio))
accept_step = error_ratio <= 1
with ops.name_scope('update/rk_state'):
# If we don't accept the step, the _RungeKuttaState will be useless
# (covering a time-interval of size 0), but that's OK, because in such
# cases we always immediately take another Runge-Kutta step.
y_next = control_flow_ops.cond(accept_step, lambda: y1, lambda: y0)
f_next = control_flow_ops.cond(accept_step, lambda: f1, lambda: f0)
t_next = control_flow_ops.cond(accept_step, lambda: t0 + dt, lambda: t0)
interp_coeff = control_flow_ops.cond(
accept_step, lambda: _interp_fit_rk(y0, y1, k, dt),
lambda: interp_coeff)
dt_next = _optimal_step_size(dt, error_ratio, safety, ifactor, dfactor)
rk_state = _RungeKuttaState(y_next, f_next, t0, t_next, dt_next,
interp_coeff)
with ops.name_scope('update/history'):
history = _History(
_ta_append(history.integrate_points, t0 + dt),
_ta_append(history.error_ratio, error_ratio))
return rk_state, history, n_steps + 1
def interpolate(solution, history, rk_state, i):
"""Interpolate through the next time point, integrating as necessary."""
with ops.name_scope('interpolate'):
rk_state, history, _ = control_flow_ops.while_loop(
lambda rk_state, *_: t[i] > rk_state.t1,
adaptive_runge_kutta_step, (rk_state, history, 0),
name='integrate_loop')
y = _interp_evaluate(rk_state.interp_coeff, rk_state.t0, rk_state.t1,
t[i])
solution = solution.write(i, y)
return solution, history, rk_state, i + 1
with _assert_increasing(t):
num_times = array_ops.size(t)
solution = tensor_array_ops.TensorArray(
y0.dtype, size=num_times).write(0, y0)
history = _History(
integrate_points=tensor_array_ops.TensorArray(
t.dtype, size=0, dynamic_size=True),
error_ratio=tensor_array_ops.TensorArray(
rtol.dtype, size=0, dynamic_size=True))
rk_state = _RungeKuttaState(
y0, func(y0, t[0]), t[0], t[0], first_step, interp_coeff=[y0] * 5)
solution, history, _, _ = control_flow_ops.while_loop(
lambda _, __, ___, i: i < num_times,
interpolate, (solution, history, rk_state, 1),
name='interpolate_loop')
y = solution.stack(name=scope)
y.set_shape(t.get_shape().concatenate(y0.get_shape()))
if not full_output:
return y
else:
integrate_points = history.integrate_points.stack()
info_dict = {
'num_func_evals': 6 * array_ops.size(integrate_points) + 1,
'integrate_points': integrate_points,
'error_ratio': history.error_ratio.stack()
}
return (y, info_dict)
def odeint(func,
y0,
t,
rtol=1e-6,
atol=1e-12,
method=None,
options=None,
full_output=False,
name=None):
"""Integrate a system of ordinary differential equations.
Solves the initial value problem for a non-stiff system of first order ODEs:
```
dy/dt = func(y, t), y(t[0]) = y0
```
where y is a Tensor of any shape.
For example:
```
# solve `dy/dt = -y`, corresponding to exponential decay
tf.contrib.integrate.odeint(lambda y, _: -y, 1.0, [0, 1, 2])
=> [1, exp(-1), exp(-2)]
```
Output dtypes and numerical precision are based on the dtypes of the inputs
`y0` and `t`.
Currently, implements 5th order Runge-Kutta with adaptive step size control
and dense output, using the Dormand-Prince method. Similar to the 'dopri5'
method of `scipy.integrate.ode` and MATLAB's `ode45`.
Based on: Shampine, Lawrence F. (1986), "Some Practical Runge-Kutta Formulas",
Mathematics of Computation, American Mathematical Society, 46 (173): 135-150,
doi:10.2307/2008219
Args:
func: Function that maps a Tensor holding the state `y` and a scalar Tensor
`t` into a Tensor of state derivatives with respect to time.
y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
have any floating point or complex dtype.
t: 1-D Tensor holding a sequence of time points for which to solve for
`y`. The initial time point should be the first element of this sequence,
and each time must be larger than the previous time. May have any floating
point dtype. If not provided as a Tensor, converted to a Tensor with
float64 dtype.
rtol: optional float64 Tensor specifying an upper bound on relative error,
per element of `y`.
atol: optional float64 Tensor specifying an upper bound on absolute error,
per element of `y`.
method: optional string indicating the integration method to use. Currently,
the only valid option is `'dopri5'`.
    options: optional dict of configuration options for the indicated integration
method. Can only be provided if a `method` is explicitly set. For
`'dopri5'`, valid options include:
      * first_step: an initial guess for the size of the first integration
        step (current default: 1.0, but may later be changed to use
        heuristics based on the gradient).
* safety: safety factor for adaptive step control, generally a constant
in the range 0.8-1 (default: 0.9).
* ifactor: maximum factor by which the adaptive step may be increased
(default: 10.0).
      * dfactor: maximum factor by which the adaptive step may be decreased
(default: 0.2).
* max_num_steps: integer maximum number of integrate steps between time
points in `t` (default: 1000).
full_output: optional boolean. If True, `odeint` returns a tuple
`(y, info_dict)` describing the integration process.
name: Optional name for this operation.
Returns:
y: (N+1)-D tensor, where the first dimension corresponds to different
time points. Contains the solved value of y for each desired time point in
`t`, with the initial value `y0` being the first element along the first
dimension.
info_dict: only if `full_output == True`. A dict with the following values:
* num_func_evals: integer Tensor counting the number of function
evaluations.
* integrate_points: 1D float64 Tensor with the upper bound of each
integration time step.
* error_ratio: 1D float Tensor with the estimated ratio of the integration
      error to the error tolerance at each integration step. A ratio greater
than 1 corresponds to rejected steps.
Raises:
ValueError: if an invalid `method` is provided.
TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
an invalid dtype.
"""
if method is not None and method != 'dopri5':
raise ValueError('invalid method: %r' % method)
if options is None:
options = {}
elif method is None:
raise ValueError('cannot supply `options` without specifying `method`')
with ops.name_scope(name, 'odeint', [y0, t, rtol, atol]) as scope:
# TODO(shoyer): use nest.flatten (like tf.while_loop) to allow `y0` to be an
# arbitrarily nested tuple. This will help performance and usability by
# avoiding the need to pack/unpack in user functions.
y0 = ops.convert_to_tensor(y0, name='y0')
t = ops.convert_to_tensor(t, preferred_dtype=dtypes.float64, name='t')
_check_input_types(y0, t)
error_dtype = abs(y0).dtype
rtol = ops.convert_to_tensor(rtol, dtype=error_dtype, name='rtol')
atol = ops.convert_to_tensor(atol, dtype=error_dtype, name='atol')
return _dopri5(
func,
y0,
t,
rtol=rtol,
atol=atol,
full_output=full_output,
name=scope,
**options)
@six.add_metaclass(abc.ABCMeta)
class _FixedGridIntegrator(object):
"""Base class for fixed-grid ODE integrators."""
def integrate(self, evol_func, y0, time_grid, dt_grid, steps_on_intervals):
"""Returns integrated values of differential equation on the `time grid`.
Numerically integrates differential equation defined via time derivative
evaluator `evol_func` using fixed time steps specified in dt_grid.
Args:
evol_func: Callable, evaluates time derivative of y at a given time.
y0: N-D Tensor holds initial values of the solution.
time_grid: 1-D Tensor holding the time points at which the solution
will be recorded, must have a floating dtype.
dt_grid: 1-D Tensor holds fixed time steps to be used on time_grid
intervals. Must be a floating dtype and have one less element than that
of the time_grid.
steps_on_intervals: 1-D Tensor of integer dtype, must have the same size
as dt_grid. Specifies number of steps needed for every interval. Assumes
steps_on_intervals * dt_grid == time intervals.
Returns:
(N+1)-D tensor, where the first dimension corresponds to different
time points. Contains the solved value of y for each desired time point in
`t`, with the initial value `y0` being the first element along the first
dimension.
"""
iteration_func = self._make_iteration_func(evol_func, dt_grid)
integrate_interval = self._make_interval_integrator(iteration_func,
steps_on_intervals)
num_times = array_ops.size(time_grid)
current_time = time_grid[0]
solution_array = tensor_array_ops.TensorArray(y0.dtype, num_times)
solution_array = solution_array.write(0, y0)
solution_array, _, _, _ = control_flow_ops.while_loop(
lambda _, __, ___, i: i < num_times,
integrate_interval,
(solution_array, y0, current_time, 1)
)
solution_array = solution_array.stack()
solution_array.set_shape(time_grid.get_shape().concatenate(y0.get_shape()))
return solution_array
def _make_iteration_func(self, evol_func, dt_grid):
"""Returns a function that builds operations of a single time step."""
def iteration_func(y, t, dt_step, interval_step):
"""Performs a single time step advance."""
dt = dt_grid[interval_step - 1]
dy = self._step_func(evol_func, t, dt, y)
dy = math_ops.cast(dy, dtype=y.dtype)
return y + dy, t + dt, dt_step + 1, interval_step
return iteration_func
def _make_interval_integrator(self, iteration_func, interval_sizes):
"""Returns a function that builds operations for interval integration."""
def integrate_interval(solution_array, y, t, interval_num):
"""Integrates y with fixed time step on interval `interval_num`."""
y, t, _, _ = control_flow_ops.while_loop(
lambda _, __, j, interval_num: j < interval_sizes[interval_num - 1],
iteration_func,
(y, t, 0, interval_num)
)
return solution_array.write(interval_num, y), y, t, interval_num + 1
return integrate_interval
@abc.abstractmethod
def _step_func(self, evol_func, t, dt, y):
pass
class _MidpointFixedGridIntegrator(_FixedGridIntegrator):
"""Fixed grid integrator implementing midpoint scheme."""
def _step_func(self, evol_func, t, dt, y):
dt_cast = math_ops.cast(dt, y.dtype)
# yn1 = yn + h * f(tn + h/2, yn + f(tn, yn) * h/2)
return dt_cast * evol_func(y + evol_func(y, t) * dt_cast / 2, t + dt / 2)
class _RK4FixedGridIntegrator(_FixedGridIntegrator):
"""Fixed grid integrator implementing RK4 scheme."""
def _step_func(self, evol_func, t, dt, y):
k1 = evol_func(y, t)
half_step = t + dt / 2
dt_cast = math_ops.cast(dt, y.dtype)
k2 = evol_func(y + dt_cast * k1 / 2, half_step)
k3 = evol_func(y + dt_cast * k2 / 2, half_step)
k4 = evol_func(y + dt_cast * k3, t + dt)
return math_ops.add_n([k1, 2 * k2, 2 * k3, k4]) * (dt_cast / 6)
def odeint_fixed(func, y0, t, dt=None, method='rk4', name=None):
"""ODE integration on a fixed grid (with no step size control).
Useful in certain scenarios to avoid the overhead of adaptive step size
control, e.g. when differentiation of the integration result is desired and/or
the time grid is known a priori to be sufficient.
Args:
func: Function that maps a Tensor holding the state `y` and a scalar Tensor
`t` into a Tensor of state derivatives with respect to time.
y0: N-D Tensor giving starting value of `y` at time point `t[0]`.
t: 1-D Tensor holding a sequence of time points for which to solve for
`y`. The initial time point should be the first element of this sequence,
and each time must be larger than the previous time. May have any floating
point dtype.
dt: 0-D or 1-D Tensor providing time step suggestion to be used on time
      integration intervals in `t`. A 1-D Tensor must provide a value for
      every interval, i.e. have one fewer element than `t`. A 0-D Tensor is
      interpreted as the same time step suggestion for all intervals. If
      passed None (the default), the time step is set to `t[1:] - t[:-1]`.
      The actual step size is obtained by ensuring an integer number of steps
      per interval, potentially reducing the time step.
method: One of 'midpoint' or 'rk4'.
name: Optional name for the resulting operation.
Returns:
y: (N+1)-D tensor, where the first dimension corresponds to different
time points. Contains the solved value of y for each desired time point in
`t`, with the initial value `y0` being the first element along the first
dimension.
Raises:
ValueError: Upon caller errors.
"""
with ops.name_scope(name, 'odeint_fixed', [y0, t, dt]):
t = ops.convert_to_tensor(t, preferred_dtype=dtypes.float64, name='t')
y0 = ops.convert_to_tensor(y0, name='y0')
intervals = t[1:] - t[:-1]
if dt is None:
dt = intervals
dt = ops.convert_to_tensor(dt, preferred_dtype=dtypes.float64, name='dt')
steps_on_intervals = math_ops.ceil(intervals / dt)
dt = intervals / steps_on_intervals
steps_on_intervals = math_ops.cast(steps_on_intervals, dtype=dtypes.int32)
_check_input_types(y0, t, dt)
_check_input_sizes(t, dt)
with _assert_increasing(t):
with ops.name_scope(method):
if method == 'midpoint':
return _MidpointFixedGridIntegrator().integrate(func, y0, t, dt,
steps_on_intervals)
elif method == 'rk4':
return _RK4FixedGridIntegrator().integrate(func, y0, t, dt,
steps_on_intervals)
else:
raise ValueError('method not supported: {!s}'.format(method))
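# --- Illustrative usage sketch (appended; not part of the original module) ---
# A hedged end-to-end check of `odeint_fixed` on dy/dt = -y, y(0) = 1, whose
# exact solution is exp(-t). The grid density and method are illustrative
# choices; intended for TF 1.x graph mode.
if __name__ == "__main__":
  import numpy as np
  from tensorflow.python.client import session
  t = np.linspace(0.0, 2.0, 21)
  y = odeint_fixed(lambda y_, _: -y_, 1.0, t, method="rk4")
  with session.Session() as sess:
    y_solved = sess.run(y)
  print("max abs error vs. exp(-t):", np.max(np.abs(y_solved - np.exp(-t))))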
|
tensorflow-master
|
tensorflow/contrib/integrate/python/ops/odes.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""IgniteDataset that allows to get data from Apache Ignite.
Apache Ignite is a memory-centric distributed database, caching, and
processing platform for transactional, analytical, and streaming workloads,
delivering in-memory speeds at petabyte scale. This contrib package
contains an integration between Apache Ignite and TensorFlow. The
integration is based on tf.data from TensorFlow side and Binary Client
Protocol from Apache Ignite side. It allows to use Apache Ignite as a
datasource for neural network training, inference and all other
computations supported by TensorFlow. Ignite Dataset is based on Apache
Ignite Binary Client Protocol:
https://apacheignite.readme.io/v2.6/docs/binary-client-protocol.
@@IgniteDataset
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.ignite.python.ops.ignite_dataset_ops import IgniteDataset
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
"IgniteDataset",
]
remove_undocumented(__name__)
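# --- Illustrative usage sketch (appended; not part of the original module) ---
# A hedged example of reading rows from an Ignite cache, mirroring the usage
# in the contrib tests. It needs a running Apache Ignite node; the cache name
# and port below are placeholders, not values you can rely on.
if __name__ == "__main__":
  from tensorflow import compat
  from tensorflow.python.client import session
  from tensorflow.python.framework import errors
  dataset = IgniteDataset(cache_name="SQL_PUBLIC_TEST_CACHE", port=10800)
  next_element = compat.v1.data.make_one_shot_iterator(dataset).get_next()
  with session.Session() as sess:
    try:
      while True:
        print(sess.run(next_element))
    except errors.OutOfRangeError:
      pass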
|
tensorflow-master
|
tensorflow/contrib/ignite/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for IgniteDataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow import compat
from tensorflow.contrib.ignite import IgniteDataset
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
class IgniteDatasetTest(test.TestCase):
"""The Apache Ignite servers have to setup before the test and tear down
after the test manually. The docker engine has to be installed.
To setup Apache Ignite servers:
$ bash start_ignite.sh
To tear down Apache Ignite servers:
$ bash stop_ignite.sh
"""
def test_ignite_dataset_with_plain_client(self):
"""Test Ignite Dataset with plain client.
"""
self._clear_env()
ds = IgniteDataset(cache_name="SQL_PUBLIC_TEST_CACHE", port=42300)
self._check_dataset(ds)
def _clear_env(self):
"""Clears environment variables used by Ignite Dataset.
"""
if "IGNITE_DATASET_USERNAME" in os.environ:
del os.environ["IGNITE_DATASET_USERNAME"]
if "IGNITE_DATASET_PASSWORD" in os.environ:
del os.environ["IGNITE_DATASET_PASSWORD"]
if "IGNITE_DATASET_CERTFILE" in os.environ:
del os.environ["IGNITE_DATASET_CERTFILE"]
if "IGNITE_DATASET_CERT_PASSWORD" in os.environ:
del os.environ["IGNITE_DATASET_CERT_PASSWORD"]
def _check_dataset(self, dataset):
"""Checks that dataset provides correct data."""
self.assertEqual(dtypes.int64, dataset.output_types["key"])
self.assertEqual(dtypes.string, dataset.output_types["val"]["NAME"])
self.assertEqual(dtypes.int64, dataset.output_types["val"]["VAL"])
it = compat.v1.data.make_one_shot_iterator(dataset)
ne = it.get_next()
with session.Session() as sess:
rows = [sess.run(ne), sess.run(ne), sess.run(ne)]
with self.assertRaises(errors.OutOfRangeError):
sess.run(ne)
self.assertEqual({"key": 1, "val": {"NAME": b"TEST1", "VAL": 42}}, rows[0])
self.assertEqual({"key": 2, "val": {"NAME": b"TEST2", "VAL": 43}}, rows[1])
self.assertEqual({"key": 3, "val": {"NAME": b"TEST3", "VAL": 44}}, rows[2])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/ignite/python/tests/ignite_dataset_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for IGFS."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.contrib.ignite.python.ops.igfs_ops # pylint: disable=unused-import
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
class IGFSTest(test.TestCase):
"""The Apache Ignite servers have to setup before the test and tear down
after the test manually. The docker engine has to be installed.
To setup Apache Ignite servers:
$ bash start_ignite.sh
To tear down Apache Ignite servers:
$ bash stop_ignite.sh
"""
def test_create_file(self):
"""Test create file.
"""
# Setup and check preconditions.
file_name = "igfs:///test_create_file/1"
self.assertFalse(gfile.Exists(file_name))
# Create file.
with gfile.Open(file_name, mode="w") as w:
w.write("")
# Check that file was created.
self.assertTrue(gfile.Exists(file_name))
def test_write_read_file(self):
"""Test write/read file.
"""
# Setup and check preconditions.
file_name = "igfs:///test_write_read_file/1"
rows = 10000
self.assertFalse(gfile.Exists(file_name))
# Write data.
with gfile.Open(file_name, mode="w") as w:
for i in range(rows):
w.write("This is row\n")
# Read data.
with gfile.Open(file_name, mode="r") as r:
lines = r.readlines()
# Check that data is equal.
self.assertEqual(rows, len(lines))
for i in range(rows):
self.assertEqual("This is row\n", lines[i])
def test_delete_recursively(self):
"""Test delete recursively.
"""
# Setup and check preconditions.
dir_name = "igfs:///test_delete_recursively/"
file_name = "igfs:///test_delete_recursively/1"
self.assertFalse(gfile.Exists(dir_name))
self.assertFalse(gfile.Exists(file_name))
gfile.MkDir(dir_name)
with gfile.Open(file_name, mode="w") as w:
w.write("")
self.assertTrue(gfile.Exists(dir_name))
self.assertTrue(gfile.Exists(file_name))
# Delete directory recursively.
gfile.DeleteRecursively(dir_name)
# Check that directory was deleted.
self.assertFalse(gfile.Exists(dir_name))
self.assertFalse(gfile.Exists(file_name))
def test_copy(self):
"""Test copy.
"""
# Setup and check preconditions.
src_file_name = "igfs:///test_copy/1"
dst_file_name = "igfs:///test_copy/2"
self.assertFalse(gfile.Exists(src_file_name))
self.assertFalse(gfile.Exists(dst_file_name))
with gfile.Open(src_file_name, mode="w") as w:
w.write("42")
self.assertTrue(gfile.Exists(src_file_name))
self.assertFalse(gfile.Exists(dst_file_name))
# Copy file.
gfile.Copy(src_file_name, dst_file_name)
# Check that files are identical.
self.assertTrue(gfile.Exists(src_file_name))
self.assertTrue(gfile.Exists(dst_file_name))
with gfile.Open(dst_file_name, mode="r") as r:
data = r.read()
self.assertEqual("42", data)
def test_is_directory(self):
"""Test is directory.
"""
# Setup and check preconditions.
dir_name = "igfs:///test_is_directory/1"
file_name = "igfs:///test_is_directory/2"
with gfile.Open(file_name, mode="w") as w:
w.write("")
gfile.MkDir(dir_name)
# Check that directory is a directory.
self.assertTrue(gfile.IsDirectory(dir_name))
# Check that file is not a directory.
self.assertFalse(gfile.IsDirectory(file_name))
def test_list_directory(self):
"""Test list directory.
"""
# Setup and check preconditions.
dir_name = "igfs:///test_list_directory/"
file_names = [
"igfs:///test_list_directory/1", "igfs:///test_list_directory/2/3"
]
ch_dir_names = [
"igfs:///test_list_directory/4",
]
for file_name in file_names:
with gfile.Open(file_name, mode="w") as w:
w.write("")
for ch_dir_name in ch_dir_names:
gfile.MkDir(ch_dir_name)
ls_expected_result = file_names + ch_dir_names
# Get list of files in directory.
ls_result = gfile.ListDirectory(dir_name)
# Check that list of files is correct.
self.assertEqual(len(ls_expected_result), len(ls_result))
for e in ["1", "2", "4"]:
self.assertTrue(e in ls_result)
def test_make_dirs(self):
"""Test make dirs.
"""
# Setup and check preconditions.
dir_name = "igfs:///test_make_dirs/"
self.assertFalse(gfile.Exists(dir_name))
# Make directory.
gfile.MkDir(dir_name)
# Check that directory was created.
self.assertTrue(gfile.Exists(dir_name))
def test_remove(self):
"""Test remove.
"""
# Setup and check preconditions.
file_name = "igfs:///test_remove/1"
self.assertFalse(gfile.Exists(file_name))
with gfile.Open(file_name, mode="w") as w:
w.write("")
self.assertTrue(gfile.Exists(file_name))
# Remove file.
gfile.Remove(file_name)
# Check that file was removed.
self.assertFalse(gfile.Exists(file_name))
def test_rename_file(self):
"""Test rename file.
"""
# Setup and check preconditions.
src_file_name = "igfs:///test_rename_file/1"
dst_file_name = "igfs:///test_rename_file/2"
with gfile.Open(src_file_name, mode="w") as w:
w.write("42")
self.assertTrue(gfile.Exists(src_file_name))
# Rename file.
gfile.Rename(src_file_name, dst_file_name)
# Check that only new name of file is available.
self.assertFalse(gfile.Exists(src_file_name))
self.assertTrue(gfile.Exists(dst_file_name))
with gfile.Open(dst_file_name, mode="r") as r:
data = r.read()
self.assertEqual("42", data)
def test_rename_dir(self):
"""Test rename dir.
"""
# Setup and check preconditions.
src_dir_name = "igfs:///test_rename_dir/1"
dst_dir_name = "igfs:///test_rename_dir/2"
gfile.MkDir(src_dir_name)
# Rename directory.
gfile.Rename(src_dir_name, dst_dir_name)
# Check that only new name of directory is available.
self.assertFalse(gfile.Exists(src_dir_name))
self.assertTrue(gfile.Exists(dst_dir_name))
self.assertTrue(gfile.IsDirectory(dst_dir_name))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/ignite/python/tests/igfs_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ignite File System for checkpointing and communication with TensorBoard.
Apache Ignite is a memory-centric distributed database, caching, and
processing platform for transactional, analytical, and streaming workloads,
delivering in-memory speeds at petabyte scale. In addition to database
functionality, Apache Ignite provides a distributed file system called
IGFS (https://ignite.apache.org/features/igfs.html). IGFS delivers
functionality similar to Hadoop HDFS, but keeps all data in memory. In
addition to its own APIs, IGFS implements the Hadoop FileSystem API and can
be transparently plugged into Hadoop or Spark deployments. This contrib
package contains an integration between IGFS and TensorFlow.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.ignite.python.ops import ignite_op_loader # pylint: disable=unused-import
from tensorflow.python.framework import load_library
from tensorflow.python.platform import resource_loader
file_system_library = os.path.join(resource_loader.get_data_files_path(),
"../../_ignite_ops.so")
load_library.load_file_system_library(file_system_library)
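# --- Usage sketch (illustrative, not part of the original module) ---
# Once the plugin library above has been loaded, paths that use the
# "igfs://" scheme can be accessed through tf.gfile like any other
# registered file system. The helper below is a hypothetical example and the
# path is made up.
from tensorflow.python.platform import gfile as _gfile


def _example_igfs_round_trip(path="igfs:///example/1"):
  """Writes a value to an IGFS path and reads it back (illustrative only)."""
  with _gfile.Open(path, mode="w") as w:
    w.write("42")
  with _gfile.Open(path, mode="r") as r:
    return r.read()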
|
tensorflow-master
|
tensorflow/contrib/ignite/python/ops/igfs_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ignite Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import socket
import ssl
import struct
import six
from tensorflow.contrib.ignite.python.ops import gen_dataset_ops
from tensorflow.contrib.ignite.python.ops import ignite_op_loader # pylint: disable=unused-import
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import deprecation
@six.add_metaclass(abc.ABCMeta)
class Readable(object):
"""Abstract class that exposes methods to do reading-related operations."""
@abc.abstractmethod
def __init__(self):
pass
  def read_byte(self):
    """Reads and returns a byte."""
return self._read("b", 1)
def read_short(self):
"""Reads and returns short (2 bytes, little-endian)."""
return self._read("h", 2)
def read_int(self):
"""Reads and returns int (4 bytes, little-endian)."""
return self._read("i", 4)
def read_long(self):
"""Reads and returns long (8 bytes, little-endian)."""
return self._read("q", 8)
def skip(self, length):
"""Skips the specified number of bytes."""
self.read_data(length)
@abc.abstractmethod
def read_data(self, length):
"""Reads the specified number of bytes and returns them as a buffer."""
return None
  def _read(self, data_type, length):
    """Reads, unpacks and returns the specified type (little-endian)."""
data_buffer = self.read_data(length)
return struct.unpack("<" + data_type, data_buffer)[0]
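# Illustrative note (not part of the original source): the "<" prefix in the
# struct format string selects little-endian byte order, so, for example,
# struct.unpack("<i", b"\x2a\x00\x00\x00")[0] == 42 and
# struct.unpack("<h", b"\x2a\x00")[0] == 42.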
class DataBuffer(Readable):
"""DataBuffer class that exposes methods to read data from a byte buffer."""
def __init__(self, data_buffer):
"""Constructs a new instance based on the specified byte buffer.
Args:
data_buffer: Buffer to be read.
"""
Readable.__init__(self)
self.buffer = data_buffer
self.ptr = 0
def read_data(self, length):
"""Reads the specified number of bytes and returns them as a buffer."""
data_buffer = self.buffer[self.ptr:][:length]
self.ptr += length
return data_buffer
class TcpClient(Readable):
"""TcpClient class that exposes methods to read data from a socket."""
def __init__(self, host, port, certfile=None, keyfile=None, password=None):
"""Constructs a new instance based on the specified host and port.
Args:
      host: Host to connect to.
      port: Port to connect to.
certfile: File in PEM format containing the certificate as well as any
number of CA certificates needed to establish the certificate's
authenticity.
keyfile: File containing the private key (otherwise the private key will
be taken from certfile as well).
password: Password to be used if the private key is encrypted and a
password is necessary.
Raises:
ValueError: If the wrong combination of arguments is provided.
"""
Readable.__init__(self)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if certfile is not None:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(certfile, keyfile, password)
self.sock = context.wrap_socket(self.sock)
else:
if keyfile is not None:
raise ValueError("SSL is disabled, keyfile must not be specified "
"(to enable SSL specify certfile)")
if password is not None:
raise ValueError("SSL is disabled, password must not be specified "
"(to enable SSL specify certfile)")
self.host = host
self.port = port
def __enter__(self):
"""Connects to host and port specified in the constructor."""
self.sock.connect((self.host, self.port))
return self
def __exit__(self, t, v, traceback):
"""Disconnects the socket."""
self.sock.close()
def write_byte(self, v):
"""Writes the specified byte."""
self._write(v, "b")
def write_short(self, v):
"""Writes the specified short (2 bytes, little-endian)."""
self._write(v, "h")
  def write_int(self, v):
    """Writes the specified int (4 bytes, little-endian)."""
self._write(v, "i")
  def write_long(self, v):
    """Writes the specified long (8 bytes, little-endian)."""
self._write(v, "q")
def write_string(self, v):
"""Writes the specified string."""
self.sock.sendall(v.encode("UTF-8"))
def read_data(self, length):
"""Reads the specified number of bytes and returns them as a buffer."""
data_buffer = None
rem = length
while rem > 0:
buf = self.sock.recv(rem)
rem = rem - len(buf)
if data_buffer is None:
data_buffer = buf
else:
data_buffer += buf
return data_buffer
def _write(self, value, data_type):
"""Packs and writes data using the specified type (little-endian)."""
data_buffer = struct.pack("<" + data_type, value)
self.sock.sendall(data_buffer)
class BinaryType(object):
  """BinaryType class that encapsulates type id, type name and fields."""
def __init__(self, type_id, type_name, fields):
"""Constructs a new instance of BinaryType."""
self.type_id = type_id
self.type_name = type_name
self.fields = fields
class BinaryField(object):
  """BinaryField class that encapsulates field name, type id and field id."""
def __init__(self, field_name, type_id, field_id):
"""Constructs a new instance of BinaryField."""
self.field_name = field_name
self.type_id = type_id
self.field_id = field_id
# Binary types defined in Apache Ignite Thin client and supported by
# TensorFlow on Apache Ignite, see
# https://apacheignite.readme.io/v2.6/docs/binary-client-protocol.
# True means that type is a vector, False means type is scalar.
types = {
1: (dtypes.uint8, False),
2: (dtypes.int16, False),
3: (dtypes.int32, False),
4: (dtypes.int64, False),
5: (dtypes.float32, False),
6: (dtypes.float64, False),
7: (dtypes.uint16, False),
8: (dtypes.bool, False),
9: (dtypes.string, False),
12: (dtypes.uint8, True),
13: (dtypes.int16, True),
14: (dtypes.int32, True),
15: (dtypes.int64, True),
16: (dtypes.float32, True),
17: (dtypes.float64, True),
18: (dtypes.uint16, True),
19: (dtypes.bool, True),
20: (dtypes.string, True)
}
class TypeTreeNode(object):
"""TypeTreeNode class exposes methods to format object tree structure data."""
def __init__(self, name, type_id, fields=None, permutation=None):
"""Constructs a new instance of TypeTreeNode.
Args:
name: Name of the object tree node.
type_id: Type id of the object tree node.
fields: List of fields (children of the object tree node).
permutation: Permutation that should be applied to order object children.
"""
self.name = name
self.type_id = type_id
self.fields = fields
self.permutation = permutation
def to_output_classes(self):
"""Formats the tree object as required by `Dataset.output_classes`."""
if self.fields is None:
return ops.Tensor
output_classes = {}
for field in self.fields:
output_classes[field.name] = field.to_output_classes()
return output_classes
def to_output_shapes(self):
"""Formats the tree object as required by `Dataset.output_shapes`."""
if self.fields is None:
if self.type_id in types:
object_type = types[self.type_id]
is_array = object_type[1]
if is_array:
return tensor_shape.TensorShape([None])
return tensor_shape.TensorShape([])
raise ValueError("Unsupported type [type_id=%d]" % self.type_id)
output_shapes = {}
for field in self.fields:
output_shapes[field.name] = field.to_output_shapes()
return output_shapes
def to_output_types(self):
"""Formats the tree object as required by `Dataset.output_types`."""
if self.fields is None:
if self.type_id in types:
object_type = types[self.type_id]
return object_type[0]
raise ValueError("Unsupported type [type_id=%d]" % self.type_id)
else:
output_types = {}
for field in self.fields:
output_types[field.name] = field.to_output_types()
return output_types
def to_flat(self):
"""Returns a list of node types."""
return self.to_flat_rec([])
def to_permutation(self):
"""Returns a permutation that should be applied to order object leaves."""
correct_order_dict = {}
self.traversal_rec(correct_order_dict, 0)
object_order = []
self.traversal_permutation_rec(object_order)
return [correct_order_dict[o] for o in object_order]
def to_flat_rec(self, flat):
"""Formats a list of leaf node types in pre-order."""
if self.fields is None:
flat.append(self.type_id)
else:
for field in self.fields:
field.to_flat_rec(flat)
return flat
def traversal_permutation_rec(self, permutation):
"""Collects nodes in accordance with permutation."""
if self.fields is None:
permutation.append(self)
else:
for idx in self.permutation:
field = self.fields[idx]
field.traversal_permutation_rec(permutation)
def traversal_rec(self, d, i):
"""Collects nodes in pre-order traversal."""
if self.fields is None:
d[self] = i
i += 1
else:
for field in self.fields:
i = field.traversal_rec(d, i)
return i
class IgniteClient(TcpClient):
"""IgniteClient enables working with Apache Ignite using a thin client.
  This client works under the assumption that all objects in the cache
  have the same structure (homogeneous objects) and that the cache contains
  at least one object.
"""
def __init__(self,
host,
port,
username=None,
password=None,
certfile=None,
keyfile=None,
cert_password=None):
"""Constructs a new instance of IgniteClient.
Args:
      host: Apache Ignite Thin client host to connect to.
      port: Apache Ignite Thin client port to connect to.
username: Apache Ignite Thin Client authentication username.
password: Apache Ignite Thin Client authentication password.
certfile: File in PEM format containing the certificate as well as any
number of CA certificates needed to establish the certificate's
authenticity.
keyfile: File containing the private key (otherwise the private key will
be taken from certfile as well).
cert_password: Password to be used if the private key is encrypted and a
password is necessary.
"""
TcpClient.__init__(self, host, port, certfile, keyfile, cert_password)
self.username = username
self.password = password
def handshake(self):
"""Makes a handshake after connect and before any other calls."""
msg_len = 8
if self.username is None:
msg_len += 1
else:
msg_len += 5 + len(self.username)
if self.password is None:
msg_len += 1
else:
msg_len += 5 + len(self.password)
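    # Worked example (illustrative comment, not in the original source): the
    # fixed part is 8 bytes (1 op code + 3 * 2 version shorts + 1 client
    # code byte), and each credential adds either 1 byte (NULL marker) or
    # 5 + len(s) bytes (1 type byte, 4 length bytes, UTF-8 payload). With
    # username="ignite" and password="ignite" this gives 8 + 11 + 11 = 30.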
self.write_int(msg_len) # Message length
self.write_byte(1) # Handshake operation
self.write_short(1) # Version (1.1.0)
self.write_short(1)
self.write_short(0)
self.write_byte(2) # Thin client
if self.username is None: # Username
self.write_byte(101)
else:
self.write_byte(9)
self.write_int(len(self.username))
self.write_string(self.username)
if self.password is None: # Password
self.write_byte(101)
else:
self.write_byte(9)
self.write_int(len(self.password))
self.write_string(self.password)
self.read_int() # Result length
res = self.read_byte()
if res != 1:
serv_ver_major = self.read_short()
serv_ver_minor = self.read_short()
serv_ver_patch = self.read_short()
err_msg = self._parse_string()
if err_msg is None:
raise RuntimeError(
"Handshake Error [result=%d, version=%d.%d.%d]" %
(res, serv_ver_major, serv_ver_minor, serv_ver_patch))
else:
raise RuntimeError(
"Handshake Error [result=%d, version=%d.%d.%d, message='%s']" %
(res, serv_ver_major, serv_ver_minor, serv_ver_patch, err_msg))
def get_cache_type(self, cache_name):
"""Collects type information about objects stored in the specified cache."""
cache_name_hash = self._java_hash_code(cache_name)
self.write_int(25) # Message length
self.write_short(2000) # Operation code
self.write_long(0) # Request ID
self.write_int(cache_name_hash) # Cache name
self.write_byte(0) # Flags
self.write_byte(101) # Filter (NULL)
self.write_int(1) # Cursor page size
self.write_int(-1) # Partition to query
self.write_byte(0) # Local flag
result_length = self.read_int()
self.read_long() # Request id
status = self.read_int()
if status != 0:
err_msg = self._parse_string()
if err_msg is None:
raise RuntimeError("Scan Query Error [status=%s]" % status)
else:
raise RuntimeError(
"Scan Query Error [status=%s, message='%s']" % (status, err_msg))
self.read_long() # Cursor id
row_count = self.read_int()
if row_count == 0:
raise RuntimeError("Scan Query returned empty result, so it's "
"impossible to derive the cache type")
payload = DataBuffer(self.read_data(result_length - 25))
self.read_byte() # Next page
res = TypeTreeNode("root", 0, [
self._collect_types("key", payload),
self._collect_types("val", payload)
], [0, 1])
return res
  def _java_hash_code(self, s):
    """Computes the hash code of the string as Java's String.hashCode does."""
h = 0
for c in s:
h = (31 * h + ord(c)) & 0xFFFFFFFF
return ((h + 0x80000000) & 0xFFFFFFFF) - 0x80000000
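  # Illustrative note (not in the original source): the result matches Java's
  # String.hashCode, e.g. _java_hash_code("ab") == 31 * ord("a") + ord("b")
  # == 3105, and values overflowing 32 bits wrap into the signed int range.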
def _collect_types(self, field_name, data):
"""Extracts type information from the specified object."""
type_id = data.read_byte()
# Byte scalar.
if type_id == 1:
data.skip(1)
return TypeTreeNode(field_name, type_id)
# Short scalar.
if type_id == 2:
data.skip(2)
return TypeTreeNode(field_name, type_id)
# Integer scalar.
if type_id == 3:
data.skip(4)
return TypeTreeNode(field_name, type_id)
# Long scalar.
if type_id == 4:
data.skip(8)
return TypeTreeNode(field_name, type_id)
# Float scalar.
if type_id == 5:
data.skip(4)
return TypeTreeNode(field_name, type_id)
# Double scalar.
if type_id == 6:
data.skip(8)
return TypeTreeNode(field_name, type_id)
# Char scalar.
if type_id == 7:
data.skip(2)
return TypeTreeNode(field_name, type_id)
# Bool scalar.
if type_id == 8:
data.skip(1)
return TypeTreeNode(field_name, type_id)
# String scalar.
if type_id == 9:
length = data.read_int()
data.skip(length)
return TypeTreeNode(field_name, type_id)
# UUID scalar.
if type_id == 10:
data.skip(16)
return TypeTreeNode(field_name, type_id)
# Date scalar.
if type_id == 11:
data.skip(8)
return TypeTreeNode(field_name, type_id)
# Byte array.
if type_id == 12:
length = data.read_int()
data.skip(length)
return TypeTreeNode(field_name, type_id)
# Short array.
if type_id == 13:
length = data.read_int()
data.skip(length * 2)
return TypeTreeNode(field_name, type_id)
# Integer array.
if type_id == 14:
length = data.read_int()
data.skip(length * 4)
return TypeTreeNode(field_name, type_id)
# Long array.
if type_id == 15:
length = data.read_int()
data.skip(length * 8)
return TypeTreeNode(field_name, type_id)
# Float array.
if type_id == 16:
length = data.read_int()
data.skip(length * 4)
return TypeTreeNode(field_name, type_id)
# Double array.
if type_id == 17:
length = data.read_int()
data.skip(length * 8)
return TypeTreeNode(field_name, type_id)
# Char array.
if type_id == 18:
length = data.read_int()
data.skip(length * 2)
return TypeTreeNode(field_name, type_id)
# Bool array.
if type_id == 19:
length = data.read_int()
data.skip(length)
return TypeTreeNode(field_name, type_id)
# String array.
if type_id == 20:
length = data.read_int()
for _ in range(length):
header = data.read_byte()
if header == 9:
str_length = data.read_int()
data.skip(str_length)
elif header == 101:
pass
else:
raise RuntimeError(
"Unknown binary type when expected string [type_id=%d]" % header)
return TypeTreeNode(field_name, type_id)
# UUID array.
if type_id == 21:
length = data.read_int()
data.skip(length * 16) # TODO(dmitrievanthony): support NULL values.
return TypeTreeNode(field_name, type_id)
# Date array.
if type_id == 22:
length = data.read_int()
data.skip(length * 8)
return TypeTreeNode(field_name, type_id)
# Wrapped Binary Object.
if type_id == 27:
length = data.read_int()
inner_data = data.read_data(length)
data.read_int() # Offset
return self._collect_types(field_name, DataBuffer(inner_data))
# Complex Object.
if type_id == 103:
data.read_byte() # Object version
data.read_short() # Object flags
obj_type_id = data.read_int()
data.read_int() # Object hash code
obj_length = data.read_int()
data.read_int() # Object schema id
obj_schema_offset = data.read_int()
obj_type = self._get_type(obj_type_id)
children = []
for obj_field in obj_type.fields:
child = self._collect_types(obj_field.field_name, data)
children.append(child)
children_sorted = sorted(children, key=lambda child: child.name)
permutation = [children_sorted.index(child) for child in children]
children = children_sorted
data.skip(obj_length - obj_schema_offset)
return TypeTreeNode(field_name, type_id, children, permutation)
raise RuntimeError("Unknown binary type [type_id=%d]" % type_id)
def _get_type(self, type_id):
"""Queries Apache Ignite information about type by type id."""
self.write_int(14) # Message length
self.write_short(3002) # Operation code
self.write_long(0) # Request ID
self.write_int(type_id) # Type ID
self.read_int() # Result length
self.read_long() # Request id
status = self.read_int()
    if status != 0:
      err_msg = self._parse_string()
      if err_msg is None:
        raise RuntimeError("Get Binary Type Error [status=%d]" % status)
      else:
        raise RuntimeError("Get Binary Type Error [status=%d, message='%s']" %
                           (status, err_msg))
binary_type_exists = self.read_byte()
if binary_type_exists == 0:
raise RuntimeError("Binary type not found [type_id=%d] " % type_id)
binary_type_id = self.read_int()
binary_type_name = self._parse_string()
self._parse_string() # Affinity field name
fields = []
for _ in range(self.read_int()):
field_name = self._parse_string()
field_type_id = self.read_int()
field_id = self.read_int()
field = BinaryField(field_name, field_type_id, field_id)
fields.append(field)
is_enum = self.read_byte()
if is_enum == 1:
raise RuntimeError("Enum fields are not supported yet")
schema_cnt = self.read_int()
for _ in range(schema_cnt):
self.read_int() # Schema id
field_cnt = self.read_int()
self.skip(field_cnt * 4)
return BinaryType(binary_type_id, binary_type_name, fields)
def _parse_string(self):
"""Parses string."""
header = self.read_byte()
if header == 9:
length = self.read_int()
return self.read_data(length).decode("utf-8")
if header == 101:
return None
raise RuntimeError(
"Unknown binary type when expected string [type_id=%d]" % header)
class IgniteDataset(dataset_ops.DatasetSource):
"""Apache Ignite is a memory-centric distributed database.
It acts as a caching and processing platform for transactional, analytical,
and streaming workloads, delivering in-memory speeds at petabyte scale.
This contrib package contains an integration between Apache Ignite and
  TensorFlow. The integration is based on tf.data on the TensorFlow side and
  the Binary Client Protocol on the Apache Ignite side. It allows Apache
  Ignite to be used as a data source for neural network training, inference
  and all other computations supported by TensorFlow. Ignite Dataset is based
  on the Apache Ignite Binary Client Protocol.
"""
@deprecation.deprecated(
None,
"tf.contrib.ignite will be removed in 2.0, the support for Apache Ignite "
"will continue to be provided through the tensorflow/io GitHub project.")
def __init__(self,
cache_name,
host="localhost",
port=10800,
local=False,
part=-1,
page_size=100,
username=None,
password=None,
certfile=None,
keyfile=None,
               cert_password=None):
    """Create an IgniteDataset.
Args:
cache_name: Cache name to be used as datasource.
      host: Apache Ignite Thin Client host to connect to.
      port: Apache Ignite Thin Client port to connect to.
      local: Local flag that restricts the query to local data only.
part: Number of partitions to be queried.
page_size: Apache Ignite Thin Client page size.
username: Apache Ignite Thin Client authentication username.
password: Apache Ignite Thin Client authentication password.
certfile: File in PEM format containing the certificate as well as any
number of CA certificates needed to establish the certificate's
authenticity.
keyfile: File containing the private key (otherwise the private key will
be taken from certfile as well).
cert_password: Password to be used if the private key is encrypted and a
password is necessary.
"""
with IgniteClient(host, port, username, password, certfile, keyfile,
cert_password) as client:
client.handshake()
self.cache_type = client.get_cache_type(cache_name)
self.cache_name = ops.convert_to_tensor(
cache_name, dtype=dtypes.string, name="cache_name")
self.host = ops.convert_to_tensor(host, dtype=dtypes.string, name="host")
self.port = ops.convert_to_tensor(port, dtype=dtypes.int32, name="port")
self.local = ops.convert_to_tensor(local, dtype=dtypes.bool, name="local")
self.part = ops.convert_to_tensor(part, dtype=dtypes.int32, name="part")
self.page_size = ops.convert_to_tensor(
page_size, dtype=dtypes.int32, name="page_size")
self.schema = ops.convert_to_tensor(
self.cache_type.to_flat(), dtype=dtypes.int32, name="schema")
self.permutation = ops.convert_to_tensor(
self.cache_type.to_permutation(),
dtype=dtypes.int32,
name="permutation")
self._structure = structure.convert_legacy_structure(
self.cache_type.to_output_types(), self.cache_type.to_output_shapes(),
self.cache_type.to_output_classes())
super(IgniteDataset, self).__init__(self._as_variant_tensor())
def _as_variant_tensor(self):
return gen_dataset_ops.ignite_dataset(self.cache_name, self.host, self.port,
self.local, self.part, self.page_size,
self.schema, self.permutation)
@property
def _element_structure(self):
return self._structure
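# --- Usage sketch (illustrative, not part of the original module) ---
# Assuming an Ignite node is reachable on localhost:10800 and holds a cache
# named "TEST_CACHE" (both hypothetical), the dataset behaves like any other
# TF 1.x dataset, e.g.:
#
#   dataset = IgniteDataset(cache_name="TEST_CACHE", host="localhost",
#                           port=10800)
#   iterator = dataset.make_one_shot_iterator()
#   next_element = iterator.get_next()  # Dict with "key" and "val" entries.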
|
tensorflow-master
|
tensorflow/contrib/ignite/python/ops/ignite_dataset_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python helper for loading IGFS ops and kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
_dataset_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("../../_ignite_ops.so"))
|
tensorflow-master
|
tensorflow/contrib/ignite/python/ops/igfs_op_loader.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python helper for loading Ignite ops and kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
_dataset_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("../../_ignite_ops.so"))
|
tensorflow-master
|
tensorflow/contrib/ignite/python/ops/ignite_op_loader.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/specs/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder for TensorFlow models specified using specs_ops.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import exec_
from tensorflow.contrib.specs.python import params_ops
from tensorflow.contrib.specs.python import specs_lib
from tensorflow.contrib.specs.python import specs_ops
from tensorflow.python.util import tf_inspect
def eval_params(params, environment=None):
"""Evaluates a parameter specification and returns the environment.
Args:
params: parameter assignments as a string
environment: a dictionary of input bindings
Returns:
Environment with additional bindings created by
executing `params`
Raises:
Exception: other exceptions raised during execution of `params`
"""
specs_lib.check_keywords(params)
bindings = {}
if environment:
bindings.update(environment)
exec_(params, vars(params_ops), bindings) # pylint: disable=exec-used
return bindings
def eval_spec(spec, environment=None):
"""Evaluates a spec and returns the environment.
This function allows you to use a spec to obtain multiple bindings
in an environment. That is useful if you use the spec language to
specify multiple components of a larger network, for example: "left
= Cr(64, [5,5]); right = Fc(64)" Usually, you will want to use
`create_net` or `create_net_fun` below.
Args:
spec: specification as a string
environment: a dictionary of input bindings
Returns:
Environment with additional bindings created by spec.
Raises:
Exception: other exceptions raised during execution of `spec`
"""
specs_lib.check_keywords(spec)
bindings = {}
if environment:
bindings.update(environment)
exec_(spec, vars(specs_ops), bindings) # pylint: disable=exec-used
return bindings
def create_net_fun(spec, environment=None):
"""Evaluates a spec and returns the binding of `net`.
Specs are written in a DSL based on function composition. A spec
like `net = Cr(64, [3, 3])` assigns an object that represents a
single argument function capable of creating a network to
the variable `net`.
Args:
spec: specification as a string, ending with a `net = ...` statement
environment: a dictionary of input bindings
Returns:
A callable that instantiates the `net` binding.
Raises:
ValueError: spec failed to create a `net`
Exception: other exceptions raised during execution of `spec`
"""
bindings = eval_spec(spec, environment)
net = bindings.get("net", None)
if net is None:
raise ValueError("spec failed to create 'net': %s" % (spec,))
return net.funcall
def create_net(spec, inputs, environment=None):
"""Evaluates a spec and creates a network instance given the inputs.
Args:
spec: specification as a string, ending with a `net = ...` statement
inputs: input that `net` is applied to
environment: a dictionary of input bindings
Returns:
    The output of applying the `net` binding to `inputs`.
Raises:
ValueError: spec failed to create a `net`
Exception: other exceptions raised during execution of `spec`
"""
return create_net_fun(spec, environment)(inputs)
class LocalImport(object):
"""A class that allows us to temporarily import something.
Attributes:
    frame: the frame in which the context manager was invoked
names: a dictionary containing the new bindings
old: variable bindings that have been shadowed by the import
"""
def __init__(self, names):
"""Create a context manager that binds the names in values.
Args:
names: A dictionary or module containing the bindings.
"""
if not isinstance(names, dict):
names = vars(names)
self.names = names
def __enter__(self):
self.frame = tf_inspect.currentframe()
bindings = self.frame.f_back.f_globals
self.old = {k: bindings.get(k, None) for k in self.names.keys()}
bindings.update(self.names)
def __exit__(self, some_type, value, traceback):
del some_type, value, traceback
bindings = self.frame.f_back.f_globals
bindings.update(self.old)
for k, v in self.old.items():
if v is None:
del bindings[k]
del self.frame
ops = LocalImport(specs_ops)
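# --- Usage sketch (illustrative, not part of the original module) ---
# A spec is an ordinary string; `create_net` evaluates it and applies the
# resulting `net` binding to the inputs. Sizes and shapes below are made up:
#
#   import tensorflow as tf
#   inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
#   outputs = create_net(
#       "net = (Cr(64, [3, 3]) | Mp([2, 2])) ** 2 | Flat | Fs(10)", inputs)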
|
tensorflow-master
|
tensorflow/contrib/specs/python/specs.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implement the "specs" DSL for describing deep networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import operator
import re
from six import exec_
QUOTED = re.compile(r"""
"([^"\\]|\\.)*" |
'([^'\\]|\\.)*'
""", re.VERBOSE)
KEYWORDS = re.compile(r"""\b(import|while|def|exec)\b""")
debug_ = False
def check_keywords(spec):
"""Check for common Python keywords in spec.
This function discourages the use of complex constructs
in TensorFlow specs; it doesn't completely prohibit them
(if necessary, we could check the AST).
Args:
spec: spec string
Raises:
ValueError: raised if spec contains a prohibited keyword.
"""
spec = re.sub(QUOTED, "", spec)
match = re.search(KEYWORDS, spec)
if match:
raise ValueError("keyword '%s' found in spec" % match.group(1))
def get_positional(args, kw, kw_overrides=False):
"""Interpolates keyword arguments into argument lists.
If `kw` contains keywords of the form "_0", "_1", etc., these
are positionally interpolated into the argument list.
Args:
args: argument list
kw: keyword dictionary
kw_overrides: key/value pairs that override kw
Returns:
(new_args, new_kw), new argument lists and keyword dictionaries
with values interpolated.
"""
new_kw = {k: v for k, v in kw.items() if k[0] != "_"}
if len(new_kw) == len(kw):
return args, kw
new_args = list(args)
for key, value in kw.items():
if key[0] != "_": continue
index = int(key[1:])
while len(new_args) <= index:
new_args += [None]
if kw_overrides or new_args[index] is None:
new_args[index] = value
return new_args, new_kw
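# Illustrative note (not in the original source): for example,
# get_positional(["a"], {"_1": "b", "c": 3}) returns (["a", "b"], {"c": 3}),
# i.e. "_1" is spliced into position 1 of the argument list.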
class Composable(object):
"""A composable function.
This defines the operators common to all composable objects.
  Currently defines composition (via "|") and repeated application
(via "**"), and maps addition ("+") and multiplication ("*")
as "(f + g)(x) = f(x) + g(x)".
"""
def __or__(self, f):
return Composition(self, f)
def __add__(self, g):
return Operator(operator.add, self, g)
def __mul__(self, g):
return Operator(operator.mul, self, g)
def __pow__(self, n):
assert n >= 0
if n == 0:
return Function(lambda x, *args, **kw: x)
result = self
for _ in range(n-1):
result = Composition(result, self)
return result
class Callable(Composable):
"""A composable function that simply defers to a callable function.
"""
def __init__(self, f):
self.f = f
def funcall(self, x):
return self.f(x)
class Operator(Composable):
"""A wrapper for an operator.
This takes an operator and an argument list and returns
the result of applying the operator to the results of applying
the functions in the argument list.
"""
def __init__(self, op, *args):
self.op = op
self.funs = args
def funcall(self, x):
outputs = [f.funcall(x) for f in self.funs]
return self.op(*outputs)
class Function(Composable):
"""A composable function wrapper for a regular Python function.
This overloads the regular __call__ operator for currying, i.e.,
arguments passed to __call__ are remembered for the eventual
function application.
  The final function application happens via the `funcall` method.
"""
def __init__(self, f, *args, **kw):
if not callable(f):
raise ValueError("%s: is not callable" % f)
self.f = f
self.args = list(args)
self.kw = kw
def __call__(self, *args, **kw):
new_args = list(args) + self.args
new_kw = self.kw.copy()
new_kw.update(kw)
return Function(self.f, *new_args, **new_kw)
# TODO(tmb) The `of` method may be renamed to `function`.
def funcall(self, x):
args, kw = get_positional(self.args, self.kw)
if debug_:
print("DEBUG:", self.f, x, args, kw)
return self.f(x, *args, **kw)
class Composition(Composable):
"""A function composition.
  This simply composes its two argument functions when
  applied to a final argument via `funcall`.
"""
def __init__(self, f, g):
self.f = f
self.g = g
def funcall(self, x):
return self.g.funcall(self.f.funcall(x))
# These are DSL names, not Python names
# pylint: disable=invalid-name, exec-used
def External(module_name, function_name):
"""Import a function from an external module.
Note that the `module_name` must be a module name
that works with the usual import mechanisms. Shorthands
like "tf.nn" will not work.
Args:
module_name: name of the module
function_name: name of the function within the module
Returns:
Function-wrapped value of symbol.
"""
module = importlib.import_module(module_name)
return Function(vars(module)[function_name])
def Import(statements):
"""Import a function by exec.
Args:
statements: Python statements
Returns:
Function-wrapped value of `f`.
Raises:
ValueError: the statements didn't define a value for "f"
"""
environ = {}
exec_(statements, environ)
if "f" not in environ:
    raise ValueError("failed to define \"f\": %s" % statements)
f = environ["f"]
return Function(f)
# pylint: enable=invalid-name, exec-used
def debug(mode=True):
"""Turn on/off debugging mode.
Debugging mode prints more information about the construction
of a network.
Args:
mode: True if turned on, False otherwise
"""
global debug_
debug_ = mode
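# --- Illustrative note (not part of the original module) ---
# For Composable objects f and g, (f | g).funcall(x) is
# g.funcall(f.funcall(x)), (f + g).funcall(x) is f.funcall(x) + g.funcall(x),
# and f ** 3 behaves like f | f | f.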
|
tensorflow-master
|
tensorflow/contrib/specs/python/specs_lib.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operators for concise TensorFlow parameter specifications.
This module is used as an environment for evaluating expressions
in the "params" DSL.
Specifications are intended to assign simple numerical
values. Examples:
--params "n=64; d=5" --spec "(Cr(n) | Mp([2, 2])) ** d | Fm"
The random parameter primitives are useful for running large numbers
of experiments with randomly distributed parameters:
--params "n=Li(5,500); d=Ui(1,5)" --spec "(Cr(n) | Mp([2, 2])) ** d | Fm"
Internally, this might be implemented as follows:
params = specs.create_params(FLAGS.params, {})
logging.info(repr(params))
net = specs.create_net(FLAGS.spec, inputs, params)
Note that separating the specifications into parameters and network
creation allows us to log the random parameter values easily.
The implementation of this will change soon in order to support
hyperparameter tuning with steering. Instead of returning a number,
the primitives below will return a class instance that is then
used to generate a random number by the framework.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Lint disabled because these are operators in the DSL, not regular
# Python functions.
# pylint: disable=invalid-name
# pylint: disable=wildcard-import,unused-wildcard-import,redefining-builtin
# pylint: disable=redefined-builtin,g-importing-member,no-member
# make available all math expressions
import math
from math import *
import random
# pylint: enable=wildcard-import,unused-wildcard-import,redefining-builtin
# pylint: enable=redefined-builtin,g-importing-member,no-member
def Uf(lo=0.0, hi=1.0):
  """Uniformly distributed floating point number."""
return random.uniform(lo, hi)
def Ui(lo, hi):
"""Uniformly distributed integer, inclusive limits."""
return random.randint(lo, hi)
def Lf(lo, hi):
  """Log-uniformly distributed floating point number."""
return math.exp(random.uniform(math.log(lo), math.log(hi)))
def Li(lo, hi):
"""Log-uniform distributed integer, inclusive limits."""
return int(math.floor(math.exp(random.uniform(math.log(lo),
math.log(hi+1-1e-5)))))
def Nt(mu, sigma, limit=3.0):
"""Normally distributed floating point number with truncation."""
return min(max(random.gauss(mu, sigma), mu-limit*sigma), mu+limit*sigma)
# pylint: enable=invalid-name
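# --- Usage sketch (illustrative, not part of the original module) ---
# These primitives are typically evaluated through specs.eval_params, e.g.
#   bindings = specs.eval_params("n = Li(8, 128); lr = Lf(1e-4, 1e-1)", {})
# yields an integer "n" drawn log-uniformly from [8, 128] and a float "lr"
# drawn log-uniformly from [1e-4, 1e-1].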
|
tensorflow-master
|
tensorflow/contrib/specs/python/params_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for specs-related summarization functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.specs.python import specs
from tensorflow.contrib.specs.python import summaries
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _rand(*size):
return np.random.uniform(size=size).astype("f")
class SummariesTest(test.TestCase):
def testStructure(self):
with self.cached_session():
inputs_shape = (1, 18, 19, 5)
inputs = constant_op.constant(_rand(*inputs_shape))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
self.assertEqual(
summaries.tf_spec_structure(
spec, input_shape=inputs_shape),
"_ variablev2 conv variablev2 biasadd relu")
def testStructureFromTensor(self):
with self.cached_session():
inputs = constant_op.constant(_rand(1, 18, 19, 5))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
self.assertEqual(
summaries.tf_spec_structure(spec, inputs),
"_ variablev2 conv variablev2 biasadd relu")
def testPrint(self):
with self.cached_session():
inputs = constant_op.constant(_rand(1, 18, 19, 5))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
summaries.tf_spec_print(spec, inputs)
def testSummary(self):
with self.cached_session():
inputs = constant_op.constant(_rand(1, 18, 19, 5))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
summaries.tf_spec_summary(spec, inputs)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/specs/python/summaries_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing specs specifications."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.specs import python
from tensorflow.contrib.specs.python import summaries
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.math_ops # pylint: disable=unused-import
from tensorflow.python.platform import test
specs = python
def _rand(*size):
return np.random.uniform(size=size).astype("f")
class SpecsTest(test.TestCase):
def testSimpleConv(self):
with self.cached_session():
inputs = constant_op.constant(_rand(1, 18, 19, 5))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
self.assertEqual(outputs.get_shape().as_list(), [1, 18, 19, 64])
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
self.assertEqual(
summaries.tf_spec_structure(spec, inputs),
"_ variablev2 conv variablev2 biasadd relu")
def testUnary(self):
# This is just a quick and dirty check that these ops exist
# and work as unary ops.
with self.cached_session():
inputs = constant_op.constant(_rand(17, 55))
spec = "net = Do(0.5) | Bn | Unit(1) | Relu | Sig | Tanh | Smax"
outputs = specs.create_net(spec, inputs)
self.assertEqual(outputs.get_shape().as_list(), [17, 55])
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (17, 55))
def testAdd(self):
with self.cached_session():
inputs = constant_op.constant(_rand(17, 55))
spec = "net = Fs(10) + Fr(10)"
outputs = specs.create_net(spec, inputs)
self.assertEqual(outputs.get_shape().as_list(), [17, 10])
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (17, 10))
self.assertRegexpMatches(
summaries.tf_spec_structure(spec, inputs),
"_ variablev2 dot variablev2 biasadd sig "
"<> variablev2 dot variablev2 biasadd relu add(v2)?")
def testMpPower(self):
with self.cached_session():
inputs = constant_op.constant(_rand(1, 64, 64, 5))
spec = "M2 = Mp([2, 2]); net = M2**3"
outputs = specs.create_net(spec, inputs)
self.assertEqual(outputs.get_shape().as_list(), [1, 8, 8, 5])
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 8, 8, 5))
self.assertEqual(
summaries.tf_spec_structure(spec, inputs),
"_ maxpool maxpool maxpool")
def testAbbrevPower(self):
with self.cached_session():
inputs = constant_op.constant(_rand(1, 64, 64, 5))
spec = "C3 = Cr([3, 3]); M2 = Mp([2, 2]); net = (C3(5) | M2)**3"
outputs = specs.create_net(spec, inputs)
self.assertEqual(outputs.get_shape().as_list(), [1, 8, 8, 5])
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 8, 8, 5))
self.assertEqual(
summaries.tf_spec_structure(spec, inputs),
"_ variablev2 conv variablev2 biasadd relu maxpool"
" variablev2 conv variablev2"
" biasadd relu maxpool variablev2 conv variablev2"
" biasadd relu maxpool")
def testAbbrevPower2(self):
with self.cached_session():
inputs = constant_op.constant(_rand(1, 64, 64, 5))
spec = "C3 = Cr(_1=[3, 3]); M2 = Mp([2, 2]);"
spec += "net = (C3(_0=5) | M2)**3"
outputs = specs.create_net(spec, inputs)
self.assertEqual(outputs.get_shape().as_list(), [1, 8, 8, 5])
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 8, 8, 5))
self.assertEqual(
summaries.tf_spec_structure(spec, inputs),
"_ variablev2 conv variablev2 biasadd relu maxpool"
" variablev2 conv variablev2 biasadd relu"
" maxpool variablev2 conv variablev2 biasadd relu"
" maxpool")
def testConc(self):
with self.cached_session():
inputs = constant_op.constant(_rand(10, 20))
spec = "net = Conc(1, Fs(20), Fs(10))"
outputs = specs.create_net(spec, inputs)
self.assertEqual(outputs.get_shape().as_list(), [10, 30])
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (10, 30))
self.assertEqual(
summaries.tf_spec_structure(spec, inputs),
"_ variablev2 dot variablev2 biasadd sig "
"<> variablev2 dot variablev2 biasadd sig _ concatv2")
def testImport(self):
with self.cached_session():
inputs = constant_op.constant(_rand(10, 20))
spec = ("S = Import('from tensorflow.python.ops" +
" import math_ops; f = math_ops.sigmoid')")
spec += "; net = S | S"
outputs = specs.create_net(spec, inputs)
self.assertEqual(outputs.get_shape().as_list(), [10, 20])
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (10, 20))
self.assertEqual(summaries.tf_spec_structure(spec, inputs), "_ sig sig")
def testKeywordRestriction(self):
with self.cached_session():
inputs = constant_op.constant(_rand(10, 20))
spec = "import re; net = Conc(1, Fs(20), Fs(10))"
self.assertRaises(ValueError, lambda: specs.create_net(spec, inputs))
def testParams(self):
params = "x = 3; y = Ui(-10, 10); z = Lf(1, 100); q = Nt(0.0, 1.0)"
bindings = specs.eval_params(params, {})
self.assertTrue("x" in bindings)
self.assertEqual(bindings["x"], 3)
self.assertTrue("y" in bindings)
self.assertTrue("z" in bindings)
self.assertTrue("q" in bindings)
# XXX: the cleverness of this code is over 9000
# TODO: original author please fix
def DISABLED_testSpecsOps(self):
# pylint: disable=undefined-variable
with self.assertRaises(NameError):
_ = Cr
with specs.ops:
self.assertIsNotNone(Cr)
self.assertTrue(callable(Cr(64, [3, 3])))
with self.assertRaises(NameError):
_ = Cr
# XXX: the cleverness of this code is over 9000
# TODO: original author please fix
def DISABLED_testVar(self):
with self.cached_session() as sess:
with specs.ops:
# pylint: disable=undefined-variable
v = Var("test_var",
shape=[2, 2],
initializer=init_ops.constant_initializer(42.0))
inputs = constant_op.constant(_rand(10, 100))
outputs = v.funcall(inputs)
self.assertEqual(len(variables.global_variables()), 1)
sess.run([outputs.initializer])
outputs_value = outputs.eval()
self.assertEqual(outputs_value.shape, (2, 2))
self.assertEqual(outputs_value[1, 1], 42.0)
# XXX: the cleverness of this code is over 9000
# TODO: original author please fix
def DISABLED_testShared(self):
with self.cached_session():
with specs.ops:
# pylint: disable=undefined-variable
f = Shared(Fr(100))
g = f | f | f | f
inputs = constant_op.constant(_rand(10, 100))
_ = g.funcall(inputs)
self.assertEqual(len(variables.global_variables()), 2)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/specs/python/specs_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Init file, giving convenient access to all specs ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,g-importing-member,redefined-builtin
from tensorflow.contrib.specs.python.params_ops import *
from tensorflow.contrib.specs.python.specs import *
from tensorflow.contrib.specs.python.specs_lib import *
from tensorflow.contrib.specs.python.specs_ops import *
from tensorflow.contrib.specs.python.summaries import *
# pylint: enable=wildcard-import,redefined-builtin
|
tensorflow-master
|
tensorflow/contrib/specs/python/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operators for concise TensorFlow network models.
This module is used as an environment for evaluating expressions
in the "specs" DSL.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.specs.python import specs_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
# The following assignments don't appear to follow Google naming
# conventions, but that's because these are functions defined by
# higher-order function application, not "constants", and because they
# are the commands of the DSL.
# pylint: disable=invalid-name
class Idx(specs_lib.Composable):
"""Implements the identity function in network specifications."""
def funcall(self, x):
return x
class Conc(specs_lib.Composable):
"""Implements tensor concatenation in network specifications."""
def __init__(self, dim, *args):
"""Concatenates tensors along the given dimension.
Args:
dim: dimension along which concatenation takes place
*args: argument tensor functions to be concatenated
"""
self.dim = dim
self.funs = args
def funcall(self, x):
outputs = [f.funcall(x) for f in self.funs]
return array_ops.concat(outputs, self.dim)
External = specs_lib.External
Import = specs_lib.Import
Fun = specs_lib.Function
debug = specs_lib.debug
Print = Fun(logging_ops.Print)
Id = Fun(array_ops.identity)
# TODO(tmb) add Assert
# Two letter names for the most common layers.
# 2D Convolutional layers with nonlinearities (s/t/r/m/l)
# TODO(tmb) add Cbs, Fbs etc. for batch norms
Cx = Fun(layers.conv2d)
Cs = Fun(layers.conv2d, activation_fn=math_ops.sigmoid)
Ct = Fun(layers.conv2d, activation_fn=math_ops.tanh)
Cr = Fun(layers.conv2d, activation_fn=nn_ops.relu)
Cm = Fun(layers.conv2d, activation_fn=nn_ops.softmax)
Cl = Fun(layers.conv2d, activation_fn=None)
# Fully connected slim with nonlinearities (s/t/r/m/l)
Fx = Fun(layers.fully_connected)
Fs = Fun(layers.fully_connected, activation_fn=math_ops.sigmoid)
Ft = Fun(layers.fully_connected, activation_fn=math_ops.tanh)
Fr = Fun(layers.fully_connected, activation_fn=nn_ops.relu)
Fm = Fun(layers.fully_connected, activation_fn=nn_ops.softmax)
Fl = Fun(layers.fully_connected, activation_fn=None)
# Pooling
Mp = Fun(layers.max_pool2d)
Ap = Fun(layers.avg_pool2d)
# Batch manipulations
Do = Fun(layers.dropout)
Bn = Fun(layers.batch_norm)
Lrn = Fun(nn.local_response_normalization)
Unit = Fun(layers.unit_norm)
# Shape changes
Flat = Fun(layers.flatten)
Reshape = Fun(array_ops.reshape)
Transpose = Fun(array_ops.transpose)
Squeeze = Fun(array_ops.squeeze)
Expand = Fun(array_ops.expand_dims)
# Nonlinearities (rarely needed on their own)
Relu = Fun(nn_ops.relu)
Sig = Fun(math_ops.sigmoid)
Tanh = Fun(math_ops.tanh)
Smax = Fun(nn_ops.softmax)
def Dws(n):
"""Depth-wise convolution + sigmoid (used after LSTM)."""
return Cs(n, [1, 1])
def Dwm(n):
"""Depth-wise convolution + softmax (used after LSTM)."""
return Cm(n, [1, 1])
# Sharing of Variables
def Var(name, *args, **kw):
"""Implements an operator that generates a variable.
This function is still experimental. Use it only
for generating a single variable instance for
each name.
Args:
name: Name of the variable.
*args: Other arguments to get_variable.
**kw: Other keywords for get_variable.
Returns:
A specs object for generating a variable.
"""
def var(_):
return variable_scope.get_variable(name, *args, **kw)
return specs_lib.Callable(var)
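# Example (illustrative sketch): composing a fully connected operator with a
# variable generated by Var. The name "example_bias", the width 7, and the
# assumption that `inputs` is a rank-2 tensor are arbitrary choices for
# illustration only.
def _example_var_bias(inputs):
  """Applies Fs(7) to `inputs` and adds a learned bias produced by Var."""
  bias = Var("example_bias", [7])  # resolves to get_variable("example_bias", [7])
  return Fs(7).funcall(inputs) + bias.funcall(inputs)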
class Shared(specs_lib.Composable):
"""Wraps a scope with variable reuse around the subnetwork.
This function is still experimental.
Attributes:
f: The shared subnetwork.
name: A name for the shared scope.
used: A flag indicating whether the scope has already been used.
"""
shared_number = 1
def __init__(self, subnet, name=None, scope=None):
"""Create the Shared operator.
Use this as:
f = Shared(Cr(100, 3))
g = f | f | f
Ordinarily, you do not need to provide either a name or a scope.
Providing a name is useful if you want a well-defined namespace
for the variables (e.g., for saving a subnet).
Args:
subnet: Definition of the shared network.
name: Optional name for the shared context.
scope: Optional shared scope (must be a Scope, not a string).
Raises:
ValueError: Scope is not of type tf.Scope, name is not
of type string, or both scope and name are given together.
"""
if scope is not None and not isinstance(scope,
variable_scope.VariableScope):
raise ValueError("scope must be None or a VariableScope")
    if name is not None and not isinstance(name, str):
raise ValueError("name must be None or a string")
if scope is not None and name is not None:
raise ValueError("cannot provide both a name and a scope")
if name is None:
name = "Shared_%d" % Shared.shared_number
Shared.shared_number += 1
self.subnet = subnet
self.name = name
self.scope = scope
def funcall(self, x):
"""Apply the shared operator to an input.
This wraps a variable scope around the creation of the subnet.
Args:
x: The input argument on which the subnet is invoked.
Returns:
The output tensor from invoking the subnet constructor.
"""
if self.scope is None:
with variable_scope.variable_scope(self.name, values=[x]) as scope:
self.scope = scope
return self.subnet.funcall(x)
else:
with variable_scope.variable_scope(self.scope, values=[x], reuse=True):
return self.subnet.funcall(x)
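# Example (illustrative sketch): composing the operators above with `|`, and
# reusing one block twice through Shared so that both applications share
# variables. The depths and kernel sizes here are arbitrary.
def _example_shared_tower(inputs):
  """Conv+relu, max-pool, then the same shared conv block applied twice."""
  head = Cr(64, [3, 3]) | Mp([2, 2])
  shared_block = Shared(Cr(32, [3, 3]))
  net = head | shared_block | shared_block
  return net.funcall(inputs)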
|
tensorflow-master
|
tensorflow/contrib/specs/python/specs_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for summarizing and describing TensorFlow graphs.
This contains functions that generate string descriptions from
TensorFlow graphs, for debugging, testing, and model size
estimation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.specs.python import specs
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
# These are short abbreviations for common TensorFlow operations used
# in test cases with tf_structure to verify that specs_lib generates a
# graph structure with the right operations. Operations outside the
# scope of specs (e.g., Const and Placeholder) are just assigned "_"
# since they are not relevant to testing.
SHORT_NAMES_SRC = """
BiasAdd biasadd
Const _
Conv2D conv
MatMul dot
Placeholder _
Sigmoid sig
Variable var
""".split()
SHORT_NAMES = {
x: y
for x, y in zip(SHORT_NAMES_SRC[::2], SHORT_NAMES_SRC[1::2])
}
def _truncate_structure(x):
"""A helper function that disables recursion in tf_structure.
Some constructs (e.g., HorizontalLstm) are complex unrolled
structures and don't need to be represented in the output
of tf_structure or tf_print. This helper function defines
which tree branches should be pruned. This is a very imperfect
  way of dealing with unrolled LSTMs (since it truncates
useful information as well), but it's not worth doing something
better until the new fused and unrolled ops are ready.
Args:
x: a Tensor or Op
Returns:
A bool indicating whether the subtree should be pruned.
"""
if "/HorizontalLstm/" in x.name:
return True
return False
def tf_structure(x, include_shapes=False, finished=None):
"""A postfix expression summarizing the TF graph.
This is intended to be used as part of test cases to
check for gross differences in the structure of the graph.
  The resulting string is neither invertible nor unambiguous
  and cannot be used to reconstruct the graph accurately.
Args:
x: a tf.Tensor or tf.Operation
include_shapes: include shapes in the output string
finished: a set of ops that have already been output
Returns:
A string representing the structure as a string of
postfix operations.
"""
if finished is None:
finished = set()
if isinstance(x, ops.Tensor):
shape = x.get_shape().as_list()
x = x.op
else:
shape = []
if x in finished:
return " <>"
finished |= {x}
result = ""
if not _truncate_structure(x):
for y in x.inputs:
result += tf_structure(y, include_shapes, finished)
if include_shapes:
result += " %s" % (shape,)
if x.type != "Identity":
name = SHORT_NAMES.get(x.type, x.type.lower())
result += " " + name
return result
def tf_print(x, depth=0, finished=None, printer=print):
"""A simple print function for a TensorFlow graph.
Args:
x: a tf.Tensor or tf.Operation
depth: current printing depth
finished: set of nodes already output
printer: print function to use
"""
if finished is None:
finished = set()
if isinstance(x, ops.Tensor):
shape = x.get_shape().as_list()
x = x.op
else:
shape = ""
if x.type == "Identity":
x = x.inputs[0].op
if x in finished:
printer("%s<%s> %s %s" % (" " * depth, x.name, x.type, shape))
return
finished |= {x}
printer("%s%s %s %s" % (" " * depth, x.name, x.type, shape))
if not _truncate_structure(x):
for y in x.inputs:
tf_print(y, depth + 1, finished, printer=printer)
def tf_num_params(x):
"""Number of parameters in a TensorFlow subgraph.
Args:
x: root of the subgraph (Tensor, Operation)
Returns:
Total number of elements found in all Variables
in the subgraph.
"""
if isinstance(x, ops.Tensor):
shape = x.get_shape()
x = x.op
if x.type in ["Variable", "VariableV2"]:
return shape.num_elements()
totals = [tf_num_params(y) for y in x.inputs]
return sum(totals)
def tf_left_split(op):
"""Split the parameters of op for left recursion.
Args:
op: tf.Operation
Returns:
A tuple of the leftmost input tensor and a list of the
remaining arguments.
"""
if len(op.inputs) < 1:
return None, []
if op.type == "Concat":
return op.inputs[1], op.inputs[2:]
return op.inputs[0], op.inputs[1:]
def tf_parameter_iter(x):
"""Iterate over the left branches of a graph and yield sizes.
Args:
x: root of the subgraph (Tensor, Operation)
Yields:
A triple of name, number of params, and shape.
"""
  while True:
if isinstance(x, ops.Tensor):
shape = x.get_shape().as_list()
x = x.op
else:
shape = ""
left, right = tf_left_split(x)
totals = [tf_num_params(y) for y in right]
total = sum(totals)
yield x.name, total, shape
if left is None:
break
x = left
def _combine_filter(x):
"""A filter for combining successive layers with similar names."""
last_name = None
last_total = 0
last_shape = None
for name, total, shape in x:
name = re.sub("/.*", "", name)
if name == last_name:
last_total += total
continue
if last_name is not None:
yield last_name, last_total, last_shape
last_name = name
last_total = total
last_shape = shape
if last_name is not None:
yield last_name, last_total, last_shape
def tf_parameter_summary(x, printer=print, combine=True):
"""Summarize parameters by depth.
Args:
x: root of the subgraph (Tensor, Operation)
printer: print function for output
combine: combine layers by top-level scope
"""
seq = tf_parameter_iter(x)
if combine:
seq = _combine_filter(seq)
seq = reversed(list(seq))
for name, total, shape in seq:
printer("%10d %-20s %s" % (total, name, shape))
def tf_spec_structure(spec,
inputs=None,
input_shape=None,
input_type=dtypes.float32):
"""Return a postfix representation of the specification.
This is intended to be used as part of test cases to
check for gross differences in the structure of the graph.
  The resulting string is neither invertible nor unambiguous
  and cannot be used to reconstruct the graph accurately.
Args:
spec: specification
inputs: input to the spec construction (usually a Tensor)
input_shape: tensor shape (in lieu of inputs)
input_type: type of the input tensor
Returns:
A string with a postfix representation of the
specification.
"""
if inputs is None:
inputs = array_ops.placeholder(input_type, input_shape)
outputs = specs.create_net(spec, inputs)
return str(tf_structure(outputs).strip())
def tf_spec_summary(spec,
inputs=None,
input_shape=None,
input_type=dtypes.float32):
"""Output a summary of the specification.
  This prints a list of left-most tensor operations and summarizes the
variables found in the right branches. This kind of representation
is particularly useful for networks that are generally structured
like pipelines.
Args:
spec: specification
inputs: input to the spec construction (usually a Tensor)
input_shape: optional shape of input
input_type: type of the input tensor
"""
if inputs is None:
inputs = array_ops.placeholder(input_type, input_shape)
outputs = specs.create_net(spec, inputs)
tf_parameter_summary(outputs)
def tf_spec_print(spec,
inputs=None,
input_shape=None,
input_type=dtypes.float32):
"""Print a tree representing the spec.
Args:
spec: specification
inputs: input to the spec construction (usually a Tensor)
input_shape: optional shape of input
input_type: type of the input tensor
"""
if inputs is None:
inputs = array_ops.placeholder(input_type, input_shape)
outputs = specs.create_net(spec, inputs)
tf_print(outputs)
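# Example (illustrative sketch): generating the three kinds of reports above
# for a small spec. The spec string, which follows the "net = ..." convention
# accepted by specs.create_net, and the input shape are arbitrary choices.
def _example_spec_reports(batch_size=1, height=28, width=28, depth=1):
  """Prints a tree and a parameter summary, and returns the postfix string."""
  spec = "net = Cr(8, [3, 3]) | Mp([2, 2]) | Cl(4, [1, 1])"
  shape = [batch_size, height, width, depth]
  tf_spec_print(spec, input_shape=shape)      # tree of ops in the graph
  tf_spec_summary(spec, input_shape=shape)    # parameter counts per layer
  return tf_spec_structure(spec, input_shape=shape)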
|
tensorflow-master
|
tensorflow/contrib/specs/python/summaries.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Shared representations for tree-based models in tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.decision_trees.proto import *
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
tensorflow-master
|
tensorflow/contrib/decision_trees/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops and modules related to factorization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.factorization.python.ops.clustering_ops import *
from tensorflow.contrib.factorization.python.ops.factorization_ops import *
from tensorflow.contrib.factorization.python.ops.gmm import *
from tensorflow.contrib.factorization.python.ops.gmm_ops import *
from tensorflow.contrib.factorization.python.ops.kmeans import *
from tensorflow.contrib.factorization.python.ops.wals import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'COSINE_DISTANCE',
'GMM',
'gmm',
'GmmAlgorithm',
'KMeans',
'KMEANS_PLUS_PLUS_INIT',
'KMeansClustering',
'RANDOM_INIT',
'SQUARED_EUCLIDEAN_DISTANCE',
'WALSMatrixFactorization',
'WALSModel',
]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/factorization/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The python module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/factorization/python/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for masked_matmul_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-bad-todo, g-import-not-at-top
import time
from tensorflow.contrib.factorization.python.ops import gen_factorization_ops
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class MaskedmatmulBenchmark(test.Benchmark):
"""Benchmark masked_matmul_ops."""
def _make_sparse_mask(self, mask_shape, nnz, sort=False):
"""Creates a sparse tensor to be used as a mask in masked_matmul.
Args:
mask_shape: int list, the shape of the mask.
nnz: int, the number of non-zero elements in the mask.
sort: boolean, whether to sort the indices of the mask (in lexicographic
order).
Returns:
A sparse tensor, with nnz indices, drawn uniformly at random.
"""
num_rows = mask_shape[0]
num_cols = mask_shape[1]
row_idx = random_ops.random_uniform(
[nnz], minval=0, maxval=num_rows, dtype=dtypes.int64)
col_idx = random_ops.random_uniform(
[nnz], minval=0, maxval=num_cols, dtype=dtypes.int64)
indices = array_ops.stack([row_idx, col_idx], axis=1)
values = array_ops.ones([nnz])
unordered_mask = sparse_tensor.SparseTensor(indices, values, mask_shape)
return sparse_ops.sparse_reorder(unordered_mask) if sort else unordered_mask
def _run_graph(self, a_shape, b_shape, nnz, num_iters, sort=False,
transpose_a=False, transpose_b=False):
"""Run the graph and return its average execution time.
Args:
a_shape: int list, the shape of the a matrix.
b_shape: int list, the shape of the b matrix.
nnz: int, the number of non-zero elements in the mask.
num_iters: int, the number of iterations to run (the output is the average
execution time, over num_iters).
sort: Boolean, whether to sort the indices in the mask.
transpose_a: boolean, whether to transpose the a matrix.
transpose_b: boolean, whether to transpose the b matrix.
Returns:
The average duration of the masked_matmul op in seconds.
"""
graph = ops.Graph()
with graph.as_default(), session_lib.Session(graph=graph) as session:
mask_shape = [a_shape[0], b_shape[1]]
a_shape = a_shape if not transpose_a else [a_shape[1], a_shape[0]]
b_shape = b_shape if not transpose_b else [b_shape[1], b_shape[0]]
a_var = variables.Variable(random_ops.random_normal(a_shape))
b_var = variables.Variable(random_ops.random_normal(b_shape))
mask_indices_ph = array_ops.placeholder(dtypes.int64, shape=[nnz, 2])
a_ph = array_ops.placeholder(dtypes.float32, shape=a_shape)
b_ph = array_ops.placeholder(dtypes.float32, shape=b_shape)
mask = self._make_sparse_mask(mask_shape, nnz, sort)
masked_prod = gen_factorization_ops.masked_matmul(
a_ph, b_ph, mask_indices_ph, transpose_a, transpose_b)
with ops.control_dependencies([masked_prod]):
result = control_flow_ops.no_op()
variables.global_variables_initializer().run()
avg_wall_time = 0
for _ in range(num_iters):
a, b, mask_indices = session.run([a_var, b_var, mask.indices])
feed_dict = {
mask_indices_ph: mask_indices,
a_ph: a,
b_ph: b
}
start_time = time.time()
session.run(result, feed_dict=feed_dict)
avg_wall_time += (time.time() - start_time)/num_iters
bench_name = (
"cpu nnz:{nnz} a_shape:{a_shape} b_shape:{b_shape} tr_a:{tr_a} "
"tr_b:{tr_b} sort:{sort}"
).format(
nnz=nnz,
a_shape=a_shape,
b_shape=b_shape,
tr_a=int(transpose_a),
tr_b=int(transpose_b),
sort=int(sort)
)
print(bench_name + " - %f secs" % avg_wall_time)
name = bench_name.replace(", ", "_").replace(":", "_").replace(" ", "_")
self.report_benchmark(
name=name,
iters=num_iters,
wall_time=avg_wall_time)
return avg_wall_time
# TODO(walidk): compare benchmarks to using existing tf ops.
def benchmark_matmul(self):
num_iters = 10
nnz = 100000
for transpose_a in [False, True]:
for transpose_b in [False, True]:
for dim in [200, 400, 800]:
for sort in [False, True]:
a_shape = [10000, dim]
b_shape = [dim, 10000]
self._run_graph(a_shape, b_shape, nnz, num_iters, sort, transpose_a,
transpose_b)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/factorization/python/kernel_tests/masked_matmul_benchmark.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for masked_matmul_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-bad-todo, g-import-not-at-top
import numpy as np
from tensorflow.contrib.factorization.python.ops import gen_factorization_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
def MakeMask():
inds = [[0, 0], [0, 2], [1, 1], [2, 0], [2, 3]] * 100
indices = np.array(inds).astype(np.int64)
shape = np.array([5, 4]).astype(np.int64)
return (indices, shape)
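# Example (illustrative sketch): masked_matmul computes only the entries of
# a @ b selected by the mask indices and returns them as a flat values tensor,
# which can be paired with the indices to form a SparseTensor. The shapes and
# indices below are arbitrary; evaluating the result requires a session.
def ExampleMaskedMatmul():
  a = np.random.rand(5, 3).astype(np.float32)
  b = np.random.rand(3, 4).astype(np.float32)
  mask_indices = np.array([[0, 0], [1, 2], [4, 3]]).astype(np.int64)
  values = gen_factorization_ops.masked_matmul(
      a, b, mask_indices, False, False)
  # Each value equals (a @ b)[i, j] for the corresponding [i, j] in the mask.
  return sparse_tensor.SparseTensor(
      mask_indices, values, np.array([5, 4]).astype(np.int64))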
class MaskedProductOpsTest(test.TestCase):
def setUp(self):
a = [
[0.1, 0.2, 0.3],
[0.4, 0.5, 0.6],
[0.7, 0.8, 0.9],
[1.1, 1.2, 1.3],
[1.4, 1.5, 1.6],
]
b = [
[0.1, 0.4, 0.7, 1.1],
[0.2, 0.5, 0.8, 1.2],
[0.3, 0.6, 0.9, 1.3],
]
self._dot_products = np.array([0.14, 0.5, 0.77, 0.5, 2.9] * 100)
self._a = np.array(a).astype(np.float32)
self._b = np.array(b).astype(np.float32)
self._mask_ind, self._mask_shape = MakeMask()
def _runTestMaskedProduct(self, transpose_a, transpose_b):
with ops.Graph().as_default(), self.cached_session() as sess:
a = self._a if not transpose_a else array_ops.transpose(self._a)
b = self._b if not transpose_b else array_ops.transpose(self._b)
def AssertClose(sp_x, sp_y):
x_inds, x_vals, y_inds, y_vals = sess.run(
[sp_x.indices, sp_x.values,
sp_y.indices, sp_y.values])
self.assertAllClose(x_inds, y_inds)
self.assertAllClose(x_vals, y_vals)
values = gen_factorization_ops.masked_matmul(
a, b, self._mask_ind, transpose_a, transpose_b)
result = sparse_tensor.SparseTensor(
self._mask_ind, values, self._mask_shape)
true_result = sparse_tensor.SparseTensor(
self._mask_ind, self._dot_products, self._mask_shape)
AssertClose(result, true_result)
def _runTestEmptyMaskedProduct(self):
with ops.Graph().as_default(), self.cached_session() as sess:
empty_mask = constant_op.constant(0, shape=[0, 2], dtype=dtypes.int64)
values = gen_factorization_ops.masked_matmul(
self._a, self._b, empty_mask, False, False)
self.assertEqual(len(values.eval(session=sess)), 0)
def testMaskedProduct(self):
self._runTestMaskedProduct(False, False)
def testMaskedProductTransposeA(self):
self._runTestMaskedProduct(True, False)
def testMaskedProductTransposeB(self):
self._runTestMaskedProduct(False, True)
def testMaskedProductTransposeAAndB(self):
self._runTestMaskedProduct(True, True)
def testEmptyMaskedProduct(self):
self._runTestEmptyMaskedProduct()
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/factorization/python/kernel_tests/masked_matmul_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for clustering_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.factorization.python.ops import clustering_ops
from tensorflow.python.platform import test
class KmeansPlusPlusInitializationTest(test.TestCase):
# All but one input point are close to (101, 1). With uniform random sampling,
# it is highly improbable for (-1, -1) to be selected.
def setUp(self):
self._points = np.array([[100., 0.],
[101., 2.],
[102., 0.],
[100., 1.],
[100., 2.],
[101., 0.],
[101., 0.],
[101., 1.],
[102., 0.],
[-1., -1.]]).astype(np.float32)
def runTestWithSeed(self, seed):
with self.cached_session():
sampled_points = clustering_ops.kmeans_plus_plus_initialization(
self._points, 3, seed, (seed % 5) - 1)
self.assertAllClose(
sorted(sampled_points.eval().tolist()), [[-1., -1.],
[101., 1.],
[101., 1.]],
atol=1.0)
def testBasic(self):
for seed in range(100):
self.runTestWithSeed(seed)
class KMC2InitializationTest(test.TestCase):
def runTestWithSeed(self, seed):
with self.cached_session():
distances = np.zeros(1000).astype(np.float32)
distances[6] = 10e7
distances[4] = 10e3
sampled_point = clustering_ops.kmc2_chain_initialization(distances, seed)
      self.assertEqual(sampled_point.eval(), 6)
distances[6] = 0.0
sampled_point = clustering_ops.kmc2_chain_initialization(distances, seed)
      self.assertEqual(sampled_point.eval(), 4)
def testBasic(self):
for seed in range(100):
self.runTestWithSeed(seed)
class KMC2InitializationLargeTest(test.TestCase):
def setUp(self):
self._distances = np.zeros(1001)
self._distances[500] = 100.0
self._distances[1000] = 50.0
def testBasic(self):
with self.cached_session():
counts = {}
seed = 0
for i in range(50):
sample = clustering_ops.kmc2_chain_initialization(
self._distances, seed + i).eval()
counts[sample] = counts.get(sample, 0) + 1
      self.assertEqual(len(counts), 2)
self.assertTrue(500 in counts)
self.assertTrue(1000 in counts)
self.assertGreaterEqual(counts[500], 5)
self.assertGreaterEqual(counts[1000], 5)
class KMC2InitializationCornercaseTest(test.TestCase):
def setUp(self):
self._distances = np.zeros(10)
def runTestWithSeed(self, seed):
with self.cached_session():
sampled_point = clustering_ops.kmc2_chain_initialization(
self._distances, seed)
      self.assertEqual(sampled_point.eval(), 0)
def testBasic(self):
for seed in range(100):
self.runTestWithSeed(seed)
# A simple test that can be verified by hand.
class NearestCentersTest(test.TestCase):
def setUp(self):
self._points = np.array([[100., 0.],
[101., 2.],
[99., 2.],
[1., 1.]]).astype(np.float32)
self._centers = np.array([[100., 0.],
[99., 1.],
[50., 50.],
[0., 0.],
[1., 1.]]).astype(np.float32)
def testNearest1(self):
with self.cached_session():
[indices, distances] = clustering_ops.nearest_neighbors(self._points,
self._centers, 1)
self.assertAllClose(indices.eval(), [[0], [0], [1], [4]])
self.assertAllClose(distances.eval(), [[0.], [5.], [1.], [0.]])
def testNearest2(self):
with self.cached_session():
[indices, distances] = clustering_ops.nearest_neighbors(self._points,
self._centers, 2)
self.assertAllClose(indices.eval(), [[0, 1], [0, 1], [1, 0], [4, 3]])
self.assertAllClose(distances.eval(),
[[0., 2.], [5., 5.], [1., 5.], [0., 2.]])
# A test with large inputs.
class NearestCentersLargeTest(test.TestCase):
def setUp(self):
num_points = 1000
num_centers = 2000
num_dim = 100
max_k = 5
# Construct a small number of random points and later tile them.
points_per_tile = 10
assert num_points % points_per_tile == 0
points = np.random.standard_normal(
[points_per_tile, num_dim]).astype(np.float32)
# Construct random centers.
self._centers = np.random.standard_normal(
[num_centers, num_dim]).astype(np.float32)
# Exhaustively compute expected nearest neighbors.
def squared_distance(x, y):
return np.linalg.norm(x - y, ord=2)**2
nearest_neighbors = [
sorted([(squared_distance(point, self._centers[j]), j)
for j in range(num_centers)])[:max_k] for point in points
]
expected_nearest_neighbor_indices = np.array(
[[i for _, i in nn] for nn in nearest_neighbors])
expected_nearest_neighbor_squared_distances = np.array(
[[dist for dist, _ in nn] for nn in nearest_neighbors])
# Tile points and expected results to reach requested size (num_points)
(self._points, self._expected_nearest_neighbor_indices,
self._expected_nearest_neighbor_squared_distances) = (
np.tile(x, (int(num_points / points_per_tile), 1))
for x in (points, expected_nearest_neighbor_indices,
expected_nearest_neighbor_squared_distances))
def testNearest1(self):
with self.cached_session():
[indices, distances] = clustering_ops.nearest_neighbors(self._points,
self._centers, 1)
self.assertAllClose(indices.eval(),
self._expected_nearest_neighbor_indices[:, [0]])
self.assertAllClose(
distances.eval(),
self._expected_nearest_neighbor_squared_distances[:, [0]])
def testNearest5(self):
with self.cached_session():
[indices, distances] = clustering_ops.nearest_neighbors(self._points,
self._centers, 5)
self.assertAllClose(indices.eval(),
self._expected_nearest_neighbor_indices[:, 0:5])
self.assertAllClose(
distances.eval(),
self._expected_nearest_neighbor_squared_distances[:, 0:5])
if __name__ == "__main__":
np.random.seed(0)
test.main()
|
tensorflow-master
|
tensorflow/contrib/factorization/python/kernel_tests/clustering_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for wals_solver_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.factorization.python.ops import gen_factorization_ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.platform import test
def SparseBlock3x3():
ind = np.array(
[[0, 0], [0, 2], [1, 1], [2, 0], [2, 1], [3, 2]]).astype(np.int64)
val = np.array([0.1, 0.2, 1.1, 2.0, 2.1, 3.2]).astype(np.float32)
shape = np.array([4, 3]).astype(np.int64)
return sparse_tensor.SparseTensor(ind, val, shape)
class WalsSolverOpsTest(test.TestCase):
def setUp(self):
self._column_factors = np.array([
[0.1, 0.2, 0.3],
[0.4, 0.5, 0.6],
[0.7, 0.8, 0.9],
]).astype(np.float32)
self._row_factors = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6],
[0.7, 0.8, 0.9],
[1.1, 1.2, 1.3]]).astype(np.float32)
self._column_weights = np.array([0.1, 0.2, 0.3]).astype(np.float32)
self._row_weights = np.array([0.1, 0.2, 0.3, 0.4]).astype(np.float32)
self._unobserved_weights = 0.1
def testWalsSolverLhs(self):
sparse_block = SparseBlock3x3()
with self.cached_session():
[lhs_tensor,
rhs_matrix] = gen_factorization_ops.wals_compute_partial_lhs_and_rhs(
self._column_factors, self._column_weights, self._unobserved_weights,
self._row_weights, sparse_block.indices, sparse_block.values,
[],
input_block_size=sparse_block.dense_shape[0],
input_is_transpose=False)
self.assertAllClose(lhs_tensor.eval(), [[
[0.014800, 0.017000, 0.019200],
[0.017000, 0.019600, 0.022200],
[0.019200, 0.022200, 0.025200],
], [
[0.0064000, 0.0080000, 0.0096000],
[0.0080000, 0.0100000, 0.0120000],
[0.0096000, 0.0120000, 0.0144000],
], [
[0.0099000, 0.0126000, 0.0153000],
[0.0126000, 0.0162000, 0.0198000],
[0.0153000, 0.0198000, 0.0243000],
], [
[0.058800, 0.067200, 0.075600],
[0.067200, 0.076800, 0.086400],
[0.075600, 0.086400, 0.097200],
]])
self.assertAllClose(rhs_matrix.eval(), [[0.019300, 0.023000, 0.026700],
[0.061600, 0.077000, 0.092400],
[0.160400, 0.220000, 0.279600],
[0.492800, 0.563200, 0.633600]])
def testWalsSolverLhsEntryWeights(self):
sparse_block = SparseBlock3x3()
with self.cached_session():
[lhs_tensor,
rhs_matrix] = gen_factorization_ops.wals_compute_partial_lhs_and_rhs(
self._column_factors, [], self._unobserved_weights,
[], sparse_block.indices, sparse_block.values,
[0.01, 0.03, 0.04, 0.03, 0.06, 0.12],
input_block_size=sparse_block.dense_shape[0],
input_is_transpose=False)
self.assertAllClose(lhs_tensor.eval(), [[
[0.014800, 0.017000, 0.019200],
[0.017000, 0.019600, 0.022200],
[0.019200, 0.022200, 0.025200],
], [
[0.0064000, 0.0080000, 0.0096000],
[0.0080000, 0.0100000, 0.0120000],
[0.0096000, 0.0120000, 0.0144000],
], [
[0.0099000, 0.0126000, 0.0153000],
[0.0126000, 0.0162000, 0.0198000],
[0.0153000, 0.0198000, 0.0243000],
], [
[0.058800, 0.067200, 0.075600],
[0.067200, 0.076800, 0.086400],
[0.075600, 0.086400, 0.097200],
]])
self.assertAllClose(rhs_matrix.eval(), [[0.019300, 0.023000, 0.026700],
[0.061600, 0.077000, 0.092400],
[0.160400, 0.220000, 0.279600],
[0.492800, 0.563200, 0.633600]])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/factorization/python/kernel_tests/wals_solver_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Gaussian mixture model (GMM) clustering using tf.Learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib import framework
from tensorflow.contrib.factorization.python.ops import gmm_ops
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import logging_ops as logging
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
def _streaming_sum(scalar_tensor):
"""Create a sum metric and update op."""
sum_metric = framework.local_variable(constant_op.constant(0.0))
sum_update = sum_metric.assign_add(scalar_tensor)
return sum_metric, sum_update
class _InitializeClustersHook(session_run_hook.SessionRunHook):
"""Initializes clusters or waits for cluster initialization."""
def __init__(self, init_op, is_initialized_op, is_chief):
self._init_op = init_op
self._is_chief = is_chief
self._is_initialized_op = is_initialized_op
def after_create_session(self, session, _):
assert self._init_op.graph == ops.get_default_graph()
assert self._is_initialized_op.graph == self._init_op.graph
while True:
try:
if session.run(self._is_initialized_op):
break
elif self._is_chief:
session.run(self._init_op)
else:
time.sleep(1)
except RuntimeError as e:
logging.info(e)
class GMM(estimator.Estimator):
"""An estimator for GMM clustering."""
SCORES = 'scores'
LOG_LIKELIHOOD = 'loss'
ASSIGNMENTS = 'assignments'
def __init__(self,
num_clusters,
model_dir=None,
random_seed=0,
params='wmc',
initial_clusters='random',
covariance_type='full',
config=None):
"""Creates a model for running GMM training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
random_seed: Python integer. Seed for PRNG used to initialize centers.
params: Controls which parameters are updated in the training process.
Can contain any combination of "w" for weights, "m" for means,
and "c" for covars.
initial_clusters: specifies how to initialize the clusters for training.
See gmm_ops.gmm for the possible values.
covariance_type: one of "full", "diag".
config: See Estimator
"""
self._num_clusters = num_clusters
self._params = params
self._training_initial_clusters = initial_clusters
self._covariance_type = covariance_type
self._training_graph = None
self._random_seed = random_seed
super(GMM, self).__init__(
model_fn=self._model_builder(), model_dir=model_dir, config=config)
def predict_assignments(self, input_fn=None, batch_size=None, outputs=None):
"""See BaseEstimator.predict."""
results = self.predict(input_fn=input_fn,
batch_size=batch_size,
outputs=outputs)
for result in results:
yield result[GMM.ASSIGNMENTS]
def score(self, input_fn=None, batch_size=None, steps=None):
"""Predict total log-likelihood.
Args:
input_fn: see predict.
batch_size: see predict.
steps: see predict.
Returns:
Total log-likelihood.
"""
results = self.evaluate(input_fn=input_fn, batch_size=batch_size,
steps=steps)
return np.log(np.sum(np.exp(results[GMM.SCORES])))
def weights(self):
"""Returns the cluster weights."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_WEIGHT)
def clusters(self):
"""Returns cluster centers."""
clusters = checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
return np.squeeze(clusters, 1)
def covariances(self):
"""Returns the covariances."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE)
def _parse_tensor_or_dict(self, features):
if isinstance(features, dict):
return array_ops.concat([features[k] for k in sorted(features.keys())],
1)
return features
def _model_builder(self):
"""Creates a model function."""
def _model_fn(features, labels, mode, config):
"""Model function."""
assert labels is None, labels
(loss,
scores,
model_predictions,
training_op,
init_op,
is_initialized) = gmm_ops.gmm(self._parse_tensor_or_dict(features),
self._training_initial_clusters,
self._num_clusters, self._random_seed,
self._covariance_type,
self._params)
incr_step = state_ops.assign_add(training_util.get_global_step(), 1)
training_op = with_dependencies([training_op, incr_step], loss)
training_hooks = [_InitializeClustersHook(
init_op, is_initialized, config.is_chief)]
predictions = {
GMM.ASSIGNMENTS: model_predictions[0][0],
}
eval_metric_ops = {
GMM.SCORES: scores,
GMM.LOG_LIKELIHOOD: _streaming_sum(loss),
}
return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions,
eval_metric_ops=eval_metric_ops,
loss=loss, train_op=training_op,
training_hooks=training_hooks)
return _model_fn
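# Example (illustrative sketch): driving the GMM estimator defined above on a
# small random dataset. The input_fn, point count, and step count are arbitrary
# assumptions; fit() and predict() come from the contrib.learn Estimator base
# class.
def _example_gmm_usage(num_points=200, num_dims=2, num_clusters=3):
  """Fits a GMM on random points and returns (centers, assignments)."""
  points = np.random.randn(num_points, num_dims).astype(np.float32)
  def _input_fn():
    # The model_fn above asserts that labels is None.
    return constant_op.constant(points), None
  gmm = GMM(num_clusters=num_clusters, random_seed=0)
  gmm.fit(input_fn=_input_fn, steps=10)
  assignments = list(gmm.predict_assignments(input_fn=_input_fn))
  return gmm.clusters(), assignments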
|
tensorflow-master
|
tensorflow/contrib/factorization/python/ops/gmm.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for factorization_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.factorization.python.ops import factorization_ops
from tensorflow.contrib.factorization.python.ops import factorization_ops_test_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
INPUT_MATRIX = factorization_ops_test_utils.INPUT_MATRIX
np_matrix_to_tf_sparse = factorization_ops_test_utils.np_matrix_to_tf_sparse
class WalsModelTest(test.TestCase):
def sparse_input(self):
return np_matrix_to_tf_sparse(INPUT_MATRIX)
def count_rows(self, sp_input):
return math_ops.cast(
array_ops.shape(array_ops.unique(sp_input.indices[:, 0])[0])[0],
dtypes.float32)
def count_cols(self, sp_input):
return math_ops.cast(
array_ops.shape(array_ops.unique(sp_input.indices[:, 1])[0])[0],
dtypes.float32)
def calculate_loss_from_wals_model(self, wals_model, sp_inputs):
current_rows = embedding_ops.embedding_lookup(
wals_model.row_factors,
math_ops.range(wals_model._input_rows),
partition_strategy="div")
current_cols = embedding_ops.embedding_lookup(
wals_model.col_factors,
math_ops.range(wals_model._input_cols),
partition_strategy="div")
row_wts = embedding_ops.embedding_lookup(
wals_model._row_weights,
math_ops.range(wals_model._input_rows),
partition_strategy="div")
col_wts = embedding_ops.embedding_lookup(
wals_model._col_weights,
math_ops.range(wals_model._input_cols),
partition_strategy="div")
return factorization_ops_test_utils.calculate_loss(
sp_inputs, current_rows, current_cols, wals_model._regularization,
wals_model._unobserved_weight, row_wts, col_wts)
def setUp(self):
self.col_init = [
# shard 0
[
[-0.36444709, -0.39077035, -0.32528427], # pyformat line break
[1.19056475, 0.07231052, 2.11834812],
[0.93468881, -0.71099287, 1.91826844]
],
# shard 1
[[1.18160152, 1.52490723, -0.50015002],
[1.82574749, -0.57515913, -1.32810032]],
# shard 2
[[-0.15515432, -0.84675711, 0.13097958],
[-0.9246484, 0.69117504, 1.2036494]]
]
self.row_wts = [[0.1, 0.2, 0.3], [0.4, 0.5]]
self.col_wts = [[0.1, 0.2, 0.3], [0.4, 0.5], [0.6, 0.7]]
# Values of factor shards after running one iteration of row and column
# updates.
self._row_factors_0 = [
[0.097689, -0.219293, -0.020780], # pyformat line break
[0.50842, 0.64626, 0.22364],
[0.401159, -0.046558, -0.192854]
]
self._row_factors_1 = [[1.20597, -0.48025, 0.35582],
[1.5564, 1.2528, 1.0528]]
self._col_factors_0 = [
[2.4725, -1.2950, -1.9980], # pyformat line break
[0.44625, 1.50771, 1.27118],
[1.39801, -2.10134, 0.73572]
]
self._col_factors_1 = [[3.36509, -0.66595, -3.51208],
[0.57191, 1.59407, 1.33020]]
self._col_factors_2 = [[3.3459, -1.3341, -3.3008],
[0.57366, 1.83729, 1.26798]]
def _run_test_sum_weights(self, test_rows):
# test_rows: True to test row weights, False to test column weights.
num_rows = 5
num_cols = 5
unobserved_weight = 0.1
row_weights = [[8., 18., 28., 38., 48.]]
col_weights = [[90., 91., 92., 93., 94.]]
sparse_indices = [[0, 1], [2, 3], [4, 1]]
sparse_values = [666., 777., 888.]
unobserved = unobserved_weight * num_rows * num_cols
observed = 8. * 91. + 28. * 93. + 48. * 91.
# sparse_indices has three unique rows and two unique columns
observed *= num_rows / 3. if test_rows else num_cols / 2.
want_weight_sum = unobserved + observed
with ops.Graph().as_default(), self.cached_session() as sess:
wals_model = factorization_ops.WALSModel(
input_rows=num_rows,
input_cols=num_cols,
n_components=5,
unobserved_weight=unobserved_weight,
row_weights=row_weights,
col_weights=col_weights,
use_factors_weights_cache=False)
wals_model.initialize_op.run()
wals_model.worker_init.run()
update_factors = (wals_model.update_row_factors
if test_rows else wals_model.update_col_factors)
(_, _, _, _, sum_weights) = update_factors(
sp_input=sparse_tensor.SparseTensor(
indices=sparse_indices,
values=sparse_values,
dense_shape=[num_rows, num_cols]),
transpose_input=False)
got_weight_sum = sess.run(sum_weights)
self.assertNear(
got_weight_sum,
want_weight_sum,
err=.001,
msg="got weight sum [{}], want weight sum [{}]".format(
got_weight_sum, want_weight_sum))
def _run_test_process_input(self,
use_factors_weights_cache,
compute_loss=False):
with ops.Graph().as_default(), self.cached_session() as sess:
self._wals_inputs = self.sparse_input()
sp_feeder = array_ops.sparse_placeholder(dtypes.float32)
num_rows = 5
num_cols = 7
factor_dim = 3
wals_model = factorization_ops.WALSModel(
num_rows,
num_cols,
factor_dim,
num_row_shards=2,
num_col_shards=3,
regularization=0.01,
unobserved_weight=0.1,
col_init=self.col_init,
row_weights=self.row_wts,
col_weights=self.col_wts,
use_factors_weights_cache=use_factors_weights_cache)
wals_model.initialize_op.run()
wals_model.worker_init.run()
# Split input into multiple sparse tensors with scattered rows. Note that
# this split can be different than the factor sharding and the inputs can
# consist of non-consecutive rows. Each row needs to include all non-zero
# elements in that row.
sp_r0 = np_matrix_to_tf_sparse(INPUT_MATRIX, [0, 2]).eval()
sp_r1 = np_matrix_to_tf_sparse(INPUT_MATRIX, [1, 4], shuffle=True).eval()
sp_r2 = np_matrix_to_tf_sparse(INPUT_MATRIX, [3], shuffle=True).eval()
input_scattered_rows = [sp_r0, sp_r1, sp_r2]
# Test updating row factors.
# Here we feed in scattered rows of the input.
wals_model.row_update_prep_gramian_op.run()
wals_model.initialize_row_update_op.run()
(_, process_input_op, unregularized_loss, regularization,
_) = wals_model.update_row_factors(
sp_input=sp_feeder, transpose_input=False)
factor_loss = unregularized_loss + regularization
for inp in input_scattered_rows:
feed_dict = {sp_feeder: inp}
process_input_op.run(feed_dict=feed_dict)
row_factors = [x.eval() for x in wals_model.row_factors]
self.assertAllClose(row_factors[0], self._row_factors_0, atol=1e-3)
self.assertAllClose(row_factors[1], self._row_factors_1, atol=1e-3)
# Test row projection.
# Using the specified projection weights for the 2 row feature vectors.
# This is expected to reproduce the same row factors in the model as the
      # weights and feature vectors are identical to those used in model
# training.
projected_rows = wals_model.project_row_factors(
sp_input=sp_feeder,
transpose_input=False,
projection_weights=[0.2, 0.5])
# Don't specify the projection weight, so 1.0 will be used. The feature
# weights will be those specified in model.
projected_rows_no_weights = wals_model.project_row_factors(
sp_input=sp_feeder, transpose_input=False)
feed_dict = {
sp_feeder:
np_matrix_to_tf_sparse(INPUT_MATRIX, [1, 4], shuffle=False)
.eval()
}
self.assertAllClose(
projected_rows.eval(feed_dict=feed_dict),
[self._row_factors_0[1], self._row_factors_1[1]],
atol=1e-3)
self.assertAllClose(
projected_rows_no_weights.eval(feed_dict=feed_dict),
[[0.569082, 0.715088, 0.31777], [1.915879, 1.992677, 1.109057]],
atol=1e-3)
if compute_loss:
# Test loss computation after the row update
loss = sum(
sess.run(
factor_loss * self.count_rows(inp) / num_rows,
feed_dict={sp_feeder: inp}) for inp in input_scattered_rows)
true_loss = self.calculate_loss_from_wals_model(wals_model,
self._wals_inputs)
self.assertNear(
loss,
true_loss,
err=.001,
msg="After row update, computed loss [{}] does not match"
" true loss [{}]".format(loss, true_loss))
# Split input into multiple sparse tensors with scattered columns. Note
# that here the elements in the sparse tensors are not ordered and also
# do not need to consist of consecutive columns. However, each column
# needs to include all non-zero elements in that column.
sp_c0 = np_matrix_to_tf_sparse(INPUT_MATRIX, col_slices=[2, 0]).eval()
sp_c1 = np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[5, 3, 1], shuffle=True).eval()
sp_c2 = np_matrix_to_tf_sparse(INPUT_MATRIX, col_slices=[4, 6]).eval()
sp_c3 = np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[3, 6], shuffle=True).eval()
input_scattered_cols = [sp_c0, sp_c1, sp_c2, sp_c3]
input_scattered_cols_non_duplicate = [sp_c0, sp_c1, sp_c2]
# Test updating column factors.
# Here we feed in scattered columns of the input.
wals_model.col_update_prep_gramian_op.run()
wals_model.initialize_col_update_op.run()
(_, process_input_op, unregularized_loss, regularization,
_) = wals_model.update_col_factors(
sp_input=sp_feeder, transpose_input=False)
factor_loss = unregularized_loss + regularization
for inp in input_scattered_cols:
feed_dict = {sp_feeder: inp}
process_input_op.run(feed_dict=feed_dict)
col_factors = [x.eval() for x in wals_model.col_factors]
self.assertAllClose(col_factors[0], self._col_factors_0, atol=1e-3)
self.assertAllClose(col_factors[1], self._col_factors_1, atol=1e-3)
self.assertAllClose(col_factors[2], self._col_factors_2, atol=1e-3)
# Test column projection.
# Using the specified projection weights for the 3 column feature vectors.
# This is expected to reproduce the same column factors in the model as
      # the weights and feature vectors are identical to those used in model
# training.
projected_cols = wals_model.project_col_factors(
sp_input=sp_feeder,
transpose_input=False,
projection_weights=[0.6, 0.4, 0.2])
# Don't specify the projection weight, so 1.0 will be used. The feature
# weights will be those specified in model.
projected_cols_no_weights = wals_model.project_col_factors(
sp_input=sp_feeder, transpose_input=False)
feed_dict = {
sp_feeder:
np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[5, 3, 1], shuffle=False).eval()
}
self.assertAllClose(
projected_cols.eval(feed_dict=feed_dict), [
self._col_factors_2[0], self._col_factors_1[0],
self._col_factors_0[1]
],
atol=1e-3)
self.assertAllClose(
projected_cols_no_weights.eval(feed_dict=feed_dict),
[[3.471045, -1.250835, -3.598917], [3.585139, -0.487476, -3.852232],
[0.346433, 1.360644, 1.677121]],
atol=1e-3)
if compute_loss:
# Test loss computation after the column update.
loss = sum(
sess.run(
factor_loss * self.count_cols(inp) / num_cols,
feed_dict={sp_feeder: inp})
for inp in input_scattered_cols_non_duplicate)
true_loss = self.calculate_loss_from_wals_model(wals_model,
self._wals_inputs)
self.assertNear(
loss,
true_loss,
err=.001,
msg="After col update, computed loss [{}] does not match"
" true loss [{}]".format(loss, true_loss))
def _run_test_process_input_transposed(self,
use_factors_weights_cache,
compute_loss=False):
with ops.Graph().as_default(), self.cached_session() as sess:
self._wals_inputs = self.sparse_input()
sp_feeder = array_ops.sparse_placeholder(dtypes.float32)
num_rows = 5
num_cols = 7
factor_dim = 3
wals_model = factorization_ops.WALSModel(
num_rows,
num_cols,
factor_dim,
num_row_shards=2,
num_col_shards=3,
regularization=0.01,
unobserved_weight=0.1,
col_init=self.col_init,
row_weights=self.row_wts,
col_weights=self.col_wts,
use_factors_weights_cache=use_factors_weights_cache)
wals_model.initialize_op.run()
wals_model.worker_init.run()
# Split input into multiple SparseTensors with scattered rows.
# Here the inputs are transposed. But the same constraints as described in
# the previous non-transposed test case apply to these inputs (before they
# are transposed).
sp_r0_t = np_matrix_to_tf_sparse(
INPUT_MATRIX, [0, 3], transpose=True).eval()
sp_r1_t = np_matrix_to_tf_sparse(
INPUT_MATRIX, [4, 1], shuffle=True, transpose=True).eval()
sp_r2_t = np_matrix_to_tf_sparse(INPUT_MATRIX, [2], transpose=True).eval()
sp_r3_t = sp_r1_t
input_scattered_rows = [sp_r0_t, sp_r1_t, sp_r2_t, sp_r3_t]
input_scattered_rows_non_duplicate = [sp_r0_t, sp_r1_t, sp_r2_t]
# Test updating row factors.
# Here we feed in scattered rows of the input.
      # Note that the placeholder suffixes follow the lexicographic order of
      # the test case names and then the line order in which they appear.
wals_model.row_update_prep_gramian_op.run()
wals_model.initialize_row_update_op.run()
(_, process_input_op, unregularized_loss, regularization,
_) = wals_model.update_row_factors(
sp_input=sp_feeder, transpose_input=True)
factor_loss = unregularized_loss + regularization
for inp in input_scattered_rows:
feed_dict = {sp_feeder: inp}
process_input_op.run(feed_dict=feed_dict)
row_factors = [x.eval() for x in wals_model.row_factors]
self.assertAllClose(row_factors[0], self._row_factors_0, atol=1e-3)
self.assertAllClose(row_factors[1], self._row_factors_1, atol=1e-3)
# Test row projection.
# Using the specified projection weights for the 2 row feature vectors.
# This is expected to reproduce the same row factors in the model as the
      # weights and feature vectors are identical to those used in model
# training.
projected_rows = wals_model.project_row_factors(
sp_input=sp_feeder,
transpose_input=True,
projection_weights=[0.5, 0.2])
# Don't specify the projection weight, so 1.0 will be used. The feature
# weights will be those specified in model.
projected_rows_no_weights = wals_model.project_row_factors(
sp_input=sp_feeder, transpose_input=True)
feed_dict = {
sp_feeder:
np_matrix_to_tf_sparse(
INPUT_MATRIX, [4, 1], shuffle=False, transpose=True).eval()
}
self.assertAllClose(
projected_rows.eval(feed_dict=feed_dict),
[self._row_factors_1[1], self._row_factors_0[1]],
atol=1e-3)
self.assertAllClose(
projected_rows_no_weights.eval(feed_dict=feed_dict),
[[1.915879, 1.992677, 1.109057], [0.569082, 0.715088, 0.31777]],
atol=1e-3)
if compute_loss:
# Test loss computation after the row update
loss = sum(
sess.run(
factor_loss * self.count_cols(inp) / num_rows,
feed_dict={sp_feeder: inp})
for inp in input_scattered_rows_non_duplicate)
true_loss = self.calculate_loss_from_wals_model(wals_model,
self._wals_inputs)
self.assertNear(
loss,
true_loss,
err=.001,
msg="After row update, computed loss [{}] does not match"
" true loss [{}]".format(loss, true_loss))
# Split input into multiple SparseTensors with scattered columns.
# Here the inputs are transposed. But the same constraints as described in
# the previous non-transposed test case apply to these inputs (before they
# are transposed).
sp_c0_t = np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[0, 1], transpose=True).eval()
sp_c1_t = np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[4, 2], transpose=True).eval()
sp_c2_t = np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[5], transpose=True, shuffle=True).eval()
sp_c3_t = np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[3, 6], transpose=True).eval()
sp_c4_t = sp_c2_t
input_scattered_cols = [sp_c0_t, sp_c1_t, sp_c2_t, sp_c3_t, sp_c4_t]
input_scattered_cols_non_duplicate = [sp_c0_t, sp_c1_t, sp_c2_t, sp_c3_t]
# Test updating column factors.
# Here we feed in scattered columns of the input.
wals_model.col_update_prep_gramian_op.run()
wals_model.initialize_col_update_op.run()
(_, process_input_op, unregularized_loss, regularization,
_) = wals_model.update_col_factors(
sp_input=sp_feeder, transpose_input=True)
factor_loss = unregularized_loss + regularization
for inp in input_scattered_cols:
feed_dict = {sp_feeder: inp}
process_input_op.run(feed_dict=feed_dict)
col_factors = [x.eval() for x in wals_model.col_factors]
self.assertAllClose(col_factors[0], self._col_factors_0, atol=1e-3)
self.assertAllClose(col_factors[1], self._col_factors_1, atol=1e-3)
self.assertAllClose(col_factors[2], self._col_factors_2, atol=1e-3)
# Test column projection.
# Using the specified projection weights for the 2 column feature vectors.
# This is expected to reproduce the same column factors in the model as
      # the weights and feature vectors are identical to those used in model
# training.
projected_cols = wals_model.project_col_factors(
sp_input=sp_feeder,
transpose_input=True,
projection_weights=[0.4, 0.7])
# Don't specify the projection weight, so 1.0 will be used. The feature
# weights will be those specified in model.
projected_cols_no_weights = wals_model.project_col_factors(
sp_input=sp_feeder, transpose_input=True)
feed_dict = {sp_feeder: sp_c3_t}
self.assertAllClose(
projected_cols.eval(feed_dict=feed_dict),
[self._col_factors_1[0], self._col_factors_2[1]],
atol=1e-3)
self.assertAllClose(
projected_cols_no_weights.eval(feed_dict=feed_dict),
[[3.585139, -0.487476, -3.852232], [0.557937, 1.813907, 1.331171]],
atol=1e-3)
if compute_loss:
# Test loss computation after the col update
loss = sum(
sess.run(
factor_loss * self.count_rows(inp) / num_cols,
feed_dict={sp_feeder: inp})
for inp in input_scattered_cols_non_duplicate)
true_loss = self.calculate_loss_from_wals_model(wals_model,
self._wals_inputs)
self.assertNear(
loss,
true_loss,
err=.001,
msg="After col update, computed loss [{}] does not match"
" true loss [{}]".format(loss, true_loss))
  # Note that when row_weights and col_weights are 0, WALS gives the same
  # results as ALS (Alternating Least Squares). However, our implementation
  # does not handle the case of zero weights differently. Instead, when
  # row_weights and col_weights are set to None, we interpret that as the ALS
  # case and trigger the more efficient ALS updates.
# Here we test that those two give identical results.
def _run_test_als(self, use_factors_weights_cache):
with ops.Graph().as_default(), self.cached_session():
self._wals_inputs = self.sparse_input()
col_init = np.random.rand(7, 3)
als_model = factorization_ops.WALSModel(
5,
7,
3,
col_init=col_init,
row_weights=None,
col_weights=None,
use_factors_weights_cache=use_factors_weights_cache)
als_model.initialize_op.run()
als_model.worker_init.run()
als_model.row_update_prep_gramian_op.run()
als_model.initialize_row_update_op.run()
process_input_op = als_model.update_row_factors(self._wals_inputs)[1]
process_input_op.run()
row_factors1 = [x.eval() for x in als_model.row_factors]
# Testing row projection. The projection weight doesn't matter in this case
# since the model is the ALS special case.
als_projected_row_factors1 = als_model.project_row_factors(
self._wals_inputs).eval()
wals_model = factorization_ops.WALSModel(
5,
7,
3,
col_init=col_init,
row_weights=0,
col_weights=0,
use_factors_weights_cache=use_factors_weights_cache)
wals_model.initialize_op.run()
wals_model.worker_init.run()
wals_model.row_update_prep_gramian_op.run()
wals_model.initialize_row_update_op.run()
process_input_op = wals_model.update_row_factors(self._wals_inputs)[1]
process_input_op.run()
row_factors2 = [x.eval() for x in wals_model.row_factors]
for r1, r2 in zip(row_factors1, row_factors2):
self.assertAllClose(r1, r2, atol=1e-3)
self.assertAllClose(
als_projected_row_factors1,
[row for shard in row_factors2 for row in shard],
atol=1e-3)
# Here we test partial column updates.
sp_c = np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[2, 0], shuffle=True).eval()
sp_feeder = array_ops.sparse_placeholder(dtypes.float32)
feed_dict = {sp_feeder: sp_c}
als_model.col_update_prep_gramian_op.run()
als_model.initialize_col_update_op.run()
process_input_op = als_model.update_col_factors(sp_input=sp_feeder)[1]
process_input_op.run(feed_dict=feed_dict)
col_factors1 = [x.eval() for x in als_model.col_factors]
# Testing column projection. The projection weight doesn't matter in this case
# since the model is the ALS special case.
als_projected_col_factors1 = als_model.project_col_factors(
np_matrix_to_tf_sparse(
INPUT_MATRIX, col_slices=[2, 0], shuffle=False)).eval()
feed_dict = {sp_feeder: sp_c}
wals_model.col_update_prep_gramian_op.run()
wals_model.initialize_col_update_op.run()
process_input_op = wals_model.update_col_factors(sp_input=sp_feeder)[1]
process_input_op.run(feed_dict=feed_dict)
col_factors2 = [x.eval() for x in wals_model.col_factors]
for c1, c2 in zip(col_factors1, col_factors2):
self.assertAllClose(c1, c2, rtol=5e-3, atol=1e-2)
self.assertAllClose(
als_projected_col_factors1, [col_factors2[0][2], col_factors2[0][0]],
atol=1e-2)
def _run_test_als_transposed(self, use_factors_weights_cache):
with ops.Graph().as_default(), self.cached_session():
self._wals_inputs = self.sparse_input()
col_init = np.random.rand(7, 3)
als_model = factorization_ops.WALSModel(
5,
7,
3,
col_init=col_init,
row_weights=None,
col_weights=None,
use_factors_weights_cache=use_factors_weights_cache)
als_model.initialize_op.run()
als_model.worker_init.run()
wals_model = factorization_ops.WALSModel(
5,
7,
3,
col_init=col_init,
row_weights=[0] * 5,
col_weights=[0] * 7,
use_factors_weights_cache=use_factors_weights_cache)
wals_model.initialize_op.run()
wals_model.worker_init.run()
sp_feeder = array_ops.sparse_placeholder(dtypes.float32)
# Here test partial row update with identical inputs but with transposed
# input for als.
sp_r_t = np_matrix_to_tf_sparse(
INPUT_MATRIX, [3, 1], transpose=True).eval()
sp_r = np_matrix_to_tf_sparse(INPUT_MATRIX, [3, 1]).eval()
feed_dict = {sp_feeder: sp_r_t}
als_model.row_update_prep_gramian_op.run()
als_model.initialize_row_update_op.run()
process_input_op = als_model.update_row_factors(
sp_input=sp_feeder, transpose_input=True)[1]
process_input_op.run(feed_dict=feed_dict)
# Only updated row 1 and row 3, so only compare these rows since others
# have randomly initialized values.
row_factors1 = [
als_model.row_factors[0].eval()[1], als_model.row_factors[0].eval()[3]
]
# Testing row projection. The projection weight doesn't matter in this case
# since the model is the ALS special case. Note that the ordering of the
# returned results matches the ordering of the input feature vectors.
als_projected_row_factors1 = als_model.project_row_factors(
sp_input=sp_feeder, transpose_input=True).eval(feed_dict=feed_dict)
feed_dict = {sp_feeder: sp_r}
wals_model.row_update_prep_gramian_op.run()
wals_model.initialize_row_update_op.run()
process_input_op = wals_model.update_row_factors(sp_input=sp_feeder)[1]
process_input_op.run(feed_dict=feed_dict)
# Only updated row 1 and row 3, so only compare these rows since others
# have randomly initialized values.
row_factors2 = [
wals_model.row_factors[0].eval()[1],
wals_model.row_factors[0].eval()[3]
]
for r1, r2 in zip(row_factors1, row_factors2):
self.assertAllClose(r1, r2, atol=1e-3)
# Note that the ordering of the returned projection results matches the
# ordering of the input feature vectors.
self.assertAllClose(
als_projected_row_factors1, [row_factors2[1], row_factors2[0]],
atol=1e-3)
def simple_train(self, model, inp, num_iterations):
"""Helper function to train model on inp for num_iterations."""
row_update_op = model.update_row_factors(sp_input=inp)[1]
col_update_op = model.update_col_factors(sp_input=inp)[1]
model.initialize_op.run()
model.worker_init.run()
for _ in xrange(num_iterations):
model.row_update_prep_gramian_op.run()
model.initialize_row_update_op.run()
row_update_op.run()
model.col_update_prep_gramian_op.run()
model.initialize_col_update_op.run()
col_update_op.run()
# Trains an ALS model for a low-rank matrix and makes sure the product of
# factors is close to the original input.
def _run_test_train_full_low_rank_als(self, use_factors_weights_cache):
rows = 15
cols = 11
dims = 3
with ops.Graph().as_default(), self.cached_session():
data = np.dot(np.random.rand(rows, 3), np.random.rand(
3, cols)).astype(np.float32) / 3.0
indices = [[i, j] for i in xrange(rows) for j in xrange(cols)]
values = data.reshape(-1)
inp = sparse_tensor.SparseTensor(indices, values, [rows, cols])
model = factorization_ops.WALSModel(
rows,
cols,
dims,
regularization=1e-5,
row_weights=None,
col_weights=None,
use_factors_weights_cache=use_factors_weights_cache)
self.simple_train(model, inp, 25)
row_factor = model.row_factors[0].eval()
col_factor = model.col_factors[0].eval()
self.assertAllClose(
data,
np.dot(row_factor, np.transpose(col_factor)),
rtol=0.01,
atol=0.01)
# Trains a WALS model for a low-rank matrix and makes sure the product of
# factors is close to the original input.
def _run_test_train_full_low_rank_wals(self, use_factors_weights_cache):
rows = 15
cols = 11
dims = 3
with ops.Graph().as_default(), self.cached_session():
data = np.dot(np.random.rand(rows, 3), np.random.rand(
3, cols)).astype(np.float32) / 3.0
indices = [[i, j] for i in xrange(rows) for j in xrange(cols)]
values = data.reshape(-1)
inp = sparse_tensor.SparseTensor(indices, values, [rows, cols])
model = factorization_ops.WALSModel(
rows,
cols,
dims,
regularization=1e-5,
row_weights=0,
col_weights=[0] * cols,
use_factors_weights_cache=use_factors_weights_cache)
self.simple_train(model, inp, 25)
row_factor = model.row_factors[0].eval()
col_factor = model.col_factors[0].eval()
self.assertAllClose(
data,
np.dot(row_factor, np.transpose(col_factor)),
rtol=0.01,
atol=0.01)
# Trains a WALS model for a partially observed low-rank matrix and makes
# sure the product of factors is reasonably close to the original input.
def _run_test_train_matrix_completion_wals(self, use_factors_weights_cache):
rows = 11
cols = 9
dims = 4
def keep_index(x):
return not (x[0] + x[1]) % 4
with ops.Graph().as_default(), self.cached_session():
row_wts = 0.1 + np.random.rand(rows)
col_wts = 0.1 + np.random.rand(cols)
data = np.dot(np.random.rand(rows, 3), np.random.rand(
3, cols)).astype(np.float32) / 3.0
indices = np.array(
list(
filter(keep_index,
[[i, j] for i in xrange(rows) for j in xrange(cols)])))
values = data[indices[:, 0], indices[:, 1]]
inp = sparse_tensor.SparseTensor(indices, values, [rows, cols])
model = factorization_ops.WALSModel(
rows,
cols,
dims,
unobserved_weight=0.01,
regularization=0.001,
row_weights=row_wts,
col_weights=col_wts,
use_factors_weights_cache=use_factors_weights_cache)
self.simple_train(model, inp, 25)
row_factor = model.row_factors[0].eval()
col_factor = model.col_factors[0].eval()
out = np.dot(row_factor, np.transpose(col_factor))
for i in xrange(rows):
for j in xrange(cols):
if keep_index([i, j]):
self.assertNear(
data[i][j], out[i][j], err=0.4, msg="%d, %d" % (i, j))
else:
self.assertNear(0, out[i][j], err=0.5, msg="%d, %d" % (i, j))
def test_process_input_with_cache(self):
self._run_test_process_input(True)
def test_process_input_without_cache(self):
self._run_test_process_input(False)
def test_process_input_transposed_with_cache(self):
self._run_test_process_input_transposed(True)
def test_process_input_transposed_without_cache(self):
self._run_test_process_input_transposed(False)
def test_als_with_cache(self):
self._run_test_als(True)
def test_als_without_cache(self):
self._run_test_als(False)
def test_als_transposed_with_cache(self):
self._run_test_als_transposed(True)
def test_als_transposed_without_cache(self):
self._run_test_als_transposed(False)
def test_train_full_low_rank_wals_with_cache(self):
self._run_test_train_full_low_rank_wals(True)
def test_train_full_low_rank_wals_without_cache(self):
self._run_test_train_full_low_rank_wals(False)
def test_train_matrix_completion_wals_with_cache(self):
self._run_test_train_matrix_completion_wals(True)
def test_train_matrix_completion_wals_without_cache(self):
self._run_test_train_matrix_completion_wals(False)
def test_loss_transposed_with_cache(self):
self._run_test_process_input_transposed(True, compute_loss=True)
def test_loss_transposed_without_cache(self):
self._run_test_process_input_transposed(False, compute_loss=True)
def test_loss_with_cache(self):
self._run_test_process_input(True, compute_loss=True)
def test_loss_without_cache(self):
self._run_test_process_input(False, compute_loss=True)
def test_sum_row_weights(self):
self._run_test_sum_weights(True)
def test_sum_col_weights(self):
self._run_test_sum_weights(False)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/factorization/python/ops/factorization_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for KMeans."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
import numpy as np
from sklearn.cluster import KMeans as SklearnKMeans
# pylint: disable=g-import-not-at-top
from tensorflow.contrib.factorization.python.ops import kmeans as kmeans_lib
from tensorflow.python.estimator import run_config
from tensorflow.python.feature_column import feature_column_lib as fc
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
FLAGS = flags.FLAGS
def normalize(x):
return x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))
def cosine_similarity(x, y):
return np.dot(normalize(x), np.transpose(normalize(y)))
def make_random_centers(num_centers, num_dims, center_norm=500):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * center_norm)
def make_random_points(centers, num_points, max_offset=20):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * max_offset)
return (centers[assignments] + offsets, assignments, np.add.reduce(
offsets * offsets, 1))
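# Illustrative sketch, not part of the original test file: shows how the two
# synthetic-data helpers above are typically combined, and that the third
# value returned by make_random_points is the squared offset of each point
# from its assigned true center (the quantity summed into true_score in the
# tests below). The helper name _example_synthetic_data is hypothetical.
def _example_synthetic_data(num_centers=3, num_dims=2, num_points=10):
  centers = make_random_centers(num_centers, num_dims)
  points, assignments, squared_offsets = make_random_points(
      centers, num_points)
  # Each entry of squared_offsets equals ||point - assigned_center||^2.
  expected = np.sum((points - centers[assignments])**2, axis=1)
  assert np.allclose(squared_offsets, expected)
  return points, assignments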
class KMeansTestBase(test.TestCase):
def input_fn(self,
batch_size=None,
points=None,
randomize=None,
num_epochs=None):
"""Returns an input_fn that randomly selects batches from given points."""
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
if randomize is None:
randomize = (self.use_mini_batch and
self.mini_batch_steps_per_iteration <= 1)
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return input_lib.limit_epochs(x, num_epochs=num_epochs), None
if randomize:
indices = random_ops.random_uniform(
constant_op.constant([batch_size]),
minval=0,
maxval=num_points - 1,
dtype=dtypes.int32,
seed=10)
else:
# We need to cycle through the indices sequentially. We create a queue
# to maintain the list of indices.
q = data_flow_ops.FIFOQueue(num_points, dtypes.int32, ())
# Conditionally initialize the Queue.
def _init_q():
with ops.control_dependencies(
[q.enqueue_many(math_ops.range(num_points))]):
return control_flow_ops.no_op()
init_q = control_flow_ops.cond(q.size() <= 0, _init_q,
control_flow_ops.no_op)
with ops.control_dependencies([init_q]):
offsets = q.dequeue_many(batch_size)
with ops.control_dependencies([q.enqueue_many(offsets)]):
indices = array_ops.identity(offsets)
batch = array_ops.gather(x, indices)
return (input_lib.limit_epochs(batch, num_epochs=num_epochs), None)
return _fn
@staticmethod
def config(tf_random_seed):
return run_config.RunConfig().replace(tf_random_seed=tf_random_seed)
@property
def initial_clusters(self):
return kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT
@property
def batch_size(self):
return self.num_points
@property
def use_mini_batch(self):
return False
@property
def mini_batch_steps_per_iteration(self):
return 1
class KMeansTest(KMeansTestBase):
def setUp(self):
np.random.seed(3)
self.num_centers = 5
self.num_dims = 2
self.num_points = 1000
self.true_centers = make_random_centers(self.num_centers, self.num_dims)
self.points, _, self.scores = make_random_points(self.true_centers,
self.num_points)
self.true_score = np.add.reduce(self.scores)
def _kmeans(self, relative_tolerance=None):
return kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
random_seed=24,
relative_tolerance=relative_tolerance)
def test_clusters(self):
kmeans = self._kmeans()
kmeans.train(input_fn=self.input_fn(), steps=1)
clusters = kmeans.cluster_centers()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
kmeans = self._kmeans()
kmeans.train(input_fn=self.input_fn(), steps=1)
score1 = kmeans.score(input_fn=self.input_fn(batch_size=self.num_points))
steps = 10 * self.num_points // self.batch_size
kmeans.train(input_fn=self.input_fn(), steps=steps)
score2 = kmeans.score(input_fn=self.input_fn(batch_size=self.num_points))
self.assertTrue(score1 > score2)
self.assertNear(self.true_score, score2, self.true_score * 0.05)
def test_monitor(self):
if self.use_mini_batch:
# We don't test the use_mini_batch case since the loss value can be noisy.
return
kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(14),
random_seed=12,
relative_tolerance=1e-4)
kmeans.train(
input_fn=self.input_fn(),
# Force it to train until the relative tolerance monitor stops it.
steps=None)
score = kmeans.score(input_fn=self.input_fn(batch_size=self.num_points))
self.assertNear(self.true_score, score, self.true_score * 0.01)
def _infer_helper(self, kmeans, clusters, num_points):
points, true_assignments, true_offsets = make_random_points(
clusters, num_points)
input_fn = self.input_fn(batch_size=num_points, points=points, num_epochs=1)
# Test predict
assignments = list(kmeans.predict_cluster_index(input_fn))
self.assertAllEqual(assignments, true_assignments)
# Test score
score = kmeans.score(input_fn=lambda: (constant_op.constant(points), None))
self.assertNear(score, np.sum(true_offsets), 0.01 * score)
# Test transform
transform = list(kmeans.transform(input_fn))
true_transform = np.maximum(
0,
np.sum(np.square(points), axis=1, keepdims=True) -
2 * np.dot(points, np.transpose(clusters)) + np.transpose(
np.sum(np.square(clusters), axis=1, keepdims=True)))
self.assertAllClose(transform, true_transform, rtol=0.05, atol=10)
def test_infer(self):
kmeans = self._kmeans()
# Make a call to train to initialize the cluster centers.
max_steps = 1
kmeans.train(input_fn=self.input_fn(), max_steps=max_steps)
clusters = kmeans.cluster_centers()
# Run inference on small datasets.
self._infer_helper(kmeans, clusters, 10)
self._infer_helper(kmeans, clusters, 1)
def _parse_feature_dict_helper(self, features, parsed_feature_dict):
# Perform a sanity check.
self.assertEqual(features.shape, parsed_feature_dict.shape)
self.assertEqual(features.dtype, parsed_feature_dict.dtype)
# Then check that running the tensor yields the original list of points.
with self.cached_session() as sess:
parsed_points = sess.run(parsed_feature_dict)
self.assertAllEqual(self.points, parsed_points)
def test_parse_features(self):
"""Tests the various behaviours of kmeans._parse_features_if_necessary."""
# No-op if a tensor is passed in.
features = constant_op.constant(self.points)
parsed_features = kmeans_lib._parse_features_if_necessary(features, None)
self.assertAllEqual(features, parsed_features)
# All values from a feature dict are transformed into a tensor.
feature_dict = {
'x': [[point[0]] for point in self.points],
'y': [[point[1]] for point in self.points]
}
parsed_feature_dict = kmeans_lib._parse_features_if_necessary(
feature_dict, None)
self._parse_feature_dict_helper(features, parsed_feature_dict)
# Only the feature_columns of a feature dict are transformed into a tensor.
feature_dict_with_extras = {
'foo': 'bar',
'x': [[point[0]] for point in self.points],
'baz': {'fizz': 'buzz'},
'y': [[point[1]] for point in self.points]
}
feature_columns = [fc.numeric_column(key='x'), fc.numeric_column(key='y')]
parsed_feature_dict = kmeans_lib._parse_features_if_necessary(
feature_dict_with_extras, feature_columns)
self._parse_feature_dict_helper(features, parsed_feature_dict)
class KMeansTestMultiStageInit(KMeansTestBase):
def test_random(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.train(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.cluster_centers()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_just_right(self):
points = np.array([[1, 2]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.train(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.cluster_centers()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_too_small(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
with self.assertRaisesOpError(AssertionError):
kmeans.train(
input_fn=self.input_fn(batch_size=4, points=points, randomize=False),
steps=1)
class MiniBatchKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansCosineDistanceTest(KMeansTestBase):
def setUp(self):
self.points = np.array(
[[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2], [0.1, 2.5], [0.2, 2],
[0.1, 3], [0.2, 4]],
dtype=np.float32)
self.num_points = self.points.shape[0]
self.true_centers = np.array(
[
normalize(
np.mean(normalize(self.points)[0:4, :], axis=0,
keepdims=True))[0],
normalize(
np.mean(normalize(self.points)[4:, :], axis=0,
keepdims=True))[0]
],
dtype=np.float32)
self.true_assignments = np.array([0] * 4 + [1] * 4)
self.true_score = len(self.points) - np.tensordot(
normalize(self.points), self.true_centers[self.true_assignments])
self.num_centers = 2
self.kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
def test_fit(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.train(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.cluster_centers())
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
def test_transform(self):
self.kmeans.train(input_fn=self.input_fn(), steps=10)
centers = normalize(self.kmeans.cluster_centers())
true_transform = 1 - cosine_similarity(self.points, centers)
transform = list(
self.kmeans.transform(
input_fn=self.input_fn(batch_size=self.num_points, num_epochs=1)))
self.assertAllClose(transform, true_transform, atol=1e-3)
def test_predict(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.train(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.cluster_centers())
assignments = list(
self.kmeans.predict_cluster_index(
input_fn=self.input_fn(num_epochs=1, batch_size=self.num_points)))
self.assertAllClose(
centers[assignments],
self.true_centers[self.true_assignments],
atol=1e-2)
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
score = self.kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points))
self.assertAllClose(score, self.true_score, atol=1e-2)
def test_predict_kmeans_plus_plus(self):
# Most points are concentrated near one center. KMeans++ is likely to find
# the less populated centers.
points = np.array(
[[2.5, 3.5], [2.5, 3.5], [-2, 3], [-2, 3], [-3, -3], [-3.1, -3.2],
[-2.8, -3.], [-2.9, -3.1], [-3., -3.1], [-3., -3.1], [-3.2, -3.],
[-3., -3.]],
dtype=np.float32)
true_centers = np.array(
[
normalize(
np.mean(normalize(points)[0:2, :], axis=0, keepdims=True))[0],
normalize(
np.mean(normalize(points)[2:4, :], axis=0, keepdims=True))[0],
normalize(np.mean(normalize(points)[4:, :], axis=0,
keepdims=True))[0]
],
dtype=np.float32)
true_assignments = [0] * 2 + [1] * 2 + [2] * 8
true_score = len(points) - np.tensordot(
normalize(points), true_centers[true_assignments])
kmeans = kmeans_lib.KMeansClustering(
3,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
kmeans.train(
input_fn=lambda: (constant_op.constant(points), None), steps=30)
centers = normalize(kmeans.cluster_centers())
self.assertAllClose(
sorted(centers.tolist()), sorted(true_centers.tolist()), atol=1e-2)
def _input_fn():
return (input_lib.limit_epochs(
constant_op.constant(points), num_epochs=1), None)
assignments = list(kmeans.predict_cluster_index(input_fn=_input_fn))
self.assertAllClose(
centers[assignments], true_centers[true_assignments], atol=1e-2)
score = kmeans.score(input_fn=lambda: (constant_op.constant(points), None))
self.assertAllClose(score, true_score, atol=1e-2)
class MiniBatchKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansBenchmark(benchmark.Benchmark):
"""Base class for benchmarks."""
def SetUp(self,
dimension=50,
num_clusters=50,
points_per_cluster=10000,
center_norm=500,
cluster_width=20):
np.random.seed(123456)
self.num_clusters = num_clusters
self.num_points = num_clusters * points_per_cluster
self.centers = make_random_centers(
self.num_clusters, dimension, center_norm=center_norm)
self.points, _, scores = make_random_points(
self.centers, self.num_points, max_offset=cluster_width)
self.score = float(np.sum(scores))
def _report(self, num_iters, start, end, scores):
print(scores)
self.report_benchmark(
iters=num_iters,
wall_time=(end - start) / num_iters,
extras={'true_sum_squared_distances': self.score,
'fit_scores': scores})
def _fit(self, num_iters=10):
pass
def benchmark_01_2dim_5center_500point(self):
self.SetUp(dimension=2, num_clusters=5, points_per_cluster=100)
self._fit()
def benchmark_02_20dim_20center_10kpoint(self):
self.SetUp(dimension=20, num_clusters=20, points_per_cluster=500)
self._fit()
def benchmark_03_100dim_50center_50kpoint(self):
self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000)
self._fit()
def benchmark_03_100dim_50center_50kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=50,
points_per_cluster=1000,
cluster_width=250)
self._fit()
def benchmark_04_100dim_500center_500kpoint(self):
self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000)
self._fit(num_iters=4)
def benchmark_05_100dim_500center_500kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=500,
points_per_cluster=1000,
cluster_width=250)
self._fit(num_iters=4)
class TensorflowKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting tensorflow KMeans: %d' % i)
tf_kmeans = kmeans_lib.KMeansClustering(
self.num_clusters,
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
kmeans_plus_plus_num_retries=int(math.log(self.num_clusters) + 2),
random_seed=i * 42,
relative_tolerance=1e-6,
config=self.config(3))
tf_kmeans.train(
input_fn=lambda: (constant_op.constant(self.points), None), steps=50)
_ = tf_kmeans.cluster_centers()
scores.append(
tf_kmeans.score(
input_fn=lambda: (constant_op.constant(self.points), None)))
self._report(num_iters, start, time.time(), scores)
class SklearnKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting sklearn KMeans: %d' % i)
sklearn_kmeans = SklearnKMeans(
n_clusters=self.num_clusters,
init='k-means++',
max_iter=50,
n_init=1,
tol=1e-4,
random_state=i * 42)
sklearn_kmeans.fit(self.points)
scores.append(sklearn_kmeans.inertia_)
self._report(num_iters, start, time.time(), scores)
class KMeansTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(
capacity=10, dtypes=dtypes.float32, shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(
queue_runner.QueueRunner(queue, [enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
# Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
kmeans = kmeans_lib.KMeansClustering(5)
kmeans.train(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/factorization/python/ops/kmeans_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utils for factorization_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
INPUT_MATRIX = np.array(
[[0.1, 0.0, 0.2, 0.0, 0.4, 0.5, 0.0],
[0.0, 1.1, 0.0, 1.3, 1.4, 0.0, 1.6],
[2.0, 0.0, 0.0, 2.3, 0.0, 2.5, 0.0],
[3.0, 0.0, 3.2, 3.3, 0.0, 3.5, 0.0],
[0.0, 4.1, 0.0, 0.0, 4.4, 0.0, 4.6]]).astype(np.float32)
def remove_empty_rows_columns(np_matrix):
"""Simple util to remove empty rows and columns of a matrix.
Args:
np_matrix: A numpy array.
Returns:
A tuple consisting of:
mat: A numpy matrix obtained by removing empty rows and columns from
np_matrix.
nz_row_ids: A numpy array of the ids of non-empty rows, such that
nz_row_ids[i] is the old row index corresponding to new index i.
nz_col_ids: A numpy array of the ids of non-empty columns, such that
nz_col_ids[j] is the old column index corresponding to new index j.
"""
nz_row_ids = np.where(np.sum(np_matrix, axis=1) != 0)[0]
nz_col_ids = np.where(np.sum(np_matrix, axis=0) != 0)[0]
mat = np_matrix[np.ix_(nz_row_ids, nz_col_ids)]
return mat, nz_row_ids, nz_col_ids
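# Illustrative sketch, not part of the original file: a tiny worked example of
# remove_empty_rows_columns. The helper name below is hypothetical and exists
# only to document the expected outputs.
def _example_remove_empty_rows_columns():
  matrix = np.array([[1., 0., 2.],
                     [0., 0., 0.],   # row 1 is empty
                     [3., 0., 4.]])  # column 1 is empty
  mat, nz_row_ids, nz_col_ids = remove_empty_rows_columns(matrix)
  # mat == [[1., 2.], [3., 4.]], nz_row_ids == [0, 2], nz_col_ids == [0, 2].
  return mat, nz_row_ids, nz_col_ids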
def np_matrix_to_tf_sparse(np_matrix,
row_slices=None,
col_slices=None,
transpose=False,
shuffle=False):
"""Simple util to slice non-zero np matrix elements as tf.SparseTensor."""
indices = np.nonzero(np_matrix)
# Only allow slices of whole rows or whole columns.
assert not (row_slices is not None and col_slices is not None)
if row_slices is not None:
selected_ind = np.concatenate(
[np.where(indices[0] == r)[0] for r in row_slices], 0)
indices = (indices[0][selected_ind], indices[1][selected_ind])
if col_slices is not None:
selected_ind = np.concatenate(
[np.where(indices[1] == c)[0] for c in col_slices], 0)
indices = (indices[0][selected_ind], indices[1][selected_ind])
if shuffle:
shuffled_ind = [x for x in range(len(indices[0]))]
random.shuffle(shuffled_ind)
indices = (indices[0][shuffled_ind], indices[1][shuffled_ind])
ind = (np.concatenate((np.expand_dims(indices[1], 1),
np.expand_dims(indices[0], 1)), 1).astype(np.int64) if
transpose else np.concatenate((np.expand_dims(indices[0], 1),
np.expand_dims(indices[1], 1)),
1).astype(np.int64))
val = np_matrix[indices].astype(np.float32)
shape = (np.array([max(indices[1]) + 1, max(indices[0]) + 1]).astype(np.int64)
if transpose else np.array(
[max(indices[0]) + 1, max(indices[1]) + 1]).astype(np.int64))
return sparse_tensor.SparseTensor(ind, val, shape)
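# Illustrative sketch, not part of the original file: spells out what
# np_matrix_to_tf_sparse returns for a single row slice of INPUT_MATRIX, to
# document the indexing convention (the dense_shape is derived from the
# largest selected index, not from the full matrix). The helper name is
# hypothetical.
def _example_row_slice_as_sparse():
  sp = np_matrix_to_tf_sparse(INPUT_MATRIX, row_slices=[1])
  # Row 1 of INPUT_MATRIX is [0.0, 1.1, 0.0, 1.3, 1.4, 0.0, 1.6], so:
  #   sp.indices     == [[1, 1], [1, 3], [1, 4], [1, 6]]
  #   sp.values      == [1.1, 1.3, 1.4, 1.6]  (as float32)
  #   sp.dense_shape == [2, 7]
  return sp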
def calculate_loss(input_mat, row_factors, col_factors, regularization=None,
w0=1., row_weights=None, col_weights=None):
"""Calculates the loss of a given factorization.
Using a non-distributed method, different from the one implemented in the
WALS model. The weight of an observed entry (i, j) (i.e. such that
input_mat[i, j] is non-zero) is (w0 + row_weights[i] * col_weights[j]).
Args:
input_mat: The input matrix, a SparseTensor of rank 2.
row_factors: The row factors, a dense Tensor of rank 2.
col_factors: The col factors, a dense Tensor of rank 2.
regularization: the regularization coefficient, a scalar.
w0: the weight of unobserved entries. A scalar.
row_weights: A dense tensor of rank 1.
col_weights: A dense tensor of rank 1.
Returns:
The total loss.
"""
wr = (array_ops.expand_dims(row_weights, 1) if row_weights is not None
else constant_op.constant(1.))
wc = (array_ops.expand_dims(col_weights, 0) if col_weights is not None
else constant_op.constant(1.))
reg = (regularization if regularization is not None
else constant_op.constant(0.))
row_indices, col_indices = array_ops.split(input_mat.indices,
axis=1,
num_or_size_splits=2)
gathered_row_factors = array_ops.gather(row_factors, row_indices)
gathered_col_factors = array_ops.gather(col_factors, col_indices)
sp_approx_vals = array_ops.squeeze(math_ops.matmul(
gathered_row_factors, gathered_col_factors, adjoint_b=True))
sp_approx = sparse_tensor.SparseTensor(
indices=input_mat.indices,
values=sp_approx_vals,
dense_shape=input_mat.dense_shape)
sp_approx_sq = math_ops.square(sp_approx)
row_norm = math_ops.reduce_sum(math_ops.square(row_factors))
col_norm = math_ops.reduce_sum(math_ops.square(col_factors))
row_col_norm = math_ops.reduce_sum(math_ops.square(math_ops.matmul(
row_factors, col_factors, transpose_b=True)))
resid = sparse_ops.sparse_add(input_mat, sp_approx * (-1))
resid_sq = math_ops.square(resid)
loss = w0 * (
sparse_ops.sparse_reduce_sum(resid_sq) -
sparse_ops.sparse_reduce_sum(sp_approx_sq)
)
loss += (sparse_ops.sparse_reduce_sum(wr * (resid_sq * wc)) +
w0 * row_col_norm + reg * (row_norm + col_norm))
return loss.eval()
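# Illustrative sketch, not part of the original file: the same weighted loss
# as calculate_loss above, written in plain numpy for a dense 0/1 observation
# mask instead of a SparseTensor. Intended only as a reference formulation of
# the objective; the function name and the `mask` argument are hypothetical.
def dense_wals_loss(input_mat, mask, row_factors, col_factors,
                    regularization=0., w0=1., row_weights=None,
                    col_weights=None):
  """loss = sum_observed (w0 + row_weights[i] * col_weights[j]) * resid_ij^2
            + w0 * sum_unobserved approx_ij^2
            + regularization * (||row_factors||^2 + ||col_factors||^2)."""
  approx = np.dot(row_factors, np.transpose(col_factors))
  resid = input_mat - approx
  rw = np.ones(input_mat.shape[0]) if row_weights is None else row_weights
  cw = np.ones(input_mat.shape[1]) if col_weights is None else col_weights
  observed_weight = w0 + np.outer(rw, cw)
  loss = np.sum(mask * observed_weight * np.square(resid))
  loss += w0 * np.sum((1 - mask) * np.square(approx))
  loss += regularization * (
      np.sum(np.square(row_factors)) + np.sum(np.square(col_factors)))
  return loss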
|
tensorflow-master
|
tensorflow/contrib/factorization/python/ops/factorization_ops_test_utils.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Clustering Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_clustering_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.embedding_ops import embedding_lookup
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_clustering_ops import *
# pylint: enable=wildcard-import
# Euclidean distance between vectors U and V is defined as \\(||U - V||_F\\),
# i.e. the square root of the sum of the squared element-wise differences.
SQUARED_EUCLIDEAN_DISTANCE = 'squared_euclidean'
# Cosine distance between vectors U and V is defined as
# \\(1 - (U \dot V) / (||U||_F ||V||_F)\\)
COSINE_DISTANCE = 'cosine'
RANDOM_INIT = 'random'
KMEANS_PLUS_PLUS_INIT = 'kmeans_plus_plus'
KMC2_INIT = 'kmc2'
# The name of the variable holding the cluster centers. Used by the Estimator.
CLUSTERS_VAR_NAME = 'clusters'
class KMeans(object):
"""Creates the graph for k-means clustering."""
def __init__(self,
inputs,
num_clusters,
initial_clusters=RANDOM_INIT,
distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=False,
mini_batch_steps_per_iteration=1,
random_seed=0,
kmeans_plus_plus_num_retries=2,
kmc2_chain_length=200):
"""Creates an object for generating KMeans clustering graph.
This class implements the following variants of K-means algorithm:
If use_mini_batch is False, it runs standard full batch K-means. Each step
runs a single iteration of K-Means. This step can be run sharded across
multiple workers by passing a list of sharded inputs to this class. Note
however that a single step needs to process the full input at once.
If use_mini_batch is True, it runs a generalization of the mini-batch
K-means algorithm. It runs multiple iterations, where each iteration is
composed of mini_batch_steps_per_iteration steps. Two copies of cluster
centers are maintained: one that is updated at the end of each iteration,
and one that is updated every step. The first copy is used to compute
cluster allocations for each step, and for inference, while the second copy
is the one updated each step using the mini-batch update rule. After each
iteration is complete, this second copy is copied back to the first copy.
Note that for use_mini_batch=True, when mini_batch_steps_per_iteration=1,
the algorithm reduces to the standard mini-batch algorithm. Also by setting
mini_batch_steps_per_iteration = num_inputs / batch_size, the algorithm
becomes an asynchronous version of the full-batch algorithm. Note however
that there is no guarantee by this implementation that each input is seen
exactly once per iteration. Also, different updates are applied
asynchronously without locking. So this asynchronous version may not behave
exactly like a full-batch version.
Args:
inputs: An input tensor or list of input tensors. It is assumed that the
data points have been previously randomly permuted.
num_clusters: An integer tensor specifying the number of clusters. This
argument is ignored if initial_clusters is a tensor or numpy array.
initial_clusters: Specifies the clusters used during initialization. One
of the following:
- a tensor or numpy array with the initial cluster centers.
- a function f(inputs, k) that returns up to k centers from `inputs`.
- "random": Choose centers randomly from `inputs`.
- "kmeans_plus_plus": Use kmeans++ to choose centers from `inputs`.
- "kmc2": Use the fast k-MC2 algorithm to choose centers from `inputs`.
In the last three cases, one batch of `inputs` may not yield
`num_clusters` centers, in which case initialization will require
multiple batches until enough centers are chosen. In the case of
"random" or "kmeans_plus_plus", if the input size is <= `num_clusters`
then the entire batch is chosen to be cluster centers.
distance_metric: Distance metric used for clustering. Supported options:
"squared_euclidean", "cosine".
use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume
full batch.
mini_batch_steps_per_iteration: Number of steps after which the updated
cluster centers are synced back to a master copy.
random_seed: Seed for the PRNG used to initialize the cluster centers.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample O(log(num_to_sample)) additional points.
kmc2_chain_length: Determines how many candidate points are used by the
k-MC2 algorithm to produce one new cluster center. If a (mini-)batch
contains fewer points, one new cluster center is generated from the
(mini-)batch.
Raises:
ValueError: An invalid argument was passed to initial_clusters or
distance_metric.
"""
if isinstance(initial_clusters, str) and initial_clusters not in [
RANDOM_INIT, KMEANS_PLUS_PLUS_INIT, KMC2_INIT
]:
raise ValueError(
"Unsupported initialization algorithm '%s'" % initial_clusters)
if distance_metric not in [SQUARED_EUCLIDEAN_DISTANCE, COSINE_DISTANCE]:
raise ValueError("Unsupported distance metric '%s'" % distance_metric)
self._inputs = inputs if isinstance(inputs, list) else [inputs]
self._num_clusters = num_clusters
self._initial_clusters = initial_clusters
self._distance_metric = distance_metric
self._use_mini_batch = use_mini_batch
self._mini_batch_steps_per_iteration = int(mini_batch_steps_per_iteration)
self._random_seed = random_seed
self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
self._kmc2_chain_length = kmc2_chain_length
@classmethod
def _distance_graph(cls, inputs, clusters, distance_metric):
"""Computes distance between each input and each cluster center.
Args:
inputs: list of input Tensors.
clusters: cluster Tensor.
distance_metric: distance metric used for clustering
Returns:
list of Tensors, where each element corresponds to each element in inputs.
The value is the distance of each row to all the cluster centers.
Currently only Euclidean distance and cosine distance are supported.
"""
assert isinstance(inputs, list)
if distance_metric == SQUARED_EUCLIDEAN_DISTANCE:
return cls._compute_euclidean_distance(inputs, clusters)
elif distance_metric == COSINE_DISTANCE:
return cls._compute_cosine_distance(
inputs, clusters, inputs_normalized=True)
else:
assert False, str(distance_metric)
@classmethod
def _compute_euclidean_distance(cls, inputs, clusters):
"""Computes Euclidean distance between each input and each cluster center.
Args:
inputs: list of input Tensors.
clusters: cluster Tensor.
Returns:
list of Tensors, where each element corresponds to each element in inputs.
The value is the distance of each row to all the cluster centers.
"""
output = []
for inp in inputs:
with ops.colocate_with(inp, ignore_existing=True):
# Computes Euclidean distance. Note the first and third terms are
# broadcast additions.
squared_distance = (
math_ops.reduce_sum(math_ops.square(inp), 1, keepdims=True) -
2 * math_ops.matmul(inp, clusters, transpose_b=True) +
array_ops.transpose(
math_ops.reduce_sum(
math_ops.square(clusters), 1, keepdims=True)))
output.append(squared_distance)
return output
@classmethod
def _compute_cosine_distance(cls, inputs, clusters, inputs_normalized=True):
"""Computes cosine distance between each input and each cluster center.
Args:
inputs: list of input Tensor.
clusters: cluster Tensor
inputs_normalized: if True, it assumes that inp and clusters are already
L2-normalized, so one minus the dot product gives the cosine distance.
Else it L2-normalizes the inputs first.
Returns:
list of Tensors, where each element corresponds to each element in inp.
The value is the distance of each row to all the cluster centers.
"""
output = []
if not inputs_normalized:
with ops.colocate_with(clusters, ignore_existing=True):
clusters = nn_impl.l2_normalize(clusters, dim=1)
for inp in inputs:
with ops.colocate_with(inp, ignore_existing=True):
if not inputs_normalized:
inp = nn_impl.l2_normalize(inp, dim=1)
output.append(1 - math_ops.matmul(inp, clusters, transpose_b=True))
return output
def _infer_graph(self, inputs, clusters):
"""Maps input to closest cluster and the score.
Args:
inputs: list of input Tensors.
clusters: Tensor of cluster centers.
Returns:
List of tuple, where each value in tuple corresponds to a value in inp.
The tuple has following three elements:
all_scores: distance of each input to each cluster center.
score: distance of each input to closest cluster center.
cluster_idx: index of cluster center closest to the corresponding input.
"""
assert isinstance(inputs, list)
# Pairwise distances are used only by transform(). In all other cases, this
# sub-graph is not evaluated.
scores = self._distance_graph(inputs, clusters, self._distance_metric)
output = []
if (self._distance_metric == COSINE_DISTANCE and
not self._clusters_l2_normalized()):
# For normalized vectors x and y, the squared Euclidean distance is
# \\(||x - y||^2 = 2 - 2 (x \dot y) = 2 * cosine_distance(x, y)\\), i.e. the
# cosine distance is half the squared Euclidean distance. We are using this
# fact and reusing the nearest_neighbors op (hence the 0.5 factor below).
# TODO(ands): Support COSINE distance in nearest_neighbors and remove
# this.
with ops.colocate_with(clusters, ignore_existing=True):
clusters = nn_impl.l2_normalize(clusters, dim=1)
for inp, score in zip(inputs, scores):
with ops.colocate_with(inp, ignore_existing=True):
(indices, distances) = gen_clustering_ops.nearest_neighbors(
inp, clusters, 1)
if self._distance_metric == COSINE_DISTANCE:
distances *= 0.5
output.append((score, array_ops.squeeze(distances, [-1]),
array_ops.squeeze(indices, [-1])))
return zip(*output)
def _clusters_l2_normalized(self):
"""Returns True if clusters centers are kept normalized."""
return (self._distance_metric == COSINE_DISTANCE and
(not self._use_mini_batch or
self._mini_batch_steps_per_iteration > 1))
def _create_variables(self, num_clusters):
"""Creates variables.
Args:
num_clusters: an integer Tensor providing the number of clusters.
Returns:
Tuple with following elements:
- cluster_centers: a Tensor for storing cluster centers
- cluster_centers_initialized: bool Variable indicating whether clusters
are initialized.
- cluster_counts: a Tensor for storing counts of points assigned to this
cluster. This is used by mini-batch training.
- cluster_centers_updated: Tensor representing copy of cluster centers
that are updated every step.
- update_in_steps: numbers of steps left before we sync
cluster_centers_updated back to cluster_centers.
"""
init_value = array_ops.constant([], dtype=dtypes.float32)
cluster_centers = variable_scope.variable(
init_value, name=CLUSTERS_VAR_NAME, validate_shape=False)
cluster_centers_initialized = variable_scope.variable(
False, dtype=dtypes.bool, name='initialized')
if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:
# Copy of cluster centers actively updated each step according to
# mini-batch update rule.
cluster_centers_updated = variable_scope.variable(
init_value, name='clusters_updated', validate_shape=False)
# How many steps till we copy the updated clusters to cluster_centers.
update_in_steps = variable_scope.variable(
self._mini_batch_steps_per_iteration,
dtype=dtypes.int64,
name='update_in_steps')
# Count of points assigned to cluster_centers_updated.
cluster_counts = variable_scope.variable(
array_ops.zeros([num_clusters], dtype=dtypes.int64))
else:
cluster_centers_updated = cluster_centers
update_in_steps = None
cluster_counts = (
variable_scope.variable(
array_ops.ones([num_clusters], dtype=dtypes.int64))
if self._use_mini_batch else None)
return (cluster_centers, cluster_centers_initialized, cluster_counts,
cluster_centers_updated, update_in_steps)
@classmethod
def _l2_normalize_data(cls, inputs):
"""Normalized the input data."""
output = []
for inp in inputs:
with ops.colocate_with(inp, ignore_existing=True):
output.append(nn_impl.l2_normalize(inp, dim=1))
return output
def training_graph(self):
"""Generate a training graph for kmeans algorithm.
This returns, among other things, an op that chooses initial centers
(init_op), a boolean variable that is set to True when the initial centers
are chosen (cluster_centers_initialized), and an op to perform either an
entire Lloyd iteration or a mini-batch of a Lloyd iteration (training_op).
The caller should use these components as follows. A single worker should
execute init_op multiple times until cluster_centers_initialized becomes
True. Then multiple workers may execute training_op any number of times.
Returns:
A tuple consisting of:
all_scores: A matrix (or list of matrices) of dimensions (num_input,
num_clusters) where the value is the distance of an input vector and a
cluster center.
cluster_idx: A vector (or list of vectors). Each element in the vector
corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
scores: Similar to cluster_idx but specifies the distance to the
assigned cluster instead.
cluster_centers_initialized: scalar indicating whether clusters have been
initialized.
init_op: an op to initialize the clusters.
training_op: an op that runs an iteration of training.
"""
# Implementation of kmeans.
if (isinstance(self._initial_clusters, str) or
callable(self._initial_clusters)):
initial_clusters = self._initial_clusters
num_clusters = ops.convert_to_tensor(self._num_clusters)
else:
initial_clusters = ops.convert_to_tensor(self._initial_clusters)
num_clusters = array_ops.shape(initial_clusters)[0]
inputs = self._inputs
(cluster_centers_var, cluster_centers_initialized, total_counts,
cluster_centers_updated,
update_in_steps) = self._create_variables(num_clusters)
init_op = _InitializeClustersOpFactory(
self._inputs, num_clusters, initial_clusters, self._distance_metric,
self._random_seed, self._kmeans_plus_plus_num_retries,
self._kmc2_chain_length, cluster_centers_var, cluster_centers_updated,
cluster_centers_initialized).op()
cluster_centers = cluster_centers_var
if self._distance_metric == COSINE_DISTANCE:
inputs = self._l2_normalize_data(inputs)
if not self._clusters_l2_normalized():
cluster_centers = nn_impl.l2_normalize(cluster_centers, dim=1)
all_scores, scores, cluster_idx = self._infer_graph(inputs, cluster_centers)
if self._use_mini_batch:
sync_updates_op = self._mini_batch_sync_updates_op(
update_in_steps, cluster_centers_var, cluster_centers_updated,
total_counts)
assert sync_updates_op is not None
with ops.control_dependencies([sync_updates_op]):
training_op = self._mini_batch_training_op(
inputs, cluster_idx, cluster_centers_updated, total_counts)
else:
assert cluster_centers == cluster_centers_var
training_op = self._full_batch_training_op(
inputs, num_clusters, cluster_idx, cluster_centers_var)
return (all_scores, cluster_idx, scores, cluster_centers_initialized,
init_op, training_op)
def _mini_batch_sync_updates_op(self, update_in_steps, cluster_centers_var,
cluster_centers_updated, total_counts):
if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:
assert update_in_steps is not None
with ops.colocate_with(update_in_steps, ignore_existing=True):
def _f():
# Note that there is a race condition here, so the updates are best
# effort. We reset update_in_steps first so that other workers
# don't duplicate the updates. Also we update cluster_center_vars
# before resetting total_counts to avoid large updates to
# cluster_centers_updated based on partially updated
# cluster_center_vars.
with ops.control_dependencies([
state_ops.assign(update_in_steps,
self._mini_batch_steps_per_iteration - 1)
]):
with ops.colocate_with(
cluster_centers_updated, ignore_existing=True):
if self._distance_metric == COSINE_DISTANCE:
cluster_centers = nn_impl.l2_normalize(
cluster_centers_updated, dim=1)
else:
cluster_centers = cluster_centers_updated
with ops.colocate_with(cluster_centers_var, ignore_existing=True):
with ops.control_dependencies(
[state_ops.assign(cluster_centers_var, cluster_centers)]):
with ops.colocate_with(None, ignore_existing=True):
with ops.control_dependencies([
state_ops.assign(total_counts,
array_ops.zeros_like(total_counts))
]):
return array_ops.identity(update_in_steps)
return control_flow_ops.cond(
update_in_steps <= 0, _f,
lambda: state_ops.assign_sub(update_in_steps, 1))
else:
return control_flow_ops.no_op()
def _mini_batch_training_op(self, inputs, cluster_idx_list, cluster_centers,
total_counts):
"""Creates an op for training for mini batch case.
Args:
inputs: list of input Tensors.
cluster_idx_list: A vector (or list of vectors). Each element in the
vector corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
cluster_centers: Tensor Ref of cluster centers.
total_counts: Tensor Ref of cluster counts.
Returns:
An op for doing an update of mini-batch k-means.
"""
update_ops = []
for inp, cluster_idx in zip(inputs, cluster_idx_list):
with ops.colocate_with(inp, ignore_existing=True):
assert total_counts is not None
cluster_idx = array_ops.reshape(cluster_idx, [-1])
# Dedupe the unique ids of cluster_centers being updated so that updates
# can be locally aggregated.
unique_ids, unique_idx = array_ops.unique(cluster_idx)
num_unique_cluster_idx = array_ops.size(unique_ids)
# Fetch the old values of counts and cluster_centers.
with ops.colocate_with(total_counts, ignore_existing=True):
old_counts = array_ops.gather(total_counts, unique_ids)
# TODO(agarwal): This colocation seems to run into problems. Fix it.
with ops.colocate_with(cluster_centers, ignore_existing=True):
old_cluster_centers = array_ops.gather(cluster_centers, unique_ids)
# Locally aggregate the increment to counts.
count_updates = math_ops.unsorted_segment_sum(
array_ops.ones_like(unique_idx, dtype=total_counts.dtype),
unique_idx, num_unique_cluster_idx)
# Locally compute the sum of inputs mapped to each id.
# For a cluster with old cluster value x, old count n, and with data
# d_1,...d_k newly assigned to it, we recompute the new value as
# \\(x += (sum_i(d_i) - k * x) / (n + k)\\).
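# For example, with old center x = 2.0 (n = 3 points) and newly assigned
# points d = {4.0, 6.0} (k = 2): x += ((4 + 6) - 2 * 2) / (3 + 2) = 1.2, so
# x becomes 3.2, which is the running mean (3 * 2 + 4 + 6) / 5.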
# Compute \\(sum_i(d_i)\\), see comment above.
cluster_center_updates = math_ops.unsorted_segment_sum(
inp, unique_idx, num_unique_cluster_idx)
# Shape to enable broadcasting count_updates and learning_rate to inp.
# It extends the shape with 1's to match the rank of inp.
broadcast_shape = array_ops.concat([
array_ops.reshape(num_unique_cluster_idx, [1]),
array_ops.ones(
array_ops.reshape(array_ops.rank(inp) - 1, [1]),
dtype=dtypes.int32)
], 0)
# Subtract k * x, see comment above.
cluster_center_updates -= math_ops.cast(
array_ops.reshape(count_updates, broadcast_shape),
inp.dtype) * old_cluster_centers
learning_rate = math_ops.reciprocal(
math_ops.cast(old_counts + count_updates, inp.dtype))
learning_rate = array_ops.reshape(learning_rate, broadcast_shape)
# scale by 1 / (n + k), see comment above.
cluster_center_updates *= learning_rate
# Apply the updates.
update_counts = state_ops.scatter_add(total_counts, unique_ids,
count_updates)
update_cluster_centers = state_ops.scatter_add(
cluster_centers, unique_ids, cluster_center_updates)
update_ops.extend([update_counts, update_cluster_centers])
return control_flow_ops.group(*update_ops)
def _full_batch_training_op(self, inputs, num_clusters, cluster_idx_list,
cluster_centers):
"""Creates an op for training for full batch case.
Args:
inputs: list of input Tensors.
num_clusters: an integer Tensor providing the number of clusters.
cluster_idx_list: A vector (or list of vectors). Each element in the
vector corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
cluster_centers: Tensor Ref of cluster centers.
Returns:
An op for doing an update of full-batch k-means.
"""
cluster_sums = []
cluster_counts = []
epsilon = constant_op.constant(1e-6, dtype=inputs[0].dtype)
for inp, cluster_idx in zip(inputs, cluster_idx_list):
with ops.colocate_with(inp, ignore_existing=True):
cluster_sums.append(
math_ops.unsorted_segment_sum(inp, cluster_idx, num_clusters))
cluster_counts.append(
math_ops.unsorted_segment_sum(
array_ops.reshape(
array_ops.ones(
array_ops.reshape(array_ops.shape(inp)[0], [-1])),
[-1, 1]), cluster_idx, num_clusters))
with ops.colocate_with(cluster_centers, ignore_existing=True):
new_clusters_centers = math_ops.add_n(cluster_sums) / (
math_ops.cast(math_ops.add_n(cluster_counts), cluster_sums[0].dtype) +
epsilon)
if self._clusters_l2_normalized():
new_clusters_centers = nn_impl.l2_normalize(new_clusters_centers, dim=1)
return state_ops.assign(cluster_centers, new_clusters_centers)
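# Illustrative sketch, not part of the original file: the caller protocol
# described in KMeans.training_graph() -- run init_op until the centers are
# initialized, then run training_op repeatedly. The Session/variables imports
# and the helper name are example-only and assume the TF 1.x graph-mode API.
def _example_full_batch_kmeans(points, num_clusters=3, num_steps=10):
  from tensorflow.python.client import session as session_lib
  from tensorflow.python.ops import variables
  graph = ops.Graph()
  with graph.as_default():
    inputs = constant_op.constant(points, dtype=dtypes.float32)
    kmeans = KMeans(inputs, num_clusters, initial_clusters=RANDOM_INIT)
    (_, cluster_idx, _, initialized, init_op,
     training_op) = kmeans.training_graph()
    init_vars = variables.global_variables_initializer()
  with session_lib.Session(graph=graph) as sess:
    sess.run(init_vars)
    while not sess.run(initialized):
      sess.run(init_op)
    for _ in range(num_steps):
      sess.run(training_op)
    return sess.run(cluster_idx)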
class _InitializeClustersOpFactory(object):
"""Internal class to create the op to initialize the clusters.
The op performs this algorithm (see constructor args):
num_remaining = num_clusters - length(cluster_centers)
if num_remaining == 0:
assert that cluster_centers_initialized is true
else:
assert that num_remaining > 0
new_centers = choose up to num_remaining initial centers
l2-normalize new_centers if using cosine distance
all_centers = concat(cluster_centers, new_centers)
cluster_centers := all_centers
if there is a cluster_centers_updated variable:
cluster_centers_updated := cluster_centers
num_now_remaining = num_clusters - length(cluster_centers)
if num_now_remaining == 0:
cluster_centers_initialized := true
"""
# TODO(ccolby): Refactor this class so that kmc2 isn't so much a special case.
def __init__(self, inputs, num_clusters, initial_clusters, distance_metric,
random_seed, kmeans_plus_plus_num_retries, kmc2_chain_length,
cluster_centers, cluster_centers_updated,
cluster_centers_initialized):
"""Creates an op factory.
Args:
inputs: See KMeans constructor.
num_clusters: An integer Tensor providing the number of clusters.
initial_clusters: See KMeans constructor.
distance_metric: See KMeans constructor.
random_seed: See KMeans constructor.
kmeans_plus_plus_num_retries: See KMeans constructor.
kmc2_chain_length: See KMeans constructor.
cluster_centers: The TF variable holding the initial centers. It may
already contain some centers when the op is executed.
cluster_centers_updated: A second TF variable to hold a copy of the
initial centers, used for full-batch mode. In mini-batch mode,
cluster_centers_updated is the same variable as cluster_centers.
cluster_centers_initialized: A boolean TF variable that will be set
to true when all the initial centers have been chosen.
"""
# All of these instance variables are constants.
self._inputs = inputs
self._num_clusters = num_clusters
self._initial_clusters = initial_clusters
self._distance_metric = distance_metric
self._random_seed = random_seed
self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
self._kmc2_chain_length = kmc2_chain_length
self._cluster_centers = cluster_centers
self._cluster_centers_updated = cluster_centers_updated
self._cluster_centers_initialized = cluster_centers_initialized
self._num_selected = array_ops.shape(self._cluster_centers)[0]
self._num_remaining = self._num_clusters - self._num_selected
self._num_data = math_ops.add_n(
[array_ops.shape(i)[0] for i in self._inputs])
def _random(self):
indices = random_ops.random_uniform(
array_ops.reshape(self._num_remaining, [-1]),
minval=0,
maxval=math_ops.cast(self._num_data, dtypes.int64),
seed=self._random_seed,
dtype=dtypes.int64)
return embedding_lookup(self._inputs, indices, partition_strategy='div')
def _kmeans_plus_plus(self):
# Points from only the first shard are used for initializing centers.
# TODO(ands): Use all points.
inp = self._inputs[0]
if self._distance_metric == COSINE_DISTANCE:
inp = nn_impl.l2_normalize(inp, dim=1)
return gen_clustering_ops.kmeans_plus_plus_initialization(
inp,
math_ops.cast(self._num_remaining, dtypes.int64),
self._random_seed,
self._kmeans_plus_plus_num_retries)
def _kmc2_multiple_centers(self):
"""Adds new initial cluster centers using the k-MC2 algorithm.
In each call to the op, the provided batch is split into subsets based on
the specified `kmc2_chain_length`. On each subset, a single Markov chain of
the k-MC2 algorithm is used to add *one* new cluster center. If there
are fewer than `kmc2_chain_length` points in the subset, a single center is
added using one Markov chain on the full input. It is assumed that the
provided batch has previously been randomly permuted. Otherwise, k-MC2 may
return suboptimal centers.
Returns:
An op that adds new cluster centers.
"""
# The op only operates on the first shard of data.
first_shard = self._inputs[0]
# Number of points in the input that can be used.
batch_size = array_ops.shape(first_shard)[0]
# Maximum number of subsets such that the size of each subset is at least
# `kmc2_chain_length`. Final subsets may be larger.
max_to_sample = math_ops.cast(
batch_size / self._kmc2_chain_length, dtype=dtypes.int32)
# We sample at least one new center and at most all remaining centers.
num_to_sample = math_ops.maximum(
math_ops.minimum(self._num_remaining, max_to_sample), 1)
def _cond(i, _):
"""Stopping condition for the while loop."""
return math_ops.less(i, num_to_sample)
def _body(i, _):
"""Body that adds a single new center based on a subset."""
def _sample_random():
"""Returns a random point as a cluster center."""
# By assumption the batch is reshuffled and _sample_random is always
# called for i=0. Hence, we simply return the first point.
new_center = array_ops.reshape(first_shard[0], [1, -1])
if self._distance_metric == COSINE_DISTANCE:
new_center = nn_impl.l2_normalize(new_center, dim=1)
return new_center
def _sample_kmc2_chain():
"""Returns previous centers as well as a new center sampled using k-MC2.
"""
# Extract the subset from the underlying batch.
start = i * self._kmc2_chain_length
end = start + self._kmc2_chain_length
subset = first_shard[start:end]
# Compute the distances from points in the subset to previous centers.
_, distances = gen_clustering_ops.nearest_neighbors(
subset, self._cluster_centers, 1)
# Sample index of new center using k-MC2 Markov chain.
new_center_index = gen_clustering_ops.kmc2_chain_initialization(
array_ops.squeeze(distances), self._random_seed)
# Extract actual new center.
newly_sampled_center = array_ops.reshape(subset[new_center_index],
[1, -1])
# Return concatenation with previously sampled centers.
if self._distance_metric == COSINE_DISTANCE:
newly_sampled_center = nn_impl.l2_normalize(
newly_sampled_center, dim=1)
return array_ops.concat([self._cluster_centers, newly_sampled_center],
0)
# Obtain a random point if there are no previously sampled centers.
# Otherwise, construct a k-MC2 Markov chain.
new_centers = control_flow_ops.cond(
math_ops.equal(self._num_selected, 0), _sample_random,
_sample_kmc2_chain)
# Assign new cluster centers to underlying variable.
assigned_centers = state_ops.assign(
self._cluster_centers, new_centers, validate_shape=False)
if self._cluster_centers_updated is not self._cluster_centers:
assigned_centers = state_ops.assign(
self._cluster_centers_updated,
assigned_centers,
validate_shape=False)
return i + 1, self._num_clusters - array_ops.shape(assigned_centers)[0]
# Add num_to_sample new data points.
_, num_remaining = control_flow_ops.while_loop(_cond, _body, [0, 0])
return num_remaining
def _greedy_batch_sampler(self, sampler):
# If the input dataset size is smaller than the number of centers
# remaining, choose the entire input dataset as centers. This can happen
# with mini-batch. Otherwise, sample the batch according to the provided
# sampler.
return control_flow_ops.cond(self._num_data <= self._num_remaining,
lambda: array_ops.concat(self._inputs, 0),
sampler)
def _single_batch_sampler(self, sampler):
# Enforce that there are at least as many data points as centers
# remaining. This gives the provided sampler the chance to select all
# remaining centers from a single batch.
with ops.control_dependencies(
[check_ops.assert_greater_equal(self._num_data, self._num_remaining)]):
return sampler()
def _choose_initial_centers(self):
if isinstance(self._initial_clusters, str):
if self._initial_clusters == RANDOM_INIT:
return self._greedy_batch_sampler(self._random)
else: # self._initial_clusters == KMEANS_PLUS_PLUS_INIT
return self._single_batch_sampler(self._kmeans_plus_plus)
elif callable(self._initial_clusters):
return self._initial_clusters(self._inputs, self._num_remaining)
else:
with ops.control_dependencies([
check_ops.assert_equal(self._num_remaining,
array_ops.shape(self._initial_clusters)[0])
]):
return self._initial_clusters
def _add_new_centers(self):
"""Adds some centers and returns the number of centers remaining."""
new_centers = self._choose_initial_centers()
if self._distance_metric == COSINE_DISTANCE:
new_centers = nn_impl.l2_normalize(new_centers, dim=1)
# If cluster_centers is empty, it doesn't have the right shape for concat.
all_centers = control_flow_ops.cond(
math_ops.equal(self._num_selected, 0), lambda: new_centers,
lambda: array_ops.concat([self._cluster_centers, new_centers], 0))
# TODO(ccolby): De-dupe all_centers?
a = state_ops.assign(
self._cluster_centers, all_centers, validate_shape=False)
if self._cluster_centers_updated is not self._cluster_centers:
a = state_ops.assign(
self._cluster_centers_updated, a, validate_shape=False)
return self._num_clusters - array_ops.shape(a)[0]
def _initialize(self):
with ops.control_dependencies([
check_ops.assert_positive(self._num_remaining),
]):
if self._initial_clusters == KMC2_INIT:
num_now_remaining = self._kmc2_multiple_centers()
else:
num_now_remaining = self._add_new_centers()
return control_flow_ops.cond(
math_ops.equal(num_now_remaining, 0),
lambda: state_ops.assign(self._cluster_centers_initialized, True),
control_flow_ops.no_op)
def op(self):
"""Returns the cluster initializer op."""
return control_flow_ops.cond(
math_ops.equal(self._num_remaining, 0),
lambda: check_ops.assert_equal(self._cluster_centers_initialized, True),
self._initialize)
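# Illustrative sketch (not part of the original file): the mini-batch update
# rule documented in _mini_batch_training_op above,
# x += (sum_i(d_i) - k * x) / (n + k), is just the incremental form of the
# running mean over every point ever assigned to a center. The helper below
# (a hypothetical name, never called by the library) checks this with NumPy.
def _example_mini_batch_center_update():
  """Verifies the incremental-mean form of the mini-batch center update."""
  import numpy as np
  old_points = np.random.rand(5, 3)   # n = 5 points already assigned.
  new_points = np.random.rand(2, 3)   # k = 2 newly assigned points.
  x = old_points.mean(axis=0)         # Current center value.
  n, k = len(old_points), len(new_points)
  x_updated = x + (new_points.sum(axis=0) - k * x) / (n + k)
  expected = np.vstack([old_points, new_points]).mean(axis=0)
  assert np.allclose(x_updated, expected)
  return x_updated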
|
tensorflow-master
|
tensorflow/contrib/factorization/python/ops/clustering_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gaussian mixture models Operations."""
# TODO(xavigonzalvo): Factor out covariance matrix operations to make
# code reusable for different types (e.g. diag).
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.embedding_ops import embedding_lookup
# Machine epsilon.
MEPS = np.finfo(float).eps
FULL_COVARIANCE = 'full'
DIAG_COVARIANCE = 'diag'
def _covariance(x, diag):
"""Defines the covariance operation of a matrix.
Args:
x: a matrix Tensor. Dimension 0 should contain the number of examples.
diag: if True, it computes the diagonal covariance.
Returns:
A Tensor representing the covariance of x. In the diagonal case, only
the diagonal of the covariance matrix is returned.
"""
num_points = math_ops.cast(array_ops.shape(x)[0], dtypes.float32)
x -= math_ops.reduce_mean(x, 0, keepdims=True)
if diag:
cov = math_ops.reduce_sum(
math_ops.square(x), 0, keepdims=True) / (num_points - 1)
else:
cov = math_ops.matmul(x, x, transpose_a=True) / (num_points - 1)
return cov
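# Illustrative sketch (not part of the original file): a NumPy reference for
# the sample covariance computed by _covariance above, covering both the full
# and the diagonal case. The helper name is hypothetical and unused elsewhere.
def _example_covariance_numpy(x, diag=False):
  """Returns the (n - 1)-normalized sample covariance of a 2-D array."""
  x = np.asarray(x, dtype=np.float64)
  num_points = x.shape[0]
  centered = x - x.mean(axis=0, keepdims=True)
  if diag:
    # Row vector of per-dimension variances, matching the diag branch above.
    return (centered ** 2).sum(axis=0, keepdims=True) / (num_points - 1)
  return centered.T.dot(centered) / (num_points - 1)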
def _init_clusters_random(data, num_clusters, random_seed):
"""Does random initialization of clusters.
Args:
data: a list of Tensors with a matrix of data, each row is an example.
num_clusters: an integer with the number of clusters.
random_seed: Seed for PRNG used to initialize seeds.
Returns:
A Tensor with num_clusters random rows of data.
"""
assert isinstance(data, list)
num_data = math_ops.add_n([array_ops.shape(inp)[0] for inp in data])
with ops.control_dependencies(
[check_ops.assert_less_equal(num_clusters, num_data)]):
indices = random_ops.random_uniform(
[num_clusters],
minval=0,
maxval=math_ops.cast(num_data, dtypes.int64),
seed=random_seed,
dtype=dtypes.int64)
indices %= math_ops.cast(num_data, dtypes.int64)
clusters_init = embedding_lookup(data, indices, partition_strategy='div')
return clusters_init
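# Illustrative sketch (not part of the original file): a NumPy analogue of
# _init_clusters_random above for a single, unsharded data array; like the op
# version, it samples uniformly with replacement. The helper name is
# hypothetical.
def _example_init_clusters_random_numpy(data, num_clusters, random_seed=0):
  """Returns `num_clusters` uniformly sampled rows of `data`."""
  rng = np.random.RandomState(random_seed)
  indices = rng.randint(0, data.shape[0], size=num_clusters)
  return data[indices]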
class GmmAlgorithm(object):
"""Tensorflow Gaussian mixture model clustering class."""
CLUSTERS_WEIGHT = 'alphas'
CLUSTERS_VARIABLE = 'clusters'
CLUSTERS_COVS_VARIABLE = 'clusters_covs'
def __init__(self,
data,
num_classes,
initial_means=None,
params='wmc',
covariance_type=FULL_COVARIANCE,
random_seed=0):
"""Constructor.
Args:
data: a list of Tensors with data, each row is a new example.
num_classes: number of clusters.
initial_means: a Tensor with a matrix of means. If None, means are
computed by sampling randomly.
params: Controls which parameters are updated in the training
process. Can contain any combination of "w" for weights, "m" for
means, and "c" for covariances.
covariance_type: one of "full", "diag".
random_seed: Seed for PRNG used to initialize seeds.
Raises:
Exception if covariance type is unknown.
"""
self._params = params
self._random_seed = random_seed
self._covariance_type = covariance_type
if self._covariance_type not in [DIAG_COVARIANCE, FULL_COVARIANCE]:
raise Exception( # pylint: disable=g-doc-exception
'programmer error: Invalid covariance type: %s' %
self._covariance_type)
# Create sharded variables for multiple shards. The following
# lists are indexed by shard.
# Probability per example in a class.
num_shards = len(data)
self._probs = [None] * num_shards
# Prior probability.
self._prior_probs = [None] * num_shards
# Membership weights w_{ik} where "i" is the i-th example and "k"
# is the k-th mixture.
self._w = [None] * num_shards
# Number of examples in a class.
self._points_in_k = [None] * num_shards
first_shard = data[0]
self._dimensions = array_ops.shape(first_shard)[1]
self._num_classes = num_classes
# Small value to guarantee that covariances are invertible.
self._min_var = array_ops.diag(
array_ops.ones(array_ops.stack([self._dimensions]))) * 1e-3
self._create_variables()
self._initialize_variables(data, initial_means)
# Operations of partial statistics for the computation of the means.
self._w_mul_x = []
# Operations of partial statistics for the computation of the covariances.
self._w_mul_x2 = []
self._define_graph(data)
def _create_variables(self):
"""Initializes GMM algorithm."""
init_value = array_ops.constant([], dtype=dtypes.float32)
self._means = variables.VariableV1(init_value,
name=self.CLUSTERS_VARIABLE,
validate_shape=False)
self._covs = variables.VariableV1(
init_value, name=self.CLUSTERS_COVS_VARIABLE, validate_shape=False)
# Mixture weights, representing the probability that a randomly
# selected unobservable data (in EM terms) was generated by component k.
self._alpha = variable_scope.variable(
array_ops.tile([1.0 / self._num_classes], [self._num_classes]),
name=self.CLUSTERS_WEIGHT,
validate_shape=False)
self._cluster_centers_initialized = variables.VariableV1(False,
dtype=dtypes.bool,
name='initialized')
def _initialize_variables(self, data, initial_means=None):
"""Initializes variables.
Args:
data: a list of Tensors with data, each row is a new example.
initial_means: a Tensor with a matrix of means.
"""
first_shard = data[0]
# Initialize means: num_classes X 1 X dimensions.
if initial_means is not None:
means = array_ops.expand_dims(initial_means, 1)
else:
# Sample data randomly
means = array_ops.expand_dims(
_init_clusters_random(data, self._num_classes, self._random_seed), 1)
# Initialize covariances.
if self._covariance_type == FULL_COVARIANCE:
cov = _covariance(first_shard, False) + self._min_var
# A matrix per class, num_classes X dimensions X dimensions
covs = array_ops.tile(
array_ops.expand_dims(cov, 0), [self._num_classes, 1, 1])
elif self._covariance_type == DIAG_COVARIANCE:
cov = _covariance(first_shard, True) + self._min_var
# A diagonal per row, num_classes X dimensions.
covs = array_ops.tile(
array_ops.expand_dims(array_ops.diag_part(cov), 0),
[self._num_classes, 1])
with ops.colocate_with(self._cluster_centers_initialized):
initialized = control_flow_ops.with_dependencies(
[means, covs],
array_ops.identity(self._cluster_centers_initialized))
self._init_ops = []
with ops.colocate_with(self._means):
init_means = state_ops.assign(self._means, means, validate_shape=False)
init_means = control_flow_ops.with_dependencies(
[init_means],
state_ops.assign(self._cluster_centers_initialized, True))
self._init_ops.append(control_flow_ops.cond(initialized,
control_flow_ops.no_op,
lambda: init_means).op)
with ops.colocate_with(self._covs):
init_covs = state_ops.assign(self._covs, covs, validate_shape=False)
init_covs = control_flow_ops.with_dependencies(
[init_covs],
state_ops.assign(self._cluster_centers_initialized, True))
self._init_ops.append(control_flow_ops.cond(initialized,
control_flow_ops.no_op,
lambda: init_covs).op)
def init_ops(self):
"""Returns the initialization operation."""
return control_flow_ops.group(*self._init_ops)
def training_ops(self):
"""Returns the training operation."""
return control_flow_ops.group(*self._train_ops)
def is_initialized(self):
"""Returns a boolean operation for initialized variables."""
return self._cluster_centers_initialized
def alphas(self):
return self._alpha
def clusters(self):
"""Returns the clusters with dimensions num_classes X 1 X num_dimensions."""
return self._means
def covariances(self):
"""Returns the covariances matrices."""
return self._covs
def assignments(self):
"""Returns a list of Tensors with the matrix of assignments per shard."""
ret = []
for w in self._w:
ret.append(math_ops.argmax(w, 1))
return ret
def scores(self):
"""Returns the per-sample likelihood fo the data.
Returns:
Log probabilities of each data point.
"""
return self._scores
def log_likelihood_op(self):
"""Returns the log-likelihood operation."""
return self._log_likelihood_op
def _define_graph(self, data):
"""Define graph for a single iteration.
Args:
data: a list of Tensors defining the training data.
"""
for shard_id, shard in enumerate(data):
self._num_examples = array_ops.shape(shard)[0]
shard = array_ops.expand_dims(shard, 0)
self._define_log_prob_operation(shard_id, shard)
self._define_prior_log_prob_operation(shard_id)
self._define_expectation_operation(shard_id)
self._define_partial_maximization_operation(shard_id, shard)
self._define_maximization_operation(len(data))
self._define_loglikelihood_operation()
self._define_score_samples()
def _define_full_covariance_probs(self, shard_id, shard):
"""Defines the full covariance probabilities per example in a class.
Updates a matrix with dimension num_examples X num_classes.
Args:
shard_id: id of the current shard.
shard: current data shard, 1 X num_examples X dimensions.
"""
diff = shard - self._means
cholesky = linalg_ops.cholesky(self._covs + self._min_var)
log_det_covs = 2.0 * math_ops.reduce_sum(
math_ops.log(array_ops.matrix_diag_part(cholesky)), 1)
x_mu_cov = math_ops.square(
linalg_ops.matrix_triangular_solve(
cholesky, array_ops.transpose(
diff, perm=[0, 2, 1]), lower=True))
diag_m = array_ops.transpose(math_ops.reduce_sum(x_mu_cov, 1))
self._probs[shard_id] = (
-0.5 * (diag_m + math_ops.cast(self._dimensions, dtypes.float32) *
math_ops.log(2 * np.pi) + log_det_covs))
def _define_diag_covariance_probs(self, shard_id, shard):
"""Defines the diagonal covariance probabilities per example in a class.
Args:
shard_id: id of the current shard.
shard: current data shard, 1 X num_examples X dimensions.
Returns a matrix num_examples * num_classes.
"""
# num_classes X 1
# TODO(xavigonzalvo): look into alternatives to log for
# reparametrization of variance parameters.
det_expanded = math_ops.reduce_sum(
math_ops.log(self._covs + 1e-3), 1, keepdims=True)
x2 = math_ops.squared_difference(shard, self._means)
cov_expanded = array_ops.expand_dims(1.0 / (self._covs + 1e-3), 2)
# num_classes X num_examples
x2_cov = math_ops.matmul(x2, cov_expanded)
x2_cov = array_ops.transpose(array_ops.squeeze(x2_cov, [2]))
self._probs[shard_id] = -0.5 * (
math_ops.cast(self._dimensions, dtypes.float32) *
math_ops.log(2.0 * np.pi) +
array_ops.transpose(det_expanded) + x2_cov)
def _define_log_prob_operation(self, shard_id, shard):
"""Probability per example in a class.
Updates a matrix with dimension num_examples X num_classes.
Args:
shard_id: id of the current shard.
shard: current data shard, 1 X num_examples X dimensions.
"""
# TODO(xavigonzalvo): Use the pdf defined in
# third_party/tensorflow/contrib/distributions/python/ops/gaussian.py
if self._covariance_type == FULL_COVARIANCE:
self._define_full_covariance_probs(shard_id, shard)
elif self._covariance_type == DIAG_COVARIANCE:
self._define_diag_covariance_probs(shard_id, shard)
self._probs[shard_id] += math_ops.log(self._alpha)
def _define_prior_log_prob_operation(self, shard_id):
"""Computes the prior probability of all samples.
Updates a vector where each item is the prior probability of an
input example.
Args:
shard_id: id of current shard_id.
"""
self._prior_probs[shard_id] = math_ops.reduce_logsumexp(
self._probs[shard_id], axis=1, keepdims=True)
def _define_expectation_operation(self, shard_id):
# Shape broadcasting.
probs = array_ops.expand_dims(self._probs[shard_id], 0)
# Membership weights are computed as:
# $$w_{ik} = \frac{\alpha_k f(\mathbf{y_i}|\mathbf{\theta}_k)}$$
# $$ {\sum_{m=1}^{K}\alpha_mf(\mathbf{y_i}|\mathbf{\theta}_m)}$$
# where "i" is the i-th example, "k" is the k-th mixture, theta are
# the model parameters and y_i the observations.
# These are defined for each shard.
self._w[shard_id] = array_ops.reshape(
math_ops.exp(probs - self._prior_probs[shard_id]),
array_ops.stack([self._num_examples, self._num_classes]))
def _define_partial_maximization_operation(self, shard_id, shard):
"""Computes the partial statistics of the means and covariances.
Args:
shard_id: current shard id.
shard: current data shard, 1 X num_examples X dimensions.
"""
# Soft assignment of each data point to each of the clusters.
self._points_in_k[shard_id] = math_ops.reduce_sum(
self._w[shard_id], 0, keepdims=True)
# Partial means.
w_mul_x = array_ops.expand_dims(
math_ops.matmul(
self._w[shard_id], array_ops.squeeze(shard, [0]), transpose_a=True),
1)
self._w_mul_x.append(w_mul_x)
# Partial covariances.
x = array_ops.concat([shard for _ in range(self._num_classes)], 0)
x_trans = array_ops.transpose(x, perm=[0, 2, 1])
x_mul_w = array_ops.concat([
array_ops.expand_dims(x_trans[k, :, :] * self._w[shard_id][:, k], 0)
for k in range(self._num_classes)
], 0)
self._w_mul_x2.append(math_ops.matmul(x_mul_w, x))
def _define_maximization_operation(self, num_batches):
"""Maximization operations."""
# TODO(xavigonzalvo): some of these operations could be moved to C++.
# Compute the effective number of data points assigned to component k.
with ops.control_dependencies(self._w):
points_in_k = array_ops.squeeze(
math_ops.add_n(self._points_in_k), axis=[0])
# Update alpha.
if 'w' in self._params:
final_points_in_k = points_in_k / num_batches
num_examples = math_ops.cast(math_ops.reduce_sum(final_points_in_k),
dtypes.float32)
self._alpha_op = self._alpha.assign(final_points_in_k /
(num_examples + MEPS))
else:
self._alpha_op = control_flow_ops.no_op()
self._train_ops = [self._alpha_op]
# Update means.
points_in_k_expanded = array_ops.reshape(points_in_k,
[self._num_classes, 1, 1])
if 'm' in self._params:
self._means_op = self._means.assign(
math_ops.div(
math_ops.add_n(self._w_mul_x), points_in_k_expanded + MEPS))
else:
self._means_op = control_flow_ops.no_op()
# means are (num_classes x 1 x dims)
# Update covariances.
with ops.control_dependencies([self._means_op]):
b = math_ops.add_n(self._w_mul_x2) / (points_in_k_expanded + MEPS)
new_covs = []
for k in range(self._num_classes):
mean = self._means.value()[k, :, :]
square_mean = math_ops.matmul(mean, mean, transpose_a=True)
new_cov = b[k, :, :] - square_mean + self._min_var
if self._covariance_type == FULL_COVARIANCE:
new_covs.append(array_ops.expand_dims(new_cov, 0))
elif self._covariance_type == DIAG_COVARIANCE:
new_covs.append(
array_ops.expand_dims(array_ops.diag_part(new_cov), 0))
new_covs = array_ops.concat(new_covs, 0)
if 'c' in self._params:
# Train operations don't need to take care of the means
# because covariances already depend on it.
with ops.control_dependencies([self._means_op, new_covs]):
self._train_ops.append(
state_ops.assign(
self._covs, new_covs, validate_shape=False))
def _define_loglikelihood_operation(self):
"""Defines the total log-likelihood of current iteration."""
op = []
for prior_probs in self._prior_probs:
op.append(math_ops.reduce_logsumexp(prior_probs))
self._log_likelihood_op = math_ops.reduce_logsumexp(op)
def _define_score_samples(self):
"""Defines the likelihood of each data sample."""
op = []
for shard_id, prior_probs in enumerate(self._prior_probs):
op.append(prior_probs + math_ops.log(self._w[shard_id]))
self._scores = array_ops.squeeze(
math_ops.reduce_logsumexp(op, axis=2, keepdims=True), axis=0)
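# Illustrative sketch (not part of the original file): the E-step membership
# weights described in _define_expectation_operation, written in plain NumPy
# for a single shard. `log_probs` is assumed to already contain
# log(alpha_k) + log f(y_i | theta_k), as built by _define_log_prob_operation.
# The helper name is hypothetical.
def _example_membership_weights(log_probs):
  """Returns w_{ik} = exp(log_probs_{ik} - logsumexp_k(log_probs_{ik}))."""
  log_probs = np.asarray(log_probs, dtype=np.float64)
  # Numerically stable logsumexp over the class axis.
  max_log = log_probs.max(axis=1, keepdims=True)
  log_norm = max_log + np.log(
      np.exp(log_probs - max_log).sum(axis=1, keepdims=True))
  return np.exp(log_probs - log_norm)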
def gmm(inp,
initial_clusters,
num_clusters,
random_seed,
covariance_type=FULL_COVARIANCE,
params='wmc'):
"""Creates the graph for Gaussian mixture model (GMM) clustering.
Args:
inp: An input tensor or list of input tensors
initial_clusters: Specifies the clusters used during
initialization. Can be a tensor or numpy array, or a function
that generates the clusters. Can also be "random" to specify
that clusters should be chosen randomly from input data. Note: the
accepted types are varied to be consistent with skflow.
num_clusters: number of clusters.
random_seed: Python integer. Seed for PRNG used to initialize centers.
covariance_type: one of "diag", "full".
params: Controls which parameters are updated in the training
process. Can contain any combination of "w" for weights, "m" for
means, and "c" for covars.
Returns:
Note: tuple of lists returned to be consistent with skflow
A tuple consisting of:
loss: the log-likelihood operation for the current model.
scores: the per-sample log probabilities.
assignments: A list with one vector (or list of vectors). Each element in
the vector corresponds to an input row in 'inp' and specifies the cluster
id corresponding to the input.
training_op: an op that runs an iteration of training.
init_op: an op that runs the initialization.
is_initialized: a boolean variable that is True once the model parameters
have been initialized.
"""
initial_means = None
if initial_clusters != 'random' and not isinstance(initial_clusters,
ops.Tensor):
initial_means = constant_op.constant(initial_clusters, dtype=dtypes.float32)
# Implementation of GMM.
inp = inp if isinstance(inp, list) else [inp]
gmm_tool = GmmAlgorithm(inp, num_clusters, initial_means, params,
covariance_type, random_seed)
assignments = gmm_tool.assignments()
scores = gmm_tool.scores()
loss = gmm_tool.log_likelihood_op()
return (loss, scores, [assignments], gmm_tool.training_ops(),
gmm_tool.init_ops(), gmm_tool.is_initialized())
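# Illustrative usage sketch (not part of the original file), assuming a
# TF1-style graph/session workflow: build the GMM graph with gmm(), run the
# variable and cluster initializers once, then run several training steps.
# The random `points` array, shapes, and the helper name are made up for the
# example; this is not taken from the library's tests.
def _example_gmm_usage():
  """Builds and trains a tiny GMM on random data; returns the final loss."""
  from tensorflow.python.client import session as session_lib
  points = np.random.rand(100, 2).astype(np.float32)
  with ops.Graph().as_default():
    loss, _, _, training_op, init_op, _ = gmm(
        constant_op.constant(points),
        initial_clusters='random',
        num_clusters=3,
        random_seed=0,
        covariance_type=DIAG_COVARIANCE)
    with session_lib.Session() as sess:
      sess.run(variables.global_variables_initializer())
      sess.run(init_op)
      for _ in range(10):
        sess.run(training_op)
      return sess.run(loss)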
|
tensorflow-master
|
tensorflow/contrib/factorization/python/ops/gmm_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A canned Estimator for k-means clustering."""
# TODO(ccolby): Move clustering_ops.py into this file and streamline the code.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.contrib.factorization.python.ops import clustering_ops
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.export import export_output
from tensorflow.python.feature_column import feature_column_lib as fc
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
class _LossRelativeChangeHook(session_run_hook.SessionRunHook):
"""Stops when the change in loss goes below a tolerance."""
def __init__(self, loss_tensor, tolerance):
"""Creates a _LossRelativeChangeHook.
Args:
loss_tensor: A scalar tensor of the loss value.
tolerance: A relative tolerance of loss change between iterations.
"""
self._loss_tensor = loss_tensor
self._tolerance = tolerance
self._prev_loss = None
def before_run(self, run_context):
del run_context # unused
return session_run_hook.SessionRunArgs(self._loss_tensor)
def after_run(self, run_context, run_values):
loss = run_values.results
assert loss is not None
if self._prev_loss:
relative_change = (
abs(loss - self._prev_loss) / (1 + abs(self._prev_loss)))
if relative_change < self._tolerance:
run_context.request_stop()
self._prev_loss = loss
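# Illustrative sketch (not part of the original file): the stopping rule used
# by _LossRelativeChangeHook above, as a plain-Python predicate. The helper
# name is hypothetical.
def _example_should_stop(prev_loss, loss, tolerance):
  """Returns True when |loss - prev_loss| / (1 + |prev_loss|) < tolerance."""
  if prev_loss is None:
    return False
  relative_change = abs(loss - prev_loss) / (1 + abs(prev_loss))
  return relative_change < tolerance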
class _InitializeClustersHook(session_run_hook.SessionRunHook):
"""Initializes the cluster centers.
The chief repeatedly invokes an initialization op until all cluster centers
are initialized. The workers wait for the initialization phase to complete.
"""
def __init__(self, init_op, is_initialized_var, is_chief):
"""Creates an _InitializeClustersHook.
Args:
init_op: An op that, when run, will choose some initial cluster centers.
This op may need to be run multiple times to choose all the centers.
is_initialized_var: A boolean variable reporting whether all initial
centers have been chosen.
is_chief: A boolean specifying whether this task is the chief.
"""
self._init_op = init_op
self._is_initialized_var = is_initialized_var
self._is_chief = is_chief
def after_create_session(self, session, coord):
del coord # unused
assert self._init_op.graph is ops.get_default_graph()
assert self._is_initialized_var.graph is self._init_op.graph
while True:
try:
if session.run(self._is_initialized_var):
break
elif self._is_chief:
session.run(self._init_op)
else:
time.sleep(1)
except RuntimeError as e:
logging.info(e)
def _parse_features_if_necessary(features, feature_columns):
"""Helper function to convert the input points into a usable format.
Args:
features: The input features.
feature_columns: An optional iterable containing all the feature columns
used by the model. All items in the set should be feature column instances
that can be passed to `tf.compat.v1.feature_column.input_layer`. If this
is None, all features will be used.
Returns:
If `features` is a dict of `k` features (optionally filtered by
`feature_columns`), each of which is a vector of `n` scalars, the return
value is a Tensor of shape `(n, k)` representing `n` input points, where the
items in the `k` dimension are sorted lexicographically by `features` key.
If `features` is not a dict, it is returned unmodified.
"""
if not isinstance(features, dict):
return features
if feature_columns:
return fc.input_layer(features, feature_columns)
keys = sorted(features.keys())
with ops.colocate_with(features[keys[0]]):
return array_ops.concat([features[k] for k in keys], axis=1)
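# Illustrative sketch (not part of the original file): roughly what
# _parse_features_if_necessary does to a dict input when `feature_columns` is
# None -- per-key columns are concatenated in lexicographic key order. A NumPy
# analogue with made-up feature names and shapes:
def _example_dict_to_points():
  """Returns a (3, 2) array built from two length-3 feature columns."""
  import numpy as np
  features = {'x2': np.array([[4.], [5.], [6.]]),
              'x1': np.array([[1.], [2.], [3.]])}
  keys = sorted(features.keys())  # ['x1', 'x2']
  return np.concatenate([features[k] for k in keys], axis=1)  # Shape (3, 2).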
class _ModelFn(object):
"""Model function for the estimator."""
def __init__(self, num_clusters, initial_clusters, distance_metric,
random_seed, use_mini_batch, mini_batch_steps_per_iteration,
kmeans_plus_plus_num_retries, relative_tolerance,
feature_columns):
self._num_clusters = num_clusters
self._initial_clusters = initial_clusters
self._distance_metric = distance_metric
self._random_seed = random_seed
self._use_mini_batch = use_mini_batch
self._mini_batch_steps_per_iteration = mini_batch_steps_per_iteration
self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
self._relative_tolerance = relative_tolerance
self._feature_columns = feature_columns
def model_fn(self, features, mode, config):
"""Model function for the estimator.
Note that this does not take a `labels` arg. This works, but `input_fn` must
return either `features` or, equivalently, `(features, None)`.
Args:
features: The input points. See `tf.estimator.Estimator`.
mode: See `tf.estimator.Estimator`.
config: See `tf.estimator.Estimator`.
Returns:
A `tf.estimator.EstimatorSpec` (see `tf.estimator.Estimator`) specifying
this behavior:
* `train_op`: Execute one mini-batch or full-batch run of Lloyd's
algorithm.
* `loss`: The sum of the squared distances from each input point to its
closest center.
* `eval_metric_ops`: Maps `SCORE` to `loss`.
* `predictions`: Maps `ALL_DISTANCES` to the distance from each input
point to each cluster center; maps `CLUSTER_INDEX` to the index of
the closest cluster center for each input point.
"""
# input_points is a single Tensor. Therefore, the sharding functionality
# in clustering_ops is unused, and some of the values below are lists of a
# single item.
input_points = _parse_features_if_necessary(features, self._feature_columns)
# Let N = the number of input_points.
# all_distances: A list of one matrix of shape (N, num_clusters). Each value
# is the distance from an input point to a cluster center.
# model_predictions: A list of one vector of shape (N). Each value is the
# cluster id of an input point.
# losses: Similar to cluster_idx but provides the distance to the cluster
# center.
# is_initialized: scalar indicating whether the initial cluster centers
# have been chosen; see init_op.
# init_op: an op to choose the initial cluster centers. A single worker
# repeatedly executes init_op until is_initialized becomes True.
# training_op: an op that runs an iteration of training, either an entire
# Lloyd iteration or a mini-batch of a Lloyd iteration. Multiple workers
# may execute this op, but only after is_initialized becomes True.
(all_distances, model_predictions, losses, is_initialized, init_op,
training_op) = clustering_ops.KMeans(
inputs=input_points,
num_clusters=self._num_clusters,
initial_clusters=self._initial_clusters,
distance_metric=self._distance_metric,
use_mini_batch=self._use_mini_batch,
mini_batch_steps_per_iteration=self._mini_batch_steps_per_iteration,
random_seed=self._random_seed,
kmeans_plus_plus_num_retries=self._kmeans_plus_plus_num_retries
).training_graph()
loss = math_ops.reduce_sum(losses)
summary.scalar('loss/raw', loss)
incr_step = state_ops.assign_add(training_util.get_global_step(), 1)
training_op = control_flow_ops.with_dependencies([training_op, incr_step],
loss)
training_hooks = [
_InitializeClustersHook(init_op, is_initialized, config.is_chief)
]
if self._relative_tolerance is not None:
training_hooks.append(
_LossRelativeChangeHook(loss, self._relative_tolerance))
export_outputs = {
KMeansClustering.ALL_DISTANCES:
export_output.PredictOutput(all_distances[0]),
KMeansClustering.CLUSTER_INDEX:
export_output.PredictOutput(model_predictions[0]),
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
export_output.PredictOutput(model_predictions[0])
}
return model_fn_lib.EstimatorSpec(
mode=mode,
predictions={
KMeansClustering.ALL_DISTANCES: all_distances[0],
KMeansClustering.CLUSTER_INDEX: model_predictions[0],
},
loss=loss,
train_op=training_op,
eval_metric_ops={KMeansClustering.SCORE: metrics.mean(loss)},
training_hooks=training_hooks,
export_outputs=export_outputs)
# TODO(agarwal,ands): support sharded input.
class KMeansClustering(estimator.Estimator):
"""An Estimator for K-Means clustering.
Example:
```
import numpy as np
import tensorflow as tf
num_points = 100
dimensions = 2
points = np.random.uniform(0, 1000, [num_points, dimensions])
def input_fn():
return tf.compat.v1.train.limit_epochs(
tf.convert_to_tensor(points, dtype=tf.float32), num_epochs=1)
num_clusters = 5
kmeans = tf.contrib.factorization.KMeansClustering(
num_clusters=num_clusters, use_mini_batch=False)
# train
num_iterations = 10
previous_centers = None
for _ in range(num_iterations):
kmeans.train(input_fn)
cluster_centers = kmeans.cluster_centers()
if previous_centers is not None:
print('delta:', cluster_centers - previous_centers)
previous_centers = cluster_centers
print('score:', kmeans.score(input_fn))
print('cluster centers:', cluster_centers)
# map the input points to their clusters
cluster_indices = list(kmeans.predict_cluster_index(input_fn))
for i, point in enumerate(points):
cluster_index = cluster_indices[i]
center = cluster_centers[cluster_index]
print('point:', point, 'is in cluster', cluster_index, 'centered at', center)
```
The `SavedModel` saved by the `export_savedmodel` method does not include the
cluster centers. However, the cluster centers may be retrieved by the
latest checkpoint saved during training. Specifically,
```
kmeans.cluster_centers()
```
is equivalent to
```
tf.train.load_variable(
kmeans.model_dir, KMeansClustering.CLUSTER_CENTERS_VAR_NAME)
```
"""
# Valid values for the distance_metric constructor argument.
SQUARED_EUCLIDEAN_DISTANCE = clustering_ops.SQUARED_EUCLIDEAN_DISTANCE
COSINE_DISTANCE = clustering_ops.COSINE_DISTANCE
# Values for initial_clusters constructor argument.
RANDOM_INIT = clustering_ops.RANDOM_INIT
KMEANS_PLUS_PLUS_INIT = clustering_ops.KMEANS_PLUS_PLUS_INIT
# Metric returned by evaluate(): The sum of the squared distances from each
# input point to its closest center.
SCORE = 'score'
# Keys returned by predict().
# ALL_DISTANCES: The distance from each input point to each cluster center.
# CLUSTER_INDEX: The index of the closest cluster center for each input point.
CLUSTER_INDEX = 'cluster_index'
ALL_DISTANCES = 'all_distances'
# Variable name used by cluster_centers().
CLUSTER_CENTERS_VAR_NAME = clustering_ops.CLUSTERS_VAR_NAME
def __init__(self,
num_clusters,
model_dir=None,
initial_clusters=RANDOM_INIT,
distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
random_seed=0,
use_mini_batch=True,
mini_batch_steps_per_iteration=1,
kmeans_plus_plus_num_retries=2,
relative_tolerance=None,
config=None,
feature_columns=None):
"""Creates an Estimator for running KMeans training and inference.
This Estimator implements the following variants of the K-means algorithm:
If `use_mini_batch` is False, it runs standard full batch K-means. Each
training step runs a single iteration of K-Means and must process the full
input at once. To run in this mode, the `input_fn` passed to `train` must
return the entire input dataset.
If `use_mini_batch` is True, it runs a generalization of the mini-batch
K-means algorithm. It runs multiple iterations, where each iteration is
composed of `mini_batch_steps_per_iteration` steps. Each training step
accumulates the contribution from one mini-batch into temporary storage.
Every `mini_batch_steps_per_iteration` steps, the cluster centers are
updated and the temporary storage cleared for the next iteration. Note
that:
* If `mini_batch_steps_per_iteration=1`, the algorithm reduces to the
standard K-means mini-batch algorithm.
* If `mini_batch_steps_per_iteration = num_inputs / batch_size`, the
algorithm becomes an asynchronous version of the full-batch algorithm.
However, there is no guarantee by this implementation that each input
is seen exactly once per iteration. Also, different updates are applied
asynchronously without locking. So this asynchronous version may not
behave exactly like a full-batch version.
Args:
num_clusters: An integer tensor specifying the number of clusters. This
argument is ignored if `initial_clusters` is a tensor or numpy array.
model_dir: The directory to save the model results and log files.
initial_clusters: Specifies how the initial cluster centers are chosen.
One of the following:
* a tensor or numpy array with the initial cluster centers.
* a callable `f(inputs, k)` that selects and returns up to `k` centers
from an input batch. `f` is free to return any number of centers from
`0` to `k`. It will be invoked on successive input batches as necessary
until all `num_clusters` centers are chosen.
* `KMeansClustering.RANDOM_INIT`: Choose centers randomly from an input
batch. If the batch size is less than `num_clusters` then the entire
batch is chosen to be initial cluster centers and the remaining
centers are chosen from successive input batches.
* `KMeansClustering.KMEANS_PLUS_PLUS_INIT`: Use kmeans++ to choose
centers from the first input batch. If the batch size is less than
`num_clusters`, a TensorFlow runtime error occurs.
distance_metric: The distance metric used for clustering. One of:
* `KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE`: The squared Euclidean
distance between vectors `u` and `v`, defined as \\(||u - v||_2^2\\),
i.e. the sum of the squares of the element-wise differences.
* `KMeansClustering.COSINE_DISTANCE`: Cosine distance between vectors
`u` and `v` is defined as \\(1 - (u . v) / (||u||_2 ||v||_2)\\).
random_seed: Python integer. Seed for PRNG used to initialize centers.
use_mini_batch: A boolean specifying whether to use the mini-batch k-means
algorithm. See explanation above.
mini_batch_steps_per_iteration: The number of steps after which the
updated cluster centers are synced back to a master copy. Used only if
`use_mini_batch=True`. See explanation above.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample `O(log(num_to_sample))` additional points. Used only if
`initial_clusters=KMeansClustering.KMEANS_PLUS_PLUS_INIT`.
relative_tolerance: A relative tolerance of change in the loss between
iterations. Stops learning if the loss changes less than this amount.
This may not work correctly if `use_mini_batch=True`.
config: See `tf.estimator.Estimator`.
feature_columns: An optional iterable containing all the feature columns
used by the model. All items in the set should be feature column
instances that can be passed to
`tf.compat.v1.feature_column.input_layer`. If this is None, all features
will be used.
Raises:
ValueError: An invalid argument was passed to `initial_clusters` or
`distance_metric`.
"""
if isinstance(initial_clusters, str) and initial_clusters not in [
KMeansClustering.RANDOM_INIT, KMeansClustering.KMEANS_PLUS_PLUS_INIT
]:
raise ValueError("Unsupported initialization algorithm '%s'" %
initial_clusters)
if distance_metric not in [
KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
KMeansClustering.COSINE_DISTANCE
]:
raise ValueError("Unsupported distance metric '%s'" % distance_metric)
super(KMeansClustering, self).__init__(
model_fn=_ModelFn(num_clusters, initial_clusters, distance_metric,
random_seed, use_mini_batch,
mini_batch_steps_per_iteration,
kmeans_plus_plus_num_retries, relative_tolerance,
feature_columns).model_fn,
model_dir=model_dir,
config=config)
def _predict_one_key(self, input_fn, predict_key):
for result in self.predict(input_fn=input_fn, predict_keys=[predict_key]):
yield result[predict_key]
def predict_cluster_index(self, input_fn):
"""Finds the index of the closest cluster center to each input point.
Args:
input_fn: Input points. See `tf.estimator.Estimator.predict`.
Yields:
The index of the closest cluster center for each input point.
"""
for index in self._predict_one_key(input_fn,
KMeansClustering.CLUSTER_INDEX):
yield index
def score(self, input_fn):
"""Returns the sum of squared distances to nearest clusters.
Note that this function is different from the corresponding one in sklearn
which returns the negative sum.
Args:
input_fn: Input points. See `tf.estimator.Estimator.evaluate`. Only one
batch is retrieved.
Returns:
The sum of the squared distance from each point in the first batch of
inputs to its nearest cluster center.
"""
return self.evaluate(input_fn=input_fn, steps=1)[KMeansClustering.SCORE]
def transform(self, input_fn):
"""Transforms each input point to its distances to all cluster centers.
Note that if `distance_metric=KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE`,
this function returns the squared Euclidean distance while the corresponding
sklearn function returns the Euclidean distance.
Args:
input_fn: Input points. See `tf.estimator.Estimator.predict`.
Yields:
The distances from each input point to each cluster center.
"""
for distances in self._predict_one_key(input_fn,
KMeansClustering.ALL_DISTANCES):
yield distances
def cluster_centers(self):
"""Returns the cluster centers."""
return self.get_variable_value(KMeansClustering.CLUSTER_CENTERS_VAR_NAME)
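# Illustrative sketch (not part of the original file): for
# SQUARED_EUCLIDEAN_DISTANCE, the values surfaced as ALL_DISTANCES (and by
# `transform` above) are squared distances from every input point to every
# center, unlike sklearn's Euclidean distances. A NumPy version with
# hypothetical inputs:
def _example_all_squared_distances(points, centers):
  """Returns a (num_points, num_clusters) matrix of squared distances."""
  import numpy as np
  points = np.asarray(points, dtype=np.float64)
  centers = np.asarray(centers, dtype=np.float64)
  diff = points[:, None, :] - centers[None, :, :]
  return (diff ** 2).sum(axis=2)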
|
tensorflow-master
|
tensorflow/contrib/factorization/python/ops/kmeans.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for matrix factorization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numbers
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.factorization.python.ops import gen_factorization_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import resource_loader
_factorization_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_factorization_ops.so"))
class WALSModel(object):
r"""A model for Weighted Alternating Least Squares matrix factorization.
It minimizes the following loss function over U, V:
$$
\|\sqrt W \odot (A - U V^T)\|_F^2 + \lambda (\|U\|_F^2 + \|V\|_F^2)
$$
where,
A: input matrix,
W: weight matrix. Note that the (element-wise) square root of the weights
is used in the objective function.
U, V: row_factors and column_factors matrices,
\\(\lambda\\): regularization.
Also we assume that W is of the following special form:
\\( W_{ij} = W_0 + R_i * C_j \\) if \\(A_{ij} \ne 0\\),
\\(W_{ij} = W_0\\) otherwise.
where,
\\(W_0\\): unobserved_weight,
\\(R_i\\): row_weights,
\\(C_j\\): col_weights.
Note that the current implementation supports two operation modes: the default
mode assumes that row_factors and col_factors can individually fit into the
memory of each worker, in which case they are cached. When this condition
cannot be met, setting use_factors_weights_cache to False allows larger
problem sizes at a slight performance penalty, since the worker caches are not
created and the relevant weight and factor values are instead looked up from
the parameter servers at each step.
Loss computation: The loss can be computed efficiently by decomposing it into
a sparse term and a Gramian term, see wals.md.
The loss is returned by the update_{col, row}_factors(sp_input), and is
normalized as follows:
_, _, unregularized_loss, regularization, sum_weights =
update_row_factors(sp_input)
if sp_input contains the rows \\({A_i, i \in I}\\), and the input matrix A
has n total rows, then the minibatch loss = unregularized_loss +
regularization is
$$
(\|\sqrt W_I \odot (A_I - U_I V^T)\|_F^2 + \lambda \|U_I\|_F^2) * n / |I| +
\lambda \|V\|_F^2
$$
The sum_weights tensor contains the normalized sum of weights
\\(sum(W_I) * n / |I|\\).
A typical usage example (pseudocode):
with tf.Graph().as_default():
# Set up the model object.
model = tf.contrib.factorization.WALSModel(....)
# To be run only once as part of session initialization. In distributed
# training setting, this should only be run by the chief trainer and all
# other trainers should block until this is done.
model_init_op = model.initialize_op
# To be run once per worker after the session is available, and before
# the prep_gramian_op for the row (column) update can be run.
worker_init_op = model.worker_init
# To be run once per iteration sweep before the row(column) update
# initialize ops can be run. Note that in the distributed training
# situations, this should only be run by the chief trainer. All other
# trainers need to block until this is done.
row_update_prep_gramian_op = model.row_update_prep_gramian_op
col_update_prep_gramian_op = model.col_update_prep_gramian_op
# To be run once per worker per iteration sweep. Must be run before
# any actual update ops can be run.
init_row_update_op = model.initialize_row_update_op
init_col_update_op = model.initialize_col_update_op
# Ops to update the row (column) factors. These can take either the entire
# sparse tensor or slices of the sparse tensor. For a distributed trainer,
# each trainer handles just part of the matrix.
_, row_update_op, unreg_row_loss, row_reg, _ = model.update_row_factors(
sp_input=matrix_slices_from_queue_for_worker_shard)
row_loss = unreg_row_loss + row_reg
_, col_update_op, unreg_col_loss, col_reg, _ = model.update_col_factors(
sp_input=transposed_matrix_slices_from_queue_for_worker_shard,
transpose_input=True)
col_loss = unreg_col_loss + col_reg
...
# model_init_op is passed to Supervisor. Chief trainer runs it. Other
# trainers wait.
sv = tf.compat.v1.train.Supervisor(is_chief=is_chief,
...,
init_op=tf.group(..., model_init_op, ...), ...)
...
with sv.managed_session(...) as sess:
# All workers/trainers run it after session becomes available.
worker_init_op.run(session=sess)
...
while i in iterations:
# All trainers need to sync up here.
while not_all_ready:
wait
# Row update sweep.
if is_chief:
row_update_prep_gramian_op.run(session=sess)
else:
wait_for_chief
# All workers run update initialization.
init_row_update_op.run(session=sess)
# Go through the matrix.
reset_matrix_slices_queue_for_worker_shard
while_matrix_slices:
row_update_op.run(session=sess)
# All trainers need to sync up here.
while not_all_ready:
wait
# Column update sweep.
if is_chief:
col_update_prep_gramian_op.run(session=sess)
else:
wait_for_chief
# All workers run update initialization.
init_col_update_op.run(session=sess)
# Go through the matrix.
reset_transposed_matrix_slices_queue_for_worker_shard
while_transposed_matrix_slices:
col_update_op.run(session=sess)
"""
def __init__(self,
input_rows,
input_cols,
n_components,
unobserved_weight=0.1,
regularization=None,
row_init="random",
col_init="random",
num_row_shards=1,
num_col_shards=1,
row_weights=1,
col_weights=1,
use_factors_weights_cache=True,
use_gramian_cache=True,
use_scoped_vars=False):
"""Creates model for WALS matrix factorization.
Args:
input_rows: total number of rows for input matrix.
input_cols: total number of cols for input matrix.
n_components: number of dimensions to use for the factors.
unobserved_weight: weight given to unobserved entries of matrix.
regularization: weight of L2 regularization term. If None, no
regularization is done.
row_init: initializer for row factor. Can be a tensor or numpy constant.
If set to "random", the value is initialized randomly.
col_init: initializer for column factor. See row_init for details.
num_row_shards: number of shards to use for row factors.
num_col_shards: number of shards to use for column factors.
row_weights: Must be in one of the following three formats: None, a list
of lists of non-negative real numbers (or equivalent iterables) or a
single non-negative real number.
- When set to None, w_ij = unobserved_weight, which simplifies to ALS.
Note that col_weights must also be set to "None" in this case.
- If it is a list of lists of non-negative real numbers, it needs to be
in the form of [[w_0, w_1, ...], [w_k, ... ], [...]], with the number of
inner lists matching the number of row factor shards and the elements in
each inner list being the weights for the rows of the corresponding row
factor shard. In this case, w_ij = unobserved_weight +
row_weights[i] * col_weights[j].
- If this is a single non-negative real number, this value is used for
all row weights and \\(w_{ij}\\) = unobserved_weight + row_weights *
col_weights[j].
Note that it is allowed to have row_weights as a list while col_weights
is a single number, or vice versa.
col_weights: See row_weights.
use_factors_weights_cache: When True, the factors and weights will be
cached on the workers before the updates start. Defaults to True. Note
that the weights cache is initialized through `worker_init`, and the
row/col factors cache is initialized through
`initialize_{col/row}_update_op`. In the case where the weights are
computed outside and set before the training iterations start, it is
important to ensure the `worker_init` op is run afterwards for the
weights cache to take effect.
use_gramian_cache: When True, the Gramians will be cached on the workers
before the updates start. Defaults to True.
use_scoped_vars: When True, the factor and weight vars will also be nested
in a tf.name_scope.
"""
self._input_rows = input_rows
self._input_cols = input_cols
self._num_row_shards = num_row_shards
self._num_col_shards = num_col_shards
self._n_components = n_components
self._unobserved_weight = unobserved_weight
self._regularization = regularization
self._regularization_matrix = (
regularization * linalg_ops.eye(self._n_components)
if regularization is not None else None)
assert (row_weights is None) == (col_weights is None)
self._use_factors_weights_cache = use_factors_weights_cache
self._use_gramian_cache = use_gramian_cache
if use_scoped_vars:
with ops.name_scope("row_weights"):
self._row_weights = WALSModel._create_weights(
row_weights, self._input_rows, self._num_row_shards, "row_weights")
with ops.name_scope("col_weights"):
self._col_weights = WALSModel._create_weights(
col_weights, self._input_cols, self._num_col_shards, "col_weights")
with ops.name_scope("row_factors"):
self._row_factors = self._create_factors(
self._input_rows, self._n_components, self._num_row_shards,
row_init, "row_factors")
with ops.name_scope("col_factors"):
self._col_factors = self._create_factors(
self._input_cols, self._n_components, self._num_col_shards,
col_init, "col_factors")
else:
self._row_weights = WALSModel._create_weights(
row_weights, self._input_rows, self._num_row_shards, "row_weights")
self._col_weights = WALSModel._create_weights(
col_weights, self._input_cols, self._num_col_shards, "col_weights")
self._row_factors = self._create_factors(
self._input_rows, self._n_components, self._num_row_shards, row_init,
"row_factors")
self._col_factors = self._create_factors(
self._input_cols, self._n_components, self._num_col_shards, col_init,
"col_factors")
self._row_gramian = self._create_gramian(self._n_components, "row_gramian")
self._col_gramian = self._create_gramian(self._n_components, "col_gramian")
with ops.name_scope("row_prepare_gramian"):
self._row_update_prep_gramian = self._prepare_gramian(
self._col_factors, self._col_gramian)
with ops.name_scope("col_prepare_gramian"):
self._col_update_prep_gramian = self._prepare_gramian(
self._row_factors, self._row_gramian)
with ops.name_scope("transient_vars"):
self._create_transient_vars()
@property
def row_factors(self):
"""Returns a list of tensors corresponding to row factor shards."""
return self._row_factors
@property
def col_factors(self):
"""Returns a list of tensors corresponding to column factor shards."""
return self._col_factors
@property
def row_weights(self):
"""Returns a list of tensors corresponding to row weight shards."""
return self._row_weights
@property
def col_weights(self):
"""Returns a list of tensors corresponding to col weight shards."""
return self._col_weights
@property
def initialize_op(self):
"""Returns an op for initializing tensorflow variables."""
all_vars = self._row_factors + self._col_factors
all_vars.extend([self._row_gramian, self._col_gramian])
if self._row_weights is not None:
assert self._col_weights is not None
all_vars.extend(self._row_weights + self._col_weights)
return variables.variables_initializer(all_vars)
@classmethod
def _shard_sizes(cls, dims, num_shards):
"""Helper function to split dims values into num_shards."""
shard_size, residual = divmod(dims, num_shards)
return [shard_size + 1] * residual + [shard_size] * (num_shards - residual)
@classmethod
def _create_factors(cls, rows, cols, num_shards, init, name):
"""Helper function to create row and column factors."""
if callable(init):
init = init()
if isinstance(init, list):
assert len(init) == num_shards
elif isinstance(init, str) and init == "random":
pass
elif num_shards == 1:
init = [init]
sharded_matrix = []
sizes = cls._shard_sizes(rows, num_shards)
assert len(sizes) == num_shards
def make_initializer(i, size):
def initializer():
if init == "random":
return random_ops.random_normal([size, cols])
else:
return init[i]
return initializer
for i, size in enumerate(sizes):
var_name = "%s_shard_%d" % (name, i)
var_init = make_initializer(i, size)
sharded_matrix.append(
variable_scope.variable(
var_init, dtype=dtypes.float32, name=var_name))
return sharded_matrix
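  # For example, `init` may be the string "random", a single tensor/array
  # (when num_shards == 1), or a list with one entry per shard:
  #   WALSModel._create_factors(6, 3, 2, "random", "row_factors")
  # creates two float32 variables, each of shape [3, 3].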
@classmethod
def _create_weights(cls, wt_init, num_wts, num_shards, name):
"""Helper function to create sharded weight vector.
Args:
      wt_init: init value for the weight. If None, weights are not created.
        This can be None, a list of non-negative real numbers, or a single
        non-negative real number (or equivalent iterables).
num_wts: total size of all the weight shards
num_shards: number of shards for the weights
name: name for the new Variables.
Returns:
A list of weight shard Tensors.
Raises:
ValueError: If wt_init is not the right format.
"""
if wt_init is None:
return None
init_mode = "list"
if isinstance(wt_init, collections.Iterable):
if num_shards == 1 and len(wt_init) == num_wts:
wt_init = [wt_init]
assert len(wt_init) == num_shards
elif isinstance(wt_init, numbers.Real) and wt_init >= 0:
init_mode = "scalar"
else:
raise ValueError(
"Invalid weight initialization argument. Must be one of these: "
"None, a real non-negative real number, or a list of lists of "
"non-negative real numbers (or equivalent iterables) corresponding "
"to sharded factors.")
sizes = cls._shard_sizes(num_wts, num_shards)
assert len(sizes) == num_shards
def make_wt_initializer(i, size):
def initializer():
if init_mode == "scalar":
return wt_init * array_ops.ones([size])
else:
return wt_init[i]
return initializer
sharded_weight = []
for i, size in enumerate(sizes):
var_name = "%s_shard_%d" % (name, i)
var_init = make_wt_initializer(i, size)
sharded_weight.append(
variable_scope.variable(
var_init, dtype=dtypes.float32, name=var_name))
return sharded_weight
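  # For example, a scalar weight is broadcast across every row of every shard,
  # while a list must supply one iterable of weights per shard:
  #   WALSModel._create_weights(0.5, 5, 2, "row_weights")
  #   WALSModel._create_weights([[1., 2., 3.], [4., 5.]], 5, 2, "row_weights")
  # both of which create two weight shards of sizes 3 and 2.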
@staticmethod
def _create_gramian(n_components, name):
"""Helper function to create the gramian variable.
Args:
n_components: number of dimensions of the factors from which the gramian
will be calculated.
name: name for the new Variables.
Returns:
A gramian Tensor with shape of [n_components, n_components].
"""
return variable_scope.variable(
array_ops.zeros([n_components, n_components]),
dtype=dtypes.float32,
name=name)
@staticmethod
def _transient_var(name):
"""Helper function to create a Variable."""
return variable_scope.variable(
1.0,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
validate_shape=False,
name=name)
def _prepare_gramian(self, factors, gramian):
"""Helper function to create ops to prepare/calculate gramian.
Args:
factors: Variable or list of Variable representing (sharded) factors.
Used to compute the updated corresponding gramian value.
gramian: Variable storing the gramian calculated from the factors.
Returns:
An op that updates the gramian with the calculated value from the factors.
"""
partial_gramians = []
for f in factors:
with ops.colocate_with(f):
partial_gramians.append(math_ops.matmul(f, f, transpose_a=True))
with ops.colocate_with(gramian):
prep_gramian = state_ops.assign(gramian,
math_ops.add_n(partial_gramians)).op
return prep_gramian
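  # In other words, the returned op computes \\(G = \sum_s F_s^T F_s\\) over
  # all factor shards \\(F_s\\) and assigns the result to `gramian`.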
def _cached_copy(self, var, name, pass_through=False):
"""Helper function to create a worker cached copy of a Variable.
This assigns the var (either a single Variable or a list of Variables) to
local transient cache Variable(s). Note that if var is a list of Variables,
    the assignment is done sequentially to minimize memory overhead.
    Also note that if pass_through is set to True, this does not create new
    Variables but simply returns the input back.
Args:
var: A Variable or a list of Variables to cache.
name: name of cached Variable.
      pass_through: when set to True, this simply passes the var through
        unchanged and does not actually create a cache.
Returns:
Tuple consisting of following three entries:
cache: the new transient Variable or list of transient Variables
corresponding one-to-one with var.
cache_init: op to initialize the Variable or the list of Variables.
cache_reset: op to reset the Variable or the list of Variables to some
default value.
"""
if var is None:
return None, None, None
elif pass_through:
cache = var
cache_init = control_flow_ops.no_op()
cache_reset = control_flow_ops.no_op()
elif isinstance(var, variables.Variable):
cache = WALSModel._transient_var(name=name)
with ops.colocate_with(cache):
cache_init = state_ops.assign(cache, var, validate_shape=False)
cache_reset = state_ops.assign(cache, 1.0, validate_shape=False)
else:
assert isinstance(var, list)
assert var
cache = [
WALSModel._transient_var(name="%s_shard_%d" % (name, i))
for i in xrange(len(var))
]
reset_ops = []
for i, c in enumerate(cache):
with ops.colocate_with(c):
if i == 0:
cache_init = state_ops.assign(c, var[i], validate_shape=False)
else:
with ops.control_dependencies([cache_init]):
cache_init = state_ops.assign(c, var[i], validate_shape=False)
reset_ops.append(state_ops.assign(c, 1.0, validate_shape=False))
cache_reset = control_flow_ops.group(*reset_ops)
return cache, cache_init, cache_reset
def _create_transient_vars(self):
"""Creates local cache of factors, weights and gramian for rows and columns.
Note that currently the caching strategy is as follows:
When initiating a row (resp. column) update:
- The column (resp. row) gramian is computed.
- Optionally, if use_gramian_cache is True, the column (resp. row) Gramian
is cached, while the row (resp. column) gramian is reset.
- Optionally, if use_factors_weights_cache is True, the column (resp. row)
factors and weights are cached, while the row (resp. column) factors and
weights are reset.
"""
(self._row_factors_cache, row_factors_cache_init,
row_factors_cache_reset) = self._cached_copy(
self._row_factors,
"row_factors_cache",
pass_through=not self._use_factors_weights_cache)
(self._col_factors_cache, col_factors_cache_init,
col_factors_cache_reset) = self._cached_copy(
self._col_factors,
"col_factors_cache",
pass_through=not self._use_factors_weights_cache)
(self._row_wt_cache, row_wt_cache_init, _) = self._cached_copy(
self._row_weights,
"row_wt_cache",
pass_through=not self._use_factors_weights_cache)
(self._col_wt_cache, col_wt_cache_init, _) = self._cached_copy(
self._col_weights,
"col_wt_cache",
pass_through=not self._use_factors_weights_cache)
(self._row_gramian_cache, row_gramian_cache_init,
row_gramian_cache_reset) = self._cached_copy(
self._row_gramian,
"row_gramian_cache",
pass_through=not self._use_gramian_cache)
(self._col_gramian_cache, col_gramian_cache_init,
col_gramian_cache_reset) = self._cached_copy(
self._col_gramian,
"col_gramian_cache",
pass_through=not self._use_gramian_cache)
self._row_updates_init = control_flow_ops.group(
col_factors_cache_init, row_factors_cache_reset, col_gramian_cache_init,
row_gramian_cache_reset)
self._col_updates_init = control_flow_ops.group(
row_factors_cache_init, col_factors_cache_reset, row_gramian_cache_init,
col_gramian_cache_reset)
if self._row_wt_cache is not None:
assert self._col_wt_cache is not None
self._worker_init = control_flow_ops.group(
row_wt_cache_init, col_wt_cache_init, name="worker_init")
else:
self._worker_init = control_flow_ops.no_op(name="worker_init")
@property
def worker_init(self):
"""Op to initialize worker state once before starting any updates.
Note that specifically this initializes the cache of the row and column
weights on workers when `use_factors_weights_cache` is True. In this case,
if these weights are being calculated and reset after the object is created,
    it is important to ensure this op is run afterwards so the cache reflects
the correct values.
"""
return self._worker_init
@property
def row_update_prep_gramian_op(self):
"""Op to form the gramian before starting row updates.
Must be run before initialize_row_update_op and should only be run by one
trainer (usually the chief) when doing distributed training.
Returns:
Op to form the gramian.
"""
return self._row_update_prep_gramian
@property
def col_update_prep_gramian_op(self):
"""Op to form the gramian before starting col updates.
Must be run before initialize_col_update_op and should only be run by one
trainer (usually the chief) when doing distributed training.
Returns:
Op to form the gramian.
"""
return self._col_update_prep_gramian
@property
def initialize_row_update_op(self):
"""Op to initialize worker state before starting row updates."""
return self._row_updates_init
@property
def initialize_col_update_op(self):
"""Op to initialize worker state before starting column updates."""
return self._col_updates_init
@staticmethod
def _get_sharding_func(size, num_shards):
"""Create sharding function for scatter update."""
def func(ids):
if num_shards == 1:
return None, ids
else:
ids_per_shard = size // num_shards
extras = size % num_shards
assignments = math_ops.maximum(ids // (ids_per_shard + 1),
(ids - extras) // ids_per_shard)
new_ids = array_ops.where(assignments < extras,
ids % (ids_per_shard + 1),
(ids - extras) % ids_per_shard)
return assignments, new_ids
return func
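  # For example, with size=10 and num_shards=3, the shards own 4, 3 and 3 ids
  # respectively, so the returned function maps id 5 to (shard=1, local_id=1)
  # and id 9 to (shard=2, local_id=2).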
@classmethod
def scatter_update(cls, factor, indices, values, sharding_func, name=None):
"""Helper function for doing sharded scatter update."""
assert isinstance(factor, list)
if len(factor) == 1:
with ops.colocate_with(factor[0]):
# TODO(agarwal): assign instead of scatter update for full batch update.
return state_ops.scatter_update(
factor[0], indices, values, name=name).op
else:
num_shards = len(factor)
assignments, new_ids = sharding_func(indices)
assert assignments is not None
assignments = math_ops.cast(assignments, dtypes.int32)
sharded_ids = data_flow_ops.dynamic_partition(new_ids, assignments,
num_shards)
sharded_values = data_flow_ops.dynamic_partition(values, assignments,
num_shards)
updates = []
for i in xrange(num_shards):
updates.append(
state_ops.scatter_update(factor[i], sharded_ids[i], sharded_values[
i]))
return control_flow_ops.group(*updates, name=name)
def update_row_factors(self, sp_input=None, transpose_input=False):
r"""Updates the row factors.
Args:
sp_input: A SparseTensor representing a subset of rows of the full input
in any order. Please note that this SparseTensor must retain the
indexing as the original input.
transpose_input: If true, the input will be logically transposed and the
rows corresponding to the transposed input are updated.
Returns:
A tuple consisting of the following elements:
new_values: New values for the row factors.
update_op: An op that assigns the newly computed values to the row
factors.
unregularized_loss: A tensor (scalar) that contains the normalized
minibatch loss corresponding to sp_input, without the regularization
term. If sp_input contains the rows \\({A_{i, :}, i \in I}\\), and the
input matrix A has n total rows, then the unregularized loss is:
\\(\|\sqrt W_I \odot (A_I - U_I V^T)\|_F^2 * n / |I|\\)
The total loss is unregularized_loss + regularization.
regularization: A tensor (scalar) that contains the normalized
regularization term for the minibatch loss corresponding to sp_input.
If sp_input contains the rows \\({A_{i, :}, i \in I}\\), and the input
matrix A has n total rows, then the regularization term is:
          \\(\lambda (\|U_I\|_F^2 * n / |I| + \|V\|_F^2)\\).
sum_weights: The sum of the weights W_I corresponding to sp_input,
normalized by a factor of \\(n / |I|\\). The root weighted squared
          error is: \\(\sqrt{unregularized\_loss / sum\_weights}\\).
"""
return self._process_input_helper(
True, sp_input=sp_input, transpose_input=transpose_input)
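  # A minimal sketch of the row-update workflow, assuming `model` is a
  # WALSModel instance, `sess` a tf.Session, and `sp_rows` a SparseTensor
  # holding a batch of input rows (all three names are illustrative):
  #   _, update_op, loss, reg, _ = model.update_row_factors(sp_input=sp_rows)
  #   sess.run(model.initialize_op)
  #   sess.run(model.worker_init)
  #   sess.run(model.row_update_prep_gramian_op)
  #   sess.run(model.initialize_row_update_op)
  #   sess.run(update_op)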
def update_col_factors(self, sp_input=None, transpose_input=False):
r"""Updates the column factors.
Args:
sp_input: A SparseTensor representing a subset of columns of the full
input. Please refer to comments for update_row_factors for
restrictions.
transpose_input: If true, the input will be logically transposed and the
columns corresponding to the transposed input are updated.
Returns:
A tuple consisting of the following elements:
new_values: New values for the column factors.
update_op: An op that assigns the newly computed values to the column
factors.
unregularized_loss: A tensor (scalar) that contains the normalized
minibatch loss corresponding to sp_input, without the regularization
term. If sp_input contains the columns \\({A_{:, j}, j \in J}\\), and
the input matrix A has m total columns, then the unregularized loss is:
          \\(\|\sqrt W_J \odot (A_J - U V_J^T)\|_F^2 * m / |J|\\)
The total loss is unregularized_loss + regularization.
regularization: A tensor (scalar) that contains the normalized
regularization term for the minibatch loss corresponding to sp_input.
If sp_input contains the columns \\({A_{:, j}, j \in J}\\), and the
input matrix A has m total columns, then the regularization term is:
          \\(\lambda (\|V_J\|_F^2 * m / |J| + \|U\|_F^2)\\).
sum_weights: The sum of the weights W_J corresponding to sp_input,
normalized by a factor of \\(m / |J|\\). The root weighted squared
          error is: \\(\sqrt{unregularized\_loss / sum\_weights}\\).
"""
return self._process_input_helper(
False, sp_input=sp_input, transpose_input=transpose_input)
def project_row_factors(self,
sp_input=None,
transpose_input=False,
projection_weights=None):
"""Projects the row factors.
This computes the row embedding \\(u_i\\) for an observed row \\(a_i\\) by
solving one iteration of the update equations.
Args:
sp_input: A SparseTensor representing a set of rows. Please note that the
column indices of this SparseTensor must match the model column feature
indexing while the row indices are ignored. The returned results will be
in the same ordering as the input rows.
transpose_input: If true, the input will be logically transposed and the
rows corresponding to the transposed input are projected.
projection_weights: The row weights to be used for the projection. If None
        then 1.0 is used. This can be either a scalar or a rank-1 tensor with
the number of elements matching the number of rows to be projected.
Note that the column weights will be determined by the underlying WALS
model.
Returns:
Projected row factors.
"""
if projection_weights is None:
projection_weights = 1
return self._process_input_helper(
True,
sp_input=sp_input,
transpose_input=transpose_input,
row_weights=projection_weights)[0]
def project_col_factors(self,
sp_input=None,
transpose_input=False,
projection_weights=None):
"""Projects the column factors.
This computes the column embedding \\(v_j\\) for an observed column
\\(a_j\\) by solving one iteration of the update equations.
Args:
sp_input: A SparseTensor representing a set of columns. Please note that
the row indices of this SparseTensor must match the model row feature
indexing while the column indices are ignored. The returned results will
be in the same ordering as the input columns.
transpose_input: If true, the input will be logically transposed and the
columns corresponding to the transposed input are projected.
projection_weights: The column weights to be used for the projection. If
        None then 1.0 is used. This can be either a scalar or a rank-1 tensor
with the number of elements matching the number of columns to be
projected. Note that the row weights will be determined by the
underlying WALS model.
Returns:
Projected column factors.
"""
if projection_weights is None:
projection_weights = 1
return self._process_input_helper(
False,
sp_input=sp_input,
transpose_input=transpose_input,
row_weights=projection_weights)[0]
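  # A minimal projection sketch, assuming `model` is a trained WALSModel whose
  # caches have been initialized as above, `sess` a tf.Session, and
  # `sp_new_rows` a SparseTensor of unseen rows (all illustrative names):
  # projection solves for the embeddings of the given rows without modifying
  # the stored factors.
  #   proj = model.project_row_factors(sp_input=sp_new_rows,
  #                                    projection_weights=0.5)
  #   row_embeddings = sess.run(proj)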
def _process_input_helper(self,
update_row_factors,
sp_input=None,
transpose_input=False,
row_weights=None):
"""Creates the graph for processing a sparse slice of input.
Args:
update_row_factors: if True, update or project the row_factors, else
update or project the column factors.
sp_input: Please refer to comments for update_row_factors,
update_col_factors, project_row_factors, and project_col_factors for
restrictions.
transpose_input: If True, the input is logically transposed and then the
corresponding rows/columns of the transposed input are updated.
row_weights: If not None, this is the row/column weights to be used for
the update or projection. If None, use the corresponding weights from
the model. Note that the feature (column/row) weights will be
determined by the model. When not None, it can either be a scalar or
a rank-1 tensor with the same number of elements as the number of rows
        or columns to be updated/projected.
Returns:
A tuple consisting of the following elements:
new_values: New values for the row/column factors.
update_op: An op that assigns the newly computed values to the row/column
factors.
unregularized_loss: A tensor (scalar) that contains the normalized
minibatch loss corresponding to sp_input, without the regularization
term. Add the regularization term below to yield the loss.
regularization: A tensor (scalar) that contains the normalized
regularization term for the minibatch loss corresponding to sp_input.
sum_weights: The sum of the weights corresponding to sp_input. This
can be used with unregularized loss to calculate the root weighted
squared error.
"""
assert isinstance(sp_input, sparse_tensor.SparseTensor)
if update_row_factors:
left = self._row_factors
right_factors = self._col_factors_cache
row_wt = self._row_wt_cache
col_wt = self._col_wt_cache
total_rows = self._input_rows
total_cols = self._input_cols
sharding_func = WALSModel._get_sharding_func(self._input_rows,
self._num_row_shards)
gramian = self._col_gramian_cache
else:
left = self._col_factors
right_factors = self._row_factors_cache
row_wt = self._col_wt_cache
col_wt = self._row_wt_cache
total_rows = self._input_cols
total_cols = self._input_rows
sharding_func = WALSModel._get_sharding_func(self._input_cols,
self._num_col_shards)
gramian = self._row_gramian_cache
transpose_input = not transpose_input
    # Note that the row indices of sp_input are based on the original full input.
# Here we reindex the rows and give them contiguous ids starting at 0.
# We use tf.unique to achieve this reindexing. Note that this is done so
# that the downstream kernel can assume that the input is "dense" along the
# row dimension.
row_ids, col_ids = array_ops.split(
value=sp_input.indices, num_or_size_splits=2, axis=1)
update_row_indices, all_row_ids = array_ops.unique(row_ids[:, 0])
update_col_indices, all_col_ids = array_ops.unique(col_ids[:, 0])
col_ids = array_ops.expand_dims(math_ops.cast(all_col_ids, dtypes.int64), 1)
row_ids = array_ops.expand_dims(math_ops.cast(all_row_ids, dtypes.int64), 1)
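    # For example, if the original row ids in sp_input.indices are [5, 5, 9],
    # then update_row_indices is [5, 9] and all_row_ids is [0, 0, 1], i.e. the
    # rows are relabeled with contiguous ids starting at 0.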
if transpose_input:
update_indices = update_col_indices
row_shape = [
math_ops.cast(array_ops.shape(update_row_indices)[0], dtypes.int64)
]
gather_indices = update_row_indices
else:
update_indices = update_row_indices
row_shape = [
math_ops.cast(array_ops.shape(update_col_indices)[0], dtypes.int64)
]
gather_indices = update_col_indices
num_rows = math_ops.cast(array_ops.shape(update_indices)[0], dtypes.int64)
col_shape = [num_rows]
right = embedding_ops.embedding_lookup(
right_factors, gather_indices, partition_strategy="div")
new_sp_indices = array_ops.concat([row_ids, col_ids], 1)
new_sp_shape = (array_ops.concat([row_shape, col_shape], 0)
if transpose_input else
array_ops.concat([col_shape, row_shape], 0))
new_sp_input = sparse_tensor.SparseTensor(
indices=new_sp_indices,
values=sp_input.values,
dense_shape=new_sp_shape)
# Compute lhs and rhs of the normal equations
total_lhs = (self._unobserved_weight * gramian)
if self._regularization_matrix is not None:
total_lhs += self._regularization_matrix
if self._row_weights is None:
# Special case of ALS. Use a much simpler update rule.
total_rhs = (
self._unobserved_weight * sparse_ops.sparse_tensor_dense_matmul(
new_sp_input, right, adjoint_a=transpose_input))
# TODO(rmlarsen): handle transposing in tf.linalg.solve instead of
# transposing explicitly.
# TODO(rmlarsen): multi-thread tf.matrix_solve.
new_left_values = array_ops.transpose(
linalg_ops.matrix_solve(total_lhs, array_ops.transpose(total_rhs)))
else:
if row_weights is None:
# TODO(yifanchen): Add special handling for single shard without using
# embedding_lookup and perform benchmarks for those cases. Same for
# col_weights lookup below.
row_weights_slice = embedding_ops.embedding_lookup(
row_wt, update_indices, partition_strategy="div")
else:
num_indices = array_ops.shape(update_indices)[0]
with ops.control_dependencies(
[check_ops.assert_less_equal(array_ops.rank(row_weights), 1)]):
row_weights_slice = control_flow_ops.cond(
math_ops.equal(array_ops.rank(row_weights), 0),
lambda: (array_ops.ones([num_indices]) * row_weights),
lambda: math_ops.cast(row_weights, dtypes.float32))
col_weights = embedding_ops.embedding_lookup(
col_wt, gather_indices, partition_strategy="div")
partial_lhs, total_rhs = (
gen_factorization_ops.wals_compute_partial_lhs_and_rhs(
right,
col_weights,
self._unobserved_weight,
row_weights_slice,
new_sp_input.indices,
new_sp_input.values,
[],
num_rows,
transpose_input,
name="wals_compute_partial_lhs_rhs"))
total_lhs = array_ops.expand_dims(total_lhs, 0) + partial_lhs
total_rhs = array_ops.expand_dims(total_rhs, -1)
new_left_values = array_ops.squeeze(
linalg_ops.matrix_solve(total_lhs, total_rhs), [2])
update_op_name = "row_update" if update_row_factors else "col_update"
update_op = self.scatter_update(
left,
update_indices,
new_left_values,
sharding_func,
name=update_op_name)
# Create the loss subgraph
loss_sp_input = (sparse_ops.sparse_transpose(new_sp_input)
if transpose_input else new_sp_input)
# sp_approx is the low rank estimate of the input matrix, formed by
    # computing the inner product \\(\langle u_i, v_j \rangle\\) for each
    # (i, j) in loss_sp_input.indices.
sp_approx_vals = gen_factorization_ops.masked_matmul(
new_left_values,
right,
loss_sp_input.indices,
transpose_a=False,
transpose_b=True)
sp_approx = sparse_tensor.SparseTensor(
loss_sp_input.indices, sp_approx_vals, loss_sp_input.dense_shape)
sp_approx_sq = math_ops.square(sp_approx)
sp_residual = sparse_ops.sparse_add(loss_sp_input, sp_approx * (-1))
sp_residual_sq = math_ops.square(sp_residual)
row_wt_mat = (constant_op.constant(0.)
if self._row_weights is None else array_ops.expand_dims(
row_weights_slice, 1))
col_wt_mat = (constant_op.constant(0.)
if self._col_weights is None else array_ops.expand_dims(
col_weights, 0))
# We return the normalized loss
partial_row_gramian = math_ops.matmul(
new_left_values, new_left_values, transpose_a=True)
normalization_factor = total_rows / math_ops.cast(num_rows, dtypes.float32)
unregularized_loss = (
self._unobserved_weight * ( # pyformat line break
sparse_ops.sparse_reduce_sum(sp_residual_sq) - # pyformat break
sparse_ops.sparse_reduce_sum(sp_approx_sq) + # pyformat break
math_ops.trace(math_ops.matmul(partial_row_gramian, gramian))) +
sparse_ops.sparse_reduce_sum(row_wt_mat * (sp_residual_sq * col_wt_mat))
) * normalization_factor
if self._regularization is not None:
regularization = self._regularization * (
math_ops.trace(partial_row_gramian) * normalization_factor +
math_ops.trace(gramian))
else:
regularization = constant_op.constant(0.)
sum_weights = self._unobserved_weight * math_ops.cast(
total_rows * total_cols, dtypes.float32)
if self._row_weights is not None and self._col_weights is not None:
ones = sparse_tensor.SparseTensor(
indices=loss_sp_input.indices,
values=array_ops.ones(array_ops.shape(loss_sp_input.values)),
dense_shape=loss_sp_input.dense_shape)
sum_weights += sparse_ops.sparse_reduce_sum(row_wt_mat * (
ones * col_wt_mat)) * normalization_factor
return (new_left_values, update_op, unregularized_loss, regularization,
sum_weights)
|
tensorflow-master
|
tensorflow/contrib/factorization/python/ops/factorization_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Weighted Alternating Least Squares (WALS) on the tf.learn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.factorization.python.ops import factorization_ops
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
class _SweepHook(session_run_hook.SessionRunHook):
"""Keeps track of row/col sweeps, and runs prep ops before each sweep."""
def __init__(self, is_row_sweep_var, is_sweep_done_var, init_op,
row_prep_ops, col_prep_ops, row_train_op, col_train_op,
switch_op):
"""Initializes SweepHook.
Args:
is_row_sweep_var: A Boolean tf.Variable, determines whether we are
currently doing a row or column sweep. It is updated by the hook.
      is_sweep_done_var: A Boolean tf.Variable, indicates whether the current
        sweep is complete (this is used to determine when to switch sweeps and
        to run the prep ops below).
init_op: op to be run once before training. This is typically a local
initialization op (such as cache initialization).
row_prep_ops: A list of TensorFlow ops, to be run before the beginning of
each row sweep (and during initialization), in the given order.
col_prep_ops: A list of TensorFlow ops, to be run before the beginning of
each column sweep (and during initialization), in the given order.
row_train_op: A TensorFlow op to be run during row sweeps.
col_train_op: A TensorFlow op to be run during column sweeps.
switch_op: A TensorFlow op to be run before each sweep.
"""
self._is_row_sweep_var = is_row_sweep_var
self._is_sweep_done_var = is_sweep_done_var
self._init_op = init_op
self._row_prep_ops = row_prep_ops
self._col_prep_ops = col_prep_ops
self._row_train_op = row_train_op
self._col_train_op = col_train_op
self._switch_op = switch_op
    # Tracks whether the init_op has been run.
self._is_initialized = False
def before_run(self, run_context):
"""Runs the appropriate prep ops, and requests running update ops."""
sess = run_context.session
is_sweep_done = sess.run(self._is_sweep_done_var)
if not self._is_initialized:
logging.info("SweepHook running init op.")
sess.run(self._init_op)
if is_sweep_done:
logging.info("SweepHook starting the next sweep.")
sess.run(self._switch_op)
is_row_sweep = sess.run(self._is_row_sweep_var)
if is_sweep_done or not self._is_initialized:
logging.info("SweepHook running prep ops for the {} sweep.".format(
"row" if is_row_sweep else "col"))
prep_ops = self._row_prep_ops if is_row_sweep else self._col_prep_ops
for prep_op in prep_ops:
sess.run(prep_op)
self._is_initialized = True
logging.info("Next fit step starting.")
return session_run_hook.SessionRunArgs(
fetches=[self._row_train_op if is_row_sweep else self._col_train_op])
class _IncrementGlobalStepHook(session_run_hook.SessionRunHook):
"""Hook that increments the global step."""
def __init__(self):
global_step = training_util.get_global_step()
if global_step:
self._global_step_incr_op = state_ops.assign_add(
global_step, 1, name="global_step_incr").op
else:
self._global_step_incr_op = None
def before_run(self, run_context):
if self._global_step_incr_op:
run_context.session.run(self._global_step_incr_op)
class _StopAtSweepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a given sweep."""
def __init__(self, last_sweep):
"""Initializes a `StopAtSweepHook`.
This hook requests stop at a given sweep. Relies on the tensor named
COMPLETED_SWEEPS in the default graph.
Args:
last_sweep: Integer, number of the last sweep to run.
"""
self._last_sweep = last_sweep
def begin(self):
try:
self._completed_sweeps_var = ops.get_default_graph().get_tensor_by_name(
WALSMatrixFactorization.COMPLETED_SWEEPS + ":0")
except KeyError:
raise RuntimeError(WALSMatrixFactorization.COMPLETED_SWEEPS +
" counter should be created to use StopAtSweepHook.")
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(self._completed_sweeps_var)
def after_run(self, run_context, run_values):
completed_sweeps = run_values.results
if completed_sweeps >= self._last_sweep:
run_context.request_stop()
def _wals_factorization_model_function(features, labels, mode, params):
"""Model function for the WALSFactorization estimator.
Args:
features: Dictionary of features. See WALSMatrixFactorization.
labels: Must be None.
mode: A model_fn.ModeKeys object.
params: Dictionary of parameters containing arguments passed to the
WALSMatrixFactorization constructor.
Returns:
A ModelFnOps object.
Raises:
ValueError: If `mode` is not recognized.
"""
assert labels is None
use_factors_weights_cache = (params["use_factors_weights_cache_for_training"]
and mode == model_fn.ModeKeys.TRAIN)
use_gramian_cache = (params["use_gramian_cache_for_training"] and
mode == model_fn.ModeKeys.TRAIN)
max_sweeps = params["max_sweeps"]
model = factorization_ops.WALSModel(
params["num_rows"],
params["num_cols"],
params["embedding_dimension"],
unobserved_weight=params["unobserved_weight"],
regularization=params["regularization_coeff"],
row_init=params["row_init"],
col_init=params["col_init"],
num_row_shards=params["num_row_shards"],
num_col_shards=params["num_col_shards"],
row_weights=params["row_weights"],
col_weights=params["col_weights"],
use_factors_weights_cache=use_factors_weights_cache,
use_gramian_cache=use_gramian_cache)
# Get input rows and cols. We either update rows or columns depending on
# the value of row_sweep, which is maintained using a session hook.
input_rows = features[WALSMatrixFactorization.INPUT_ROWS]
input_cols = features[WALSMatrixFactorization.INPUT_COLS]
# TRAIN mode:
if mode == model_fn.ModeKeys.TRAIN:
# Training consists of the following ops (controlled using a SweepHook).
# Before a row sweep:
# row_update_prep_gramian_op
# initialize_row_update_op
# During a row sweep:
# update_row_factors_op
# Before a col sweep:
# col_update_prep_gramian_op
# initialize_col_update_op
# During a col sweep:
# update_col_factors_op
is_row_sweep_var = variable_scope.variable(
True,
trainable=False,
name="is_row_sweep",
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
is_sweep_done_var = variable_scope.variable(
False,
trainable=False,
name="is_sweep_done",
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
completed_sweeps_var = variable_scope.variable(
0,
trainable=False,
name=WALSMatrixFactorization.COMPLETED_SWEEPS,
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
loss_var = variable_scope.variable(
0.,
trainable=False,
name=WALSMatrixFactorization.LOSS,
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
    # The root weighted squared error:
    # \\(\sqrt{\sum_{i,j} w_{ij} (a_{ij} - r_{ij})^2 / \sum_{i,j} w_{ij}}\\)
rwse_var = variable_scope.variable(
0.,
trainable=False,
name=WALSMatrixFactorization.RWSE,
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
summary.scalar("loss", loss_var)
summary.scalar("root_weighted_squared_error", rwse_var)
summary.scalar("completed_sweeps", completed_sweeps_var)
def create_axis_ops(sp_input, num_items, update_fn, axis_name):
"""Creates book-keeping and training ops for a given axis.
Args:
sp_input: A SparseTensor corresponding to the row or column batch.
num_items: An integer, the total number of items of this axis.
update_fn: A function that takes one argument (`sp_input`), and that
returns a tuple of
* new_factors: A float Tensor of the factor values after update.
* update_op: a TensorFlow op which updates the factors.
* loss: A float Tensor, the unregularized loss.
* reg_loss: A float Tensor, the regularization loss.
* sum_weights: A float Tensor, the sum of factor weights.
axis_name: A string that specifies the name of the axis.
Returns:
A tuple consisting of:
* reset_processed_items_op: A TensorFlow op, to be run before the
beginning of any sweep. It marks all items as not-processed.
* axis_train_op: A Tensorflow op, to be run during this axis' sweeps.
"""
processed_items_init = array_ops.fill(dims=[num_items], value=False)
with ops.colocate_with(processed_items_init):
processed_items = variable_scope.variable(
processed_items_init,
collections=[ops.GraphKeys.GLOBAL_VARIABLES],
trainable=False,
name="processed_" + axis_name)
_, update_op, loss, reg, sum_weights = update_fn(sp_input)
input_indices = sp_input.indices[:, 0]
with ops.control_dependencies([
update_op,
state_ops.assign(loss_var, loss + reg),
state_ops.assign(rwse_var, math_ops.sqrt(loss / sum_weights))]):
with ops.colocate_with(processed_items):
update_processed_items = state_ops.scatter_update(
processed_items,
input_indices,
array_ops.ones_like(input_indices, dtype=dtypes.bool),
name="update_processed_{}_indices".format(axis_name))
with ops.control_dependencies([update_processed_items]):
is_sweep_done = math_ops.reduce_all(processed_items)
axis_train_op = control_flow_ops.group(
state_ops.assign(is_sweep_done_var, is_sweep_done),
state_ops.assign_add(
completed_sweeps_var,
math_ops.cast(is_sweep_done, dtypes.int32)),
name="{}_sweep_train_op".format(axis_name))
return processed_items.initializer, axis_train_op
reset_processed_rows_op, row_train_op = create_axis_ops(
input_rows,
params["num_rows"],
lambda x: model.update_row_factors(sp_input=x, transpose_input=False),
"rows")
reset_processed_cols_op, col_train_op = create_axis_ops(
input_cols,
params["num_cols"],
lambda x: model.update_col_factors(sp_input=x, transpose_input=True),
"cols")
switch_op = control_flow_ops.group(
state_ops.assign(
is_row_sweep_var, math_ops.logical_not(is_row_sweep_var)),
reset_processed_rows_op,
reset_processed_cols_op,
name="sweep_switch_op")
row_prep_ops = [
model.row_update_prep_gramian_op, model.initialize_row_update_op]
col_prep_ops = [
model.col_update_prep_gramian_op, model.initialize_col_update_op]
init_op = model.worker_init
sweep_hook = _SweepHook(
is_row_sweep_var, is_sweep_done_var, init_op,
row_prep_ops, col_prep_ops, row_train_op, col_train_op, switch_op)
global_step_hook = _IncrementGlobalStepHook()
training_hooks = [sweep_hook, global_step_hook]
if max_sweeps is not None:
training_hooks.append(_StopAtSweepHook(max_sweeps))
return model_fn.ModelFnOps(
mode=model_fn.ModeKeys.TRAIN,
predictions={},
loss=loss_var,
eval_metric_ops={},
train_op=control_flow_ops.no_op(),
training_hooks=training_hooks)
# INFER mode
elif mode == model_fn.ModeKeys.INFER:
projection_weights = features.get(
WALSMatrixFactorization.PROJECTION_WEIGHTS)
def get_row_projection():
return model.project_row_factors(
sp_input=input_rows,
projection_weights=projection_weights,
transpose_input=False)
def get_col_projection():
return model.project_col_factors(
sp_input=input_cols,
projection_weights=projection_weights,
transpose_input=True)
predictions = {
WALSMatrixFactorization.PROJECTION_RESULT: control_flow_ops.cond(
features[WALSMatrixFactorization.PROJECT_ROW],
get_row_projection,
get_col_projection)
}
return model_fn.ModelFnOps(
mode=model_fn.ModeKeys.INFER,
predictions=predictions,
loss=None,
eval_metric_ops={},
train_op=control_flow_ops.no_op(),
training_hooks=[])
# EVAL mode
elif mode == model_fn.ModeKeys.EVAL:
def get_row_loss():
_, _, loss, reg, _ = model.update_row_factors(
sp_input=input_rows, transpose_input=False)
return loss + reg
def get_col_loss():
_, _, loss, reg, _ = model.update_col_factors(
sp_input=input_cols, transpose_input=True)
return loss + reg
loss = control_flow_ops.cond(
features[WALSMatrixFactorization.PROJECT_ROW],
get_row_loss,
get_col_loss)
return model_fn.ModelFnOps(
mode=model_fn.ModeKeys.EVAL,
predictions={},
loss=loss,
eval_metric_ops={},
train_op=control_flow_ops.no_op(),
training_hooks=[])
else:
raise ValueError("mode=%s is not recognized." % str(mode))
class WALSMatrixFactorization(estimator.Estimator):
"""An Estimator for Weighted Matrix Factorization, using the WALS method.
WALS (Weighted Alternating Least Squares) is an algorithm for weighted matrix
factorization. It computes a low-rank approximation of a given sparse (n x m)
matrix `A`, by a product of two matrices, `U * V^T`, where `U` is a (n x k)
matrix and `V` is a (m x k) matrix. Here k is the rank of the approximation,
also called the embedding dimension. We refer to `U` as the row factors, and
`V` as the column factors.
See tensorflow/contrib/factorization/g3doc/wals.md for the precise problem
formulation.
The training proceeds in sweeps: during a row_sweep, we fix `V` and solve for
`U`. During a column sweep, we fix `U` and solve for `V`. Each one of these
problems is an unconstrained quadratic minimization problem and can be solved
exactly (it can also be solved in mini-batches, since the solution decouples
across rows of each matrix).
The alternating between sweeps is achieved by using a hook during training,
which is responsible for keeping track of the sweeps and running preparation
ops at the beginning of each sweep. It also updates the global_step variable,
which keeps track of the number of batches processed since the beginning of
training.
The current implementation assumes that the training is run on a single
machine, and will fail if `config.num_worker_replicas` is not equal to one.
Training is done by calling `self.fit(input_fn=input_fn)`, where `input_fn`
provides two tensors: one for rows of the input matrix, and one for rows of
the transposed input matrix (i.e. columns of the original matrix). Note that
during a row sweep, only row batches are processed (ignoring column batches)
and vice-versa.
Also note that every row (respectively every column) of the input matrix
must be processed at least once for the sweep to be considered complete. In
particular, training will not make progress if some rows are not generated by
the `input_fn`.
For prediction, given a new set of input rows `A'`, we compute a corresponding
set of row factors `U'`, such that `U' * V^T` is a good approximation of `A'`.
We call this operation a row projection. A similar operation is defined for
columns. Projection is done by calling
`self.get_projections(input_fn=input_fn)`, where `input_fn` satisfies the
constraints given below.
The input functions must satisfy the following constraints: Calling `input_fn`
must return a tuple `(features, labels)` where `labels` is None, and
`features` is a dict containing the following keys:
TRAIN:
* `WALSMatrixFactorization.INPUT_ROWS`: float32 SparseTensor (matrix).
Rows of the input matrix to process (or to project).
* `WALSMatrixFactorization.INPUT_COLS`: float32 SparseTensor (matrix).
Columns of the input matrix to process (or to project), transposed.
INFER:
* `WALSMatrixFactorization.INPUT_ROWS`: float32 SparseTensor (matrix).
Rows to project.
* `WALSMatrixFactorization.INPUT_COLS`: float32 SparseTensor (matrix).
Columns to project.
* `WALSMatrixFactorization.PROJECT_ROW`: Boolean Tensor. Whether to project
the rows or columns.
* `WALSMatrixFactorization.PROJECTION_WEIGHTS` (Optional): float32 Tensor
(vector). The weights to use in the projection.
EVAL:
* `WALSMatrixFactorization.INPUT_ROWS`: float32 SparseTensor (matrix).
Rows to project.
* `WALSMatrixFactorization.INPUT_COLS`: float32 SparseTensor (matrix).
Columns to project.
* `WALSMatrixFactorization.PROJECT_ROW`: Boolean Tensor. Whether to project
the rows or columns.
"""
# Keys to be used in model_fn
# Features keys
INPUT_ROWS = "input_rows"
INPUT_COLS = "input_cols"
PROJECT_ROW = "project_row"
PROJECTION_WEIGHTS = "projection_weights"
# Predictions key
PROJECTION_RESULT = "projection"
# Name of the completed_sweeps variable
COMPLETED_SWEEPS = "completed_sweeps"
# Name of the loss variable
LOSS = "WALS_loss"
# Name of the Root Weighted Squared Error variable
RWSE = "WALS_RWSE"
def __init__(self,
num_rows,
num_cols,
embedding_dimension,
unobserved_weight=0.1,
regularization_coeff=None,
row_init="random",
col_init="random",
num_row_shards=1,
num_col_shards=1,
row_weights=1,
col_weights=1,
use_factors_weights_cache_for_training=True,
use_gramian_cache_for_training=True,
max_sweeps=None,
model_dir=None,
config=None):
r"""Creates a model for matrix factorization using the WALS method.
Args:
num_rows: Total number of rows for input matrix.
num_cols: Total number of cols for input matrix.
embedding_dimension: Dimension to use for the factors.
unobserved_weight: Weight of the unobserved entries of matrix.
regularization_coeff: Weight of the L2 regularization term. Defaults to
None, in which case the problem is not regularized.
row_init: Initializer for row factor. Must be either:
- A tensor: The row factor matrix is initialized to this tensor,
- A numpy constant,
- "random": The rows are initialized using a normal distribution.
col_init: Initializer for column factor. See row_init.
num_row_shards: Number of shards to use for the row factors.
num_col_shards: Number of shards to use for the column factors.
row_weights: Must be in one of the following three formats:
- None: In this case, the weight of every entry is the unobserved_weight
and the problem simplifies to ALS. Note that, in this case,
          col_weights must also be set to None.
        - List of lists of non-negative scalars, of the form
          \\([[w_0, w_1, ...], [w_k, ... ], [...]]\\),
          where the number of inner lists is equal to the number of row factor
          shards and the elements in each inner list are the weights for the
          rows of that shard. In this case,
          \\(w_{ij} = unobserved_weight + row_weights[i] * col_weights[j]\\).
- A non-negative scalar: This value is used for all row weights.
Note that it is allowed to have row_weights as a list and col_weights
as a scalar, or vice-versa.
col_weights: See row_weights.
use_factors_weights_cache_for_training: Boolean, whether the factors and
weights will be cached on the workers before the updates start, during
training. Defaults to True.
Note that caching is disabled during prediction.
use_gramian_cache_for_training: Boolean, whether the Gramians will be
cached on the workers before the updates start, during training.
Defaults to True. Note that caching is disabled during prediction.
max_sweeps: integer, optional. Specifies the number of sweeps for which
to train the model, where a sweep is defined as a full update of all the
row factors (resp. column factors).
If `steps` or `max_steps` is also specified in model.fit(), training
        stops when either the steps condition or the sweeps condition is met.
model_dir: The directory to save the model results and log files.
config: A Configuration object. See Estimator.
Raises:
ValueError: If config.num_worker_replicas is strictly greater than one.
The current implementation only supports running on a single worker.
"""
# TODO(walidk): Support power-law based weight computation.
# TODO(walidk): Add factor lookup by indices, with caching.
# TODO(walidk): Support caching during prediction.
# TODO(walidk): Provide input pipelines that handle missing rows.
params = {
"num_rows":
num_rows,
"num_cols":
num_cols,
"embedding_dimension":
embedding_dimension,
"unobserved_weight":
unobserved_weight,
"regularization_coeff":
regularization_coeff,
"row_init":
row_init,
"col_init":
col_init,
"num_row_shards":
num_row_shards,
"num_col_shards":
num_col_shards,
"row_weights":
row_weights,
"col_weights":
col_weights,
"max_sweeps":
max_sweeps,
"use_factors_weights_cache_for_training":
use_factors_weights_cache_for_training,
"use_gramian_cache_for_training":
use_gramian_cache_for_training
}
self._row_factors_names = [
"row_factors_shard_%d" % i for i in range(num_row_shards)
]
self._col_factors_names = [
"col_factors_shard_%d" % i for i in range(num_col_shards)
]
super(WALSMatrixFactorization, self).__init__(
model_fn=_wals_factorization_model_function,
params=params,
model_dir=model_dir,
config=config)
if self._config is not None and self._config.num_worker_replicas > 1:
raise ValueError("WALSMatrixFactorization must be run on a single worker "
"replica.")
def get_row_factors(self):
"""Returns the row factors of the model, loading them from checkpoint.
Should only be run after training.
Returns:
A list of the row factors of the model.
"""
return [self.get_variable_value(name) for name in self._row_factors_names]
def get_col_factors(self):
"""Returns the column factors of the model, loading them from checkpoint.
Should only be run after training.
Returns:
A list of the column factors of the model.
"""
return [self.get_variable_value(name) for name in self._col_factors_names]
def get_projections(self, input_fn):
"""Computes the projections of the rows or columns given in input_fn.
Runs predict() with the given input_fn, and returns the results. Should only
be run after training.
Args:
input_fn: Input function which specifies the rows or columns to project.
Returns:
A generator of the projected factors.
"""
return (result[WALSMatrixFactorization.PROJECTION_RESULT]
for result in self.predict(input_fn=input_fn))
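  # Note that get_projections returns a generator, so the results must be
  # materialized explicitly, e.g. (with `model` and `proj_input_fn` as
  # illustrative names):
  #   projected_factors = list(model.get_projections(input_fn=proj_input_fn))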
|
tensorflow-master
|
tensorflow/contrib/factorization/python/ops/wals.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for WALSMatrixFactorization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import json
import numpy as np
from tensorflow.contrib.factorization.python.ops import factorization_ops_test_utils
from tensorflow.contrib.factorization.python.ops import wals as wals_lib
from tensorflow.contrib.learn.python.learn import run_config
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config as run_config_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
class WALSMatrixFactorizationTest(test.TestCase):
INPUT_MATRIX = factorization_ops_test_utils.INPUT_MATRIX
def np_array_to_sparse(self, np_array):
"""Transforms an np.array to a tf.SparseTensor."""
return factorization_ops_test_utils.np_matrix_to_tf_sparse(np_array)
def calculate_loss(self):
"""Calculates the loss of the current (trained) model."""
current_rows = embedding_ops.embedding_lookup(
self._model.get_row_factors(), math_ops.range(self._num_rows),
partition_strategy='div')
current_cols = embedding_ops.embedding_lookup(
self._model.get_col_factors(), math_ops.range(self._num_cols),
partition_strategy='div')
row_wts = embedding_ops.embedding_lookup(
self._row_weights, math_ops.range(self._num_rows),
partition_strategy='div')
col_wts = embedding_ops.embedding_lookup(
self._col_weights, math_ops.range(self._num_cols),
partition_strategy='div')
sp_inputs = self.np_array_to_sparse(self.INPUT_MATRIX)
return factorization_ops_test_utils.calculate_loss(
sp_inputs, current_rows, current_cols, self._regularization_coeff,
self._unobserved_weight, row_wts, col_wts)
# TODO(walidk): Replace with input_reader_utils functions once open sourced.
def remap_sparse_tensor_rows(self, sp_x, row_ids, shape):
"""Remaps the row ids of a tf.SparseTensor."""
old_row_ids, old_col_ids = array_ops.split(
value=sp_x.indices, num_or_size_splits=2, axis=1)
new_row_ids = array_ops.gather(row_ids, old_row_ids)
new_indices = array_ops.concat([new_row_ids, old_col_ids], 1)
return sparse_tensor.SparseTensor(
indices=new_indices, values=sp_x.values, dense_shape=shape)
# TODO(walidk): Add an option to shuffle inputs.
def input_fn(self, np_matrix, batch_size, mode,
project_row=None, projection_weights=None,
remove_empty_rows_columns=False):
"""Returns an input_fn that selects row and col batches from np_matrix.
This simple utility creates an input function from a numpy_array. The
following transformations are performed:
* The empty rows and columns in np_matrix are removed (if
remove_empty_rows_columns is true)
* np_matrix is converted to a SparseTensor.
* The rows of the sparse matrix (and the rows of its transpose) are batched.
* A features dictionary is created, which contains the row / column batches.
In TRAIN mode, one only needs to specify the np_matrix and the batch_size.
In INFER and EVAL modes, one must also provide project_row, a boolean which
specifies whether we are projecting rows or columns.
Args:
np_matrix: A numpy array. The input matrix to use.
batch_size: Integer.
mode: Can be one of model_fn.ModeKeys.{TRAIN, INFER, EVAL}.
project_row: A boolean. Used in INFER and EVAL modes. Specifies whether
to project rows or columns.
projection_weights: A float numpy array. Used in INFER mode. Specifies
the weights to use in the projection (the weights are optional, and
default to 1.).
remove_empty_rows_columns: A boolean. When true, this will remove empty
rows and columns in the np_matrix. Note that this will result in
modifying the indices of the input matrix. The mapping from new indices
to old indices is returned in the form of two numpy arrays.
Returns:
A tuple consisting of:
_fn: A callable. Calling _fn returns a features dict.
nz_row_ids: A numpy array of the ids of non-empty rows, such that
nz_row_ids[i] is the old row index corresponding to new index i.
nz_col_ids: A numpy array of the ids of non-empty columns, such that
nz_col_ids[j] is the old column index corresponding to new index j.
"""
if remove_empty_rows_columns:
np_matrix, nz_row_ids, nz_col_ids = (
factorization_ops_test_utils.remove_empty_rows_columns(np_matrix))
else:
nz_row_ids = np.arange(np.shape(np_matrix)[0])
nz_col_ids = np.arange(np.shape(np_matrix)[1])
def extract_features(row_batch, col_batch, num_rows, num_cols):
row_ids = row_batch[0]
col_ids = col_batch[0]
rows = self.remap_sparse_tensor_rows(
row_batch[1], row_ids, shape=[num_rows, num_cols])
cols = self.remap_sparse_tensor_rows(
col_batch[1], col_ids, shape=[num_cols, num_rows])
features = {
wals_lib.WALSMatrixFactorization.INPUT_ROWS: rows,
wals_lib.WALSMatrixFactorization.INPUT_COLS: cols,
}
return features
def _fn():
num_rows = np.shape(np_matrix)[0]
num_cols = np.shape(np_matrix)[1]
row_ids = math_ops.range(num_rows, dtype=dtypes.int64)
col_ids = math_ops.range(num_cols, dtype=dtypes.int64)
sp_mat = self.np_array_to_sparse(np_matrix)
sp_mat_t = sparse_ops.sparse_transpose(sp_mat)
row_batch = input_lib.batch(
[row_ids, sp_mat],
batch_size=min(batch_size, num_rows),
capacity=10,
enqueue_many=True)
col_batch = input_lib.batch(
[col_ids, sp_mat_t],
batch_size=min(batch_size, num_cols),
capacity=10,
enqueue_many=True)
features = extract_features(row_batch, col_batch, num_rows, num_cols)
if mode == model_fn.ModeKeys.INFER or mode == model_fn.ModeKeys.EVAL:
self.assertTrue(
project_row is not None,
msg='project_row must be specified in INFER or EVAL mode.')
features[wals_lib.WALSMatrixFactorization.PROJECT_ROW] = (
constant_op.constant(project_row))
if mode == model_fn.ModeKeys.INFER and projection_weights is not None:
weights_batch = input_lib.batch(
projection_weights,
batch_size=batch_size,
capacity=10,
enqueue_many=True)
features[wals_lib.WALSMatrixFactorization.PROJECTION_WEIGHTS] = (
weights_batch)
labels = None
return features, labels
return _fn, nz_row_ids, nz_col_ids
@property
def input_matrix(self):
return self.INPUT_MATRIX
@property
def row_steps(self):
return np.ceil(self._num_rows / self.batch_size)
@property
def col_steps(self):
return np.ceil(self._num_cols / self.batch_size)
@property
def batch_size(self):
return 5
@property
def use_cache(self):
return False
@property
def max_sweeps(self):
return None
def setUp(self):
self._num_rows = 5
self._num_cols = 7
self._embedding_dimension = 3
self._unobserved_weight = 0.1
self._num_row_shards = 2
self._num_col_shards = 3
self._regularization_coeff = 0.01
self._col_init = [
# Shard 0.
[[-0.36444709, -0.39077035, -0.32528427],
[1.19056475, 0.07231052, 2.11834812],
[0.93468881, -0.71099287, 1.91826844]],
# Shard 1.
[[1.18160152, 1.52490723, -0.50015002],
[1.82574749, -0.57515913, -1.32810032]],
# Shard 2.
[[-0.15515432, -0.84675711, 0.13097958],
[-0.9246484, 0.69117504, 1.2036494]],
]
self._row_weights = [[0.1, 0.2, 0.3], [0.4, 0.5]]
self._col_weights = [[0.1, 0.2, 0.3], [0.4, 0.5], [0.6, 0.7]]
# Values of row and column factors after running one iteration or factor
# updates.
self._row_factors_0 = [[0.097689, -0.219293, -0.020780],
[0.50842, 0.64626, 0.22364],
[0.401159, -0.046558, -0.192854]]
self._row_factors_1 = [[1.20597, -0.48025, 0.35582],
[1.5564, 1.2528, 1.0528]]
self._col_factors_0 = [[2.4725, -1.2950, -1.9980],
[0.44625, 1.50771, 1.27118],
[1.39801, -2.10134, 0.73572]]
self._col_factors_1 = [[3.36509, -0.66595, -3.51208],
[0.57191, 1.59407, 1.33020]]
self._col_factors_2 = [[3.3459, -1.3341, -3.3008],
[0.57366, 1.83729, 1.26798]]
self._model = wals_lib.WALSMatrixFactorization(
self._num_rows,
self._num_cols,
self._embedding_dimension,
self._unobserved_weight,
col_init=self._col_init,
regularization_coeff=self._regularization_coeff,
num_row_shards=self._num_row_shards,
num_col_shards=self._num_col_shards,
row_weights=self._row_weights,
col_weights=self._col_weights,
max_sweeps=self.max_sweeps,
use_factors_weights_cache_for_training=self.use_cache,
use_gramian_cache_for_training=self.use_cache)
def test_fit(self):
# Row sweep.
input_fn = self.input_fn(np_matrix=self.input_matrix,
batch_size=self.batch_size,
mode=model_fn.ModeKeys.TRAIN,
remove_empty_rows_columns=True)[0]
self._model.fit(input_fn=input_fn, steps=self.row_steps)
row_factors = self._model.get_row_factors()
self.assertAllClose(row_factors[0], self._row_factors_0, atol=1e-3)
self.assertAllClose(row_factors[1], self._row_factors_1, atol=1e-3)
# Col sweep.
# Running fit a second time will resume training from the checkpoint.
input_fn = self.input_fn(np_matrix=self.input_matrix,
batch_size=self.batch_size,
mode=model_fn.ModeKeys.TRAIN,
remove_empty_rows_columns=True)[0]
self._model.fit(input_fn=input_fn, steps=self.col_steps)
col_factors = self._model.get_col_factors()
self.assertAllClose(col_factors[0], self._col_factors_0, atol=1e-3)
self.assertAllClose(col_factors[1], self._col_factors_1, atol=1e-3)
self.assertAllClose(col_factors[2], self._col_factors_2, atol=1e-3)
def test_predict(self):
input_fn = self.input_fn(np_matrix=self.input_matrix,
batch_size=self.batch_size,
mode=model_fn.ModeKeys.TRAIN,
remove_empty_rows_columns=True,
)[0]
# Project rows 1 and 4 from the input matrix.
proj_input_fn = self.input_fn(
np_matrix=self.INPUT_MATRIX[[1, 4], :],
batch_size=2,
mode=model_fn.ModeKeys.INFER,
project_row=True,
projection_weights=[[0.2, 0.5]])[0]
self._model.fit(input_fn=input_fn, steps=self.row_steps)
projections = self._model.get_projections(proj_input_fn)
projected_rows = list(itertools.islice(projections, 2))
self.assertAllClose(
projected_rows,
[self._row_factors_0[1], self._row_factors_1[1]],
atol=1e-3)
# Project columns 5, 3, 1 from the input matrix.
proj_input_fn = self.input_fn(
np_matrix=self.INPUT_MATRIX[:, [5, 3, 1]],
batch_size=3,
mode=model_fn.ModeKeys.INFER,
project_row=False,
projection_weights=[[0.6, 0.4, 0.2]])[0]
self._model.fit(input_fn=input_fn, steps=self.col_steps)
projections = self._model.get_projections(proj_input_fn)
projected_cols = list(itertools.islice(projections, 3))
self.assertAllClose(
projected_cols,
[self._col_factors_2[0], self._col_factors_1[0],
self._col_factors_0[1]],
atol=1e-3)
def test_eval(self):
# Do a row sweep then evaluate the model on row inputs.
# The evaluate function returns the loss of the projected rows, but since
# projection is idempotent, the eval loss must match the model loss.
input_fn = self.input_fn(np_matrix=self.input_matrix,
batch_size=self.batch_size,
mode=model_fn.ModeKeys.TRAIN,
remove_empty_rows_columns=True,
)[0]
self._model.fit(input_fn=input_fn, steps=self.row_steps)
eval_input_fn_row = self.input_fn(np_matrix=self.input_matrix,
batch_size=1,
mode=model_fn.ModeKeys.EVAL,
project_row=True,
remove_empty_rows_columns=True)[0]
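# With batch_size=1, steps=self._num_rows evaluates every row of the input
# exactly once, so the resulting eval loss can be compared directly against
# the true loss from calculate_loss().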
loss = self._model.evaluate(
input_fn=eval_input_fn_row, steps=self._num_rows)['loss']
with self.cached_session():
true_loss = self.calculate_loss()
self.assertNear(
loss, true_loss, err=.001,
msg="""After row update, eval loss = {}, does not match the true
loss = {}.""".format(loss, true_loss))
# Do a col sweep then evaluate the model on col inputs.
self._model.fit(input_fn=input_fn, steps=self.col_steps)
eval_input_fn_col = self.input_fn(np_matrix=self.input_matrix,
batch_size=1,
mode=model_fn.ModeKeys.EVAL,
project_row=False,
remove_empty_rows_columns=True)[0]
loss = self._model.evaluate(
input_fn=eval_input_fn_col, steps=self._num_cols)['loss']
with self.cached_session():
true_loss = self.calculate_loss()
self.assertNear(
loss, true_loss, err=.001,
msg="""After col update, eval loss = {}, does not match the true
loss = {}.""".format(loss, true_loss))
class WALSMatrixFactorizationTestSweeps(WALSMatrixFactorizationTest):
@property
def max_sweeps(self):
return 2
# We set the column steps to None so that we rely only on max_sweeps to stop
# training.
@property
def col_steps(self):
return None
class WALSMatrixFactorizationTestCached(WALSMatrixFactorizationTest):
@property
def use_cache(self):
return True
class WALSMatrixFactorizationTestPaddedInput(WALSMatrixFactorizationTest):
PADDED_INPUT_MATRIX = np.pad(
WALSMatrixFactorizationTest.INPUT_MATRIX,
[(1, 0), (1, 0)], mode='constant')
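# The padding adds an all-zero leading row and column. Since the inherited
# tests pass remove_empty_rows_columns=True to input_fn, the expected factors
# from the unpadded base class should still apply.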
@property
def input_matrix(self):
return self.PADDED_INPUT_MATRIX
class WALSMatrixFactorizationUnsupportedTest(test.TestCase):
def setUp(self):
pass
def testDistributedWALSUnsupported(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4']
},
'task': {
'type': run_config_lib.TaskType.WORKER,
'index': 1
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
self.assertEqual(config.num_worker_replicas, 2)
with self.assertRaises(ValueError):
self._model = wals_lib.WALSMatrixFactorization(1, 1, 1, config=config)
class SweepHookTest(test.TestCase):
def test_sweeps(self):
is_row_sweep_var = variables.VariableV1(True)
is_sweep_done_var = variables.VariableV1(False)
init_done = variables.VariableV1(False)
row_prep_done = variables.VariableV1(False)
col_prep_done = variables.VariableV1(False)
row_train_done = variables.VariableV1(False)
col_train_done = variables.VariableV1(False)
init_op = state_ops.assign(init_done, True)
row_prep_op = state_ops.assign(row_prep_done, True)
col_prep_op = state_ops.assign(col_prep_done, True)
row_train_op = state_ops.assign(row_train_done, True)
col_train_op = state_ops.assign(col_train_done, True)
train_op = control_flow_ops.no_op()
switch_op = control_flow_ops.group(
state_ops.assign(is_sweep_done_var, False),
state_ops.assign(is_row_sweep_var,
math_ops.logical_not(is_row_sweep_var)))
mark_sweep_done = state_ops.assign(is_sweep_done_var, True)
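# switch_op models the end-of-sweep transition: it clears is_sweep_done_var
# and flips is_row_sweep_var, while mark_sweep_done simulates a sweep
# finishing so that the hook performs the switch on the next run call.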
with self.cached_session() as sess:
sweep_hook = wals_lib._SweepHook(
is_row_sweep_var,
is_sweep_done_var,
init_op,
[row_prep_op],
[col_prep_op],
row_train_op,
col_train_op,
switch_op)
mon_sess = monitored_session._HookedSession(sess, [sweep_hook])
sess.run([variables.global_variables_initializer()])
# Row sweep.
mon_sess.run(train_op)
self.assertTrue(sess.run(init_done),
msg='init op not run by the SweepHook')
self.assertTrue(sess.run(row_prep_done),
msg='row_prep_op not run by the SweepHook')
self.assertTrue(sess.run(row_train_done),
msg='row_train_op not run by the SweepHook')
self.assertTrue(
sess.run(is_row_sweep_var),
msg='Row sweep is not complete but is_row_sweep_var is False.')
# Col sweep.
mon_sess.run(mark_sweep_done)
mon_sess.run(train_op)
self.assertTrue(sess.run(col_prep_done),
msg='col_prep_op not run by the SweepHook')
self.assertTrue(sess.run(col_train_done),
msg='col_train_op not run by the SweepHook')
self.assertFalse(
sess.run(is_row_sweep_var),
msg='Col sweep is not complete but is_row_sweep_var is True.')
# Row sweep.
mon_sess.run(mark_sweep_done)
mon_sess.run(train_op)
self.assertTrue(
sess.run(is_row_sweep_var),
msg='Col sweep is complete but is_row_sweep_var is False.')
class StopAtSweepHookTest(test.TestCase):
def test_stop(self):
hook = wals_lib._StopAtSweepHook(last_sweep=10)
completed_sweeps = variables.VariableV1(
8, name=wals_lib.WALSMatrixFactorization.COMPLETED_SWEEPS)
train_op = state_ops.assign_add(completed_sweeps, 1)
hook.begin()
with self.cached_session() as sess:
sess.run([variables.global_variables_initializer()])
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(train_op)
# completed_sweeps is 9 after running train_op.
self.assertFalse(mon_sess.should_stop())
mon_sess.run(train_op)
# completed_sweeps is 10 after running train_op.
self.assertTrue(mon_sess.should_stop())
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/factorization/python/ops/wals_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops.gmm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.factorization.python.ops import gmm as gmm_lib
from tensorflow.contrib.learn.python.learn.estimators import kmeans
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.training import queue_runner
class GMMTest(test.TestCase):
def input_fn(self, batch_size=None, points=None):
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return x, None
indices = random_ops.random_uniform(constant_op.constant([batch_size]),
minval=0, maxval=num_points-1,
dtype=dtypes.int32,
seed=10)
return array_ops.gather(x, indices), None
return _fn
def setUp(self):
np.random.seed(3)
random_seed_lib.set_random_seed(2)
self.num_centers = 2
self.num_dims = 2
self.num_points = 4000
self.batch_size = self.num_points
self.true_centers = self.make_random_centers(self.num_centers,
self.num_dims)
self.points, self.assignments = self.make_random_points(
self.true_centers, self.num_points)
# Use initial means from kmeans (just like scikit-learn does).
clusterer = kmeans.KMeansClustering(num_clusters=self.num_centers)
clusterer.fit(input_fn=lambda: (constant_op.constant(self.points), None),
steps=30)
self.initial_means = clusterer.clusters()
@staticmethod
def make_random_centers(num_centers, num_dims):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * 500)
@staticmethod
def make_random_points(centers, num_points):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * 20)
points = centers[assignments] + offsets
return (points, assignments)
def test_weights(self):
"""Tests the shape of the weights."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
weights = gmm.weights()
self.assertAllEqual(list(weights.shape), [self.num_centers])
def test_clusters(self):
"""Tests the shape of the clusters."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
clusters = gmm.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters='random',
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=1)
score1 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
gmm.fit(input_fn=self.input_fn(), steps=10)
score2 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
self.assertLess(score1, score2)
def test_infer(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=60)
clusters = gmm.clusters()
# Make a small test set
num_points = 40
points, true_assignments = self.make_random_points(clusters, num_points)
assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=num_points)):
assignments.append(item)
assignments = np.ravel(assignments)
self.assertAllEqual(true_assignments, assignments)
def _compare_with_sklearn(self, cov_type):
# sklearn version.
iterations = 40
np.random.seed(5)
sklearn_assignments = np.asarray([0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
sklearn_means = np.asarray([[144.83417719, 254.20130341],
[274.38754816, 353.16074346]])
sklearn_covs = np.asarray([[[395.0081194, -4.50389512],
[-4.50389512, 408.27543989]],
[[385.17484203, -31.27834935],
[-31.27834935, 391.74249925]]])
# skflow version.
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
covariance_type=cov_type,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=iterations)
points = self.points[:10, :]
skflow_assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=10)):
skflow_assignments.append(item)
self.assertAllClose(sklearn_assignments,
np.ravel(skflow_assignments).astype(int))
self.assertAllClose(sklearn_means, gmm.clusters())
if cov_type == 'full':
self.assertAllClose(sklearn_covs, gmm.covariances(), rtol=0.01)
else:
for d in [0, 1]:
self.assertAllClose(
np.diag(sklearn_covs[d]), gmm.covariances()[d, :], rtol=0.01)
def test_compare_full(self):
self._compare_with_sklearn('full')
def test_compare_diag(self):
self._compare_with_sklearn('diag')
def test_random_input_large(self):
# sklearn version.
iterations = 5 # that should be enough to know whether this diverges
np.random.seed(5)
num_classes = 20
x = np.array([[np.random.random() for _ in range(100)]
for _ in range(num_classes)], dtype=np.float32)
# skflow version.
gmm = gmm_lib.GMM(num_classes,
covariance_type='full',
config=run_config.RunConfig(tf_random_seed=2))
def get_input_fn(x):
def input_fn():
return constant_op.constant(x.astype(np.float32)), None
return input_fn
gmm.fit(input_fn=get_input_fn(x), steps=iterations)
self.assertFalse(np.isnan(gmm.clusters()).any())
class GMMTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(capacity=10,
dtypes=dtypes.float32,
shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue,
[enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
# Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
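# A minimal sketch of that ordering outside an Estimator, assuming a plain
# tf.Session and the TF 1.x queue APIs (illustrative only, not used by this
# test):
#
#   coord = tf.train.Coordinator()
#   with tf.Session() as sess:
#     threads = tf.train.start_queue_runners(sess=sess, coord=coord)
#     # ... cluster initialization can now safely dequeue input batches ...
#     coord.request_stop()
#     coord.join(threads)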
def test_queues(self):
gmm = gmm_lib.GMM(2, covariance_type='diag')
gmm.fit(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/factorization/python/ops/gmm_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for gmm_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.factorization.python.ops import gmm_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class GmmOpsTest(test.TestCase):
def setUp(self):
self.num_examples = 1000
self.iterations = 40
self.seed = 4
random_seed_lib.set_random_seed(self.seed)
np.random.seed(self.seed * 2)
self.data, self.true_assignments = self.make_data(self.num_examples)
# Generate more complicated data.
self.centers = [[1, 1], [-1, 0.5], [2, 1]]
self.more_data, self.more_true_assignments = self.make_data_from_centers(
self.num_examples, self.centers)
@staticmethod
def make_data(num_vectors):
"""Generates 2-dimensional data centered on (2,2), (-1,-1).
Args:
num_vectors: number of training examples.
Returns:
A tuple containing the data as a numpy array and the cluster ids.
"""
vectors = []
classes = []
for _ in xrange(num_vectors):
if np.random.random() > 0.5:
vectors.append([np.random.normal(2.0, 0.6), np.random.normal(2.0, 0.9)])
classes.append(0)
else:
vectors.append(
[np.random.normal(-1.0, 0.4), np.random.normal(-1.0, 0.5)])
classes.append(1)
return np.asarray(vectors), classes
@staticmethod
def make_data_from_centers(num_vectors, centers):
"""Generates 2-dimensional data with random centers.
Args:
num_vectors: number of training examples.
centers: a list of random 2-dimensional centers.
Returns:
A tuple containing the data as a numpy array and the cluster ids.
"""
vectors = []
classes = []
for _ in xrange(num_vectors):
current_class = np.random.random_integers(0, len(centers) - 1)
vectors.append([
np.random.normal(centers[current_class][0],
np.random.random_sample()),
np.random.normal(centers[current_class][1], np.random.random_sample())
])
classes.append(current_class)
return np.asarray(vectors), classes
def test_covariance(self):
start_time = time.time()
data = self.data.T
np_cov = np.cov(data)
logging.info('Numpy took %f', time.time() - start_time)
start_time = time.time()
with self.cached_session() as sess:
op = gmm_ops._covariance(
constant_op.constant(
data.T, dtype=dtypes.float32), False)
op_diag = gmm_ops._covariance(
constant_op.constant(
data.T, dtype=dtypes.float32), True)
variables.global_variables_initializer().run()
tf_cov = sess.run(op)
np.testing.assert_array_almost_equal(np_cov, tf_cov)
logging.info('Tensorflow took %f', time.time() - start_time)
tf_cov = sess.run(op_diag)
np.testing.assert_array_almost_equal(
np.diag(np_cov), np.ravel(tf_cov), decimal=5)
def test_simple_cluster(self):
"""Tests that the clusters are correct."""
num_classes = 2
graph = ops.Graph()
with graph.as_default() as g:
g.seed = 5
with self.cached_session() as sess:
data = constant_op.constant(self.data, dtype=dtypes.float32)
loss_op, scores, assignments, training_op, init_op, _ = gmm_ops.gmm(
data, 'random', num_classes, random_seed=self.seed)
variables.global_variables_initializer().run()
sess.run(init_op)
first_loss = sess.run(loss_op)
for _ in xrange(self.iterations):
sess.run(training_op)
assignments = sess.run(assignments)
end_loss = sess.run(loss_op)
scores = sess.run(scores)
self.assertEqual((self.num_examples, 1), scores.shape)
accuracy = np.mean(
np.asarray(self.true_assignments) == np.squeeze(assignments))
logging.info('Accuracy: %f', accuracy)
logging.info('First loss: %f, end loss: %f', first_loss, end_loss)
self.assertGreater(end_loss, first_loss)
self.assertGreater(accuracy, 0.98)
def testParams(self):
"""Tests that the params work as intended."""
num_classes = 2
with self.cached_session() as sess:
# Experiment 1. Update weights only.
data = constant_op.constant(self.data, dtype=dtypes.float32)
gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes,
[[3.0, 3.0], [0.0, 0.0]], 'w')
training_ops = gmm_tool.training_ops()
variables.global_variables_initializer().run()
sess.run(gmm_tool.init_ops())
for _ in xrange(self.iterations):
sess.run(training_ops)
# Only the probability to each class is updated.
alphas = sess.run(gmm_tool.alphas())
self.assertGreater(alphas[1], 0.6)
means = sess.run(gmm_tool.clusters())
np.testing.assert_almost_equal(
np.expand_dims([[3.0, 3.0], [0.0, 0.0]], 1), means)
covs = sess.run(gmm_tool.covariances())
np.testing.assert_almost_equal(covs[0], covs[1])
# Experiment 2. Update means and covariances.
gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes,
[[3.0, 3.0], [0.0, 0.0]], 'mc')
training_ops = gmm_tool.training_ops()
variables.global_variables_initializer().run()
sess.run(gmm_tool.init_ops())
for _ in xrange(self.iterations):
sess.run(training_ops)
alphas = sess.run(gmm_tool.alphas())
self.assertAlmostEqual(alphas[0], alphas[1])
means = sess.run(gmm_tool.clusters())
np.testing.assert_almost_equal(
np.expand_dims([[2.0, 2.0], [-1.0, -1.0]], 1), means, decimal=1)
covs = sess.run(gmm_tool.covariances())
np.testing.assert_almost_equal(
[[0.371111, -0.0050774], [-0.0050774, 0.8651744]], covs[0], decimal=4)
np.testing.assert_almost_equal(
[[0.146976, 0.0259463], [0.0259463, 0.2543971]], covs[1], decimal=4)
# Experiment 3. Update covariances only.
gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes,
[[-1.0, -1.0], [1.0, 1.0]], 'c')
training_ops = gmm_tool.training_ops()
variables.global_variables_initializer().run()
sess.run(gmm_tool.init_ops())
for _ in xrange(self.iterations):
sess.run(training_ops)
alphas = sess.run(gmm_tool.alphas())
self.assertAlmostEqual(alphas[0], alphas[1])
means = sess.run(gmm_tool.clusters())
np.testing.assert_almost_equal(
np.expand_dims([[-1.0, -1.0], [1.0, 1.0]], 1), means)
covs = sess.run(gmm_tool.covariances())
np.testing.assert_almost_equal(
[[0.1299582, 0.0435872], [0.0435872, 0.2558578]], covs[0], decimal=5)
np.testing.assert_almost_equal(
[[3.195385, 2.6989155], [2.6989155, 3.3881593]], covs[1], decimal=5)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/factorization/python/ops/gmm_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example mnist model with jointly computed k-means clustering.
This is a toy example of how clustering can be embedded into larger tensorflow
graphs. In this case, we learn a clustering on-the-fly and transform the input
into the 'distance to clusters' space. These are then fed into hidden layers to
learn the supervised objective.
To train this model on real mnist data, run this model as follows:
mnist --fake_data=False --max_steps=2000
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import math
import sys
import tempfile
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.examples.tutorials.mnist import mnist
FLAGS = None
# The MNIST dataset has 10 classes, representing the digits 0 through 9.
NUM_CLASSES = 10
# The MNIST images are always 28x28 pixels.
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
def placeholder_inputs():
"""Generate placeholder variables to represent the input tensors.
Returns:
images_placeholder: Images placeholder.
labels_placeholder: Labels placeholder.
"""
images_placeholder = tf.placeholder(tf.float32, shape=(None,
mnist.IMAGE_PIXELS))
labels_placeholder = tf.placeholder(tf.int32, shape=(None))
return images_placeholder, labels_placeholder
def fill_feed_dict(data_set, images_pl, labels_pl, batch_size):
"""Fills the feed_dict for training the given step.
Args:
data_set: The set of images and labels, from input_data.read_data_sets()
images_pl: The images placeholder, from placeholder_inputs().
labels_pl: The labels placeholder, from placeholder_inputs().
batch_size: Batch size of data to feed.
Returns:
feed_dict: The feed dictionary mapping from placeholders to values.
"""
# Create the feed_dict for the placeholders filled with the next
# `batch_size` examples.
images_feed, labels_feed = data_set.next_batch(batch_size, FLAGS.fake_data)
feed_dict = {
images_pl: images_feed,
labels_pl: labels_feed,
}
return feed_dict
def do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_set):
"""Runs one evaluation against the full epoch of data.
Args:
sess: The session in which the model has been trained.
eval_correct: The Tensor that returns the number of correct predictions.
images_placeholder: The images placeholder.
labels_placeholder: The labels placeholder.
data_set: The set of images and labels to evaluate, from
input_data.read_data_sets().
Returns:
Precision value on the dataset.
"""
# And run one epoch of eval.
true_count = 0 # Counts the number of correct predictions.
batch_size = min(FLAGS.batch_size, data_set.num_examples)
steps_per_epoch = data_set.num_examples // batch_size
num_examples = steps_per_epoch * batch_size
for _ in xrange(steps_per_epoch):
feed_dict = fill_feed_dict(data_set,
images_placeholder,
labels_placeholder,
batch_size)
true_count += sess.run(eval_correct, feed_dict=feed_dict)
precision = true_count / num_examples
print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' %
(num_examples, true_count, precision))
return precision
def inference(inp, num_clusters, hidden1_units, hidden2_units):
"""Build the MNIST model up to where it may be used for inference.
Args:
inp: input data
num_clusters: number of clusters of input features to train.
hidden1_units: Size of the first hidden layer.
hidden2_units: Size of the second hidden layer.
Returns:
logits: Output tensor with the computed logits.
clustering_loss: Clustering loss.
kmeans_init: An op to initialize the clusters.
kmeans_training_op: An op to train the clustering.
"""
# Clustering
kmeans = tf.contrib.factorization.KMeans(
inp,
num_clusters,
distance_metric=tf.contrib.factorization.COSINE_DISTANCE,
# TODO(agarwal): kmeans++ is currently causing crash in dbg mode.
# Enable this after fixing.
# initial_clusters=tf.contrib.factorization.KMEANS_PLUS_PLUS_INIT,
use_mini_batch=True)
(all_scores, _, clustering_scores, _, kmeans_init,
kmeans_training_op) = kmeans.training_graph()
# Some heuristics to approximately whiten this output.
all_scores = (all_scores[0] - 0.5) * 5
# Here we avoid passing the gradients from the supervised objective back to
# the clusters by creating a stop_gradient node.
all_scores = tf.stop_gradient(all_scores)
clustering_loss = tf.reduce_sum(clustering_scores[0])
# Hidden 1
with tf.name_scope('hidden1'):
weights = tf.Variable(
tf.truncated_normal([num_clusters, hidden1_units],
stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
name='weights')
biases = tf.Variable(tf.zeros([hidden1_units]),
name='biases')
hidden1 = tf.nn.relu(tf.matmul(all_scores, weights) + biases)
# Hidden 2
with tf.name_scope('hidden2'):
weights = tf.Variable(
tf.truncated_normal([hidden1_units, hidden2_units],
stddev=1.0 / math.sqrt(float(hidden1_units))),
name='weights')
biases = tf.Variable(tf.zeros([hidden2_units]),
name='biases')
hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
# Linear
with tf.name_scope('softmax_linear'):
weights = tf.Variable(
tf.truncated_normal([hidden2_units, NUM_CLASSES],
stddev=1.0 / math.sqrt(float(hidden2_units))),
name='weights')
biases = tf.Variable(tf.zeros([NUM_CLASSES]),
name='biases')
logits = tf.matmul(hidden2, weights) + biases
return logits, clustering_loss, kmeans_init, kmeans_training_op
def run_training():
"""Train MNIST for a number of steps."""
# Get the sets of images and labels for training, validation, and
# test on MNIST.
train_dir = tempfile.mkdtemp()
data_sets = input_data.read_data_sets(train_dir, FLAGS.fake_data)
# Tell TensorFlow that the model will be built into the default Graph.
with tf.Graph().as_default():
# Generate placeholders for the images and labels.
images_placeholder, labels_placeholder = placeholder_inputs()
# Build a Graph that computes predictions from the inference model.
logits, clustering_loss, kmeans_init, kmeans_training_op = inference(
images_placeholder,
FLAGS.num_clusters,
FLAGS.hidden1,
FLAGS.hidden2)
# Add to the Graph the Ops for loss calculation.
loss = mnist.loss(logits, labels_placeholder)
# Add to the Graph the Ops that calculate and apply gradients.
train_op = tf.group(mnist.training(loss, FLAGS.learning_rate),
kmeans_training_op)
# Add the Op to compare the logits to the labels during evaluation.
eval_correct = mnist.evaluation(logits, labels_placeholder)
# Add the variable initializer Op.
init = tf.global_variables_initializer()
# Create a session for running Ops on the Graph.
sess = tf.Session()
# Run the Op to initialize the variables.
sess.run(init)
feed_dict = fill_feed_dict(data_sets.train,
images_placeholder,
labels_placeholder,
batch_size=max(FLAGS.batch_size, 5000))
# Run the Op to initialize the clusters.
sess.run(kmeans_init, feed_dict=feed_dict)
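# The larger batch above is presumably used so that the mini-batch k-means
# initialization sees enough points to choose distinct initial centers.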
# Start the training loop.
max_test_prec = 0
for step in xrange(FLAGS.max_steps):
start_time = time.time()
# Fill a feed dictionary with the actual set of images and labels
# for this particular training step.
feed_dict = fill_feed_dict(data_sets.train,
images_placeholder,
labels_placeholder,
FLAGS.batch_size)
# Run one step of the model.
_, loss_value, clustering_loss_value = sess.run([train_op,
loss,
clustering_loss],
feed_dict=feed_dict)
duration = time.time() - start_time
if step % 100 == 0:
# Print status to stdout.
print('Step %d: loss = %.2f, clustering_loss = %.2f (%.3f sec)' % (
step, loss_value, clustering_loss_value, duration))
# Save a checkpoint and evaluate the model periodically.
if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
# Evaluate against the training set.
print('Training Data Eval:')
do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_sets.train)
# Evaluate against the validation set.
print('Validation Data Eval:')
do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_sets.validation)
# Evaluate against the test set.
print('Test Data Eval:')
test_prec = do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_sets.test)
max_test_prec = max(max_test_prec, test_prec)
return max_test_prec
class MnistTest(tf.test.TestCase):
def test_train(self):
self.assertTrue(run_training() > 0.6)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Basic model parameters as external flags.'
)
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--learning_rate',
type=float,
default=0.3,
help='Initial learning rate.'
)
parser.add_argument(
'--max_steps',
type=int,
default=200,
help='Number of steps to run trainer.'
)
parser.add_argument(
'--num_clusters',
type=int,
default=384,
help='Number of input feature clusters'
)
parser.add_argument(
'--hidden1',
type=int,
default=256,
help='Number of units in hidden layer 1.'
)
parser.add_argument(
'--hidden2',
type=int,
default=32,
help='Number of units in hidden layer 2.'
)
parser.add_argument(
'--batch_size',
type=int,
default=100,
help='Batch size. Must divide evenly into the dataset sizes.'
)
parser.add_argument(
'--train_dir',
type=str,
default='data',
help='Directory to put the training data.'
)
parser.add_argument(
'--fake_data',
type='bool',
default=True,
help='Use fake input data.'
)
FLAGS, unparsed = parser.parse_known_args()
sys.argv = [sys.argv[0]] + unparsed
tf.test.main()
|
tensorflow-master
|
tensorflow/contrib/factorization/examples/mnist.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training and input utilities.
See
[Contrib Training](https://tensorflow.org/api_guides/python/contrib.training)
guide.
@@batch_sequences_with_states
@@NextQueuedSequenceBatch
@@SequenceQueueingStateSaver
@@rejection_sample
@@resample_at_rate
@@stratified_sample
@@weighted_resample
@@bucket
@@bucket_by_sequence_length
@@RandomStrategy
@@GreedyLoadBalancingStrategy
@@byte_size_load_fn
@@FailureTolerator
@@rejection_sample
@@stratified_sample
@@resample_at_rate
@@weighted_resample
@@HParams
@@HParamDef
@@parse_values
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.training.python.training.bucket_ops import *
from tensorflow.contrib.training.python.training.device_setter import *
from tensorflow.contrib.training.python.training.evaluation import checkpoints_iterator
from tensorflow.contrib.training.python.training.evaluation import evaluate_once
from tensorflow.contrib.training.python.training.evaluation import evaluate_repeatedly
from tensorflow.contrib.training.python.training.evaluation import get_or_create_eval_step
from tensorflow.contrib.training.python.training.evaluation import StopAfterNEvalsHook
from tensorflow.contrib.training.python.training.evaluation import SummaryAtEndHook
from tensorflow.contrib.training.python.training.evaluation import wait_for_new_checkpoint
from tensorflow.contrib.training.python.training.feeding_queue_runner import FeedingQueueRunner
from tensorflow.contrib.training.python.training.hparam import *
from tensorflow.contrib.training.python.training.resample import *
from tensorflow.contrib.training.python.training.sampling_ops import *
from tensorflow.contrib.training.python.training.sequence_queueing_state_saver import *
from tensorflow.contrib.training.python.training.training import add_gradients_summaries
from tensorflow.contrib.training.python.training.training import clip_gradient_norms
from tensorflow.contrib.training.python.training.training import clip_gradient_norms_fn
from tensorflow.contrib.training.python.training.training import create_train_op
from tensorflow.contrib.training.python.training.training import multiply_gradients
from tensorflow.contrib.training.python.training.training import train
from tensorflow.contrib.training.python.training.tuner import Tuner
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
# Allow explicitly imported symbols. Symbols imported with * must also be
# whitelisted here or in the module docstring above.
_allowed_symbols = [
'checkpoints_iterator', 'evaluate_once', 'evaluate_repeatedly',
'FeedingQueueRunner', 'get_or_create_eval_step', 'StopAfterNEvalsHook',
'SummaryAtEndHook', 'wait_for_new_checkpoint', 'add_gradients_summaries',
'clip_gradient_norms', 'clip_gradient_norms_fn', 'create_train_op',
'multiply_gradients', 'train']
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/training/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/training/python/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.training.device_setter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.training.python.training import device_setter as device_setter_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import device_setter
from tensorflow.python.training import server_lib
_CLUSTER_SPEC = server_lib.ClusterSpec({
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
})
MockOperation = collections.namedtuple("MockOperation", "name")
class RandomStrategyTest(test.TestCase):
def testBasic(self):
ps_strategy = device_setter_lib.RandomStrategy(2, seed=0)
with ops.device(
device_setter.replica_device_setter(
cluster=_CLUSTER_SPEC,
ps_strategy=ps_strategy)):
u = variables.Variable(array_ops.zeros([2, 2]))
v = variables.Variable(array_ops.zeros([2, 1]))
w = variables.Variable(array_ops.zeros([2, 2]))
x = variables.Variable(array_ops.zeros([1, 3]))
a = v + w
# Randomly distributed with seed 0.
self.assertDeviceEqual("/job:ps/task:1", u.device)
self.assertDeviceEqual("/job:ps/task:1", u.initializer.device)
self.assertDeviceEqual("/job:ps/task:0", v.device)
self.assertDeviceEqual("/job:ps/task:0", v.initializer.device)
self.assertDeviceEqual("/job:ps/task:1", w.device)
self.assertDeviceEqual("/job:ps/task:1", w.initializer.device)
self.assertDeviceEqual("/job:ps/task:1", x.device)
self.assertDeviceEqual("/job:ps/task:1", x.initializer.device)
self.assertDeviceEqual("/job:worker", a.device)
def testHandlesUnicode(self):
op = MockOperation(u"A unicode \u018e string \xf1")
ps_strategy = device_setter_lib.RandomStrategy(2, seed=0)
ps_task = ps_strategy(op)
self.assertEqual(ps_task, 1)
class GreedyLoadBalancingStrategyTest(test.TestCase):
def testUniformLoadEqualsRoundRobin(self):
def _load_fn(unused_op):
return 1
with ops.device(
device_setter.replica_device_setter(
cluster=_CLUSTER_SPEC,
ps_strategy=device_setter_lib.GreedyLoadBalancingStrategy(
2, _load_fn))):
u = variables.Variable(array_ops.zeros([2, 2]))
v = variables.Variable(array_ops.zeros([2, 1]))
w = variables.Variable(array_ops.zeros([2, 2]))
x = variables.Variable(array_ops.zeros([1, 3]))
a = v + w
self.assertDeviceEqual("/job:ps/task:0", u.device)
self.assertDeviceEqual("/job:ps/task:0", u.initializer.device)
self.assertDeviceEqual("/job:ps/task:1", v.device)
self.assertDeviceEqual("/job:ps/task:1", v.initializer.device)
self.assertDeviceEqual("/job:ps/task:0", w.device)
self.assertDeviceEqual("/job:ps/task:0", w.initializer.device)
self.assertDeviceEqual("/job:ps/task:1", x.device)
self.assertDeviceEqual("/job:ps/task:1", x.initializer.device)
self.assertDeviceEqual("/job:worker", a.device)
def testByteSizeLoadFn(self):
with ops.device(
device_setter.replica_device_setter(
cluster=_CLUSTER_SPEC,
ps_strategy=device_setter_lib.GreedyLoadBalancingStrategy(
2, device_setter_lib.byte_size_load_fn))):
u = variables.VariableV1(array_ops.zeros([2, 2]))
v = variables.VariableV1(array_ops.zeros([2, 1]))
w = variables.VariableV1(array_ops.zeros([2, 2]))
x = variables.VariableV1(array_ops.zeros([1, 3]))
a = v + w
self.assertDeviceEqual("/job:ps/task:0", u.device)
self.assertDeviceEqual("/job:ps/task:0", u.initializer.device)
self.assertDeviceEqual("/job:ps/task:1", v.device)
self.assertDeviceEqual("/job:ps/task:1", v.initializer.device)
self.assertDeviceEqual("/job:ps/task:1", w.device)
self.assertDeviceEqual("/job:ps/task:1", w.initializer.device)
self.assertDeviceEqual("/job:ps/task:0", x.device)
self.assertDeviceEqual("/job:ps/task:0", x.initializer.device)
self.assertDeviceEqual("/job:worker", a.device)
def testByteSizeLoadFnWithScalar(self):
with ops.device(
device_setter.replica_device_setter(
cluster=_CLUSTER_SPEC,
ps_strategy=device_setter_lib.GreedyLoadBalancingStrategy(
2, device_setter_lib.byte_size_load_fn))):
# Note: we must test the load function as part of the device function
# instead of passing u.op to the function directly, because the only
# time that the output Tensor has unknown shape for scalars is during
# Variable construction.
u = variables.Variable(0)
self.assertDeviceEqual("/job:ps/task:0", u.device)
self.assertDeviceEqual("/job:ps/task:0", u.initializer.device)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/training/python/training/device_setter_test.py
|